query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (sequencelengths 30-30) | negative_scores (sequencelengths 30-30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
A limited number of items is in the feed. | def test_limit_items(self):
    AnnouncementFactory(
        title="Not going to be there",
        expires_at=timezone.now() - datetime.timedelta(days=1),
    )
    for i in range(5):
        AnnouncementFactory()
    response = self.get("announcements:feed")
    assert "Not going to be there" not in response.content.decode() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def limit(requestContext, seriesList, n):\n return seriesList[0:n]",
"def limit(self, count):\n self._limit = count\n return self",
"def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)",
"def test_limit(self) -> None:\n\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=5\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 5)\n self.assertEqual(channel.json_body[\"next_token\"], 5)\n self._check_fields(channel.json_body[\"event_reports\"])",
"def _check_for_more_pages(self):\n self._has_more = len(self._items) > self.per_page\n\n self._items = self._items[0:self.per_page]",
"def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2",
"def get_rss(limit):\n rss_data = feedparser.parse(URL)\n if limit == 1:\n title = rss_data.entries[0].title\n link = rss_data.entries[0].link\n rss_print(title, link)\n else:\n for i in range(0, limit):\n title = rss_data.entries[i].title\n link = rss_data.entries[i].link\n\n print(Back.CYAN + str(i + 1) + \"\\t\")\n rss_print(title, link)",
"def _check_items_limit(self):\n if self.items_limit and self.items_limit == self.get_metadata('items_count'):\n raise ItemsLimitReached('Finishing job after items_limit reached:'\n ' {} items written.'.format(self.get_metadata('items_count')))",
"def limit(self, limit):\n raise NotImplementedError(\"This should have been implemented.\")",
"def get_select_all_max_items(self):\n return 1500",
"def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])",
"def testRetrievingWithoutSpecifyingLimit(self):\n cached_items = cached_list_logic.getCachedItems('test_list', start=2)\n self.assertListEqual([self.item3, self.item4, self.item5], cached_items)",
"def keep_n(self, n=100):\n before = self.item_count()\n\n item_count = self.item_count()\n if item_count > n: self.filter(self.sample(n))\n\n after = self.item_count()\n with msg(f'Keeping (at most) {n} items: {after} of {before}', done=False, enabled=self.output):pass",
"def get_number_of_extra_items_in_page_with_initially_selected(self):\n return 10",
"def test_maximum_items(self):\n total = 4711\n self.es.set_maximum_items(total)\n self.assertEqual(self.es._total, total)",
"def limit(self, limit):\n self._limit = limit",
"def test_stream(self):\n with skipping(NotImplementedError):\n self.es = EventStreamsTestClass(streams='recentchange')\n limit = 50\n self.es.set_maximum_items(limit)\n self.assertLength(list(self.es), limit)",
"def test_messenger_limit():\n all_messages_resp = requests.get(BASE_URL)\n all_messages = all_messages_resp.json()\n total_message_count = len(all_messages)\n message_limit = total_message_count // 2\n\n query_params = {\"limit\": message_limit}\n limit_resp = requests.get(BASE_URL, params=query_params)\n limited_messages = limit_resp.json()\n assert limit_resp.status_code == 200\n assert len(limited_messages) == message_limit",
"def limit(self, max_size):\n return self.__class__(itertools.islice(self, max_size))",
"def get_custom_feeds(request):\n start = int(request.paginate_number) * 10\n end = start + 10\n feeds = Feed.objects.all().order_by('-id')[start: end]\n return get_feed_list(feeds)",
"def getMaxItems(self, obj=None):\n if obj is not None:\n max_items = self.getSyndicationInfo(obj).max_items\n else:\n max_items = self.max_items\n return max_items",
"def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element",
"def limit(self, amount):\n self._limit = amount\n return self",
"def limit(self, key):\n if self._debug:\n return False\n\n counter = self.database.List(self.name + ':' + key)\n n = len(counter)\n is_limited = False\n if n < self._limit:\n counter.prepend(str(time.time()))\n else:\n oldest = counter[-1]\n if (oldest is not None) and (time.time() - float(oldest) < self._per):\n is_limited = True\n else:\n counter.prepend(str(time.time()))\n del counter[:self._limit]\n counter.pexpire(int(self._per * 2000))\n return is_limited",
"def how_many_comments(comment_queryset, count=10):\n\n if count == \"all\":\n return comment_queryset\n return comment_queryset[:int(count)]",
"def get_num_items(self):\r\n return self.num_items",
"def limit(self, limit):\n self._limit = limit\n return self",
"def createFeedItems(self):\r\n for item in self.item_data:\r\n self.initCreateFeedItem(item)\r\n self.createItem(item)",
"def limit(self, limit):\n\n # Return between 1 and 250 results, defaults to 10\n return max(1, min(250, int(limit) if limit else 10))",
"def get_max_item(self):\n return self._get_page('maxitem').json()"
] | [
"0.66845256",
"0.6452016",
"0.64328825",
"0.63778573",
"0.63408464",
"0.6280421",
"0.6278055",
"0.62466717",
"0.62423396",
"0.6150281",
"0.6145422",
"0.61124396",
"0.6075904",
"0.60351187",
"0.6013344",
"0.5996759",
"0.59329146",
"0.5901975",
"0.5897077",
"0.5894792",
"0.5893717",
"0.5852139",
"0.58312654",
"0.58253616",
"0.58198047",
"0.5812394",
"0.58120203",
"0.5805706",
"0.57665825",
"0.5753272"
] | 0.7206275 | 0 |
Check the mandatory services. | def check_services(self):
    for service in self.services:
        try:
            self.cloud.search_services(service)[0]
        except Exception:  # pylint: disable=broad-except
            self.is_skipped = True
            break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_services_ready(self, services):\n for ser in services:\n services[ser] = False\n response = self.bus.wait_for_response(Message(\n 'mycroft.{}.is_ready'.format(ser)))\n if response and response.data['status']:\n services[ser] = True\n return all([services[ser] for ser in services])",
"def test_100_services(self):\n u.log.debug('Checking system services...')\n swift_storage_services = ['swift-account',\n 'swift-account-auditor',\n 'swift-account-reaper',\n 'swift-account-replicator',\n 'swift-container',\n 'swift-container-auditor',\n 'swift-container-replicator',\n 'swift-container-updater',\n 'swift-object',\n 'swift-object-auditor',\n 'swift-object-replicator',\n 'swift-object-updater',\n 'swift-container-sync']\n service_names = {\n self.keystone_sentry: ['keystone'],\n self.glance_sentry: ['glance-registry',\n 'glance-api'],\n self.swift_proxy_sentry: ['swift-proxy'],\n self.swift_storage_sentry: swift_storage_services\n }\n\n if self._get_openstack_release() >= self.trusty_liberty:\n service_names[self.keystone_sentry] = ['apache2']\n\n ret = u.validate_services_by_name(service_names)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)",
"def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")",
"def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a mandatory parameter \"\n self.printUsage()\n sys.exit(1)",
"def test_100_services(self):\n u.log.debug('Checking system services on units...')\n\n services = {\n self.compute_sentry: ['nova-compute',\n 'neutron-plugin-openvswitch-agent'],\n self.rabbitmq_sentry: ['rabbitmq-server'],\n self.neutron_api_sentry: ['neutron-server'],\n }\n\n if self._get_openstack_release() >= self.trusty_mitaka:\n services[self.compute_sentry] = [\n 'nova-compute',\n 'neutron-openvswitch-agent'\n ]\n\n ret = u.validate_services_by_name(services)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)\n\n u.log.debug('OK')",
"def test_100_services(self):\n services = {\n self.keystone_sentry: ['keystone'],\n self.cinder_sentry: ['cinder-api',\n 'cinder-scheduler',\n 'cinder-volume']\n }\n if self.is_liberty_or_newer():\n services[self.keystone_sentry] = ['apache2']\n else:\n services[self.keystone_sentry] = ['keystone']\n ret = u.validate_services_by_name(services)\n if ret:\n amulet.raise_status(amulet.FAIL, msg=ret)",
"async def _start_nested_services(self):\n loaded = set()\n members = inspect.getmembers(self, predicate=inspect.ismethod)\n ordering_required = [name for name, method in members\n if hasattr(method, \"requirements_definition\")]\n self.log.debug(\"Requirements will be gathered from %s\",\n ', '.join(ordering_required))\n while ordering_required:\n ordered_count = 0\n for name in ordering_required[:]:\n self.log.debug(\"Check %s\", name)\n method = getattr(self, name)\n requirements = getattr(method, \"service_requirements\")\n if len(requirements) > 0 and not loaded.issuperset(requirements):\n self.log.debug(\"Not enought requirements. Loaded: %s, Required: %s\",\n loaded, requirements)\n continue\n self.log.debug(\"Getting requirements from %s\", name)\n try:\n services = await method()\n except Exception:\n self.log.exception(\"Exception while receiving %s requirements\", name)\n raise\n self.log.debug(\"Requirements from %s: %s\", method, services)\n if not (services is None or isinstance(services, list)):\n raise TypeError(\"Requirements method must return list or None. \"\n \"It returns %s (%s type) instead.\",\n services, type(services))\n if services:\n for service in services:\n self.nested_service_pre_start(service)\n self._services.add(service)\n ordering_required.remove(name)\n ordered_count += 1\n loaded.add(name)\n self.log.debug(\"Nested service %s was loaded\", name)\n if ordered_count == 0:\n raise RuntimeError(\n \"Can't resolve services dependencies \"\n \"from %s\" % ', '.join(ordering_required)\n )\n\n await self._services.start_all()",
"def _check_required_if_provider(self):\n return",
"def _checkServices(self, expectedServices):\n it = iter(self._getServices())\n for (type_uri, service_uri) in expectedServices:\n for element in it:\n if type_uri in xrds.getTypeURIs(element):\n self.assertEqual(xrds.getURI(element), service_uri)\n break\n else:\n self.fail('Did not find %r service' % (type_uri,))",
"async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()",
"def _check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")",
"def check_requirements():\n\n # Which programs are reqired?\n required_programs = ['virtualbox', 'vagrant']\n\n # Make sure the required programs are installed.\n for program in required_programs:\n\n # What's the path to the executable?\n try:\n subprocess.check_output(['which', program])\n except subprocess.CalledProcessError:\n message = \"Please install \" + program + \" before proceeding.\"\n Utilities.log(message)\n exit(1)",
"def _check_all_systems_ready(self):\n raise NotImplementedError()",
"def the_service_should_be_enabled_with_no_errors(driver):\n assert wait_on_element_disappear(driver, 30, xpaths.progress.spinner)\n assert wait_for_attribute_value(driver, 20, xpaths.services.ssh_Service_Toggle, 'class', 'mat-checked')",
"def check_required_parameters(required_params_dict=dict()):\r\n print threading.currentThread().getName(), 'Starting'\r\n is_valid = True\r\n required_params_not_set = pythontools.validate_required_parameters(required_params_dict)\r\n if len(required_params_not_set) > 0:\r\n is_valid = False\r\n msg = \"Validate all required input parameters are set failed.\"\r\n for param in required_params_not_set:\r\n steplog.error(\"Required parameter %s is not set.\" % param)\r\n else:\r\n msg = \"Validate all required input parameters are set succeeded.\"\r\n return is_valid, msg",
"def check_for_setup_error(self):\r\n self.helper._check_conf_file()\r\n self.helper._check_service()",
"def _check_all_systems_ready(self):\n \n self._check_all_sensors_ready()\n #self._check_joint_states_ready()\n self._check_cmd_vel_pub()\n \n return True",
"def test_services(self):\n self.assertTrue(setup_component(self.hass, remote.DOMAIN,\n TEST_PLATFORM))",
"def check(self):\n # Determine which services to test\n # TODO: use a smarter algorithm to detect which services to check\n max_lag = max(service.lag for service in self.services)\n now = datetime.utcnow()\n services = [ service\n for service in self.services\n if service.next_update_in(now) <= max_lag\n ]\n if not services:\n return 0, []\n\n period = max(service.period for service in services)\n\n # Test them\n service_states = self._check_services(services)\n\n # Report\n return int(period), service_states",
"def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')",
"def test_expected_services_exist(self, expected_service):\n descriptor = ItemFactory(category=\"pure\", parent=self.course)\n runtime = _preview_module_system(\n self.request,\n descriptor,\n self.field_data,\n )\n service = runtime.service(descriptor, expected_service)\n self.assertIsNotNone(service)",
"def check_services(services):\n dir_list = os.listdir(BASE_DIR)\n\n for service in services:\n # Check to see if they are in the root directory\n if service not in dir_list or not os.path.isdir(os.path.join(BASE_DIR, service)):\n log.error('Could not find service [{}] folder in the root directory'.format(service))\n sys.exit(1)\n\n # Check to see if there's a docker-compose file in the directory\n directory = os.path.join(BASE_DIR, service)\n if 'docker-compose.yml' not in os.listdir(directory):\n log.error('Could not find docker-compose.yml file in [{}] service directory'.format(service))\n sys.exit(1)",
"def check_requirements(self): # pylint: disable=no-self-use\n self.is_skipped = False",
"def check_requirement(self):\n raise NotImplementedError",
"def validate_services(self, commands):\n for k, v in commands.iteritems():\n for cmd in v:\n output, code = k.run(cmd)\n if code != 0:\n return \"command `{}` returned {}\".format(cmd, str(code))\n return None",
"def check_dependencies(cls):\n\n missing = []\n for name in cls.DEPENDENCIES:\n try:\n import_module(name)\n except ModuleNotFoundError:\n missing.append(name)\n\n if any(missing):\n msg = ('The sup3r stitching module depends on the following '\n 'special dependencies that were not found in the active '\n 'environment: {}'.format(missing))\n logger.error(msg)\n raise ModuleNotFoundError(msg)",
"def _check_services(self, services):\n now = datetime.utcnow()\n\n # Worker\n service_states = []\n def task(service):\n # Get state, measure lag\n start = datetime.utcnow()\n state = service.get_state()\n finish = datetime.utcnow()\n\n # Update lag\n service.lag = (finish - start).total_seconds()\n\n # Add state\n service_states.append(state)\n logger.debug(u'Checked service {} (lag={}, real_period={}): last checked {} ago, state={}: {}'.format(\n service.name,\n service.lag,\n service.real_period,\n now - service.last_tested if service.last_tested else '(never)',\n state['state'], state['info']\n ))\n\n # Update timestamp\n service.last_tested = now\n\n # Run\n threads = [threading.Thread(target=task, args=(service,)) for service in services]\n for t in threads: t.start()\n for t in threads: t.join()\n # TODO: declare max waiting time. If any process doesnt manage to finish in time -- report it as a separate request\n\n return service_states",
"def check_services_status(system, **kwargs):\n logger = kwargs[\"logger\"]\n hosts = list(set([host.host_name for host in system.api.hosts.list()]))\n hosts_agents = dict()\n hosts_status = dict()\n services = kwargs['services']\n for host in hosts:\n # if a hostname contains localhost, we want to avoid trying to connect\n if 'localhost' in host:\n continue\n try:\n service_for_host = services[host]\n with ssh_client(host, username=\"root\", password=system.password) as ssh:\n service_status_dict = get_services_status_list(ssh)\n except KeyError:\n logger.info(\"Skipping host {} as it is not in yml.\".format(host))\n continue\n for service_name, expected_status in service_for_host.items():\n # if service_status_dict has service `service_name` get its status\n # compare it with expected_status\n try:\n logger.debug(\"service:{} status: {} expected_status: {}\"\n .format(service_name, service_status_dict[service_name], expected_status))\n service_status = (expected_status in service_status_dict[service_name])\n except KeyError:\n # This is because not all hosts may have all services installed\n logger.debug(\"Service {} not found on host {}\".format(service_name, host))\n continue\n try:\n hosts_agents[host].update({service_name: service_status})\n except KeyError:\n hosts_agents[host] = {service_name: service_status}\n hosts_status[host] = all(hosts_agents[host].values())\n overall_status = all(hosts_status.values())\n\n if overall_status: # all true, everything is running\n msg = (\"Ok: all services {} are in the desired state on all hosts\".format(services.keys()))\n logger.info(msg)\n print(msg)\n sys.exit(0)\n else:\n trouble_hosts = [host for host, status in hosts_status.iteritems() if not status]\n msg = (\"Critical: These hosts don't have all agents in the desired state: {}.\"\n \"Overall status is {} (where False means not in desired state)\"\n .format(trouble_hosts, hosts_agents))\n logger.error(msg)\n print(msg)\n sys.exit(2)",
"def check_dependencies():\n\n vars_valid = check_variables(\n AirflowVars.PROJECT_ID, AirflowVars.TERRAFORM_ORGANIZATION, AirflowVars.VM_DAGS_WATCH_LIST\n )\n conns_valid = check_connections(AirflowConns.TERRAFORM)\n\n if not vars_valid or not conns_valid:\n raise AirflowException(\"Required variables or connections are missing\")",
"def verify_services(self):\n services = [\"metric_collector\", \"log_collector\"]\n service_version_9 = [\"lma_collector\"]\n pids = {}\n processes_count = {\n \"collectd \": 1,\n \"collectdmon \": 1\n }\n\n if self.settings.version.startswith(\"0.9\"):\n processes_count[\n \"hekad -config[= ]/etc/{}\".format(service_version_9)] = 1\n else:\n # Starting with 0.10, there are one collector for logs and one for\n # metrics\n for service in services:\n processes_count[\"hekad -config[= ]/etc/{}\".format(service)] = 1\n online_nodes = [node for node in self.helpers.get_all_ready_nodes()\n if node[\"online\"]]\n for node in online_nodes:\n pids[node[\"name\"]] = {}\n with self.env.d_env.get_ssh_to_remote(node[\"ip\"]) as remote:\n for process, count in processes_count.items():\n logger.info(\"Checking process {0} on node {1}\".format(\n process, node[\"name\"]\n ))\n pids[node[\"name\"]][process] = (\n self.checkers.check_process_count(\n remote, process, count))\n return pids"
] | [
"0.68495893",
"0.68265027",
"0.67144567",
"0.6654075",
"0.6653682",
"0.66230726",
"0.64997345",
"0.6361119",
"0.6352509",
"0.62955403",
"0.6243788",
"0.6225547",
"0.6216019",
"0.6185492",
"0.6181142",
"0.61648095",
"0.6110558",
"0.60903686",
"0.60701483",
"0.60660607",
"0.6041867",
"0.6041478",
"0.60288763",
"0.60226744",
"0.6015627",
"0.60144037",
"0.6007532",
"0.5999191",
"0.59889793",
"0.59873223"
] | 0.7447906 | 0 |
Check the mandatory network extensions. | def check_extensions(self):
    extensions = self.cloud.get_network_extensions()
    for network_extension in self.neutron_extensions:
        if network_extension not in extensions:
            LOGGER.warning(
                "Cannot find Neutron extension: %s", network_extension)
            self.is_skipped = True
            break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")",
"def _sanityCheckExtensions(other):\n if other.useEncryptThenMAC not in (True, False):\n raise ValueError(\"useEncryptThenMAC can only be True or False\")\n\n if other.usePaddingExtension not in (True, False):\n raise ValueError(\"usePaddingExtension must be True or False\")\n\n if other.use_heartbeat_extension not in (True, False):\n raise ValueError(\"use_heartbeat_extension must be True or False\")\n\n if other.heartbeat_response_callback and not other.use_heartbeat_extension:\n raise ValueError(\"heartbeat_response_callback requires \"\n \"use_heartbeat_extension\")\n\n if other.record_size_limit is not None and \\\n not 64 <= other.record_size_limit <= 2**14 + 1:\n raise ValueError(\"record_size_limit cannot exceed 2**14+1 bytes\")\n\n HandshakeSettings._sanityCheckEMSExtension(other)",
"def check_model(self):\n layers_map = self.core.query_network(network=self.network,\n device_name=self.device)\n\n unsupported_layers = [\n l for l in self.network.layers.keys() if l not in layers_map\n ]\n\n if (unsupported_layers != []):\n sys.exit(\"Those mention layers in your model are not supported by OpenVino Inference Engine:\" \\\n \" \\n\\t\" + \"\\n\\t\".join(unsupported_layers))",
"def _validate_extensions(self):\n valid_set = self._check_duplicate_extensions()\n\n if valid_set:\n while True:\n decision = input(\n \"Extensions are scattered in your folders.\\n\"\n \"Do you want to move them all to specific folder\\n\"\n \"or just run basic cleaning? [move/basic]: \"\n )\n if decision.lower() == \"move\":\n for record in valid_set:\n self.move_files_with_extension(record)\n break\n elif decision.lower() == \"basic\":\n break\n else:\n print(\"Invalid Input\")",
"def _validate_create_network(self, context, net_data):\n external = net_data.get(extnet_apidef.EXTERNAL)\n is_external_net = validators.is_attr_set(external) and external\n with_qos = validators.is_attr_set(\n net_data.get(qos_consts.QOS_POLICY_ID))\n\n if with_qos:\n self._validate_qos_policy_id(\n context, net_data.get(qos_consts.QOS_POLICY_ID))\n if is_external_net:\n raise nsx_exc.QoSOnExternalNet()",
"def verify(self):\n if len(self.headers) not in [1, 5]:\n raise IncorrectNumberOfExtensions(\"header\", \"5\", self)\n if len(self.pixeldata) not in [1, 2, 3]:\n raise IncorrectNumberOfExtensions(\"pixel\", \"1, 2, or 3\", self)\n if len(self.tabledata) not in [0,4]:\n raise IncorrectNumberOfExtensions(\"table\", \"4\", self)",
"def test_get_enabled_extensions_returns_empty(self):\n self.manager = TestExtensionManager([], '')\n self.manager.load()\n\n self.assertEqual(len(self.manager.get_enabled_extensions()), 0)",
"def check_supported_features(self):",
"def validate_extension(extension):\n\n error_flag = 0\n error_string = ''\n\n if isinstance(extension, dict):\n try:\n schema = jsonref.load_uri(extension['extension_schema'])\n try:\n print(\"Loaded Extension Schema: \", schema['title'])\n name = schema['title']\n error_string, error_flag = bco_validator(schema, extension)\n\n # For if the schema has no ['title']\n except KeyError:\n print(\"Loaded Extension Schema: \", schema['$id'])\n name = schema['$id']\n\n except json.decoder.JSONDecodeError:\n print('Failed to load extension schema', schema['$id'])\n error_flag += 1\n\n except TypeError:\n print('Failed to load extension schema. \\nInvalid format ', )\n print(extension)\n error_string += json.dumps(extension)\n error_flag += 1\n\n else:\n print('Invalid BCO extension format')\n error_string += json.dumps(extension)\n error_flag = 1\n\n if error_flag == 0:\n print(name + ' PASSED \\U0001F44D')\n return error_string, error_flag",
"def check_network(network_type, path_data):\n\n if network_type in include and 'tags' in path_data:\n for keyword in include[network_type]:\n if keyword not in path_data['tags']:\n #logger.debug('Excluded path %d - keyword %s not in path tags' % (path_data['id'], keyword))\n return False\n\n if network_type not in exclude or 'tags' not in path_data:\n return True\n\n for key in exclude[network_type]:\n if key in path_data['tags'] and path_data['tags'][key] in exclude[network_type][key]:\n return False\n\n return True",
"def get_required_extensions(self):\n return []",
"def _check_coms(self):\n self.com._check_rep()",
"def is_extension_supported(request, extension_alias):\n extensions = list_extensions(request)\n for extension in extensions:\n if extension['alias'] == extension_alias:\n return True\n else:\n return False",
"def check_requirements():\n\n # Which programs are reqired?\n required_programs = ['virtualbox', 'vagrant']\n\n # Make sure the required programs are installed.\n for program in required_programs:\n\n # What's the path to the executable?\n try:\n subprocess.check_output(['which', program])\n except subprocess.CalledProcessError:\n message = \"Please install \" + program + \" before proceeding.\"\n Utilities.log(message)\n exit(1)",
"def run_protocol_checks(sub: Submission, logger):\n\n protocols = sub.protocol\n\n codes = []\n names = set()\n p_types = set()\n allowed_types = ontology_term(\"protocol_types\")\n mandatory = [label for label, attrib in allowed_types.items()\n if attrib[\"exp_type\"] == \"all\" and\n (attrib[\"mandatory\"] == \"ma\" or attrib[\"mandatory\"] == \"seq\")]\n exclusive = [label for label, attrib in allowed_types.items()\n if attrib[\"exp_type\"] == \"all\" and\n attrib[\"mandatory\"] == \"one of\"]\n found_exclusive = False\n\n if not protocols:\n logger.error(\"Experiment has no protocols. At least one expected.\")\n codes.append(\"PROT-E01\")\n return codes\n for p in protocols:\n if p.alias:\n # Protocol names should be unique.\n if p.alias in names:\n logger.error(\"Protocol name \\\"{}\\\" is not unique.\".format(p.alias))\n codes.append(\"PROT-E04\")\n names.add(p.alias)\n # Protocol must have a name\n else:\n logger.error(\"Protocol found with no name. Not checking it further.\")\n codes.append(\"PROT-E02\")\n continue\n if p.description:\n # Protocol description should be longer than 50 characters\n if len(p.description) < 50:\n logger.warning(\"Protocol \\\"{}\\\" is shorter than 50 characters.\".format(p.alias))\n codes.append(\"PROT-W01\")\n # Protocol must have description\n else:\n logger.error(\"Protocol \\\"{}\\\" has no description.\".format(p.alias))\n codes.append(\"PROT-E03\")\n if p.protocol_type:\n # Protocol type must be from controlled vocabulary (EFO)\n p_types.add(p.protocol_type.value)\n if p.protocol_type.value not in allowed_types:\n logger.error(\"Protocol \\\"{}\\\" has a type that is not from controlled vocabulary/EFO: \"\n \"\\\"{}\\\"\".format(p.alias, p.protocol_type.value))\n codes.append(\"PROT-E05\")\n if p.protocol_type.value in exclusive:\n found_exclusive = True\n else:\n # Protocol must have a protocol type\n logger.warn(\"Protocol \\\"{}\\\" has no protocol type.\".format(p.alias))\n codes.append(\"PROT-E07\")\n\n # Mandatory protocol types (for all experiment types) must be present\n for p_type in mandatory:\n if p_type not in p_types:\n logger.error(\"A {} must be included.\".format(p_type))\n codes.append(\"PROT-E06\")\n\n # Every experiment must have at least one growth/treatment/sample collection protocol\n if not found_exclusive:\n logger.error(\"A growth, treatment or sample collection protocol must be included.\")\n codes.append(\"PROT-E07\")\n\n return codes",
"def check_file_extensions(fname, extensions):\n if fname is None:\n return\n assert isinstance(extensions, tuple), \"The 'extensions' must be a tuple.\"\n if not fname.endswith(extensions):\n raise ValueError(\"Invalid file extension (%s). Must be one of %s\" % extensions)",
"def hasExtensions(self):\n return len(self.__extensions) > 0",
"def no_ext_grid(net):\n\n if net.ext_grid.in_service.sum() + (net.gen.slack & net.gen.in_service).sum() == 0:\n return True",
"def checkRequiredDependencies(self):\n \n # skip dependency check for downloading only\n if( self.downloadOnly ):\n return\n\n # hard dependencies\n for req in self.reqmodules:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( self.name + \" requires \" + req \\\n + \" and it wasn't found in your config file!!\" )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version\n \n # build only dependencies\n if( self.mode == \"install\" ):\n mods = self.reqmodules_buildonly + self.reqmodules_external\n for req in mods:\n if( self.parent.module(req) == None ):\n # check if there is an auto detected module\n if( self.parent.module(req, True) == None ):\n self.abort( req + \" not found in your config file!! \" + self.name \\\n + \" cannot be built without \" + req )\n else:\n # use auto detected module\n self.parent.use( self.parent.module(req, True) )\n self.parent.module( req ).init()\n\n print \" - \" + self.name + \": auto-detected \" + req + \" version \" + self.parent.module( req ).version",
"def check_extension(f):\n parts = f.split('.')\n last = parts[len(parts) - 1]\n return last in allowed_extensions",
"def test_11_is_allowed_file_correct_ext(self):\n\n for ext in list(ALLOWED_EXTENSIONS):\n filename = f\"somename.{ext}\"\n is_allowed = utils.is_allowed_file(filename)\n self.assertTrue(is_allowed)",
"def _check_required_if_provider(self):\n return",
"def sanity_check_step(self):\n\n incs = [\"netcdf.h\"]\n libs = [\"libnetcdf.so\", \"libnetcdf.a\"]\n # since v4.2, the non-C libraries have been split off in seperate extensions_step\n # see netCDF-Fortran and netCDF-C++\n if LooseVersion(self.version) < LooseVersion(\"4.2\"):\n incs += [\"netcdf%s\" % x for x in [\"cpp.h\", \".hh\", \".inc\", \".mod\"]] + \\\n [\"ncvalues.h\", \"typesizes.mod\"]\n libs += [\"libnetcdf_c++.so\", \"libnetcdff.so\",\n \"libnetcdf_c++.a\", \"libnetcdff.a\"]\n\n custom_paths = {\n 'files': [\"bin/nc%s\" % x for x in [\"-config\", \"copy\", \"dump\",\n \"gen\", \"gen3\"]] +\n [\"lib/%s\" % x for x in libs] +\n [\"include/%s\" % x for x in incs],\n 'dirs': []\n }\n\n super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)",
"def check_in_front(self, components, extension):\n protocol, root, directory, filename = components\n check_filename = extension + filename\n\n self.request(protocol, root, directory, check_filename)",
"def _check_unsupported_packages(self):\n for package in UNSUPPORTED_PACKAGES:\n version = self.device.get_installed_package_version(package)\n if version is None:\n continue\n\n if '-' in version:\n version = version.split('-')[0] # ignore abi version\n\n if version in UNSUPPORTED_PACKAGES[package]:\n message = 'This workload does not support version \"{}\" of package \"{}\"'\n raise WorkloadError(message.format(version, package))",
"def _sanityCheckEMSExtension(other):\n if other.useExtendedMasterSecret not in (True, False):\n raise ValueError(\"useExtendedMasterSecret must be True or False\")\n if other.requireExtendedMasterSecret not in (True, False):\n raise ValueError(\"requireExtendedMasterSecret must be True \"\n \"or False\")\n if other.requireExtendedMasterSecret and \\\n not other.useExtendedMasterSecret:\n raise ValueError(\"requireExtendedMasterSecret requires \"\n \"useExtendedMasterSecret\")",
"def test_get_valid_networks_for_virtualization_realm(self):\n pass",
"def check_network(config_name, urls = ''):\n\n logging.info(\"calling obsolete network diagnotic. Use '-interactive' instead\")\n\n config = config_namespace.ConfigNameSpace({})\n config.ExecFile(config_name)\n # get relevant parameters from config file:\n dns_servers = string.split(config.namespace['BOT_DNS_SERVERS'], ',')\n\n if Check_Gateway(config.namespace['EXTERNAL_DEFAULT_ROUTE']) != 0:\n return 1\n\n good_dns_servers = 0\n for s in dns_servers:\n if Check_DNS(s) != 4: # all other errors are non-fatal\n good_dns_servers = good_dns_servers + 1\n # if no DNS servers are up, we give up:\n if good_dns_servers == 0:\n return 1\n\n # First check the SMTP server\n logging.info(\"testing SMTP server %s\" % config.namespace['SMTP_SERVER'] )\n Check_SMTP(config.namespace['SMTP_SERVER'],\n config.namespace['EXTERNAL_CRAWL_IP'])\n\n # what about NTP:\n logging.info(\"testing NTP server %s\" % config.namespace['NTP_SERVERS'])\n for s in config.namespace['NTP_SERVERS']:\n Check_NTP(s)\n\n # SYSLOG server:\n logging.info(\"testing SYSLOG server %s\" % config.namespace['SYSLOG_SERVER'] )\n Check_SYSLOG(config.namespace['SYSLOG_SERVER'])\n\n # OK, now walk over all collections and try to get starturls\n for u in urls:\n check_url(u, dns_servers)\n\n return 0",
"def test_badge_should_have_extensions(self):\n\n badge = self.get_sample_badge()\n self.assertTrue(hasattr(badge, 'extensions'))",
"def cmd_net_contest():\n\n print(\"DNS: %s\" % contest.check_dns())\n print(\"FTP: %s\" % contest.check_ftp())\n print(\"SSH: %s\" % contest.check_ssh())\n print(\"HTTP: %s\" % contest.check_http())\n print(\"HTTPS: %s\" % contest.check_https())"
] | [
"0.63620454",
"0.6263202",
"0.59751266",
"0.5877078",
"0.5833472",
"0.5802308",
"0.57483894",
"0.5673013",
"0.56446946",
"0.56413287",
"0.5634714",
"0.5619895",
"0.5614505",
"0.559705",
"0.55942374",
"0.5593726",
"0.55906713",
"0.5572346",
"0.5547773",
"0.55424345",
"0.5539692",
"0.55367064",
"0.5530567",
"0.5530247",
"0.5511316",
"0.55000746",
"0.548354",
"0.5472368",
"0.546489",
"0.5429176"
] | 0.7781191 | 0 |
Copy config file to tempest results directory | def backup_tempest_config(conf_file, res_dir):
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    shutil.copyfile(conf_file,
                    os.path.join(res_dir, 'tempest.conf')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy_config(RESULTSDIR, main_config, io_config):\n print(\"Saving results to: {}\".format(RESULTSDIR))\n\n if not os.path.exists(RESULTSDIR):\n os.makedirs(RESULTSDIR)\n\n mconfig = os.path.join(\n RESULTSDIR, \"copy_main_config_\" + main_config.split(os.sep)[-1]\n )\n dconfig = os.path.join(RESULTSDIR, \"copy_io_config_\" + io_config.split(os.sep)[-1])\n\n shutil.copyfile(main_config, mconfig)\n shutil.copyfile(io_config, dconfig)",
"def _copy_snpeff_config(self):\n\n CONFIG = sequana_data(\"snpEff.config\", \"snpeff\")\n os.makedirs(self.snpeff_datadir, exist_ok=True)\n shutil.copyfile(CONFIG, self.configfile)",
"def copy_marvin_config(self):\n print(\"==> Making local copy of Marvin Config file\")\n marvin_filename = self.marvin_config.split('/')[-1]\n open(marvin_filename, \"w\").write(json.dumps(self.config, indent=4))",
"def copy_config(config_name: str, dest_path: Path) -> Path:\n runway_yml = dest_path / \"runway.yml\"\n if not config_name.startswith(\".yml\"):\n config_name += \".yml\"\n shutil.copy(configs / config_name, runway_yml)\n return runway_yml",
"def move_file_to_config(path):\n destination = str(os.path.expanduser('~')) +'/.config/hackerjobs/'\n shutil.copy(path,destination)",
"def deploy_config():\n run('cp {}/tools/WebGrab++.config.xml {}'.format(env.repo_dir, env.wg_dir))",
"def copy_test_configuration(self, source_dir, dest_dir):\n for root, dirs, files in os.walk(source_dir):\n if '.svn' in dirs:\n dirs.remove('.svn')\n dirs = [ d for d in dirs if not d.startswith('gyptest') ]\n files = [ f for f in files if not f.startswith('gyptest') ]\n for dirname in dirs:\n source = os.path.join(root, dirname)\n destination = source.replace(source_dir, dest_dir)\n os.mkdir(destination)\n if sys.platform != 'win32':\n shutil.copystat(source, destination)\n for filename in files:\n source = os.path.join(root, filename)\n destination = source.replace(source_dir, dest_dir)\n shutil.copy2(source, destination)",
"def sync_config():\n rsync_project(remote_dir='/apps/sharejs-rethinkdb-example/config/', local_dir='./config/')",
"def create_dir(self):\n\n os.makedirs(self.path)\n\n instance_config_dir = p.abspath(p.join(self.path, \"configs\"))\n os.makedirs(instance_config_dir)\n\n print(\n f\"Copy common default production configuration from {self.base_config_dir}. Files: {self.main_config_name}, {self.users_config_name}\"\n )\n\n shutil.copyfile(\n p.join(self.base_config_dir, self.main_config_name),\n p.join(instance_config_dir, self.main_config_name),\n )\n shutil.copyfile(\n p.join(self.base_config_dir, self.users_config_name),\n p.join(instance_config_dir, self.users_config_name),\n )\n\n logging.debug(\"Create directory for configuration generated in this helper\")\n # used by all utils with any config\n conf_d_dir = p.abspath(p.join(instance_config_dir, \"conf.d\"))\n os.mkdir(conf_d_dir)\n\n logging.debug(\"Create directory for common tests configuration\")\n # used by server with main config.xml\n self.config_d_dir = p.abspath(p.join(instance_config_dir, \"config.d\"))\n os.mkdir(self.config_d_dir)\n users_d_dir = p.abspath(p.join(instance_config_dir, \"users.d\"))\n os.mkdir(users_d_dir)\n dictionaries_dir = p.abspath(p.join(instance_config_dir, \"dictionaries\"))\n os.mkdir(dictionaries_dir)\n extra_conf_dir = p.abspath(p.join(instance_config_dir, \"extra_conf.d\"))\n os.mkdir(extra_conf_dir)\n\n def write_embedded_config(name, dest_dir, fix_log_level=False):\n with open(p.join(HELPERS_DIR, name), \"r\") as f:\n data = f.read()\n data = data.replace(\"clickhouse\", self.config_root_name)\n if fix_log_level:\n data = data.replace(\"<level>test</level>\", \"<level>trace</level>\")\n with open(p.join(dest_dir, name), \"w\") as r:\n r.write(data)\n\n logging.debug(\"Copy common configuration from helpers\")\n # The file is named with 0_ prefix to be processed before other configuration overloads.\n if self.copy_common_configs:\n write_embedded_config(\n \"0_common_instance_config.xml\",\n self.config_d_dir,\n self.with_installed_binary,\n )\n\n write_embedded_config(\"0_common_instance_users.xml\", users_d_dir)\n if (\n os.environ.get(\"CLICKHOUSE_USE_NEW_ANALYZER\") is not None\n and self.allow_analyzer\n ):\n write_embedded_config(\"0_common_enable_analyzer.xml\", users_d_dir)\n\n if len(self.custom_dictionaries_paths):\n write_embedded_config(\"0_common_enable_dictionaries.xml\", self.config_d_dir)\n\n logging.debug(\"Generate and write macros file\")\n macros = self.macros.copy()\n macros[\"instance\"] = self.name\n with open(p.join(conf_d_dir, \"macros.xml\"), \"w\") as macros_config:\n macros_config.write(self.dict_to_xml({\"macros\": macros}))\n\n # Put ZooKeeper config\n if self.with_zookeeper:\n shutil.copy(self.zookeeper_config_path, conf_d_dir)\n\n if self.with_secrets:\n if self.with_kerberos_kdc:\n base_secrets_dir = self.cluster.instances_dir\n else:\n base_secrets_dir = self.path\n from_dir = self.secrets_dir\n to_dir = p.abspath(p.join(base_secrets_dir, \"secrets\"))\n logging.debug(f\"Copy secret from {from_dir} to {to_dir}\")\n shutil.copytree(\n self.secrets_dir,\n p.abspath(p.join(base_secrets_dir, \"secrets\")),\n dirs_exist_ok=True,\n )\n\n if self.with_coredns:\n shutil.copytree(\n self.coredns_config_dir, p.abspath(p.join(self.path, \"coredns_config\"))\n )\n\n # Copy config.d configs\n logging.debug(\n f\"Copy custom test config files {self.custom_main_config_paths} to {self.config_d_dir}\"\n )\n for path in self.custom_main_config_paths:\n shutil.copy(path, self.config_d_dir)\n\n # Copy users.d configs\n for path in self.custom_user_config_paths:\n shutil.copy(path, 
users_d_dir)\n\n # Copy dictionaries configs to configs/dictionaries\n for path in self.custom_dictionaries_paths:\n shutil.copy(path, dictionaries_dir)\n for path in self.custom_extra_config_paths:\n shutil.copy(path, extra_conf_dir)\n\n db_dir = p.abspath(p.join(self.path, \"database\"))\n logging.debug(f\"Setup database dir {db_dir}\")\n if self.clickhouse_path_dir is not None:\n logging.debug(f\"Database files taken from {self.clickhouse_path_dir}\")\n shutil.copytree(self.clickhouse_path_dir, db_dir)\n logging.debug(\n f\"Database copied from {self.clickhouse_path_dir} to {db_dir}\"\n )\n else:\n os.mkdir(db_dir)\n\n logs_dir = p.abspath(p.join(self.path, \"logs\"))\n logging.debug(f\"Setup logs dir {logs_dir}\")\n os.mkdir(logs_dir)\n self.logs_dir = logs_dir\n\n depends_on = []\n\n if self.with_mysql_client:\n depends_on.append(self.cluster.mysql_client_host)\n\n if self.with_mysql:\n depends_on.append(\"mysql57\")\n\n if self.with_mysql8:\n depends_on.append(\"mysql80\")\n\n if self.with_mysql_cluster:\n depends_on.append(\"mysql57\")\n depends_on.append(\"mysql2\")\n depends_on.append(\"mysql3\")\n depends_on.append(\"mysql4\")\n\n if self.with_postgres_cluster:\n depends_on.append(\"postgres2\")\n depends_on.append(\"postgres3\")\n depends_on.append(\"postgres4\")\n\n if self.with_kafka:\n depends_on.append(\"kafka1\")\n depends_on.append(\"schema-registry\")\n\n if self.with_kerberized_kafka:\n depends_on.append(\"kerberized_kafka1\")\n\n if self.with_kerberos_kdc:\n depends_on.append(\"kerberoskdc\")\n\n if self.with_kerberized_hdfs:\n depends_on.append(\"kerberizedhdfs1\")\n\n if self.with_rabbitmq:\n depends_on.append(\"rabbitmq1\")\n\n if self.with_nats:\n depends_on.append(\"nats1\")\n\n if self.with_zookeeper:\n depends_on.append(\"zoo1\")\n depends_on.append(\"zoo2\")\n depends_on.append(\"zoo3\")\n\n if self.with_minio:\n depends_on.append(\"minio1\")\n\n if self.with_azurite:\n depends_on.append(\"azurite1\")\n\n self.cluster.env_variables.update(self.env_variables)\n\n odbc_ini_path = \"\"\n if self.odbc_ini_path:\n self._create_odbc_config_file()\n odbc_ini_path = \"- \" + self.odbc_ini_path\n\n entrypoint_cmd = self.clickhouse_start_command\n\n if self.stay_alive:\n entrypoint_cmd = self.clickhouse_stay_alive_command.replace(\n \"{main_config_file}\", self.main_config_name\n )\n else:\n entrypoint_cmd = (\n \"[\"\n + \", \".join(map(lambda x: '\"' + x + '\"', entrypoint_cmd.split()))\n + \"]\"\n )\n\n logging.debug(\"Entrypoint cmd: {}\".format(entrypoint_cmd))\n\n networks = app_net = ipv4_address = ipv6_address = net_aliases = net_alias1 = \"\"\n if (\n self.ipv4_address is not None\n or self.ipv6_address is not None\n or self.hostname != self.name\n ):\n networks = \"networks:\"\n app_net = \"default:\"\n if self.ipv4_address is not None:\n ipv4_address = \"ipv4_address: \" + self.ipv4_address\n if self.ipv6_address is not None:\n ipv6_address = \"ipv6_address: \" + self.ipv6_address\n if self.hostname != self.name:\n net_aliases = \"aliases:\"\n net_alias1 = \"- \" + self.hostname\n\n if not self.with_installed_binary:\n binary_volume = \"- \" + self.server_bin_path + \":/usr/bin/clickhouse\"\n odbc_bridge_volume = (\n \"- \" + self.odbc_bridge_bin_path + \":/usr/bin/clickhouse-odbc-bridge\"\n )\n library_bridge_volume = (\n \"- \"\n + self.library_bridge_bin_path\n + \":/usr/bin/clickhouse-library-bridge\"\n )\n else:\n binary_volume = \"- \" + self.server_bin_path + \":/usr/share/clickhouse_fresh\"\n odbc_bridge_volume = (\n \"- \"\n + 
self.odbc_bridge_bin_path\n + \":/usr/share/clickhouse-odbc-bridge_fresh\"\n )\n library_bridge_volume = (\n \"- \"\n + self.library_bridge_bin_path\n + \":/usr/share/clickhouse-library-bridge_fresh\"\n )\n\n external_dirs_volumes = \"\"\n if self.external_dirs:\n for external_dir in self.external_dirs:\n external_dir_abs_path = p.abspath(\n p.join(self.cluster.instances_dir, external_dir.lstrip(\"/\"))\n )\n logging.info(f\"external_dir_abs_path={external_dir_abs_path}\")\n os.makedirs(external_dir_abs_path, exist_ok=True)\n external_dirs_volumes += (\n \"- \" + external_dir_abs_path + \":\" + external_dir + \"\\n\"\n )\n\n with open(self.docker_compose_path, \"w\") as docker_compose:\n docker_compose.write(\n DOCKER_COMPOSE_TEMPLATE.format(\n image=self.image,\n tag=self.tag,\n name=self.name,\n hostname=self.hostname,\n binary_volume=binary_volume,\n odbc_bridge_volume=odbc_bridge_volume,\n library_bridge_volume=library_bridge_volume,\n instance_config_dir=instance_config_dir,\n config_d_dir=self.config_d_dir,\n db_dir=db_dir,\n external_dirs_volumes=external_dirs_volumes,\n tmpfs=str(self.tmpfs),\n logs_dir=logs_dir,\n depends_on=str(depends_on),\n user=os.getuid(),\n env_file=self.env_file,\n odbc_ini_path=odbc_ini_path,\n keytab_path=self.keytab_path,\n krb5_conf=self.krb5_conf,\n entrypoint_cmd=entrypoint_cmd,\n networks=networks,\n app_net=app_net,\n ipv4_address=ipv4_address,\n ipv6_address=ipv6_address,\n net_aliases=net_aliases,\n net_alias1=net_alias1,\n )\n )",
"def create_config_file(original_file, copy_file):\n copy(original_file, copy_file)",
"def _prepare(self):\n logging.info('-> copy configuration...')\n path_cofig = self.params['path_config_bUnwarpJ']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_SIFT' in self.params:\n path_cofig = self.params['path_config_IJ_SIFT']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))\n if 'path_config_IJ_MOPS' in self.params:\n path_cofig = self.params['path_config_IJ_MOPS']\n shutil.copy(path_cofig, os.path.join(self.params['path_exp'],\n os.path.basename(path_cofig)))",
"def _copy_asoundconf(asoundconf_file):\n this_dir, this_filename = os.path.split(__file__)\n asoundconf_path = os.path.join(this_dir, MicrophoneSetup.ASOUNDCONF_PATH, asoundconf_file)\n shutil.copy2(asoundconf_path, ASOUNDCONF_DEST_PATH)",
"def copy_kml(results_dir):\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n copy2(\n os.path.join(os.path.dirname(__file__), '..', 'raw', 'KML_Samples.kml'),\n results_dir\n )",
"def generate_conf(self):\n if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):\n os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)\n\n self.tempestconf = TempestConf()\n self.tempestconf.generate_tempestconf()",
"def copydir(self):\n pass",
"def _use_custom_config(self, standard_conf_path):\n conf_filename = os.path.basename(standard_conf_path)\n custom_conf_expected_path = CUSTOM_CONFIG_DIR + '/' + self._get_tempdir() + '/' + conf_filename\n shutil.copy(custom_conf_expected_path,\n self._get_tempdir() + '/' + standard_conf_path)",
"def tmp_configuration_copy(chmod=0o600, include_env=True, include_cmds=True):\n cfg_dict = conf.as_dict(\n display_sensitive=True, raw=True, include_cmds=include_cmds, include_env=include_env\n )\n temp_fd, cfg_path = mkstemp()\n\n with os.fdopen(temp_fd, \"w\") as temp_file:\n # Set the permissions before we write anything to it.\n if chmod is not None and not IS_WINDOWS:\n os.fchmod(temp_fd, chmod)\n json.dump(cfg_dict, temp_file)\n\n return cfg_path",
"def setup_configuration_file(self):\n\n with open(self.config_path, \"w+\") as f_config:\n\n f_config.write(get_configuration_file_form())",
"def cp_config(configs: Path) -> Callable[[str, Path], Path]:\n\n def copy_config(config_name: str, dest_path: Path) -> Path:\n \"\"\"Copy a config file by name to a destination directory.\n\n The resulting config will be named runway.yml.\n\n \"\"\"\n runway_yml = dest_path / \"runway.yml\"\n if not config_name.startswith(\".yml\"):\n config_name += \".yml\"\n shutil.copy(configs / config_name, runway_yml)\n return runway_yml\n\n return copy_config",
"def make_config(config):\n config.set(\"dxr\", \"source_folder\", os.path.expanduser(\"~/dxr\"))",
"def save_config(log_dir, config):\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n shutil.copy(config, os.path.join(log_dir, 'config.gin'))",
"def _parse_config_and_setup_directory(config_file):\n with open(config_file, \"rb\") as f:\n config = yaml.load(f)\n\n # Create error output directory\n if not os.path.exists(config[\"error_analysis_dir\"]):\n os.makedirs(config[\"error_analysis_dir\"])\n else:\n choice = input(\n \"Directory {} exists. Do you want to overwrite? (Hit y to overwrite, any other key to abort): \".format(\n config[\"error_analysis_dir\"]\n )\n )\n if choice != \"y\":\n sys.exit(\"Aborting run. Error analysis directory exists.\")\n\n # Copy config file into error_analysis_dir so that we can keep the configuration of the experiment with the results.\n # This way the entire directory can be zipped up and sent around with the bookkeeping intact.\n with open(os.path.join(config[\"error_analysis_dir\"], \"config.yml\"), \"w\") as f:\n yaml.dump(config, f)\n\n return config",
"def config_copy(ipydir, profile):\n for fpath in profile_files(profile):\n filename = osp.basename(fpath)\n dest_file = osp.join(ipydir, 'profile_' + profile, 'startup',\n filename)\n shutil.copy(fpath, dest_file)\n logger.info(\"Copy files '%s' for profile '%s'.\",\n osp.basename(filename), profile)",
"def copyAndLinkConfig(config):\n\n basename = os.path.basename(config)\n new_config_path = os.path.join(basedefs.DIR_CONFIG, basename)\n\n # Verify destination dir exists, create it if necessary\n if not os.path.isdir(basedefs.DIR_CONFIG):\n try:\n logging.debug(\"Creating ovirt-engine config directory\")\n os.makedirs(basedefs.DIR_CONFIG)\n except:\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_FAILED_CREATE_RHEVM_CONFIG_DIR % basedefs.DIR_CONFIG)\n\n # Verify original config is not already linked\n if os.path.islink(config):\n if (os.readlink(config) == new_config_path):\n logging.debug(\"%s is already linked to %s\"%(config, new_config_path))\n return(os.path.join(basedefs.DIR_CONFIG, basename))\n else:\n raise Exception(output_messages.ERR_EXP_LINK_EXISTS%(config, new_config_path))\n\n # Verify original config is a normal file, and copy it to the new location\n elif os.path.isfile(config):\n try:\n utils.copyFile(config, basedefs.DIR_CONFIG)\n\n # Remove old file\n logging.debug(\"Removing %s\" %(config))\n os.remove(config)\n\n # Linking\n logging.debug(\"Linking %s to %s/%s\" %(config, basedefs.DIR_CONFIG, config))\n os.symlink(new_config_path, config)\n except:\n logging.error(traceback.format_exc())\n raise Exception(output_messages.ERR_EXP_CPY_RHEVM_CFG % (config, \"%s/%s\" % (basedefs.DIR_CONFIG, config)))\n # return new path\n return new_config_path",
"def _store_test_result(ptfhost):\n logger.info(\"Copying file from folder: {0} to folder: {1}\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))\n ptfhost.shell(\"cp {0}/*.* {1}/\".format(\n\t\tSAI_TEST_REPORT_TMP_DIR_ON_PTF, \n\t\tSAI_TEST_REPORT_DIR_ON_PTF))",
"def backup_config(context):\n context.copy_from(DNF_PLUGIN_DATA_PATH, DNF_PLUGIN_DATA_LOG_PATH)",
"def deploy_conf(self, source_path, dest_path):\n if not os.path.exists(source_path):\n raise RuntimeError('Expected configuration file to exist in {}, but does not.'.format(source_path))\n\n self._shell_client.copy(source_path, dest_path)\n # Must set permissions of conf to '600' for security purposes.\n self._shell_client.exec_command('chmod 600 {}'.format(dest_path), error_on_failure=True)",
"def preparation(self):\n # [1] Makes a dir for saving results.\n # if 'Result' dir already exists,\n # a 'temporary' dir will be made.\n\n try:\n os.mkdir(self.dir_for_saving_result)\n except FileExistsError:\n self.viewer.display_message(\"Made a temporary directory.\")\n self.dir_for_saving_result = 'temporary'\n os.mkdir('temporary')\n\n # [2] Copies config file into the same dir as the one where results will be stored\n shutil.copy2(self.config_file_name, self.dir_for_saving_result)",
"def test_create_copy(self):\n\n config = {\n 'version': '2.0',\n 'input_files': {\n 'INPUT_1': [{\n 'id': 1234,\n 'type': 'PRODUCT',\n 'workspace_name': 'wksp-name',\n 'workspace_path': 'the/workspace/path/file.json',\n 'local_file_name': 'file_abcdfeg.json',\n 'is_deleted': False,\n }]\n },\n 'output_workspaces': {\n 'OUTPUT_1': 'WORKSPACE_1'\n },\n 'tasks': [\n {\n 'task_id': 'task-1234',\n 'type': 'main',\n 'resources': {'cpu': 1.0},\n 'args': 'foo ${INPUT_1} ${JOB_OUTPUT_DIR}',\n 'env_vars': {'ENV_VAR_NAME': 'ENV_VAR_VALUE'},\n 'workspaces': {'WORKSPACE_NAME': {'mode': 'ro'}},\n 'mounts': {'MOUNT_NAME': 'MOUNT_VOLUME_NAME'},\n 'settings': {'SETTING_NAME': 'SETTING_VALUE'},\n 'volumes': {\n 'VOLUME_NAME_1': {\n 'container_path': '/the/container/path',\n 'mode': 'ro',\n 'type': 'host',\n 'host_path': '/the/host/path'\n },\n 'VOLUME_NAME_2': {\n 'container_path': '/the/other/container/path',\n 'mode': 'rw',\n 'type': 'volume',\n 'driver': 'SUPER_DRIVER_5000',\n 'driver_opts': {'turbo': 'yes-pleez'}\n }\n },\n 'docker_params': [{'flag': 'hello', 'value': 'scale'}]\n }\n ]\n }\n exe_config = ExecutionConfiguration(config)\n\n copy = exe_config.create_copy()\n self.assertDictEqual(copy.get_dict(), config)",
"def install_config(self, config):\n for fn in config:\n dst = [p for p in self.config if basename(p) == fn][0]\n src = pathjoin(self.watch, fn)\n\n try:\n os.makedirs(dirname(dst))\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n LOGGER.debug('Overwriting %s with %s', src, dst)\n shutil.move(src, dst)\n\n if self.chown is not None:\n os.chown(dst, *self.chown)\n\n if self.chmod is not None:\n os.chmod(dst, self.chmod)"
] | [
"0.72231483",
"0.6721714",
"0.6498246",
"0.6410498",
"0.6409924",
"0.6284228",
"0.6280437",
"0.62706035",
"0.6260962",
"0.6219964",
"0.61791456",
"0.6174911",
"0.61536056",
"0.61037135",
"0.6049953",
"0.6039108",
"0.6026846",
"0.6011455",
"0.59960604",
"0.5987644",
"0.5975415",
"0.5971033",
"0.5962124",
"0.59266794",
"0.58914715",
"0.5865346",
"0.58612186",
"0.58599836",
"0.5852535",
"0.58482355"
] | 0.7394381 | 0 |
Returns verifier id for current Tempest | def get_verifier_id():
cmd = ("rally verify list-verifiers | awk '/" +
getattr(config.CONF, 'tempest_verifier_name') +
"/ {print $2}'")
with subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL) as proc:
verifier_uuid = proc.stdout.readline().rstrip()
return verifier_uuid.decode("utf-8") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_verifier_id():\n cmd = (\"rally verify list-verifiers | awk '/\" +\n getattr(config.CONF, 'tempest_verifier_name') +\n \"/ {print $2}'\")\n proc = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n verifier_uuid = proc.stdout.readline().rstrip()\n return verifier_uuid",
"def get_verifier():\n return get_current_registry().getUtility(IBrowserIdVerifier)",
"def getId(self):\n return self.__vmId",
"def run_verifier_tests(self, **kwargs):\n cmd = [\"rally\", \"verify\", \"start\", \"--load-list\",\n self.list]\n cmd.extend(kwargs.get('option', []))\n LOGGER.info(\"Starting Tempest test suite: '%s'.\", cmd)\n\n with open(\n os.path.join(self.res_dir, \"tempest.log\"), 'w+',\n encoding='utf-8') as f_stdout:\n with subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n bufsize=1) as proc:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n if re.search(r\"\\} tempest\\.\", line.decode(\"utf-8\")):\n LOGGER.info(line.rstrip())\n elif re.search(r'(?=\\(UUID=(.*)\\))',\n line.decode(\"utf-8\")):\n self.verification_id = re.search(\n r'(?=\\(UUID=(.*)\\))',\n line.decode(\"utf-8\")).group(1)\n f_stdout.write(line.decode(\"utf-8\"))\n proc.wait()\n\n if self.verification_id is None:\n raise Exception('Verification UUID not found')\n LOGGER.info('Verification UUID: %s', self.verification_id)\n\n shutil.copy(\n f\"{self.deployment_dir}/tempest.log\",\n f\"{self.res_dir}/tempest.debug.log\")",
"def vm_id(self):\n return self.vm_info.get('id', 'Error retrieving ID')",
"def teid(self):\n return self._teid",
"def latest_id(self):\n return self.checkpoints[-1]",
"def get_vm_id(self):\n return self.instance_metadata.vm_id",
"def compute_transaction_id(self):\n self.tx_id = self.get_sign_data()",
"def getSerpentId(self):\n raise NotImplementedError",
"def vm_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"vm_id\")",
"def getIdent (self) :\n return self.id",
"def vmid(self):\n return self.raw[\"VMId\"]",
"def vpp_token_id(self):\n if \"vppTokenId\" in self._prop_dict:\n return self._prop_dict[\"vppTokenId\"]\n else:\n return None",
"def _get_msti_root_id(self):\n return self.__msti_root_id",
"def get_ident():\n return -1",
"def vm_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"vm_id\")",
"def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")",
"def getID():",
"def tracking_generation_seed():\n return 112",
"def honeypot_probe_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"honeypot_probe_id\")",
"def id(self):\n if not self.parent:\n return 's1'\n return '%s-s%d' % (self.parent.id, self.parent.suites.index(self)+1)",
"def GetToolId(self):\r\n\r\n return self.tool_id",
"def get_id(self): # pragma: no cover\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get_id(self):\n pass",
"def get(self):\n if not self._current_transid:\n self._current_transid = self.fresh_transid()\n return self._current_transid",
"def id(self):\n return self.__pairs[-1][1]"
] | [
"0.6726382",
"0.5993047",
"0.5835138",
"0.57405263",
"0.571625",
"0.56476825",
"0.5630613",
"0.5571045",
"0.5566842",
"0.5514997",
"0.5463051",
"0.53477794",
"0.5343376",
"0.5300335",
"0.5232395",
"0.52306646",
"0.52277946",
"0.52036935",
"0.51771",
"0.5170933",
"0.5119616",
"0.5108579",
"0.5097447",
"0.5078276",
"0.50681317",
"0.50681317",
"0.50681317",
"0.50681317",
"0.50351673",
"0.5008938"
] | 0.6849755 | 0 |
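
Editor's note: the get_verifier_id record above shells out to the Rally CLI and keeps only the first line of output. Below is a minimal, self-contained sketch of that subprocess pattern; the verifier name "opnfv-tempest" in the commented usage is an illustrative assumption, not a value taken from the record.

import subprocess


def first_line_from_shell(cmd: str) -> str:
    """Run a shell pipeline and return its first output line, decoded.

    Mirrors the subprocess.Popen usage in get_verifier_id() above.
    """
    with subprocess.Popen(
            cmd, shell=True, stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL) as proc:
        return proc.stdout.readline().rstrip().decode("utf-8")


# Hypothetical usage, assuming a Rally verifier named "opnfv-tempest" exists:
# uuid = first_line_from_shell(
#     "rally verify list-verifiers | awk '/opnfv-tempest/ {print $2}'")
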
Returns installed verifier repo directory for Tempest | def get_verifier_repo_dir(verifier_id):
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
f'verifier-{verifier_id}',
'repo') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_verifier_repo_dir(verifier_id):\n return os.path.join(getattr(config.CONF, 'dir_rally_inst'),\n 'verification',\n 'verifier-{}'.format(verifier_id),\n 'repo')",
"def get_verifier_deployment_dir(verifier_id, deployment_id):\n return os.path.join(getattr(config.CONF, 'dir_rally_inst'),\n 'verification',\n 'verifier-{}'.format(verifier_id),\n 'for-deployment-{}'.format(deployment_id))",
"def get_verifier_deployment_dir(verifier_id, deployment_id):\n return os.path.join(getattr(config.CONF, 'dir_rally_inst'),\n 'verification',\n f'verifier-{verifier_id}',\n f'for-deployment-{deployment_id}')",
"def get_repository_dir():\n expected = os.path.abspath(__file__).rsplit('/', 2)[0]\n\n # get_path verifies the existance of these directories\n get_path(expected, 'data')\n get_path(expected, 'latex')\n\n return expected",
"def configure_verifier(deployment_dir):\n cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',\n '--id', str(getattr(config.CONF, 'tempest_verifier_name'))]\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n\n LOGGER.debug(\"Looking for tempest.conf file...\")\n tempest_conf_file = os.path.join(deployment_dir, \"tempest.conf\")\n if not os.path.isfile(tempest_conf_file):\n LOGGER.error(\"Tempest configuration file %s NOT found.\",\n tempest_conf_file)\n return None\n return tempest_conf_file",
"def repo_root() -> str:\n path = os.path.realpath(os.curdir)\n\n while True:\n if os.path.exists(os.path.join(path, \"setup.py\")):\n return path\n path = os.path.realpath(os.path.join(path, \"..\"))",
"def configure_verifier(deployment_dir):\n cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',\n '--id', str(getattr(config.CONF, 'tempest_verifier_name'))]\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n\n LOGGER.debug(\"Looking for tempest.conf file...\")\n tempest_conf_file = os.path.join(deployment_dir, \"tempest.conf\")\n if not os.path.isfile(tempest_conf_file):\n LOGGER.error(\"Tempest configuration file %s NOT found.\",\n tempest_conf_file)\n return None\n return tempest_conf_file",
"def get_target_folder() -> str:\n return os.path.abspath(os.path.join(dirname(__file__), os.pardir, os.pardir, \"provider_packages\"))",
"def get_scratch_dir():\n scratch_dir = os.path.join(get_repo_dir(), \"target\", \"compat-check\")\n if not os.path.exists(scratch_dir):\n os.makedirs(scratch_dir)\n return scratch_dir",
"def get_repo_dir():\n dirname, _ = os.path.split(os.path.abspath(__file__))\n dirname = os.path.dirname(dirname)\n logging.debug(\"Repo dir is %s\", dirname)\n return dirname",
"def dependency_dir(self) -> Path:",
"def GetPackageDirectory():\n return os.path.dirname(__file__)",
"def get_local_repository_path():\n result = subprocess.run(\"cmd /c mvn help:evaluate -Dexpression=settings.localRepository\",\n stdout=subprocess.PIPE)\n\n regex = re.compile('.*[INFO].*')\n path = regex.sub(\"\", result.stdout.decode(\"utf-8\")).rstrip().lstrip()\n return path",
"def get_package_dir():\n return Path(__file__).parent",
"def get_pytest():\n return path.join(TaskCreator.bin_dir, \"py.test\")",
"def source_directory(self):\r\n return self.pip_requirement.source_dir",
"def SvnPath(self):\n return self._module.root_path",
"def get_target_providers_folder() -> str:\n return os.path.abspath(os.path.join(get_target_folder(), \"airflow\", \"providers\"))",
"def get_setup_file():\n repo_fs()\n return SETUP_FILES",
"def test_dir():\n return os.path.abspath(os.path.dirname(__file__))",
"def test_llvm_prebuilt_dir(self):\n self.assertEqual(\n self.ndk.llvm_prebuilt_dir,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}\",\n )",
"def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"",
"def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\"",
"def __get_module_root_dir(self):\n # type: () -> str\n if self.location in ['.', '.' + os.sep]:\n return self.env_root\n if self.source != 'local':\n return self.__fetch_remote_source()\n return os.path.join(self.env_root, self.location)",
"def get_tmuxinator_dir() -> pathlib.Path:\n if \"TMUXINATOR_CONFIG\" in os.environ:\n return pathlib.Path(os.environ[\"TMUXINATOR_CONFIG\"]).expanduser()\n\n return pathlib.Path(\"~/.tmuxinator/\").expanduser()",
"def acquire_package_directory():\n top_plugin_dir = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n expected_package_dir = '/extras/MockApp'\n app_dir = top_plugin_dir + expected_package_dir\n return app_dir",
"def local_finder_artifacts() -> Path:\n return Path()",
"def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)",
"def get_checks_path():\n rel_path = os.path.join(os.pardir, os.pardir, os.pardir, \"checks\")\n return os.path.abspath(os.path.join(__file__, rel_path))",
"def proof_dir(self):\n return self.dir"
] | [
"0.7211886",
"0.6550398",
"0.6523984",
"0.6448193",
"0.6078293",
"0.6072598",
"0.60545844",
"0.6008985",
"0.59839743",
"0.5949114",
"0.591674",
"0.58978075",
"0.57718587",
"0.57658106",
"0.57482255",
"0.57396054",
"0.57301205",
"0.57203543",
"0.5713774",
"0.5703848",
"0.56487525",
"0.56273985",
"0.5623205",
"0.56184775",
"0.5618008",
"0.5616837",
"0.55922085",
"0.55678505",
"0.55607563",
"0.5555431"
] | 0.71335983 | 1 |
Returns Rally deployment directory for current verifier | def get_verifier_deployment_dir(verifier_id, deployment_id):
return os.path.join(getattr(config.CONF, 'dir_rally_inst'),
'verification',
f'verifier-{verifier_id}',
f'for-deployment-{deployment_id}') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_verifier_deployment_dir(verifier_id, deployment_id):\n return os.path.join(getattr(config.CONF, 'dir_rally_inst'),\n 'verification',\n 'verifier-{}'.format(verifier_id),\n 'for-deployment-{}'.format(deployment_id))",
"def get_verifier_repo_dir(verifier_id):\n return os.path.join(getattr(config.CONF, 'dir_rally_inst'),\n 'verification',\n 'verifier-{}'.format(verifier_id),\n 'repo')",
"def get_verifier_repo_dir(verifier_id):\n return os.path.join(getattr(config.CONF, 'dir_rally_inst'),\n 'verification',\n f'verifier-{verifier_id}',\n 'repo')",
"def get_appdir():\n\n return APP_PATH",
"def GetPackageDirectory():\n return os.path.dirname(__file__)",
"def get_verifier_deployment_id():\n cmd = (\"rally deployment list | awk '/\" +\n getattr(config.CONF, 'rally_deployment_name') +\n \"/ {print $2}'\")\n proc = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n deployment_uuid = proc.stdout.readline().rstrip()\n return deployment_uuid",
"def getTradeOutputDir():\n\tglobal config\n\treturn config['directory']['output']",
"def get_verifier_deployment_id():\n cmd = (\"rally deployment list | awk '/\" +\n getattr(config.CONF, 'rally_deployment_name') +\n \"/ {print $2}'\")\n with subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT) as proc:\n deployment_uuid = proc.stdout.readline().rstrip()\n return deployment_uuid.decode(\"utf-8\")",
"def get_directory() -> str:\n return directory",
"def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'",
"def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)",
"def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)",
"def _new_release_dir(self, connection):\n release_dir_timestamp = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')\n commit_hash = self._get_commit_hash(connection)\n\n release_dir = f'{release_dir_timestamp}-{self.config.deployment_user}-{commit_hash}-{self.project_version}'\n print(blue(f\"Release directory set to {release_dir}\"))\n\n return release_dir",
"def output_dir(self):\n return self.c.build_dir.join(self.c.build_config_fs)",
"def folder(self):\n root_xml_folder = pkg_resources.resource_filename('mockstashop', 'xml')\n\n # XXX: May be handle minor versions if there are API changes inside\n # minor releases ?\n return os.path.join(root_xml_folder, self.version)",
"def app_dir(self):\n return self._app_dir",
"def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)",
"def GetLauncherPath(self):\n return os.path.dirname(__file__)",
"def GetLauncherPath(self):\n return os.path.dirname(__file__)",
"def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path",
"def manifest_output_directory(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"manifest_output_directory\")",
"def get_base_dir(self):\n return self._config_dict['output']['@baseDirectory']",
"def get_working_dir(self):\n return self.role.directory",
"def _getCodeFolder(self):\n if getattr(sys, 'frozen', False):\n # we are running in a bundle (frozen)\n bundle_dir = sys._MEIPASS\n else:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\n return bundle_dir",
"def GetSwigOutDir(cls):\n return os.path.join(FileUtils.GetEDir(), 'swig')",
"def _get_reporoot():\n from os import path\n import acorn\n medpath = path.abspath(acorn.__file__)\n return path.dirname(path.dirname(medpath))",
"def get_package_dir():\n return Path(__file__).parent",
"def output_dir(self):\n return os.path.join(self._sandbox, 'output' + os.path.sep)",
"def get_enry_dir() -> str:\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"build\"))",
"def get_base_dir(config: Mapping[str, Any]) -> str:\n return normalize_base_dir(config.get(\"base_dir\"))"
] | [
"0.8431518",
"0.70921344",
"0.7018849",
"0.6559742",
"0.6509258",
"0.6363346",
"0.63590264",
"0.6337463",
"0.62616277",
"0.6258049",
"0.6232877",
"0.618148",
"0.6174993",
"0.6173079",
"0.6141118",
"0.613199",
"0.61280704",
"0.61003804",
"0.61003804",
"0.6097256",
"0.60898834",
"0.60793287",
"0.60585654",
"0.6056266",
"0.60474616",
"0.60310936",
"0.5998097",
"0.5992713",
"0.59901977",
"0.59818715"
] | 0.8463894 | 0 |
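
Editor's note: the two preceding records build Rally verification paths with os.path.join. The sketch below rebuilds that layout; the ~/.rally base directory is an assumption for illustration, whereas the real value comes from config.CONF.dir_rally_inst in the records.

import os

# Assumed Rally installation directory (placeholder for dir_rally_inst).
RALLY_INST_DIR = os.path.expanduser("~/.rally")


def verifier_paths(verifier_id: str, deployment_id: str) -> tuple:
    """Return (repo dir, per-deployment dir) following the layout above."""
    base = os.path.join(
        RALLY_INST_DIR, "verification", f"verifier-{verifier_id}")
    return (os.path.join(base, "repo"),
            os.path.join(base, f"for-deployment-{deployment_id}"))


# verifier_paths("abc", "123") ->
#   (.../verification/verifier-abc/repo,
#    .../verification/verifier-abc/for-deployment-123)
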
Add/update needed parameters into tempest.conf file | def configure_tempest_update_params(
tempest_conf_file, image_id=None, flavor_id=None,
compute_cnt=1, image_alt_id=None, flavor_alt_id=None,
admin_role_name='admin', cidr='192.168.120.0/24',
domain_id='default'):
# pylint: disable=too-many-branches,too-many-arguments
# pylint: disable=too-many-statements,too-many-locals
LOGGER.debug("Updating selected tempest.conf parameters...")
rconfig = configparser.RawConfigParser()
rconfig.read(tempest_conf_file)
rconfig.set(
'compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))
if image_id is not None:
rconfig.set('compute', 'image_ref', image_id)
if image_alt_id is not None:
rconfig.set('compute', 'image_ref_alt', image_alt_id)
if flavor_id is not None:
rconfig.set('compute', 'flavor_ref', flavor_id)
if flavor_alt_id is not None:
rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)
if compute_cnt > 1:
# enable multinode tests
rconfig.set('compute', 'min_compute_nodes', compute_cnt)
rconfig.set('compute-feature-enabled', 'live_migration', True)
if os.environ.get('OS_REGION_NAME'):
rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))
rconfig.set('identity', 'admin_role', admin_role_name)
rconfig.set('identity', 'default_domain_id', domain_id)
if not rconfig.has_section('network'):
rconfig.add_section('network')
rconfig.set('network', 'default_network', cidr)
rconfig.set('network', 'project_network_cidr', cidr)
rconfig.set('network', 'project_networks_reachable', False)
rconfig.set(
'identity', 'v3_endpoint_type',
os.environ.get('OS_INTERFACE', 'public'))
sections = rconfig.sections()
services_list = [
'compute', 'volume', 'image', 'network', 'data-processing',
'object-storage', 'orchestration']
for service in services_list:
if service not in sections:
rconfig.add_section(service)
rconfig.set(service, 'endpoint_type',
os.environ.get('OS_INTERFACE', 'public'))
LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '
'into tempest.conf file')
TempestCommon.update_tempest_conf_file(tempest_conf_file, rconfig) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_tempest_update_params(\n tempest_conf_file, image_id=None, flavor_id=None,\n compute_cnt=1, image_alt_id=None, flavor_alt_id=None,\n admin_role_name='admin', cidr='192.168.120.0/24',\n domain_id='default'):\n # pylint: disable=too-many-branches,too-many-arguments,too-many-statements\n LOGGER.debug(\"Updating selected tempest.conf parameters...\")\n rconfig = configparser.RawConfigParser()\n rconfig.read(tempest_conf_file)\n rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))\n if image_id is not None:\n rconfig.set('compute', 'image_ref', image_id)\n if image_alt_id is not None:\n rconfig.set('compute', 'image_ref_alt', image_alt_id)\n if flavor_id is not None:\n rconfig.set('compute', 'flavor_ref', flavor_id)\n if flavor_alt_id is not None:\n rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)\n if compute_cnt > 1:\n # enable multinode tests\n rconfig.set('compute', 'min_compute_nodes', compute_cnt)\n rconfig.set('compute-feature-enabled', 'live_migration', True)\n filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',\n 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',\n 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']\n rconfig.set(\n 'compute-feature-enabled', 'scheduler_available_filters',\n functest_utils.convert_list_to_ini(filters))\n if os.environ.get('OS_REGION_NAME'):\n rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n rconfig.set(\n 'auth', 'tempest_roles',\n functest_utils.convert_list_to_ini([env.get(\"NEW_USER_ROLE\")]))\n if not json.loads(env.get(\"USE_DYNAMIC_CREDENTIALS\").lower()):\n rconfig.set('auth', 'use_dynamic_credentials', False)\n account_file = os.path.join(\n getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')\n assert os.path.exists(\n account_file), \"{} doesn't exist\".format(account_file)\n rconfig.set('auth', 'test_accounts_file', account_file)\n rconfig.set('identity', 'auth_version', 'v3')\n rconfig.set('identity', 'admin_role', admin_role_name)\n rconfig.set('identity', 'admin_domain_scope', True)\n rconfig.set('identity', 'default_domain_id', domain_id)\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'default_network', cidr)\n rconfig.set('network', 'project_network_cidr', cidr)\n rconfig.set('network', 'project_networks_reachable', False)\n rconfig.set(\n 'validation', 'ssh_timeout',\n getattr(config.CONF, 'tempest_validation_ssh_timeout'))\n rconfig.set('object-storage', 'operator_role',\n getattr(config.CONF, 'tempest_object_storage_operator_role'))\n rconfig.set(\n 'identity', 'v3_endpoint_type',\n os.environ.get('OS_INTERFACE', 'public'))\n\n sections = rconfig.sections()\n services_list = [\n 'compute', 'volume', 'image', 'network', 'data-processing',\n 'object-storage', 'orchestration']\n for service in services_list:\n if service not in sections:\n rconfig.add_section(service)\n rconfig.set(\n service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))\n\n LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '\n 'into tempest.conf file')\n update_tempest_conf_file(tempest_conf_file, rconfig)",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def configure_tempest(deployment_dir):\n\n logger.debug(\"Generating tempest.conf file...\")\n cmd = \"rally verify genconfig\"\n ft_utils.execute_command(cmd, logger)\n\n logger.debug(\"Finding tempest.conf file...\")\n tempest_conf_file = deployment_dir + \"/tempest.conf\"\n if not os.path.isfile(tempest_conf_file):\n logger.error(\"Tempest configuration file %s NOT found.\"\n % tempest_conf_file)\n exit(-1)\n\n logger.debug(\"Updating selected tempest.conf parameters...\")\n config = ConfigParser.RawConfigParser()\n config.read(tempest_conf_file)\n config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME)\n config.set('identity', 'tenant_name', TENANT_NAME)\n config.set('identity', 'username', USER_NAME)\n config.set('identity', 'password', USER_PASSWORD)\n with open(tempest_conf_file, 'wb') as config_file:\n config.write(config_file)\n\n # Copy tempest.conf to /home/opnfv/functest/results/tempest/\n shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf')\n return True",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TEMPEST_CONF_YAML) as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'wb') as config_file:\n rconfig.write(config_file)",
"def configure(self, options, conf):",
"def setup_params():\n Script.fullname = os.path.splitext(os.path.abspath(__file__))[0]\n Script.basename = os.path.basename(__file__)\n Script.name = os.path.splitext(Script.basename)[0]\n Script.service = modUtils.check_service(Script.name)",
"def config(self, **kw):\n self.cfg_fixture.config(**kw)",
"def setup(override: str=''):\n\n try:\n base_config_data = open(BASE_CONFIGURATION).read()\n base_config = json.loads(base_config_data)\n except FileNotFoundError:\n logging.error('Base configuration file in config/base.json not found.')\n raise RuntimeError('Base configuration file not found.')\n\n # Check if override is required\n if override is not '':\n try:\n override_config_data = open('config/{0}'.format(override)).read()\n override_config = json.loads(override_config_data)\n except FileNotFoundError:\n logging.error('Override configuration file config/{0} not found.')\n raise RuntimeError('Invalid configuraiton override file.')\n\n # Update base config with override parameters\n base_config = update(base_config, override_config)\n\n # Add to parameters\n global Parameters\n Parameters.__dict__.update(base_config)",
"def set_conf_files(application):\n example_dir = \"./docs/examples/configs/example_4\"\n application.config['GROUPS_FILE'] = example_dir + \"/groups.yml\"\n application.config['POLICIES_FILE'] = example_dir + \"/policies.yml\"",
"def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()",
"def configure(self, conf):\n\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))",
"def pytest_configure_node(node: Node):\n node.workerinput[\"options\"] = { # type: ignore\n \"dist\": node.config.option.dist, # type: ignore\n \"numprocesses\": node.config.option.numprocesses, # type: ignore\n }",
"def conf_update(self):\n pass",
"def _set_config(self):\n\n self.config.data_path = \"http://{0}:{1}/db/data\".format(\n self.config.host,\n self.config.port)\n\n self.config.node_path = \"/\".join([self.config.data_path, \"node\"])\n self.config.headers = dict([])\n self.config.headers[\"get\"] = {\"Accept\": \"application/json\"}\n self.config.headers[\"put\"] = {\"Content-Type\": \"application/json\"}",
"def configure(self, options, conf):\n pass",
"def configure_test(self, test, config_json):\n pass",
"def configure(self, conf):\n return",
"def _config(self):\n tmpl = self._template_interface\n for p in tmpl._params:\n setattr(self, p._name, p.get_value())",
"def setUpConfig(self):\n pass",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')",
"def config():",
"def config():",
"def config(monkeypatch):\n\n monkeypatch.setenv(\"NESTOR_CONFIG_PATH\", \"/fixtures-nestor-config\")\n monkeypatch.setenv(\"NESTOR_PRISTINE_PATH\", \"/fixtures-nestor-pristine\")\n monkeypatch.setenv(\"NESTOR_WORK_PATH\", \"/fixtures-nestor-work\")",
"def set_params():\n global module \n global ora_inst\n global response_loc\n\n module_args=dict(\n ora_inst=dict(type='str', required=True),\n response_loc=dict(type='str', required=True)\n )\n\n module=AnsibleModule(\n argument_spec=module_args\n )\n\n ora_inst = module.params['ora_inst']\n response_loc = module.params['response_loc']",
"def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"vxlan\"] = r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )",
"def configure(self, section):"
] | [
"0.6802182",
"0.64552474",
"0.6451729",
"0.63806546",
"0.63676286",
"0.6205747",
"0.6151945",
"0.6128843",
"0.6123329",
"0.6080413",
"0.6043109",
"0.60316396",
"0.60109645",
"0.60026515",
"0.6000327",
"0.5974877",
"0.59499687",
"0.59417",
"0.5926952",
"0.5922703",
"0.5922703",
"0.5922703",
"0.5922703",
"0.59166336",
"0.58766675",
"0.58766675",
"0.587012",
"0.5867644",
"0.5864179",
"0.58626723"
] | 0.67729205 | 1 |
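
Editor's note: configure_tempest_update_params above edits tempest.conf through configparser.RawConfigParser. The stripped-down sketch below shows the same read/set/write cycle; the section, option, and file name in the commented usage are placeholders, not values asserted by the record.

import configparser


def set_conf_option(conf_file: str, section: str, key: str, value: str) -> None:
    """Read an INI file, set one option (creating the section if needed),
    and write it back -- the cycle used in the record above."""
    rconfig = configparser.RawConfigParser()
    rconfig.read(conf_file)
    if not rconfig.has_section(section):
        rconfig.add_section(section)
    rconfig.set(section, key, value)
    with open(conf_file, 'w', encoding='utf-8') as config_file:
        rconfig.write(config_file)


# e.g. set_conf_option('tempest.conf', 'network',
#                      'project_network_cidr', '192.168.120.0/24')
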
Execute rally verify configure-verifier, which generates tempest.conf | def configure_verifier(deployment_dir):
cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',
'--id', str(getattr(config.CONF, 'tempest_verifier_name'))]
output = subprocess.check_output(cmd)
LOGGER.info("%s\n%s", " ".join(cmd), output.decode("utf-8"))
LOGGER.debug("Looking for tempest.conf file...")
tempest_conf_file = os.path.join(deployment_dir, "tempest.conf")
if not os.path.isfile(tempest_conf_file):
LOGGER.error("Tempest configuration file %s NOT found.",
tempest_conf_file)
return None
return tempest_conf_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_verifier(deployment_dir):\n cmd = ['rally', 'verify', 'configure-verifier', '--reconfigure',\n '--id', str(getattr(config.CONF, 'tempest_verifier_name'))]\n output = subprocess.check_output(cmd)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output)\n\n LOGGER.debug(\"Looking for tempest.conf file...\")\n tempest_conf_file = os.path.join(deployment_dir, \"tempest.conf\")\n if not os.path.isfile(tempest_conf_file):\n LOGGER.error(\"Tempest configuration file %s NOT found.\",\n tempest_conf_file)\n return None\n return tempest_conf_file",
"def run_verifier_tests(self, **kwargs):\n cmd = [\"rally\", \"verify\", \"start\", \"--load-list\",\n self.list]\n cmd.extend(kwargs.get('option', []))\n LOGGER.info(\"Starting Tempest test suite: '%s'.\", cmd)\n\n with open(\n os.path.join(self.res_dir, \"tempest.log\"), 'w+',\n encoding='utf-8') as f_stdout:\n with subprocess.Popen(\n cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,\n bufsize=1) as proc:\n with proc.stdout:\n for line in iter(proc.stdout.readline, b''):\n if re.search(r\"\\} tempest\\.\", line.decode(\"utf-8\")):\n LOGGER.info(line.rstrip())\n elif re.search(r'(?=\\(UUID=(.*)\\))',\n line.decode(\"utf-8\")):\n self.verification_id = re.search(\n r'(?=\\(UUID=(.*)\\))',\n line.decode(\"utf-8\")).group(1)\n f_stdout.write(line.decode(\"utf-8\"))\n proc.wait()\n\n if self.verification_id is None:\n raise Exception('Verification UUID not found')\n LOGGER.info('Verification UUID: %s', self.verification_id)\n\n shutil.copy(\n f\"{self.deployment_dir}/tempest.log\",\n f\"{self.res_dir}/tempest.debug.log\")",
"def configure_tempest(deployment_dir):\n\n logger.debug(\"Generating tempest.conf file...\")\n cmd = \"rally verify genconfig\"\n ft_utils.execute_command(cmd, logger)\n\n logger.debug(\"Finding tempest.conf file...\")\n tempest_conf_file = deployment_dir + \"/tempest.conf\"\n if not os.path.isfile(tempest_conf_file):\n logger.error(\"Tempest configuration file %s NOT found.\"\n % tempest_conf_file)\n exit(-1)\n\n logger.debug(\"Updating selected tempest.conf parameters...\")\n config = ConfigParser.RawConfigParser()\n config.read(tempest_conf_file)\n config.set('compute', 'fixed_network_name', PRIVATE_NET_NAME)\n config.set('identity', 'tenant_name', TENANT_NAME)\n config.set('identity', 'username', USER_NAME)\n config.set('identity', 'password', USER_PASSWORD)\n with open(tempest_conf_file, 'wb') as config_file:\n config.write(config_file)\n\n # Copy tempest.conf to /home/opnfv/functest/results/tempest/\n shutil.copyfile(tempest_conf_file, TEMPEST_RESULTS_DIR + '/tempest.conf')\n return True",
"def configure(self, **kwargs): # pylint: disable=unused-argument\n if not os.path.exists(self.res_dir):\n os.makedirs(self.res_dir)\n self.deployment_id = rally.RallyBase.create_rally_deployment(\n environ=self.project.get_environ())\n if not self.deployment_id:\n raise Exception(\"Deployment create failed\")\n self.verifier_id = self.create_verifier()\n if not self.verifier_id:\n raise Exception(\"Verifier create failed\")\n self.verifier_repo_dir = self.get_verifier_repo_dir(\n self.verifier_id)\n self.deployment_dir = self.get_verifier_deployment_dir(\n self.verifier_id, self.deployment_id)\n\n compute_cnt = self.count_hypervisors() if self.count_hypervisors(\n ) <= 10 else 10\n self.image_alt = self.publish_image_alt()\n self.flavor_alt = self.create_flavor_alt()\n LOGGER.debug(\"flavor: %s\", self.flavor_alt)\n\n self.conf_file = self.configure_verifier(self.deployment_dir)\n if not self.conf_file:\n raise Exception(\"Tempest verifier configuring failed\")\n self.configure_tempest_update_params(\n self.conf_file,\n image_id=self.image.id,\n flavor_id=self.flavor.id,\n compute_cnt=compute_cnt,\n image_alt_id=self.image_alt.id,\n flavor_alt_id=self.flavor_alt.id,\n admin_role_name=self.role_name, cidr=self.cidr,\n domain_id=self.project.domain.id)\n self.update_auth_section()\n self.update_network_section()\n self.update_compute_section()\n self.update_validation_section()\n self.update_scenario_section()\n self.update_dashboard_section()\n self.backup_tempest_config(self.conf_file, self.res_dir)",
"def test_using_cfg_config(line_sorted_checker, capsys):\n want = \"\"\"\nphmdoctest- project.md => .gendir-suite-cfg/test_project.py\nphmdoctest- doc/directive1.md => .gendir-suite-cfg/test_doc__directive1.py\nphmdoctest- doc/directive2.md => .gendir-suite-cfg/test_doc__directive2.py\nphmdoctest- doc/directive3.md => .gendir-suite-cfg/test_doc__directive3.py\nphmdoctest- doc/example1.md => .gendir-suite-cfg/test_doc__example1.py\nphmdoctest- doc/example2.md => .gendir-suite-cfg/test_doc__example2.py\nphmdoctest- doc/inline_example.md => .gendir-suite-cfg/test_doc__inline_example.py\nphmdoctest- tests/managenamespace.md => .gendir-suite-cfg/test_tests__managenamespace.py\nphmdoctest- tests/one_code_block.md => .gendir-suite-cfg/test_tests__one_code_block.py\nphmdoctest- tests/output_has_blank_lines.md => .gendir-suite-cfg/test_tests__output_has_blank_lines.py\nphmdoctest- tests/setup_only.md => .gendir-suite-cfg/test_tests__setup_only.py\nphmdoctest- tests/twentysix_session_blocks.md => .gendir-suite-cfg/test_tests__twentysix_session_blocks.py\nphmdoctest- tests/generate.cfg generated 12 pytest files\n\"\"\"\n phmdoctest.main.generate_using(config_file=Path(\"tests/generate.cfg\"))\n drop_newline = want.lstrip()\n line_sorted_checker(drop_newline, capsys.readouterr().out)",
"def test_pkgutil(self):\n print(utilities.CONFIG_FILE)\n assert utilities.get_config('ROBINHOOD', 'oauth_endpoint')",
"def generate_conf(self):\n if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):\n os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)\n\n self.tempestconf = TempestConf()\n self.tempestconf.generate_tempestconf()",
"def validate_config():\n\n # diff/sync settings, not including templates (see below)\n nori.setting_check_list('action', ['diff', 'sync'])\n nori.setting_check_type('reverse', bool)\n nori.setting_check_type('bidir', bool)\n nori.setting_check_callbacks('pre_action_callbacks')\n nori.setting_check_callbacks('post_action_callbacks', 1, 1)\n for i, cb_t in enumerate(nori.core.cfg['post_action_callbacks']):\n nori.setting_check_type(('post_action_callbacks', i, 3), bool)\n nori.setting_check_list('source_type', ['generic', 'drupal'])\n nori.setting_check_callable('source_query_func', may_be_none=False)\n nori.setting_check_callable('source_query_defaulter', may_be_none=True)\n nori.setting_check_callable('source_query_validator', may_be_none=False)\n nori.setting_check_callbacks('source_template_change_callbacks')\n nori.setting_check_callbacks('source_global_change_callbacks')\n nori.setting_check_list('dest_type', ['generic', 'drupal'])\n nori.setting_check_callable('dest_query_func', may_be_none=False)\n nori.setting_check_callable('dest_query_defaulter', may_be_none=True)\n nori.setting_check_callable('dest_query_validator', may_be_none=False)\n nori.setting_check_callbacks('dest_template_change_callbacks')\n nori.setting_check_callbacks('dest_global_change_callbacks')\n nori.setting_check_list('template_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['template_mode'] != 'all':\n nori.setting_check_not_empty('template_list')\n for i, t_name in enumerate(nori.core.cfg['template_list']):\n nori.setting_check_type(('template_list', i),\n nori.core.STRING_TYPES)\n nori.setting_check_list('key_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['key_mode'] != 'all':\n nori.setting_check_not_empty('key_list')\n\n # templates: general\n nori.setting_check_not_empty(\n 'templates', types=nori.core.MAIN_SEQUENCE_TYPES\n )\n for i, template in enumerate(nori.core.cfg['templates']):\n nori.setting_check_type(('templates', i), nori.core.MAPPING_TYPES)\n # bogus elements\n for k in template:\n if k not in T_KEYS:\n nori.err_exit(\n \"Warning: cfg['templates'][{0}][{1}] is set\\n\"\n \"(to {2}), but there is no such setting.\" .\n format(i, *map(nori.pps, [k, template[k]])),\n nori.core.exitvals['startup']['num']\n )\n # template name\n nori.setting_check_type(('templates', i, T_NAME_KEY),\n nori.core.STRING_TYPES)\n # multiple-valued value columns?\n nori.setting_check_type(('templates', i, T_MULTIPLE_KEY), bool)\n # source-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_S_QUERY_ARGS_KEY))\n # to-dest transform function\n nori.setting_check_callable(('templates', i, T_TO_D_FUNC_KEY),\n may_be_none=True)\n # source-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_S_NO_REPL_KEY), bool)\n # source-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_S_CHANGE_CB_KEY))\n # dest-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_D_QUERY_ARGS_KEY))\n # to-source transform function\n nori.setting_check_callable(('templates', i, T_TO_S_FUNC_KEY),\n may_be_none=True)\n # dest-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_D_NO_REPL_KEY), bool)\n # dest-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_D_CHANGE_CB_KEY))\n # key mode\n nori.setting_check_list(('templates', i, T_KEY_MODE_KEY),\n ['all', 'include', 'exclude'])\n if template[T_KEY_MODE_KEY] != 'all':\n # key list\n nori.setting_check_not_empty(('templates', i, T_KEY_LIST_KEY))\n\n # templates: 
query-function arguments\n for (sd, t_key, validator_key) in [\n ('s', T_S_QUERY_ARGS_KEY, 'source_query_validator'),\n ('d', T_D_QUERY_ARGS_KEY, 'dest_query_validator')\n ]:\n # args tuple\n args_idx = ('templates', i, t_key)\n args_t = template[t_key]\n # key_cv, value_cv (somewhat)\n for cv_str in ['key_cv', 'value_cv']:\n cv_idx = args_idx + (1, cv_str)\n nori.setting_check_not_empty(\n cv_idx, types=nori.core.MAIN_SEQUENCE_TYPES\n )\n cv_seq = args_t[1][cv_str]\n for j, cv in enumerate(cv_seq):\n nori.setting_check_length(cv_idx + (j, ), 2, 3,\n types=tuple)\n # the rest of the arguments\n nori.core.cfg[validator_key](sd, args_idx, args_t, i)\n\n # reporting settings\n nori.setting_check_list('report_order', ['template', 'keys'])\n # the rest are handled by nori.validate_email_config()",
"def check_config(cfg):",
"def test_cfg_example(checker):\n want = labeled.contents(label=\"generate-cfg\")\n got = Path(\"tests/generate.cfg\").read_text(encoding=\"utf-8\")\n checker(want, got)",
"def Checktest(self, expectedoutput):\n\n if expectedoutput == 0:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"The configuration file does not exist.\", result.output)\n return\n\n if expectedoutput == 1:\n result = self.runner.invoke(yoda.cli, [\"setup\", \"check\"])\n self.assertEqual(result.exit_code, 0)\n self.assertIn(\"Name: Name\", result.output)\n self.assertIn(\"Email: [email protected]\", result.output)\n self.assertIn(\"Github username: GhUser\", result.output)",
"def test_compliance_configuration(self, evidence):\n evidence_config = json.loads(evidence.content)\n if evidence_config != self.config.raw_config:\n evidence = json.dumps(evidence_config, indent=2).split('\\n')\n config = json.dumps(self.config.raw_config, indent=2).split('\\n')\n self.add_failures(\n 'Differences found',\n {\n 'Fetcher Configuration': evidence,\n 'Check Configuration': config\n }\n )",
"def test_everything():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/config.toml\",\n \"ENV\", # Temporarily enabled, needs seperate optional dotenv test\n ]\n )\n\n assert \"root_url\" in str(c._crve_configs)\n assert c.root_url == \"test url\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n )\n assert c.defaults_toml() == default_toml",
"def config_validate(ctx, **kwargs):\n # Validates pf9-express config file and obtains Auth Token\n #Load Active Config into ctx\n GetConfig(ctx).GetActiveConfig()\n #Get Token\n token = GetToken().get_token_v3(\n ctx.params[\"du_url\"],\n ctx.params[\"du_username\"],\n ctx.params[\"du_password\"],\n ctx.params[\"du_tenant\"] )\n if token is not None:\n click.echo('Config Validated!')\n click.echo('Token: %s' % token)\n else:\n click.echo('Config Validation Failed!')",
"def pytest_configure(config):\n config._metadata['Project Name'] = 'nop Commerce'\n config._metadata['Module Name'] = 'Customers'\n config._metadata['Tester'] = 'Tester'",
"def create_verification_from_config(self):\n\n resp = self.create_verification(\n resource_type=self.config.resource_type,\n resource_ref=self.config.resource_ref,\n resource_action=self.config.resource_action,\n impersonation_allowed=self.config.impersonation_allowed)\n return resp",
"def pytest_configure(config):\n # add environment details to the pytest-html plugin\n msd_files = ['/boot/kenv.sh', '/etc/mvl7/conf/local-content.conf']\n msd_file = None\n for f in msd_files:\n if os.path.isfile(f):\n msd_file = f\n break\n\n msd = 'Unkown'\n msd_version = 'Unknown'\n msd_output = run_cmd('cat %s' % msd_file, check_rc=False)\n if msd_output:\n match = re.findall(r'MSD.*VERSION=\"(.*)\"', msd_output, re.M)\n if match:\n msd_version = match[0]\n match = re.findall(r'.*MACHINE=\"(.*)\"', msd_output, re.M)\n if match:\n msd = match[0]\n\n config._metadata['MSD'] = msd\n config._metadata['MSD Version'] = msd_version\n\n msd_release = run_cmd('cat /etc/mvl-release', check_rc=False)\n if not msd_release:\n msd_release = 'Unknown'\n config._metadata['MSD Release'] = msd_release\n\n hostname = run_cmd('hostname', check_rc=False)\n if not hostname:\n hostname = 'Unknown'\n config._metadata['Target'] = hostname\n\n kernel_preemption = 'Unknown'\n if check_kernel_configs('PREEMPT_RT_FULL', logging=False):\n kernel_preemption = 'PREEMPT_RT_FULL'\n elif check_kernel_configs('PREEMPT__LL', logging=False):\n kernel_preemption = 'PREEMPT__LL'\n elif check_kernel_configs('PREEMPT_NONE', logging=False):\n kernel_preemption = 'PREEMPT_NONE'\n config._metadata['Kernel Preemption'] = kernel_preemption\n\n uname_output = run_cmd('uname -mr', check_rc=False)\n kernel_release = 'Unknown'\n arch = 'Unknown'\n if uname_output:\n kernel_release, arch = uname_output.split()\n config._metadata['Kernel Release'] = kernel_release\n config._metadata['Arch'] = arch",
"def cli(ctx):\n if ctx.invoked_subcommand not in ['configure', 'generate_key', 'start_agent']:\n config = get_config_file()\n if config is None:\n raise click.UsageError(\"Configuration not found!\"\n \"Please run configure before first use\")",
"def mock_config():\n from .. import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]",
"def check_configuration(self):\n try:\n self.config.commit_check()\n self.queue_message(\"log\", \"Configuration checked.\")\n except (self.pyez_exception.RpcError,\n self.pyez_exception.ConnectError) as ex:\n raise AnsibleError('Failure checking the configuraton: %s' %\n (str(ex)))",
"def test_new_config(self, context, permissions, wizard):\n context.config_exists.return_value = False\n permissions.return_value = True\n wizard.return_value = \"/some/file/path\"\n\n runner = CliRunner()\n result = runner.invoke(cli_node_new_configuration, [\n \"--name\", \"some-name\",\n \"--environment\", \"application\"\n ])\n\n # check that info message is produced\n self.assertEqual(result.output[:6], \"[info]\")\n\n # check OK exit code\n self.assertEqual(result.exit_code, 0)",
"def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()",
"def check():\n # Initialize key variables\n config_directory = os.environ['PATTOO_CONFIGDIR']\n\n # Print Status\n print('??: Checking configuration parameters.')\n\n # Check config (pattoo.yaml)\n config_file = configuration.agent_config_filename('pattoo')\n config = files.read_yaml_file(config_file)\n\n # Check main keys\n keys = ['pattoo', 'pattoo_web_api', 'pattoo_agent_api']\n for key in keys:\n if key not in config:\n log_message = ('''\\\nSection \"{}\" not found in configuration file in directory {}. Please fix.\\\n'''.format(key, config_directory))\n log.log2die_safe(80007, log_message)\n\n # Check secondary keys\n secondaries = [\n 'log_level', 'log_directory', 'cache_directory',\n 'daemon_directory']\n secondary_key_check(config, 'pattoo', secondaries)\n secondaries = ['ip_address', 'ip_bind_port']\n secondary_key_check(config, 'pattoo_agent_api', secondaries)\n secondaries = ['ip_address', 'ip_bind_port']\n secondary_key_check(config, 'pattoo_web_api', secondaries)\n\n # Check config (pattoo_webd.yaml)\n config_file = configuration.agent_config_filename('pattoo_webd')\n config = files.read_yaml_file(config_file)\n\n # Check main keys\n keys = ['pattoo_webd']\n for key in keys:\n if key not in config:\n log_message = ('''\\\nSection \"{}\" not found in configuration file in directory {}. Please fix.\\\n'''.format(key, config_directory))\n log.log2die_safe(80020, log_message)\n\n # Check secondary keys\n secondaries = ['ip_listen_address', 'ip_bind_port']\n secondary_key_check(config, 'pattoo_webd', secondaries)\n\n # Print Status\n print('OK: Configuration parameter check passed.')",
"def test_tox_usage(checker):\n setup_config = configparser.ConfigParser()\n setup_config.read(\"setup.cfg\")\n setup_tool = setup_config[\"tool.phmdoctest\"]\n\n tox_config = configparser.ConfigParser()\n tox_config.read(\"tox.ini\")\n tox_tool = tox_config[\"tool.phmdoctest\"]\n\n assert setup_tool[\"markdown_globs\"] == tox_tool[\"markdown_globs\"]\n assert setup_tool[\"exclude_globs\"] == tox_tool[\"exclude_globs\"]\n assert setup_tool[\"print\"] == tox_tool[\"print\"]\n assert \".gendir-cfg\" in setup_tool[\"output_directory\"]\n assert \".gendir-ini\" in tox_tool[\"output_directory\"]",
"def test_generateconfig(self):\n args = mock.Mock()\n args.debug = None\n args.generateconfig = True\n args.config = None\n expected_text = ('Sample configuration file written to sample_config.json\\n'\n \"Replicate the site JSON for each site.\\n\"\n \" Valid values for use_https and local are 'True' and 'False'\\n\"\n \" One site must have local set to 'True'\\n\"\n 'Replicate the export JSON for each exported contract.\\n')\n with mock.patch('sys.stdout', new=StringIO()) as fake_out:\n execute_tool(args)\n self.assertEqual(fake_out.getvalue(), expected_text)",
"def test_define():\n client = TestClient()\n client.run(\"config set general.fakeos=Linux\")\n conf_file = load(client.cache.conan_conf_path)\n assert \"fakeos = Linux\" in conf_file\n\n client.run('config set general.compiler=\"Other compiler\"')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler = Other compiler' in conf_file\n\n client.run('config set general.compiler.version=123.4.5')\n conf_file = load(client.cache.conan_conf_path)\n assert 'compiler.version = 123.4.5' in conf_file\n assert \"14\" not in conf_file\n\n client.run('config set general.new_setting=mysetting')\n conf_file = load(client.cache.conan_conf_path)\n assert 'new_setting = mysetting' in conf_file\n\n client.run('config set proxies.https=myurl')\n conf_file = load(client.cache.conan_conf_path)\n assert \"https = myurl\" in conf_file.splitlines()",
"def verify_runconfig(master_host, namespace, job_name, replica, num_ps,\n num_workers, num_evaluators):\n is_chief = True\n num_replicas = 1\n if replica == \"ps\":\n is_chief = False\n num_replicas = num_ps\n elif replica == \"worker\":\n is_chief = False\n num_replicas = num_workers\n elif replica == \"evaluator\":\n is_chief = False\n num_replicas = num_evaluators\n\n # Construct the expected cluster spec\n chief_list = [\n \"{name}-chief-0.{ns}.svc:2222\".format(name=job_name, ns=namespace)\n ]\n ps_list = []\n for i in range(num_ps):\n ps_list.append(\"{name}-ps-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n worker_list = []\n for i in range(num_workers):\n worker_list.append(\"{name}-worker-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n evaluator_list = []\n for i in range(num_evaluators):\n evaluator_list.append(\"{name}-evaluator-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n cluster_spec = {\n \"chief\": chief_list,\n \"ps\": ps_list,\n \"worker\": worker_list,\n }\n if num_evaluators > 0:\n cluster_spec[\"evaluator\"] = evaluator_list\n\n for i in range(num_replicas):\n full_target = \"{name}-{replica}-{index}\".format(\n name=job_name, replica=replica.lower(), index=i)\n actual_config = get_runconfig(master_host, namespace, full_target)\n full_svc = \"{ft}.{ns}.svc\".format(ft=full_target, ns=namespace)\n expected_config = {\n \"task_type\": replica,\n \"task_id\": i,\n \"cluster_spec\": cluster_spec,\n \"is_chief\": is_chief,\n \"master\": \"grpc://{fs}:2222\".format(fs=full_svc),\n \"num_worker_replicas\": num_workers + 1, # Chief is also a worker\n \"num_ps_replicas\": num_ps,\n } if not replica == \"evaluator\" else {\n # Evaluator has special config.\n \"task_type\": replica,\n \"task_id\": 0,\n \"cluster_spec\": {},\n \"is_chief\": is_chief,\n \"master\": \"\",\n \"num_worker_replicas\": 0,\n \"num_ps_replicas\": 0,\n }\n\n # Compare expected and actual configs\n if actual_config != expected_config:\n msg = \"Actual runconfig differs from expected. Expected: {0} Actual: {1}\".format(\n str(expected_config), str(actual_config))\n logging.error(msg)\n raise RuntimeError(msg)",
"def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()",
"def test_config_options_fixture(testdir):\n\n # create a temporary pytest test module\n testdir.makepyfile(\"\"\"\n def test_sth(pytestconfig):\n assert pytestconfig.option.leaks == \":\"\n \"\"\")\n\n # run pytest with the following cmd args in a subprocess\n # for some reason an in-process run reports leaks\n result = testdir.runpytest_subprocess(\n '-R', ':',\n '-v'\n )\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\n '*::test_sth PASSED',\n ])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0",
"def check_configs(self):\n\n pass"
] | [
"0.7838847",
"0.622833",
"0.62158334",
"0.6102185",
"0.5732694",
"0.5659994",
"0.5577749",
"0.5537725",
"0.55346674",
"0.5523457",
"0.55181223",
"0.54858464",
"0.545961",
"0.5390529",
"0.5359512",
"0.53589606",
"0.53571224",
"0.53334934",
"0.5332391",
"0.53290427",
"0.53244954",
"0.5323629",
"0.53194624",
"0.53011966",
"0.52963305",
"0.52859926",
"0.52825403",
"0.5263443",
"0.5262947",
"0.5260817"
] | 0.77928317 | 1 |
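
Editor's note: configure_verifier above runs the Rally CLI via subprocess.check_output and then checks that tempest.conf was actually produced. A minimal sketch of that run-then-verify step is below; the command arguments and file path in the commented usage are placeholders.

import os
import subprocess


def run_and_expect_file(cmd: list, expected_file: str):
    """Run a command, echo its output, and return the expected file path
    only if it exists afterwards -- mirroring configure_verifier() above."""
    output = subprocess.check_output(cmd)
    print(" ".join(cmd), output.decode("utf-8"))
    return expected_file if os.path.isfile(expected_file) else None


# Hypothetical usage:
# conf = run_and_expect_file(
#     ['rally', 'verify', 'configure-verifier', '--reconfigure'],
#     '/path/to/deployment/tempest.conf')
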
Generate test list based on the test mode. | def generate_test_list(self, **kwargs):
LOGGER.debug("Generating test case list...")
self.backup_tempest_config(self.conf_file, '/etc')
if kwargs.get('mode') == 'custom':
if os.path.isfile(self.tempest_custom):
shutil.copyfile(
self.tempest_custom, self.list)
else:
raise Exception(
f"Tempest test list file {self.tempest_custom} NOT found.")
else:
testr_mode = kwargs.get(
'mode', r'^tempest\.(api|scenario).*\[.*\bsmoke\b.*\]$')
cmd = (f"(cd {self.verifier_repo_dir}; "
f"stestr list '{testr_mode}' > {self.list} 2>/dev/null)")
output = subprocess.check_output(cmd, shell=True)
LOGGER.info("%s\n%s", cmd, output.decode("utf-8"))
os.remove('/etc/tempest.conf') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_test_list(tdir):\n\n # Skip this if it already exists\n if os.path.exists(os.path.join(tdir.name, \"kstest-list\")):\n return\n\n kstest_log = os.path.join(tdir.name, \"kstest.log\")\n with open(kstest_log) as f:\n for line in f.readlines():\n if not line.startswith(\"Running tests: \"):\n continue\n\n tests = [os.path.basename(os.path.splitext(s)[0]) for s in line[15:].split()]\n with open(os.path.join(tdir.name, \"kstest-list\"), \"wt\") as klf:\n for t in tests:\n print(t, file=klf)\n break",
"def tests_generator(self):\n cb_bin = os.path.join(bin_path, 'compilebench')\n cmd_list = [\n (\"Initial Create/Compile/Read Compiled Tree\", \"{0} -D {1} -i 10 --makej -s {2}\"),\n ]\n\n tests = []\n for idx, (desc, cmd) in enumerate(cmd_list):\n test_name = \"compile_bench_{0}_{1}\".format(idx + 1, to_safe_name(desc))\n test = TestProfile(\n name=test_name,\n desc=desc,\n test_path=self.test_path,\n bin_path=bin_path,\n command=cmd.format(cb_bin, self.test_path, bin_path))\n tests.append(test)\n return tests",
"def get_tests():\n # tests = ['test_build_gaussian_pyramid_random', 'test_build_gaussian_pyramid_static', 'test_build_laplacian_pyramid_random', 'test_build_laplacian_pyramid_static', 'test_laplacian_to_image', 'test_render_pyramid_random', 'test_render_pyramid_static']\n # return [tester.TestEx3(method) for method in tests]\n return [tester.TestEx3(method) for method in dir(tester.TestEx3) if method.startswith('test')]",
"def test_generate_all_testing(self):\n pass",
"def list(self):\n print \"\\nAvailable Test Cases\"\n print \"====================\"\n for case in self.cases:\n print case.__name__",
"def tests():",
"def __generate_test_file_list(self):\n allowed_tests = []\n exclude_tests = self.get_exclusions()\n exclude_tests.append('expected.')\n exclude_tests.append('actual.')\n\n #Allowed/exclude can be filenames or directory fragments.\n tests_to_run = []\n added_test = len(tests_to_run)\n allowed_path = ''\n\n #Check local dir first then the root package directory.\n checked_paths = []\n for test_dir in self.get_test_dirs():\n allowed_path = os.path.join(test_dir, self.test_pattern)\n checked_paths.append(allowed_path)\n if os.path.isfile(allowed_path):\n logging.debug(\"Adding file \" + allowed_path)\n tests_to_run.append(TestFile(test_dir, allowed_path))\n elif os.path.isdir(allowed_path):\n logging.debug(\"Iterating directory \" + allowed_path)\n for f in os.listdir(allowed_path):\n full_filename = os.path.join(allowed_path, f)\n if os.path.isfile(full_filename):\n logging.debug(\"Adding file \" + full_filename)\n tests_to_run.append(TestFile(test_dir, full_filename))\n else:\n for f in glob.glob(allowed_path):\n full_filename = os.path.join(allowed_path, f)\n if os.path.isfile(full_filename):\n logging.debug(\"Adding globbed file \" + full_filename)\n tests_to_run.append(TestFile(test_dir, full_filename))\n if tests_to_run:\n break\n\n if added_test == len(tests_to_run):\n logging.debug(\"Could not find any tests for [\" + \"] or [\".join(checked_paths) + \"]. Check the path.\")\n\n logging.debug(\"Found \" + str(len(tests_to_run)) + \" tests to run before exclusions.\")\n\n regexes = []\n for ex in exclude_tests:\n try:\n ex = ex.strip()\n if not ex:\n continue\n regex = re.compile(ex)\n regexes.append(regex)\n except BaseException as e:\n print (\"Error compiling regular expression for test file exclusions: '\" + str(ex) + \"' exception: \" +\n str(e))\n\n final_test_list = list(tests_to_run)\n for test in tests_to_run:\n for regex in regexes:\n if re.search(regex, test.test_path) and test in final_test_list:\n logging.debug(\"Removing test that matched: \" + str(regex))\n final_test_list.remove(test)\n\n logging.debug(\"Found \" + str(len(final_test_list)) + \" tests to run after exclusions.\")\n return sorted(final_test_list, key = lambda x: x.test_path)",
"def init_test_cases():\n test_cases = []\n\n # add info to list in memory, one by one, following signature values\n test_case_ID = 1\n test_case_name = \"auto-resiliency-pif-001\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-9\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 2\n test_case_name = \"auto-resiliency-pif-002\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-10\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 3\n test_case_name = \"auto-resiliency-pif-003\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-11\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 4\n test_case_name = \"auto-resiliency-pif-004\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-12\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 5\n test_case_name = \"auto-resiliency-vif-001\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-13\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 6\n test_case_name = \"auto-resiliency-vif-002\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-14\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 7\n test_case_name = \"auto-resiliency-vif-003\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-15\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 8\n test_case_name = \"auto-resiliency-sec-001\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-16\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 9\n test_case_name = \"auto-resiliency-sec-002\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-17\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n test_case_ID = 10\n test_case_name = \"auto-resiliency-sec-003\"\n test_case_JIRA_URL = \"https://jira.opnfv.org/browse/AUTO-18\"\n test_cases.append(TestCase(test_case_ID, test_case_name, test_case_JIRA_URL))\n\n # write list to binary file\n write_list_bin(test_cases, FILE_TEST_CASES)\n\n return test_cases",
"def list_feature_tests(self):\n\t\treturn self.test_names",
"def list_test_cases(program):\n\n return list(INFO[program].test_cases)",
"def test_cases(self) -> list[str]:\n cases = []\n for t in self._test_cases:\n if t not in cases:\n cases.append(t)\n return cases",
"def test_list(self):\n pass",
"def test_list(self):\n pass",
"def gen_suite(tests):\n cases = [gen_case(test) for test in tests]\n return {\n 'cases': cases,\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'\n }",
"def test_list_runs(self):\n pass",
"def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]",
"def get_test_suite():\n # max for a and p\n MAX = 2**31 - 1 # INT32_MAX, max value for a and p\n sqrt_MAX = floor(sqrt(MAX)) # max for n\n \n # first test suite\n a_list = [0, 0, 0, 1, 1, 2, 7, 2, 1, 0, 0, 3, 1, 0, 0, 0, 1]\n p_list = [5, 3, 3, 0, 0, 0, 8, 1, 1, 0, 0, 0, 0, 1, 2, 0, 1]\n n_list = [7, 2, 2, 7, 3, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1]\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n \n # second test suite\n a_list = [3, 5, 23, 25, 100, 200, MAX, MAX-1, MAX]\n p_list = [10, 5, 23, 25, 100, 200, 1000, 100, 500]\n n_list = [23, 1, 0, 7, 1, 100, sqrt_MAX, 3, 23]\n \n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n\n # third test suite\n a_list = []\n p_list = []\n n_list = []\n\n # keep a = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n # keep p = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep n = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(0)\n # keep a = 0 and p = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep all non-zero\n for _ in range(30):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite",
"def tests(self):\n return [self]",
"def test():\n\t\treturn [\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]",
"def run_tests(tests):\n return [test(t) for t in tests]",
"def list(ctx):\n handler = ValidateCommandHandler(ctx.obj['qa_dir'])\n if handler.validate():\n handler = ListCommandHandler(ctx.obj['qa_dir'])\n handler.show_test_case_tree()\n else:\n exit(1)",
"def list_tests(arn=None, nextToken=None):\n pass",
"def build_suite(self, test_case_list):\n if not test_case_list:\n raise ValueError('No test cases provided.')\n\n loader = unittest.TestLoader()\n\n # TODO(ewiseblatt): 20150521\n # This doesnt seem to take effect. The intent here is to not sort the order\n # of tests. But it still is. So I've renamed the tests to lexographically\n # sort in place. Leaving this around anyway in hopes to eventually figure\n # out why it doesnt work.\n loader.sortTestMethodsUsing = None\n\n suite = unittest.TestSuite()\n for test in test_case_list:\n suite.addTests(loader.loadTestsFromTestCase(test))\n return suite",
"def test_get_scenarios(self):\n pass",
"def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)",
"def getTestSuite():\n test_suite = unittest.TestSuite([])\n\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistReaders))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPySnpTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestDistributedBed))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestFileCache))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestUtilTools))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestIntRangeSet))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKrDocStrings))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpGen))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestGenerate))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestExampleFile))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstMemMap))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestSnpMemMap))\n test_suite.addTests(NaNCNCTestCases.factory_iterator())\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestPstReader))\n test_suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestKernelReader))\n\n return test_suite",
"def test_cases():\n CasesTestCase.generate_tests()\n yield CasesTestCase\n yield DocTestsTestCase",
"def get_all_platform_tests(self):\n for testitem in self.get_tests(self.discover_tests()):\n if not testitem:\n continue\n prefix = \"tests.\" + self.platform + \".\"\n self.formatted_tests_set.append(\n prefix + self.format_into_test_path(testitem)\n )\n\n if self.denylist:\n try:\n with open(self.denylist, \"r\") as f:\n denylist = f.read().splitlines()\n except FileNotFoundError:\n denylist = []\n\n self.formatted_tests_set = [\n t for t in self.formatted_tests_set if t not in denylist\n ]\n\n return self.formatted_tests_set",
"def _get_tests(self, chunks):\n tests = []\n for path in chunks[self.chunk_number - 1].paths:\n tests.extend(path.tests)\n\n return tests",
"def test(self):\n for doc, label in zip(self.test_docs(), self.test_labels()):\n yield doc, label"
] | [
"0.7224584",
"0.7033538",
"0.7009234",
"0.6971499",
"0.6883801",
"0.6699291",
"0.6692297",
"0.66435987",
"0.6586937",
"0.65175205",
"0.64307505",
"0.6429683",
"0.6429683",
"0.64266276",
"0.63944846",
"0.63704103",
"0.6363472",
"0.6362816",
"0.6362721",
"0.6325249",
"0.6308668",
"0.6279207",
"0.62715864",
"0.6263317",
"0.62343866",
"0.6210749",
"0.6191069",
"0.61852986",
"0.6162624",
"0.61617446"
] | 0.77115947 | 0 |
Parse and save test results. | def parse_verifier_result(self):
stat = self.get_verifier_result(self.verification_id)
try:
num_executed = stat['num_tests'] - stat['num_skipped']
try:
self.result = 100 * stat['num_success'] / num_executed
except ZeroDivisionError:
self.result = 0
if stat['num_tests'] > 0:
LOGGER.info("All tests have been skipped")
else:
LOGGER.error("No test has been executed")
return
with open(os.path.join(self.res_dir, "rally.log"),
'r', encoding='utf-8') as logfile:
output = logfile.read()
success_testcases = []
for match in re.findall(r'.*\{\d{1,2}\} (.*?) \.{3} success ',
output):
success_testcases.append(match)
failed_testcases = []
for match in re.findall(r'.*\{\d{1,2}\} (.*?) \.{3} fail',
output):
failed_testcases.append(match)
skipped_testcases = []
for match in re.findall(r'.*\{\d{1,2}\} (.*?) \.{3} skip(?::| )',
output):
skipped_testcases.append(match)
self.details = {"tests_number": stat['num_tests'],
"success_number": stat['num_success'],
"skipped_number": stat['num_skipped'],
"failures_number": stat['num_failures'],
"success": success_testcases,
"skipped": skipped_testcases,
"failures": failed_testcases}
except Exception: # pylint: disable=broad-except
self.result = 0
LOGGER.info("Tempest %s success_rate is %s%%",
self.case_name, self.result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])",
"def __parse(self, results):\n in_doc = False\n document_txt = None\n cases = []\n for line in results:\n line = line.rstrip()\n if line.startswith(DOCTEST_DOCUMENT_BEGIN):\n # parse previous results\n if document_txt:\n cases.extend(self.__parse_document(document_txt))\n document_txt = [line]\n in_doc = True\n continue\n if line.startswith(DOCTEST_SUMMARY_TITLE): # end of tests\n in_doc = False\n cases.extend(self.__parse_document(document_txt))\n document_txt = None\n if in_doc and line != \"\":\n document_txt.append(line)\n # endfor\n return TestSuiteReport(name=\"doctests\", cases=cases,\n package=PACKAGE_NAME)",
"def _save_results(self, test_name, task_id):\n # check for result directory and create it otherwise\n if not os.path.exists(self.results_dir):\n LOGGER.debug('%s does not exist, we create it.',\n self.results_dir)\n os.makedirs(self.results_dir)\n\n # put detailed result to log\n cmd = ([\"rally\", \"task\", \"detailed\", \"--uuid\", task_id])\n LOGGER.debug('running command: %s', cmd)\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n\n # save report as JSON\n report_json_name = f'{test_name}.json'\n report_json_dir = os.path.join(self.results_dir, report_json_name)\n cmd = ([\"rally\", \"task\", \"report\", \"--json\", \"--uuid\", task_id,\n \"--out\", report_json_dir])\n LOGGER.debug('running command: %s', cmd)\n output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n LOGGER.info(\"%s\\n%s\", \" \".join(cmd), output.decode(\"utf-8\"))\n\n with open(report_json_dir, encoding='utf-8') as json_file:\n json_results = json_file.read()\n self._append_summary(json_results, test_name)\n\n # parse JSON operation result\n if self.task_succeed(json_results):\n LOGGER.info('Test scenario: \"%s\" OK.', test_name)\n else:\n LOGGER.info('Test scenario: \"%s\" Failed.', test_name)",
"def load_data(self):\n try:\n data = etree.parse(self.resultfilename).getroot()\n except OSError:\n data = []\n\n testresults = []\n for testcase in data:\n category = Category.OK\n status = 'ok'\n module = testcase.get('classname')\n name = testcase.get('name')\n message = ''\n time = float(testcase.get('time'))\n extras = []\n\n for child in testcase:\n if child.tag in ('error', 'failure', 'skipped'):\n if child.tag == 'skipped':\n category = Category.SKIP\n else:\n category = Category.FAIL\n status = child.tag\n type_ = child.get('type')\n message = child.get('message', default='')\n if type_ and message:\n message = '{0}: {1}'.format(type_, message)\n elif type_:\n message = type_\n if child.text:\n extras.append(child.text)\n elif child.tag in ('system-out', 'system-err'):\n if child.tag == 'system-out':\n heading = _('Captured stdout')\n else:\n heading = _('Captured stderr')\n contents = child.text.rstrip('\\n')\n extras.append('----- {} -----\\n{}'.format(heading,\n contents))\n\n extra_text = '\\n\\n'.join(extras)\n testresults.append(\n TestResult(category, status, name, module, message, time,\n extra_text))\n\n return testresults",
"def __parse_document(self, results):\n fullname = self.__extract_fullname(results[0])\n if not results[1].startswith(\"-\"):\n raise ValueError(\"Invalid second line of output: '%s'. \"\\\n \"Expected a title underline.\"\n % text[1])\n results = results[2:] # trim off top two lines of header information\n maintests, cleanup = self.__split_on_cleanup(results)\n overall_success = not (maintests[0] == FAILURE_MARKER)\n\n if overall_success:\n testcases = self.__parse_success(fullname, maintests)\n else:\n testcases = self.__parse_failures(fullname, maintests)\n\n return testcases",
"def getTestResults():",
"def save(self,filename):\n f = open(filename,'w')\n f.write('Test results for %s v%s\\n' % (self.description,self.version))\n f.write('Series ran by %s\\n\\n' % self.person_name)\n for result in self.values():\n f.write('%-70s : %s\\n' % (result.id,result.outcome))\n if result.outcome != Result.PASS:\n for (kind, annotation) in result.annotations.items():\n f.write('%s:\\n%s\\n' % (kind, as_utf8(annotation)))\n f.write('\\n')\n f.write('\\n\\nPasses: %i\\n' % self.get_pass_count())\n f.write('Fails: %i\\n' % self.get_fail_count())\n f.write('Errors: %i\\n' % self.get_error_count())\n f.write('Untested: %i\\n' % self.get_untested_count())\n f.write('Skipped: %i\\n' % self.get_skipped_count())\n f.close()",
"def extract_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"], \"score_threshold\": 0.95},\n \"output\": {\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"file_name\": \"BSA1.mzML\",\n \"scaling_factor\": 100,\n \"spec_id\": 1337,\n },\n }\n ]\n for test_dict in TESTS:\n for key, n, entry in self.results.extract_results(**test_dict[\"input\"]):\n print(key, entry)\n assert key.formula == test_dict[\"output\"][\"formula\"]\n assert key.file_name == test_dict[\"output\"][\"file_name\"]\n assert entry.scaling_factor == test_dict[\"output\"][\"scaling_factor\"]\n assert entry.spec_id == test_dict[\"output\"][\"spec_id\"]\n # print(self.results)\n # print(self.results.lookup)\n assert n == 0",
"def save_result(self, results: Dict[str, Dict[str, Any]]) -> None:\n if self.out_dir:\n os.makedirs(self.out_dir, exist_ok=True)\n with open(self.eval_result_file, 'w') as f:\n json.dump(results, f, indent=2)\n else:\n raise ValueError(f'Invalid output dir: {self.out_dir}')\n\n if self.verbose:\n print(f\"======\\nPanoptic nuScenes {self.task} evaluation for {self.eval_set}\")\n print(json.dumps(results, indent=4, sort_keys=False))\n print(\"======\")",
"def _process_output(self, driver_output):\n fs = self._port._filesystem\n failures = self._handle_error(driver_output)\n expected_driver_output = self._expected_driver_output()\n\n # Check the output and save the results.\n start_time = time.time()\n time_for_diffs = {}\n for test_type in self._test_types:\n start_diff_time = time.time()\n new_failures = test_type.compare_output(\n self._port, self._filename, self._options, driver_output,\n expected_driver_output)\n # Don't add any more failures if we already have a crash, so we don't\n # double-report those tests. We do double-report for timeouts since\n # we still want to see the text and image output.\n if not driver_output.crash:\n failures.extend(new_failures)\n test_result_writer.write_test_result(\n self._port, self._options.results_directory, self._filename,\n driver_output, expected_driver_output, new_failures)\n time_for_diffs[test_type.__class__.__name__] = (\n time.time() - start_diff_time)\n\n total_time_for_all_diffs = time.time() - start_diff_time\n return TestResult(self._filename, failures, driver_output.test_time,\n total_time_for_all_diffs, time_for_diffs)",
"def save(self):\n payload = {\n \"test_id\": self.test_id,\n \"test_case_name\": self.test_case_name,\n \"epoch_timestamp\": self.epoch_timestamp,\n \"human_timestamp\": self.human_timestamp,\n \"status\": self.status,\n \"boundaries_breached\": self.boundaries_breached,\n \"regression_found\": self.regression_found\n }\n if self.check_if_test_id_exists_in_test_report(self.test_case_name, self.test_id):\n\n # Update existing test results\n return self.update_results_in_test_report(self.test_case_name, self.test_id, payload)\n\n else:\n\n # Insert new test results\n return self.insert_results_into_test_report(self.test_case_name, payload)",
"def task_parse_results():\n pass",
"def save_results(test_name, start_time, end_time, population_size, number_of_generations, pop, stats):\n record = stats.compile(pop)\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n palette_width = int(config[\"palette\"][\"width\"])\n palette_depth = int(config[\"palette\"][\"depth\"])\n palette_height = int(config[\"palette\"][\"height\"])\n palette_max_weight = int(config[\"palette\"][\"weight\"])\n print(record)\n fitness_max = record['max']\n fitness_min = record['min']\n fitness_avg = record['avg']\n Result.create(test_name=test_name, start_time=start_time, end_time=end_time,\n number_of_generations=number_of_generations, population_size=population_size,\n max_fitness=fitness_max, min_fitness=fitness_min, average_fitness=fitness_avg,\n palette_max_weight=palette_max_weight,\n palette_width=palette_width, palette_height=palette_height, palette_depth=palette_depth)",
"def extract_format_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"output\": [\n {\n \"file_name\": \"BSA1.mzML\",\n \"spec_id\": 1337,\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"scaling_factor\": 100,\n \"score\": 1,\n \"charge\": 2,\n },\n {\n \"file_name\": \"BSA1.mzML\",\n \"spec_id\": 1338,\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"scaling_factor\": 100,\n \"score\": 0.9,\n \"charge\": 2,\n },\n {\n \"file_name\": \"BSA2.mzML\",\n \"spec_id\": 1337,\n \"formula\": \"C(43)H(75)N(15)O(17)S(2)\",\n \"scaling_factor\": 10,\n \"score\": 1,\n \"charge\": 3,\n },\n ]\n }\n ]\n for test_dict in TESTS:\n values = self.results.format_all_results()\n\n assert isinstance(values, pd.DataFrame)\n\n for out_data in test_dict[\"output\"]:\n result = values.loc[\n (values[\"file_name\"] == out_data[\"file_name\"])\n & (values[\"spec_id\"] == out_data[\"spec_id\"])\n ]\n assert (result[\"formula\"] == out_data[\"formula\"]).all()\n assert (result[\"scaling_factor\"] == out_data[\"scaling_factor\"]).all()\n assert (result[\"score\"] == out_data[\"score\"]).all()\n assert (result[\"charge\"] == out_data[\"charge\"]).all()",
"def save_parsing_result(self, parsing_result):\n saver.save_item(parsing_result, mongo_db=self.mongo_client.spirit)",
"def read_results(self):\n for system, filenames in SmokeTests.INPUT_FILES.items():\n input_file = filenames[\"results\"]\n with open(input_file) as fin:\n self._results[system] = fin.read().strip() == \"0\"",
"def test_save_serialization(self):\n\n results = GenomePropertiesResultsWithMatches(*self.test_genome_property_results,\n properties_tree=self.test_tree)\n\n serialization = results.to_serialization()\n\n new_results = load_results_from_serialization(serialized_results=serialization, properties_tree=self.test_tree)\n\n self.assertEqual(results.sample_names, new_results.sample_names)\n self.assertEqual(results.property_results.equals(new_results.property_results), True)\n self.assertEqual(results.step_results.equals(new_results.step_results), True)\n self.assertEqual(results.step_matches.equals(new_results.step_matches), True)\n self.assertIsInstance(new_results, GenomePropertiesResultsWithMatches)",
"def test_make_results_simple(self):\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\ttest.do_run()\n\t\ttest.make_results_simple()\n\t\tobj_ut = test.results_simple\n\t\tself.assertEqual(obj_ut, {'.text id': '100', '.text score': -1, \n\t\t\t'total wordcount': 7, 'total hits': 2, 'pos hits': 0,\n\t\t\t'neg hits': 2})",
"def _dump_test_parser_log(self):\n\t\tFileSystem.dump_to(self._result_directory_name + \"/\" + \"Test_Parser.log\", self._form_test_parser_log())",
"def after_test(self, test_results):\n pass",
"def __saveGithubResults(self):\n\t\tself.__debugInfo(\"Saving JSON results into file {}\".format(self.output_file))\n\t\ttry:\n\t\t\twith open(self.output_file, 'w') as wfile:\n\t\t\t\tjson.dump(self.final_results, wfile)\n\t\texcept Exception as exception:\n\t\t\traise MsgException('Output file could not be written', exception)",
"def save_results(results):\n json.dump(results, open(\"results.json\", \"w\"))",
"def _parse_results(self):\n for line in self.file_dic['output'].splitlines():\n if line.startswith(' * GAMESS VERSION = '):\n temp = line.split('=')[1]\n temp = temp.split('*')[0]\n self.version = temp.strip()\n\n if line[1:25] == 'FREE ENERGY OF SOLVATION' and line.find('1 ATM') == -1:\n temp = line.split()\n #Take the next number after =\n #In KCAL/MOL\n self.solvation_energy = float(temp[temp.index(\"=\") + 1])",
"def __parse_success(self, fullname, results):\n match = NUMBER_PASSED_RE.match(results[0])\n if not match:\n raise ValueError(\"All passed line incorrect: '%s'\"\n % results[0])\n classname = self.__create_classname(fullname)\n nitems = int(match.group(1))\n cases = []\n for line in results[1:1+nitems]:\n match = ALLPASS_TEST_NAMES_RE.match(line)\n if not match:\n raise ValueError(\"Unexpected information line in \"\n \"all pass case: %s\" % line)\n ntests, name = int(match.group(1)), match.group(2)\n for idx in range(ntests):\n cases.append(TestCaseReport(classname, name, failure_descr=None))\n #endfor\n return cases",
"def reportResult(self):\n\n fRc = True;\n if self.sResult is not None:\n try:\n asLines = self.sResult.splitlines();\n for sLine in asLines:\n sLine = sLine.strip();\n if sLine.startswith('Children') is True:\n # Extract the value\n idxValue = sLine.rfind('=');\n if idxValue == -1:\n raise Exception('IozoneTest: Invalid state');\n\n idxValue += 1;\n while sLine[idxValue] == ' ':\n idxValue += 1;\n\n # Get the reported value, cut off after the decimal point\n # it is not supported by the testmanager yet and is not really\n # relevant anyway.\n idxValueEnd = idxValue;\n while sLine[idxValueEnd].isdigit():\n idxValueEnd += 1;\n\n for sNeedle, sTestVal in self.lstTests:\n if sLine.rfind(sNeedle) != -1:\n reporter.testValue(sTestVal, sLine[idxValue:idxValueEnd],\n constants.valueunit.g_asNames[constants.valueunit.KILOBYTES_PER_SEC]);\n break;\n except:\n fRc = False;\n else:\n fRc = False;\n\n return fRc;",
"def parse_REB5Test_results_file(results_file):\n output = dict()\n with open(results_file) as input_:\n csv_reader = csv.reader(input_, delimiter=',', quotechar='\"')\n for tokens in csv_reader:\n if tokens[0] in ('PASS', 'FAIL'):\n ikey = 1\n ivalue = 0\n else:\n ikey = 0\n ivalue = 1\n output[tokens[ikey].replace(' ', '_')] = tokens[ivalue]\n return output",
"def process_sceneset_results(self, training_results, validation_results,\n tmp_dir):\n pass",
"def postparse(self, parse_result):\n return parse_result",
"def write_result_to_file(self):\n self.__test_result[Result.__RUN] = self.__run\n with open(self.__json_file_path, \"w+\") as outfile:\n json.dump(self.__test_result, outfile,\n ensure_ascii=False, indent=2)",
"def process_results(self, episode, eval):\n if episode % 10 == 9:\n ave = np.mean(self.scores[episode - 9:episode])\n print('Episodes: {}, AveScores: {}, Alpha: {}, Steps: {}'.format(\n episode + 1, ave, self.alpha.item(), self.step_count))\n if eval:\n if episode % 100 == 99:\n s1 = './' + self.game_name + '/'\n np.save(s1 + 'scores_eval{}.npy'.format(episode + 1), self.scores)\n print('Evaluation results saved!')\n else:\n if episode % 200 == 199:\n self.save_episode_models(episode)\n self.plot_array(episode)\n print('Model salved!')\n print('Total {} frames!'.format(self.frames_count))"
] | [
"0.6879487",
"0.6688299",
"0.6649461",
"0.6597607",
"0.6562047",
"0.64070547",
"0.62938046",
"0.6256871",
"0.611408",
"0.61135256",
"0.6107557",
"0.6100069",
"0.60896564",
"0.60574466",
"0.6056988",
"0.6002144",
"0.6000668",
"0.59979934",
"0.5993954",
"0.5988928",
"0.5985693",
"0.59727514",
"0.5963174",
"0.5930537",
"0.5925773",
"0.5922051",
"0.5909746",
"0.5900284",
"0.58998966",
"0.5897538"
] | 0.70347 | 0 |
Detect and update the default role if required | def update_default_role(self, rally_conf='/etc/rally/rally.conf'):
role = self.get_default_role(self.cloud)
if not role:
return
rconfig = configparser.RawConfigParser()
rconfig.read(rally_conf)
if not rconfig.has_section('openstack'):
rconfig.add_section('openstack')
rconfig.set('openstack', 'swift_operator_role', role.name)
with open(rally_conf, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _overrideRole(self, newRole, args):\n oldRole = args.get('role', None)\n args['role'] = newRole\n return oldRole",
"def changeRole(self, node, role):",
"def _set_override_role_called(self):\n self.__override_role_called = True",
"async def temprole(self, ctx: commands.Context, *, role: discord.Role = None):\n guild = ctx.guild\n role_config = self.config.guild(guild)\n role_set = await role_config.temprole()\n if role is None and role_set:\n await role_config.temprole.clear()\n return await ctx.send(\"Cleared the role being used.\")\n if role:\n if role >= ctx.author.top_role:\n return await ctx.send(\"You can't set a role equal to or higher than your own.\")\n\n if role >= ctx.guild.me.top_role:\n return await ctx.send(\n \"You can't set a role that's equal to or higher than the bot.\"\n )\n await role_config.temprole.set(role.id)\n await ctx.send(\n \"Set the role to {}.\".format(role.mention),\n allowed_mentions=discord.AllowedMentions(roles=False),\n )\n else:\n await ctx.send_help()",
"def update_keystone_default_role(rally_conf='/etc/rally/rally.conf'):\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n rconfig = configparser.RawConfigParser()\n rconfig.read(rally_conf)\n if not rconfig.has_section('openstack'):\n rconfig.add_section('openstack')\n rconfig.set(\n 'openstack', 'keystone_default_role', env.get(\"NEW_USER_ROLE\"))\n with open(rally_conf, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def _override_role(self, test_obj, toggle_rbac_role=False):\n self.user_id = test_obj.os_primary.credentials.user_id\n self.project_id = test_obj.os_primary.credentials.tenant_id\n self.token = test_obj.os_primary.auth_provider.get_token()\n\n LOG.debug('Overriding role to: %s.', toggle_rbac_role)\n role_already_present = False\n\n try:\n if not all([self.admin_role_id, self.rbac_role_id]):\n self._get_roles_by_name()\n\n target_role = (\n self.rbac_role_id if toggle_rbac_role else self.admin_role_id)\n role_already_present = self._list_and_clear_user_roles_on_project(\n target_role)\n\n # Do not override roles if `target_role` already exists.\n if not role_already_present:\n self._create_user_role_on_project(target_role)\n except Exception as exp:\n with excutils.save_and_reraise_exception():\n LOG.exception(exp)\n finally:\n auth_providers = test_obj.get_auth_providers()\n for provider in auth_providers:\n provider.clear_auth()\n # Fernet tokens are not subsecond aware so sleep to ensure we are\n # passing the second boundary before attempting to authenticate.\n # Only sleep if a token revocation occurred as a result of role\n # overriding. This will optimize test runtime in the case where\n # ``[identity] admin_role`` == ``[patrole] rbac_test_role``.\n if not role_already_present:\n time.sleep(1)\n\n for provider in auth_providers:\n provider.set_auth()",
"async def _role_repl(self, ctx: Context, *, role: discord.Role):\n\n msg = await ctx.send(\n _(\n \"Are you sure you want to set `{}` as replacement role?\"\n ).format(role.name)\n )\n start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)\n\n pred = ReactionPredicate.yes_or_no(msg, ctx.author)\n await ctx.bot.wait_for(\"reaction_add\", check=pred)\n\n if pred.result:\n await self.config.guild(ctx.guild).repl_id.set(role.id)\n await ctx.send(\n _(\"Set `{}` as replacement role!\").format(role.name)\n )\n else:\n await ctx.send(_(\"Aborted replacement role setup.\"))",
"async def role(ctx, role: discord.Role = None):\n if role is None:\n await ctx.send(\"List of assignable roles: \" + str(allowed_roles))\n if role.name in allowed_roles:\n if not role in ctx.message.author.roles:\n await ctx.message.author.add_roles(role)\n await ctx.send(\"Role added.\")\n else:\n await ctx.message.author.remove_roles(role)\n await ctx.send(\"Role removed.\") \n else:\n await ctx.send(\"That role doesn't exist, or you don't have permission to modify it.\")",
"async def apply_role(self, *, reason: str = None):\n if self.role not in self.member.roles:\n try:\n await self.member.add_roles(self.role, reason=reason)\n except discord.HTTPException:\n pass",
"def assign_default_role(course_id, user):\r\n role, __ = Role.objects.get_or_create(course_id=course_id, name=\"Student\")\r\n user.roles.add(role)",
"def updateRole(role_name):\n\n if role_name == 'gsoc_mentor':\n updater = RoleUpdater(GSoCMentor, GSoCProfile, 'program', 'mentor_for')\n elif role_name == 'gsoc_org_admin':\n updater = RoleUpdater(\n GSoCOrgAdmin, GSoCProfile, 'program', 'org_admin_for')\n elif role_name == 'gsoc_student':\n updater = RoleUpdater(GSoCStudent, GSoCProfile, 'scope')\n\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def test_replace_roles(self):\n pass",
"async def userrole(self, ctx, *, role=None):\n server = ctx.message.guild\n\n if not role:\n result = await self.bot.db.config.find_one({'_id': str(server.id)})\n if result and result.get('user_role'):\n await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \\n'\n f'The current user role is `{result.get(\"user_role\")}`. '\n f'To change it type `{result.get(\"prefix\")}userrole <role name>`')\n else:\n await ctx.send(f'The user role restricts which users are able to create and manage their own polls. \\n'\n f'No user role set. '\n f'To set one type `{result.get(\"prefix\")}userrole <role name>`')\n elif role in [r.name for r in server.roles]:\n await self.bot.db.config.update_one({'_id': str(server.id)}, {'$set': {'user_role': str(role)}}, upsert=True)\n await ctx.send(f'Server role `{role}` can now create and manage their own polls.')\n else:\n await ctx.send(f'Server role `{role}` not found.')",
"def _users_assign_default_role(course_id):\r\n enrollments = CourseEnrollment.objects.filter(course_id=course_id)\r\n for enrollment in enrollments:\r\n assign_default_role(course_id, enrollment.user)",
"async def reacrole(self, ctx: commands.Context):\n pass",
"def _validate_default_role(self, default_role):\n if default_role is '':\n raise UserException(\"default_role cannot be empty.\")\n elif default_role not in Employee.allowed_roles():\n raise UserException(\"Allowed values for default_role are %s\" %\n str(Employee.allowed_roles()))",
"def set_role(userid, role, group, request=None):",
"def changeRoleInfo(self, role, info):",
"def assign_default_role_on_enrollment(sender, instance, **kwargs):\r\n # The code below would remove all forum Roles from a user when they unenroll\r\n # from a course. Concerns were raised that it should apply only to students,\r\n # or that even the history of student roles is important for research\r\n # purposes. Since this was new functionality being added in this release,\r\n # I'm just going to comment it out for now and let the forums team deal with\r\n # implementing the right behavior.\r\n #\r\n # # We've unenrolled the student, so remove all roles for this course\r\n # if not instance.is_active:\r\n # course_roles = list(Role.objects.filter(course_id=instance.course_id))\r\n # instance.user.roles.remove(*course_roles)\r\n # return\r\n\r\n # We've enrolled the student, so make sure they have the Student role\r\n assign_default_role(instance.course_id, instance.user)",
"def test_replace_cluster_role(self):\n pass",
"def single_role(self):\n return None",
"def _restoreRole(self, oldRole, args):\n if oldRole:\n args['role'] = oldRole\n else:\n del args['role']",
"def __setRole(self, session):\r\n self.__role = session.role\r\n if self._config.has_key('purpose'):\r\n co_role = ccm.get_role_for_purpose(session, self._config['purpose'])\r\n _logger.info(\"Switching user to role: %s\" % co_role)\r\n session.role = co_role\r\n _logger.info(\"Switched user to role: %s\" % session.role)",
"def check(self):\r\n PreparationAction.check(self)\r\n\r\n session = self.get_session()\r\n ccm_object = session.create(self._config.name)\r\n role = session.role\r\n co_role = ccm.get_role_for_status(ccm_object['status'])\r\n _logger.info(\"Try to switch user to role: %s\" % co_role)\r\n session.role = co_role\r\n session.role = role",
"def _validate_override_role_called(self):\n was_called = self.__override_role_called\n self.__override_role_called = False\n return was_called",
"def test_ipam_roles_update(self):\n pass",
"def test_patch_cluster_role(self):\n pass",
"def test_patch_namespaced_role(self):\n pass",
"def role_command():",
"def InspireRole(self, default=None):\n return self.data.get('inspire_role', default)"
] | [
"0.7115389",
"0.66387266",
"0.65806973",
"0.6452636",
"0.6433589",
"0.6405648",
"0.6374333",
"0.63238055",
"0.6319324",
"0.630769",
"0.630502",
"0.6269636",
"0.6241185",
"0.61868477",
"0.6167082",
"0.61618745",
"0.61153567",
"0.6087079",
"0.6085021",
"0.60780394",
"0.60377514",
"0.5985935",
"0.59656817",
"0.5954664",
"0.593007",
"0.5929032",
"0.5911372",
"0.5908725",
"0.5864143",
"0.5860603"
] | 0.6722509 | 1 |
Update auth section in tempest.conf | def update_auth_section(self):
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
if not rconfig.has_section("auth"):
rconfig.add_section("auth")
if env.get("NEW_USER_ROLE").lower() != "member":
tempest_roles = []
if rconfig.has_option("auth", "tempest_roles"):
tempest_roles = functest_utils.convert_ini_to_list(
rconfig.get("auth", "tempest_roles"))
rconfig.set(
'auth', 'tempest_roles',
functest_utils.convert_list_to_ini(
[env.get("NEW_USER_ROLE")] + tempest_roles))
if not json.loads(env.get("USE_DYNAMIC_CREDENTIALS").lower()):
rconfig.set('auth', 'use_dynamic_credentials', False)
account_file = os.path.join(
getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')
assert os.path.exists(
account_file), f"{account_file} doesn't exist"
rconfig.set('auth', 'test_accounts_file', account_file)
if env.get('NO_TENANT_NETWORK').lower() == 'true':
rconfig.set('auth', 'create_isolated_networks', False)
with open(self.conf_file, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def override_config(self):\n super(AuthedConfigFixture, self).override_config()\n self.conf.register_opts(auth_token._OPTS, group='keystone_authtoken')\n self.conf.set_override('auth_uri', 'http://127.0.0.1:35357',\n group='keystone_authtoken')",
"def set_auth_credentials():\n import os\n from passlib.apps import custom_app_context as pwd_context\n\n os.environ[\"AUTH_USERNAME\"] = \"testme\"\n os.environ[\"AUTH_PASSWORD\"] = pwd_context.hash(\"foobar\")",
"def setUpAuth(self):\n self.user, self.user_headers = self.authUser()\n self.admin, self.admin_headers = self.authAdmin()",
"def reindex_auth(self):\n username, password = None, None\n\n http_auth = self.config['params'].get('http_auth', None)\n if http_auth:\n if isinstance(http_auth, six.string_types):\n username, password = http_auth.split(':')\n else:\n username, password = http_auth\n\n return username, password",
"def test_auth0_config_anon(anontestapp, registry):\n _test_auth_config(anontestapp, registry)",
"def test_auth0_config_admin(testapp, registry):\n _test_auth_config(testapp, registry)",
"def auth():\n pass",
"def auth():\n pass",
"def setup_auth_turing(cluster):\n # Read in auth info\n azure_file = os.path.join(ABSOLUTE_HERE, \"secrets\", \"turing-auth-key-prod.json\")\n with open(azure_file, \"r\") as stream:\n azure = json.load(stream)\n\n # Login in to Azure\n login_cmd = [\n \"az\", \"login\", \"--service-principal\",\n \"--username\", azure[\"sp-app-id\"],\n \"--password\", azure[\"sp-app-key\"],\n \"--tenant\", azure[\"tenant-id\"]\n ]\n subprocess.check_output(login_cmd)\n\n # Set kubeconfig\n creds_cmd = [\n \"az\", \"aks\", \"get-credentials\",\n \"--name\", cluster,\n \"--resource-group\", \"binder-prod\"\n\n ]\n stdout = subprocess.check_output(creds_cmd)\n print(stdout.decode('utf-8'))",
"def set_credentials():",
"def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)",
"def configure_aaa_local_auth(device):\n try:\n device.configure([\n \"aaa authentication dot1x default local\",\n \"aaa local authentication default authorization default\",\n \"aaa authorization network default local\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA local auth'\n )",
"def add_auth(self, http_request):\r\n pass",
"def authentication_hook(self):\n pass",
"def main():\n\n config_file = 'auth_demo.cfg'\n config = ConfigParser.SafeConfigParser({\n 'username':'',\n })\n config.read(config_file)\n if not config.has_section('auth_demo_login'):\n config.add_section('auth_demo_login')\n\n username = config.get('auth_demo_login','username')\n password = None\n if username != '':\n password = keyring.get_password('auth_demo_login', username)\n\n if password == None or not auth(username, password):\n\n while 1:\n username = raw_input(\"Username:\\n\")\n password = getpass.getpass(\"Password:\\n\")\n\n if auth(username, password):\n break\n else:\n print \"Authorization failed.\"\n \n # store the username\n config.set('auth_demo_login', 'username', username)\n config.write(open(config_file, 'w'))\n\n # store the password\n keyring.set_password('auth_demo_login', username, password)\n\n # the stuff that needs authorization here\n print \"Authorization successful.\"",
"def _setup_threat_intel_auth_subparser(subparsers):\n generate_subparser(\n subparsers,\n 'update-auth',\n description='Enable, disable, or configure the threat intel downloader function',\n subcommand=True\n )",
"def configure_aaa_auth_proxy(device, server_grp):\n try:\n device.configure([\n f\"aaa authorization auth-proxy default group {server_grp}\"\n ])\n except SubCommandFailure:\n raise SubCommandFailure(\n 'Could not configure AAA auth proxy'\n )",
"def update_keystone_default_role(rally_conf='/etc/rally/rally.conf'):\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n rconfig = configparser.RawConfigParser()\n rconfig.read(rally_conf)\n if not rconfig.has_section('openstack'):\n rconfig.add_section('openstack')\n rconfig.set(\n 'openstack', 'keystone_default_role', env.get(\"NEW_USER_ROLE\"))\n with open(rally_conf, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def test_replace_o_auth_client(self):\n pass",
"def register_auth_opts(conf, group, service_type=None):\n ks_loading.register_session_conf_options(conf, group)\n ks_loading.register_auth_conf_options(conf, group)\n CONF.set_default('auth_type', default='password', group=group)\n ks_loading.register_adapter_conf_options(conf, group)\n conf.set_default('valid_interfaces', DEFAULT_VALID_INTERFACES, group=group)\n if service_type:\n conf.set_default('service_type', service_type, group=group)\n else:\n types = os_service_types.get_service_types()\n key = 'ironic-inspector' if group == 'inspector' else group\n service_types = types.service_types_by_project.get(key)\n if service_types:\n conf.set_default('service_type', service_types[0], group=group)",
"def auth_kubeconfig(self):\n config = self.spec['kubeconfig']\n config_path = config['file']\n\n with decrypt_file(config_path) as decrypted_key_path:\n # FIXME: Unset this after our yield\n os.environ['KUBECONFIG'] = decrypted_key_path\n yield",
"def auth(self, user):",
"def configure_auth(self, auth_type, ha_type):\n yield self.configure_kerberos(auth_type, ha_type)\n self.configure_radius(auth_type)",
"def __gitEditUserConfig(self):\n self.vcs.gitEditUserConfig()",
"def test_replace_o_auth_client_authorization(self):\n pass",
"def _set_authenticator(self):\n pass",
"def _set_credentials():\n # Override credentials here if necessary\n if env.user == 'ubuntu':\n env.key_filename = [\n os.path.expanduser('~/.ssh/ubuntu-id_dsa')]\n env.abort_on_prompts = True\n env.disable_known_hosts = True\n env.use_shell = False",
"def test_update_virt_realm_remote_access_config(self):\n pass",
"def test_patch_o_auth_client(self):\n pass",
"def test_patch_o_auth_client_authorization(self):\n pass"
] | [
"0.7055258",
"0.6269132",
"0.6093893",
"0.60797375",
"0.6063509",
"0.6012555",
"0.59593356",
"0.59593356",
"0.59572875",
"0.5940929",
"0.5885352",
"0.58571965",
"0.5832692",
"0.5775332",
"0.5770373",
"0.57187825",
"0.5698276",
"0.56846017",
"0.5671843",
"0.563528",
"0.5626709",
"0.5612409",
"0.5581429",
"0.55804676",
"0.5576583",
"0.55533123",
"0.55463743",
"0.554531",
"0.5539894",
"0.55302036"
] | 0.767204 | 0 |
Update network section in tempest.conf | def update_network_section(self):
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
if self.ext_net:
if not rconfig.has_section('network'):
rconfig.add_section('network')
rconfig.set('network', 'public_network_id', self.ext_net.id)
rconfig.set('network', 'floating_network_name', self.ext_net.name)
rconfig.set('network-feature-enabled', 'floating_ips', True)
else:
if not rconfig.has_section('network-feature-enabled'):
rconfig.add_section('network-feature-enabled')
rconfig.set('network-feature-enabled', 'floating_ips', False)
with open(self.conf_file, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_compute_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('compute'):\n rconfig.add_section('compute')\n rconfig.set(\n 'compute', 'fixed_network_name',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def test_networking_project_network_update(self):\n pass",
"def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()",
"def dvs_update_network(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n self.show_step(2)\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n net_1 = os_conn.create_network(\n network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n self.show_step(3)\n os_conn.neutron.update_network(net_1[\"id\"],\n {\"network\": {\"name\": 'net_2'}})\n\n assert_true(os_conn.get_network('net_2')['id'] == net_1['id'])\n\n self.show_step(4)\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n os_conn.neutron.update_network(\n default_net.id, {\"network\": {\"name\": 'spring'}})\n\n assert_true(os_conn.get_network('spring')['id'] == default_net.id)",
"def update_network(self, dbnetwork, qipinfo):\n\n # We don't want to add the plenary to self.plenaries if we aren't going\n # to change anything\n plenary = Plenary.get_plenary(dbnetwork)\n updated = False\n\n if dbnetwork.name != qipinfo.name:\n self.logger.client_info(\"Setting network {0!s} name to {1}\"\n .format(dbnetwork, qipinfo.name))\n dbnetwork.name = qipinfo.name\n if dbnetwork.network_type != qipinfo.network_type:\n self.logger.client_info(\"Setting network {0!s} type to {1}\"\n .format(dbnetwork, qipinfo.network_type))\n dbnetwork.network_type = qipinfo.network_type\n if dbnetwork.location != qipinfo.location:\n self.logger.client_info(\"Setting network {0!s} location to {1:l}\"\n .format(dbnetwork, qipinfo.location))\n dbnetwork.location = qipinfo.location\n if dbnetwork.side != qipinfo.side:\n self.logger.client_info(\"Setting network {0!s} side to {1}\"\n .format(dbnetwork, qipinfo.side))\n dbnetwork.side = qipinfo.side\n if dbnetwork.network_compartment != qipinfo.compartment:\n self.logger.client_info(\"Setting network {0!s} compartment to {1!s}\"\n .format(dbnetwork, qipinfo.compartment))\n dbnetwork.network_compartment = qipinfo.compartment\n\n if dbnetwork in self.session.dirty:\n updated = True\n\n old_rtrs = set(dbnetwork.router_ips)\n new_rtrs = set(qipinfo.routers)\n\n del_routers = []\n for router in dbnetwork.routers:\n if router.ip in old_rtrs - new_rtrs:\n del_routers.append(router)\n\n for router in del_routers:\n self.logger.client_info(\"Removing router {0:s} from \"\n \"{1:l}\".format(router.ip, dbnetwork))\n for dns_rec in router.dns_records:\n if dns_rec.is_unused:\n delete_dns_record(dns_rec)\n dbnetwork.routers.remove(router)\n updated = True\n\n for ip in new_rtrs - old_rtrs:\n self.add_router(dbnetwork, ip)\n updated = True\n\n if updated:\n self.plenaries.append(plenary)\n\n # TODO: add support for updating router locations\n\n return dbnetwork.netmask == qipinfo.address.netmask",
"def setup_net(self):\n pass",
"def test_update_node_driveconfig(self):\n pass",
"def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))",
"def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass",
"def configure_net(self):\n try:\n transport_type = Conf.get(self._index,\n f'cluster>{self._server_id}')['network']['data']['transport_type']\n except:\n raise MotrError(errno.EINVAL, \"transport_type not found\")\n check_type(transport_type, str, \"transport_type\")\n\n if transport_type == \"lnet\":\n configure_lnet(self)\n elif transport_type == \"libfabric\":\n configure_libfabric(self)\n else:\n raise MotrError(errno.EINVAL, \"Unknown data transport type\\n\")",
"def test_update_hyperflex_node_config_policy(self):\n pass",
"def test_update_hyperflex_cluster_network_policy(self):\n pass",
"def update_network(self, context, net_id, network):\n\n LOG.debug(_(\"QuantumRestProxyV2.update_network() called\"))\n\n # Validate Args\n if network[\"network\"].get(\"admin_state_up\"):\n if network[\"network\"][\"admin_state_up\"] is False:\n LOG.warning(_(\"Network with admin_state_up=False are not yet \"\n \"supported by this plugin. Ignoring setting for \"\n \"network %s\", net_name))\n\n # update DB\n orig_net = super(QuantumRestProxyV2, self).get_network(context, net_id)\n tenant_id = orig_net[\"tenant_id\"]\n new_net = super(QuantumRestProxyV2, self).update_network(\n context, net_id, network)\n\n # update network on network controller\n if new_net[\"name\"] != orig_net[\"name\"]:\n try:\n resource = NETWORKS_PATH % (tenant_id, net_id)\n data = {\n \"network\": new_net,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n except RemoteRestError as e:\n LOG.error(_(\"QuantumRestProxyV2: Unable to update remote \"\n \"network: %s\"), e.message)\n # reset network to original state\n super(QuantumRestProxyV2, self).update_network(\n context, id, orig_net)\n raise\n\n # return updated network\n return new_net",
"def test_modify_znode(self):\n z = self.test_start_one_value()\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(2, timeout=self.TIMEOUT)\n self.conf.write.assert_called_with({\"1.1.1.1\": {\"enabled\": \"0\"}})",
"def modify_network(self, username, machine_name, new_network, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n try:\n vmware.update_network(username, machine_name, new_network)\n except ValueError as doh:\n logger.error('Task failed: {}'.format(doh))\n resp['error'] = '{}'.format(doh)\n logger.info('Task complete')\n return resp",
"def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)",
"def _update_network_config(port_config, allow_multiple=False):\n # Get network id from port config\n network_id = port_config.get('network_id')\n\n # Get the network id from relationship if any\n rel_network_ids = find_openstack_ids_of_connected_nodes_by_openstack_type(\n ctx, NETWORK_OPENSTACK_TYPE)\n\n rel_network_id = rel_network_ids[0] if rel_network_ids else None\n # Check if network config comes from two sources or not\n if network_id and rel_network_id and not allow_multiple:\n raise NonRecoverableError('Port can\\'t both have the '\n '\"network_id\" property and be '\n 'connected to a network via a '\n 'relationship at the same time')\n\n port_config['network_id'] = network_id or rel_network_id",
"def update_validation_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section('validation'):\n rconfig.add_section('validation')\n rconfig.set(\n 'validation', 'connect_method',\n 'floating' if self.ext_net else 'fixed')\n rconfig.set(\n 'validation', 'network_for_ssh',\n self.network.name if self.network else env.get(\"EXTERNAL_NETWORK\"))\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def test_add_network(self):\n pass",
"def fusion_api_edit_ethernet_network(self, body, uri, api=None, headers=None):\n return self.ethernet_network.update(body, uri, api, headers)",
"def update_neutron_advanced_configuration(self, option, value):\n attributes = self.nailgun_client.get_cluster_attributes(\n self.cluster_id)\n nac_subdict = attributes['editable']['neutron_advanced_configuration']\n nac_subdict[option]['value'] = value\n self.nailgun_client.update_cluster_attributes(\n self.cluster_id, attributes)",
"def fusion_api_edit_network_set(self, body=None, uri=None, api=None, headers=None):\n return self.network_set.update(body, uri, api, headers)",
"def test_replace_cluster_network(self):\n pass",
"def set_start_configuration(self):\r\n with open('config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n print(configuration_data[0][0])\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()",
"def test_patch_cluster_network(self):\n pass",
"def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def _update_target_net(self):\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()",
"def _get_physnet_patch(self, physnet, port):\n if (not CONF.processing.overwrite_existing\n or port.physical_network == physnet):\n return\n return {'op': 'add', 'path': '/physical_network', 'value': physnet}",
"def configure_tempest_update_params(\n tempest_conf_file, image_id=None, flavor_id=None,\n compute_cnt=1, image_alt_id=None, flavor_alt_id=None,\n admin_role_name='admin', cidr='192.168.120.0/24',\n domain_id='default'):\n # pylint: disable=too-many-branches,too-many-arguments,too-many-statements\n LOGGER.debug(\"Updating selected tempest.conf parameters...\")\n rconfig = configparser.RawConfigParser()\n rconfig.read(tempest_conf_file)\n rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))\n if image_id is not None:\n rconfig.set('compute', 'image_ref', image_id)\n if image_alt_id is not None:\n rconfig.set('compute', 'image_ref_alt', image_alt_id)\n if flavor_id is not None:\n rconfig.set('compute', 'flavor_ref', flavor_id)\n if flavor_alt_id is not None:\n rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)\n if compute_cnt > 1:\n # enable multinode tests\n rconfig.set('compute', 'min_compute_nodes', compute_cnt)\n rconfig.set('compute-feature-enabled', 'live_migration', True)\n filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',\n 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',\n 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']\n rconfig.set(\n 'compute-feature-enabled', 'scheduler_available_filters',\n functest_utils.convert_list_to_ini(filters))\n if os.environ.get('OS_REGION_NAME'):\n rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n rconfig.set(\n 'auth', 'tempest_roles',\n functest_utils.convert_list_to_ini([env.get(\"NEW_USER_ROLE\")]))\n if not json.loads(env.get(\"USE_DYNAMIC_CREDENTIALS\").lower()):\n rconfig.set('auth', 'use_dynamic_credentials', False)\n account_file = os.path.join(\n getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')\n assert os.path.exists(\n account_file), \"{} doesn't exist\".format(account_file)\n rconfig.set('auth', 'test_accounts_file', account_file)\n rconfig.set('identity', 'auth_version', 'v3')\n rconfig.set('identity', 'admin_role', admin_role_name)\n rconfig.set('identity', 'admin_domain_scope', True)\n rconfig.set('identity', 'default_domain_id', domain_id)\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'default_network', cidr)\n rconfig.set('network', 'project_network_cidr', cidr)\n rconfig.set('network', 'project_networks_reachable', False)\n rconfig.set(\n 'validation', 'ssh_timeout',\n getattr(config.CONF, 'tempest_validation_ssh_timeout'))\n rconfig.set('object-storage', 'operator_role',\n getattr(config.CONF, 'tempest_object_storage_operator_role'))\n rconfig.set(\n 'identity', 'v3_endpoint_type',\n os.environ.get('OS_INTERFACE', 'public'))\n\n sections = rconfig.sections()\n services_list = [\n 'compute', 'volume', 'image', 'network', 'data-processing',\n 'object-storage', 'orchestration']\n for service in services_list:\n if service not in sections:\n rconfig.add_section(service)\n rconfig.set(\n service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))\n\n LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '\n 'into tempest.conf file')\n update_tempest_conf_file(tempest_conf_file, rconfig)"
] | [
"0.696991",
"0.6750749",
"0.63753676",
"0.6196263",
"0.59945536",
"0.5972814",
"0.59464866",
"0.5930914",
"0.590459",
"0.5895841",
"0.58937216",
"0.58452964",
"0.5844862",
"0.5843245",
"0.5838263",
"0.5832796",
"0.5832103",
"0.5824422",
"0.5822572",
"0.5797349",
"0.57946074",
"0.57888913",
"0.5772605",
"0.5746298",
"0.57456905",
"0.5736027",
"0.5710217",
"0.56766015",
"0.56676865",
"0.56663173"
] | 0.7767487 | 0 |
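The configure_tempest_update_params snippet listed above turns environment strings into booleans with json.loads(env.get(...).lower()); a minimal standalone sketch of that pattern, with a hypothetical stand-in for the environment lookup:

```python
import json

# "True"/"true"/"FALSE" all lower-case to valid JSON literals for json.loads().
use_dynamic_credentials = "False"  # hypothetical stand-in for env.get("USE_DYNAMIC_CREDENTIALS")
if not json.loads(use_dynamic_credentials.lower()):
    print("would set [auth] use_dynamic_credentials = False")
```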
Update compute section in tempest.conf | def update_compute_section(self):
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
if not rconfig.has_section('compute'):
rconfig.add_section('compute')
rconfig.set(
'compute', 'fixed_network_name',
self.network.name if self.network else env.get("EXTERNAL_NETWORK"))
with open(self.conf_file, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_tempest_update_params(\n tempest_conf_file, image_id=None, flavor_id=None,\n compute_cnt=1, image_alt_id=None, flavor_alt_id=None,\n admin_role_name='admin', cidr='192.168.120.0/24',\n domain_id='default'):\n # pylint: disable=too-many-branches,too-many-arguments,too-many-statements\n LOGGER.debug(\"Updating selected tempest.conf parameters...\")\n rconfig = configparser.RawConfigParser()\n rconfig.read(tempest_conf_file)\n rconfig.set('compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))\n if image_id is not None:\n rconfig.set('compute', 'image_ref', image_id)\n if image_alt_id is not None:\n rconfig.set('compute', 'image_ref_alt', image_alt_id)\n if flavor_id is not None:\n rconfig.set('compute', 'flavor_ref', flavor_id)\n if flavor_alt_id is not None:\n rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)\n if compute_cnt > 1:\n # enable multinode tests\n rconfig.set('compute', 'min_compute_nodes', compute_cnt)\n rconfig.set('compute-feature-enabled', 'live_migration', True)\n filters = ['RetryFilter', 'AvailabilityZoneFilter', 'ComputeFilter',\n 'ComputeCapabilitiesFilter', 'ImagePropertiesFilter',\n 'ServerGroupAntiAffinityFilter', 'ServerGroupAffinityFilter']\n rconfig.set(\n 'compute-feature-enabled', 'scheduler_available_filters',\n functest_utils.convert_list_to_ini(filters))\n if os.environ.get('OS_REGION_NAME'):\n rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n rconfig.set(\n 'auth', 'tempest_roles',\n functest_utils.convert_list_to_ini([env.get(\"NEW_USER_ROLE\")]))\n if not json.loads(env.get(\"USE_DYNAMIC_CREDENTIALS\").lower()):\n rconfig.set('auth', 'use_dynamic_credentials', False)\n account_file = os.path.join(\n getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')\n assert os.path.exists(\n account_file), \"{} doesn't exist\".format(account_file)\n rconfig.set('auth', 'test_accounts_file', account_file)\n rconfig.set('identity', 'auth_version', 'v3')\n rconfig.set('identity', 'admin_role', admin_role_name)\n rconfig.set('identity', 'admin_domain_scope', True)\n rconfig.set('identity', 'default_domain_id', domain_id)\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'default_network', cidr)\n rconfig.set('network', 'project_network_cidr', cidr)\n rconfig.set('network', 'project_networks_reachable', False)\n rconfig.set(\n 'validation', 'ssh_timeout',\n getattr(config.CONF, 'tempest_validation_ssh_timeout'))\n rconfig.set('object-storage', 'operator_role',\n getattr(config.CONF, 'tempest_object_storage_operator_role'))\n rconfig.set(\n 'identity', 'v3_endpoint_type',\n os.environ.get('OS_INTERFACE', 'public'))\n\n sections = rconfig.sections()\n services_list = [\n 'compute', 'volume', 'image', 'network', 'data-processing',\n 'object-storage', 'orchestration']\n for service in services_list:\n if service not in sections:\n rconfig.add_section(service)\n rconfig.set(\n service, 'endpoint_type', os.environ.get('OS_INTERFACE', 'public'))\n\n LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '\n 'into tempest.conf file')\n update_tempest_conf_file(tempest_conf_file, rconfig)",
"def configure_tempest_update_params(\n tempest_conf_file, image_id=None, flavor_id=None,\n compute_cnt=1, image_alt_id=None, flavor_alt_id=None,\n admin_role_name='admin', cidr='192.168.120.0/24',\n domain_id='default'):\n # pylint: disable=too-many-branches,too-many-arguments\n # pylint: disable=too-many-statements,too-many-locals\n LOGGER.debug(\"Updating selected tempest.conf parameters...\")\n rconfig = configparser.RawConfigParser()\n rconfig.read(tempest_conf_file)\n rconfig.set(\n 'compute', 'volume_device_name', env.get('VOLUME_DEVICE_NAME'))\n if image_id is not None:\n rconfig.set('compute', 'image_ref', image_id)\n if image_alt_id is not None:\n rconfig.set('compute', 'image_ref_alt', image_alt_id)\n if flavor_id is not None:\n rconfig.set('compute', 'flavor_ref', flavor_id)\n if flavor_alt_id is not None:\n rconfig.set('compute', 'flavor_ref_alt', flavor_alt_id)\n if compute_cnt > 1:\n # enable multinode tests\n rconfig.set('compute', 'min_compute_nodes', compute_cnt)\n rconfig.set('compute-feature-enabled', 'live_migration', True)\n if os.environ.get('OS_REGION_NAME'):\n rconfig.set('identity', 'region', os.environ.get('OS_REGION_NAME'))\n rconfig.set('identity', 'admin_role', admin_role_name)\n rconfig.set('identity', 'default_domain_id', domain_id)\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'default_network', cidr)\n rconfig.set('network', 'project_network_cidr', cidr)\n rconfig.set('network', 'project_networks_reachable', False)\n rconfig.set(\n 'identity', 'v3_endpoint_type',\n os.environ.get('OS_INTERFACE', 'public'))\n\n sections = rconfig.sections()\n services_list = [\n 'compute', 'volume', 'image', 'network', 'data-processing',\n 'object-storage', 'orchestration']\n for service in services_list:\n if service not in sections:\n rconfig.add_section(service)\n rconfig.set(service, 'endpoint_type',\n os.environ.get('OS_INTERFACE', 'public'))\n\n LOGGER.debug('Add/Update required params defined in tempest_conf.yaml '\n 'into tempest.conf file')\n TempestCommon.update_tempest_conf_file(tempest_conf_file, rconfig)",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def setupCompute():\n #Update /etc/hosts with mongo-server and monitoring-server\n sudo(\"pip2 install chariot-runtime\")\n #update configuration file located in /etc/chariot/chariot.conf\n run(\"cd /etc/init.d && sudo update-rc.d chariot-nm defaults 99\")\n run(\"cd /etc/init.d && sudo update-rc.d chariot-dm defaults 99\")\n print(\"\\n after reboot check the MongoDB server for the presence of ConfigSpace database and Nodes collection. This collection should have a document each for every compute node.\")\n sudo(\"reboot\")",
"def test_update_hyperflex_node_config_policy(self):\n pass",
"def apply_config_for_node_with_multiple_role(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n target_node = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute', 'cinder'])\n config_for_compute_role = utils.get_config_template('nova_disk')\n config_for_compute_role['nova_config'].update(\n {'DEFAULT/debug': {'value': 'False'}})\n config_for_cinder_role = utils.get_config_template(\n 'nova_disk_cinder_role')\n\n self.show_step(2)\n self.fuel_web.client.upload_configuration(config_for_compute_role,\n cluster_id,\n role='compute')\n\n self.show_step(3)\n self.fuel_web.client.upload_configuration(config_for_cinder_role,\n cluster_id,\n role='cinder')\n\n # Configs are merging with ID-priority\n general_config = {}\n general_config.update(config_for_compute_role)\n general_config.update(config_for_cinder_role)\n structured_config = get_structured_config_dict(general_config)\n service_name = 'nova-compute'\n uptime = self.get_service_uptime(target_node, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_node[0]['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted(target_node,\n uptime,\n service_name)\n\n self.show_step(6)\n self.check_config_on_remote(target_node, structured_config)\n\n snapshot_name = \"apply_config_for_node_with_multiple_role\"\n self.env.make_snapshot(snapshot_name)",
"def test_update_node_driveconfig(self):\n pass",
"def config(self, cluster_name, name, username, version, int_netmask, int_ip_low,\n int_ip_high, ext_netmask, ext_ip_low, ext_ip_high, gateway, dns_servers,\n encoding, sc_zonename, smartconnect_ip, join_cluster, compliance, txn_id):\n logger = get_task_logger(txn_id=txn_id, task_id=self.request.id, loglevel=const.VLAB_ONEFS_LOG_LEVEL.upper())\n resp = {'content' : {}, 'error': None, 'params': {}}\n logger.info('Task starting')\n nodes = vmware.show_onefs(username)\n node = nodes.get(name, None)\n if not node:\n error = \"No node named {} found\".format(name)\n resp['error'] = error\n logger.error(error)\n return resp\n elif node['meta']['configured']:\n error = \"Cannot configure a node that's already configured\"\n resp['error'] = error\n logger.error(error)\n else:\n # Lets set it up!\n logger.info('Found node')\n console_url = node['console']\n if join_cluster:\n logger.info('Joining node to cluster {}'.format(cluster_name))\n setup_onefs.join_existing_cluster(console_url, cluster_name, compliance, logger)\n else:\n logger.info('Setting up new cluster named {}'.format(cluster_name))\n setup_onefs.configure_new_cluster(version=version,\n console_url=console_url,\n cluster_name=cluster_name,\n int_netmask=int_netmask,\n int_ip_low=int_ip_low,\n int_ip_high=int_ip_high,\n ext_netmask=ext_netmask,\n ext_ip_low=ext_ip_low,\n ext_ip_high=ext_ip_high,\n gateway=gateway,\n dns_servers=dns_servers,\n encoding=encoding,\n sc_zonename=sc_zonename,\n smartconnect_ip=smartconnect_ip,\n compliance=compliance,\n logger=logger)\n node['meta']['configured'] = True\n vmware.update_meta(username, name, node['meta'])\n logger.info('Task complete')\n return resp",
"def test_update_hyperflex_cluster(self):\n pass",
"def test_update_hyperflex_vcenter_config_policy(self):\n pass",
"def reconfigure_with_new_fields(self):\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(2)\n config_controller = utils.get_config_template('new_fields_controller')\n structured_config = get_structured_config_dict(config_controller)\n self.fuel_web.client.upload_configuration(config_controller,\n cluster_id,\n role=\"controller\")\n\n self.show_step(3)\n service_list = ['neutron-server', 'neutron-dhcp-agent',\n 'neutron-l3-agent', 'neutron-metadata-agent',\n 'nova-scheduler', 'nova-novncproxy', 'nova-conductor',\n 'nova-api', 'nova-consoleauth', 'nova-cert']\n services_uptime = {}\n for service_name in service_list:\n services_uptime[service_name] = self.get_service_uptime(\n controllers, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role=\"controller\")\n\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n for service_name in service_list:\n self.check_service_was_restarted(\n controllers,\n services_uptime[service_name],\n service_name)\n\n self.show_step(6)\n self.check_config_on_remote(controllers, structured_config)\n\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(7)\n config_copmute = utils.get_config_template('new_fields_compute')\n structured_config = get_structured_config_dict(config_copmute)\n self.fuel_web.client.upload_configuration(config_copmute,\n cluster_id,\n role='compute')\n\n self.show_step(8)\n uptimes_nova = self.get_service_uptime(computes, 'nova-compute')\n\n self.show_step(9)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(10)\n self.check_service_was_restarted(computes,\n uptimes_nova,\n 'nova-compute')\n\n self.show_step(11)\n self.check_config_on_remote(computes, structured_config)\n self.env.make_snapshot(\"reconfigure_with_new_fields\")",
"def reconfigure_nova_ephemeral_disk(self):\n self.check_run('reconfigure_nova_ephemeral_disk')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_overcommit_ratio\")\n\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n\n self.show_step(2)\n existing_configs = self.fuel_web.client.list_configuration(\n cluster_id)\n for existing_config in existing_configs:\n self.fuel_web.client.delete_configuration(existing_config[\"id\"])\n\n self.show_step(3)\n config = utils.get_config_template('nova_disk')\n structured_config = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='compute')\n\n service_name = \"nova-compute\"\n\n uptimes = self.get_service_uptime(computes, service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='compute')\n self.show_step(5)\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(6)\n self.check_service_was_restarted(computes, uptimes, service_name)\n\n self.show_step(7)\n self.check_config_on_remote(computes, structured_config)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.show_step(12)\n self.check_nova_ephemeral_disk(os_conn, cluster_id)\n\n self.env.make_snapshot(\"reconfigure_nova_ephemeral_disk\",\n is_make=True)",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def compute_node_update(context, compute_id, values, auto_adjust):\n session = get_session()\n if auto_adjust:\n _adjust_compute_node_values_for_utilization(context, values, session)\n with session.begin(subtransactions=True):\n values['updated_at'] = timeutils.utcnow()\n convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')\n compute_ref = compute_node_get(context, compute_id, session=session)\n for (key, value) in values.iteritems():\n compute_ref[key] = value\n compute_ref.save(session=session)",
"def reconfiguration_scalability(self):\n\n self.check_run('reconfiguration_scalability')\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_nova_ephemeral_disk\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n config = utils.get_config_template('nova_disk')\n structured_config_nova = get_structured_config_dict(config)\n config = utils.get_config_template('keystone')\n structured_config_keystone = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n\n self.show_step(3)\n task = self.fuel_web.client.apply_configuration(cluster_id,\n role='controller')\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(4)\n self.check_config_on_remote(controllers, structured_config_keystone)\n\n self.show_step(5)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n time_expiration = config[\n 'keystone_config']['token/expiration']['value']\n self.check_token_expiration(os_conn, time_expiration)\n\n self.show_step(6)\n bs_nodes = [x for x in self.env.d_env.get_nodes()\n if x.name == 'slave-05' or x.name == 'slave-06']\n self.env.bootstrap_nodes(bs_nodes)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute', 'cinder']})\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-06': ['controller']})\n\n self.show_step(7)\n self.fuel_web.verify_network(cluster_id)\n\n self.show_step(8)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(9)\n self.fuel_web.run_ostf(cluster_id=cluster_id)\n\n self.show_step(10)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_controller = [x for x in controllers\n if 'slave-06' in x['name']]\n target_compute = [x for x in computes\n if 'slave-05' in x['name']]\n self.check_config_on_remote(target_controller,\n structured_config_keystone)\n\n self.show_step(11)\n self.check_config_on_remote(target_compute, structured_config_nova)\n\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.show_step(16)\n\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n hypervisor_name = target_compute[0]['fqdn']\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.show_step(17)\n self.check_token_expiration(os_conn, time_expiration)\n\n self.env.make_snapshot(\"reconfiguration_scalability\", is_make=True)",
"def runtime_update(conf):\n conf['time'] = time.strftime(\"%m-%d-%H-%M-%S\", time.localtime())\n conf['hash'] = hash(str(conf))\n if conf.has_key('filesystem') and conf['filesystem'] != None:\n fs = str(conf['filesystem'])\n else:\n fs = 'fsnotset'\n conf['result_dir'] = \"{targetdir}/{expname}/{subexpname}-{unique}\".format(\n targetdir = conf['targetdir'], expname = conf['expname'],\n subexpname = conf['subexpname'],\n unique = '-'.join((fs, conf['time'], str(conf['hash']))))",
"def conf_update(self):\n pass",
"def test_patch_hyperflex_vcenter_config_policy(self):\n pass",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TEMPEST_CONF_YAML) as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'wb') as config_file:\n rconfig.write(config_file)",
"def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def upload_config_for_node_and_env_in_transitional_state(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"basic_env_for_reconfiguration\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n bs_node = [\n node for node in self.env.d_env.get_nodes()\n if node.name == 'slave-05']\n self.env.bootstrap_nodes(bs_node)\n self.fuel_web.update_nodes(\n cluster_id,\n {'slave-05': ['compute']})\n target_node = bs_node[0]\n target_node_id = self.fuel_web.get_nailgun_node_by_devops_node(\n target_node)['id']\n\n config = {'nova_config': {'foo': {'value': 'bar'}}}\n\n self.show_step(3)\n task = self.fuel_web.deploy_cluster(cluster_id)\n # wait for creation of child 'deployment' task\n self.fuel_web.wait_for_tasks_presence(self.fuel_web.client.get_tasks,\n name='deployment',\n parent_id=task.get('id'))\n\n self.show_step(4)\n self.show_step(5)\n expected_code = 403\n err_msg = 'A configuration was applied for env in deploying state'\n check_response_code(\n expected_code, err_msg,\n self.fuel_web.client.upload_configuration,\n config, cluster_id)\n\n self.show_step(6)\n self.wait_for_node_status(target_node, 'provisioning')\n\n self.show_step(7)\n self.show_step(8)\n err_msg = 'A configuration was applied for node in provisioning state'\n check_response_code(\n expected_code, err_msg,\n self.fuel_web.client.upload_configuration,\n config, cluster_id, node_id=target_node_id)\n\n self.show_step(9)\n self.wait_for_node_status(target_node, 'deploying')\n\n self.show_step(10)\n self.show_step(11)\n err_msg = 'A configuration was applied for node in deploying state'\n check_response_code(\n expected_code, err_msg,\n self.fuel_web.client.upload_configuration,\n config, cluster_id, node_id=target_node_id)\n\n self.show_step(12)\n self.fuel_web.assert_task_success(task, timeout=7800, interval=30)\n\n snapshot_name = \"upload_config_for_node_and_env_in_transitional_state\"\n self.env.make_snapshot(snapshot_name)",
"def reconfigure(\n name,\n cpu=None,\n cpuset=None,\n cpushare=None,\n memory=None,\n profile=None,\n network_profile=None,\n nic_opts=None,\n bridge=None,\n gateway=None,\n autostart=None,\n utsname=None,\n rootfs=None,\n path=None,\n **kwargs,\n):\n changes = {}\n cpath = get_root_path(path)\n path = os.path.join(cpath, name, \"config\")\n ret = {\n \"name\": name,\n \"comment\": f\"config for {name} up to date\",\n \"result\": True,\n \"changes\": changes,\n }\n profile = get_container_profile(copy.deepcopy(profile))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n\n if nic_opts is not None and not network_profile:\n network_profile = DEFAULT_NIC\n\n if autostart is not None:\n autostart = select(\"autostart\", autostart)\n else:\n autostart = \"keep\"\n if not utsname:\n utsname = select(\"utsname\", utsname)\n if os.path.exists(path):\n old_chunks = read_conf(path, out_format=\"commented\")\n make_kw = salt.utils.odict.OrderedDict(\n [\n (\"utsname\", utsname),\n (\"rootfs\", rootfs),\n (\"autostart\", autostart),\n (\"cpu\", cpu),\n (\"gateway\", gateway),\n (\"cpuset\", cpuset),\n (\"cpushare\", cpushare),\n (\"network_profile\", network_profile),\n (\"nic_opts\", nic_opts),\n (\"bridge\", bridge),\n ]\n )\n # match 0 and none as memory = 0 in lxc config is harmful\n if memory:\n make_kw[\"memory\"] = memory\n kw = salt.utils.odict.OrderedDict()\n for key, val in make_kw.items():\n if val is not None:\n kw[key] = val\n new_cfg = _config_list(conf_tuples=old_chunks, **kw)\n if new_cfg:\n edit_conf(path, out_format=\"commented\", lxc_config=new_cfg)\n chunks = read_conf(path, out_format=\"commented\")\n if old_chunks != chunks:\n ret[\"comment\"] = f\"{name} lxc config updated\"\n if state(name, path=path) == \"running\":\n cret = reboot(name, path=path)\n ret[\"result\"] = cret[\"result\"]\n return ret",
"def test_patch_hyperflex_node_config_policy(self):\n pass",
"def pytest_configure_node(node: Node):\n node.workerinput[\"options\"] = { # type: ignore\n \"dist\": node.config.option.dist, # type: ignore\n \"numprocesses\": node.config.option.numprocesses, # type: ignore\n }",
"def update_network_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if self.ext_net:\n if not rconfig.has_section('network'):\n rconfig.add_section('network')\n rconfig.set('network', 'public_network_id', self.ext_net.id)\n rconfig.set('network', 'floating_network_name', self.ext_net.name)\n rconfig.set('network-feature-enabled', 'floating_ips', True)\n else:\n if not rconfig.has_section('network-feature-enabled'):\n rconfig.add_section('network-feature-enabled')\n rconfig.set('network-feature-enabled', 'floating_ips', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def configure(self, section):",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' + context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}",
"def vm_update(args):\n ip1 = args.ip1\n flavor = args.flavor\n numcpus = args.numcpus\n memory = args.memory\n plan = args.plan\n autostart = args.autostart\n noautostart = args.noautostart\n dns = args.dns\n host = args.host\n domain = args.domain\n cloudinit = args.cloudinit\n template = args.template\n net = args.network\n information = args.information\n iso = args.iso\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n names = [common.get_lastvm(config.client)] if not args.names else args.names\n for name in names:\n if dns:\n common.pprint(\"Creating Dns entry for %s...\" % name)\n if net is not None:\n nets = [net]\n else:\n nets = k.vm_ports(name)\n if nets and domain is None:\n domain = nets[0]\n if not nets:\n return\n else:\n k.reserve_dns(name=name, nets=nets, domain=domain, ip=ip1)\n elif ip1 is not None:\n common.pprint(\"Updating ip of vm %s to %s...\" % (name, ip1))\n k.update_metadata(name, 'ip', ip1)\n elif cloudinit:\n common.pprint(\"Removing cloudinit information of vm %s\" % name)\n k.remove_cloudinit(name)\n return\n elif plan is not None:\n common.pprint(\"Updating plan of vm %s to %s...\" % (name, plan))\n k.update_metadata(name, 'plan', plan)\n elif template is not None:\n common.pprint(\"Updating template of vm %s to %s...\" % (name, template))\n k.update_metadata(name, 'template', template)\n elif memory is not None:\n common.pprint(\"Updating memory of vm %s to %s...\" % (name, memory))\n k.update_memory(name, memory)\n elif numcpus is not None:\n common.pprint(\"Updating numcpus of vm %s to %s...\" % (name, numcpus))\n k.update_cpus(name, numcpus)\n elif autostart:\n common.pprint(\"Setting autostart for vm %s...\" % name)\n k.update_start(name, start=True)\n elif noautostart:\n common.pprint(\"Removing autostart for vm %s...\" % name)\n k.update_start(name, start=False)\n elif information:\n common.pprint(\"Setting information for vm %s...\" % name)\n k.update_descrmation(name, information)\n elif iso is not None:\n common.pprint(\"Switching iso for vm %s to %s...\" % (name, iso))\n k.update_iso(name, iso)\n elif flavor is not None:\n common.pprint(\"Updating flavor of vm %s to %s...\" % (name, flavor))\n k.update_flavor(name, flavor)\n elif host:\n common.pprint(\"Creating Host entry for vm %s...\" % name)\n nets = k.vm_ports(name)\n if not nets:\n return\n if domain is None:\n domain = nets[0]\n k.reserve_host(name, nets, domain)",
"def pre_config_node_update(self, resource_id, resource_dict):\n pass"
] | [
"0.68594015",
"0.6751863",
"0.6320202",
"0.6044343",
"0.6011546",
"0.58627504",
"0.58251506",
"0.5803778",
"0.579407",
"0.57885367",
"0.57636315",
"0.5726174",
"0.5714447",
"0.5692595",
"0.56901807",
"0.5688542",
"0.56050336",
"0.55926406",
"0.5581348",
"0.5575753",
"0.5550431",
"0.55470115",
"0.5516178",
"0.5510983",
"0.5501897",
"0.5470908",
"0.5468452",
"0.54263043",
"0.54176754",
"0.53948456"
] | 0.7990638 | 0 |
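A minimal, self-contained sketch of the read-modify-write pattern used by update_compute_section above; the file path and network name are hypothetical stand-ins for self.conf_file and self.network.name:

```python
import configparser

conf_file = "tempest.conf"           # hypothetical path; self.conf_file in the original
fixed_network_name = "tempest-net"   # the original falls back to env.get("EXTERNAL_NETWORK")

rconfig = configparser.RawConfigParser()
rconfig.read(conf_file)              # a missing file is treated as empty, no exception
if not rconfig.has_section('compute'):
    rconfig.add_section('compute')
rconfig.set('compute', 'fixed_network_name', fixed_network_name)
with open(conf_file, 'w', encoding='utf-8') as config_file:
    rconfig.write(config_file)       # rewrites the file, keeping the sections read above
```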
Update validation section in tempest.conf | def update_validation_section(self):
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
if not rconfig.has_section('validation'):
rconfig.add_section('validation')
rconfig.set(
'validation', 'connect_method',
'floating' if self.ext_net else 'fixed')
rconfig.set(
'validation', 'network_for_ssh',
self.network.name if self.network else env.get("EXTERNAL_NETWORK"))
with open(self.conf_file, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_config(self):\n pass",
"def validate_config(self):\n pass",
"def _validate_config(self):\n pass",
"def validate_settings(_cfg, _ctx):\n pass",
"def validate_config():\n\n # diff/sync settings, not including templates (see below)\n nori.setting_check_list('action', ['diff', 'sync'])\n nori.setting_check_type('reverse', bool)\n nori.setting_check_type('bidir', bool)\n nori.setting_check_callbacks('pre_action_callbacks')\n nori.setting_check_callbacks('post_action_callbacks', 1, 1)\n for i, cb_t in enumerate(nori.core.cfg['post_action_callbacks']):\n nori.setting_check_type(('post_action_callbacks', i, 3), bool)\n nori.setting_check_list('source_type', ['generic', 'drupal'])\n nori.setting_check_callable('source_query_func', may_be_none=False)\n nori.setting_check_callable('source_query_defaulter', may_be_none=True)\n nori.setting_check_callable('source_query_validator', may_be_none=False)\n nori.setting_check_callbacks('source_template_change_callbacks')\n nori.setting_check_callbacks('source_global_change_callbacks')\n nori.setting_check_list('dest_type', ['generic', 'drupal'])\n nori.setting_check_callable('dest_query_func', may_be_none=False)\n nori.setting_check_callable('dest_query_defaulter', may_be_none=True)\n nori.setting_check_callable('dest_query_validator', may_be_none=False)\n nori.setting_check_callbacks('dest_template_change_callbacks')\n nori.setting_check_callbacks('dest_global_change_callbacks')\n nori.setting_check_list('template_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['template_mode'] != 'all':\n nori.setting_check_not_empty('template_list')\n for i, t_name in enumerate(nori.core.cfg['template_list']):\n nori.setting_check_type(('template_list', i),\n nori.core.STRING_TYPES)\n nori.setting_check_list('key_mode', ['all', 'include', 'exclude'])\n if nori.core.cfg['key_mode'] != 'all':\n nori.setting_check_not_empty('key_list')\n\n # templates: general\n nori.setting_check_not_empty(\n 'templates', types=nori.core.MAIN_SEQUENCE_TYPES\n )\n for i, template in enumerate(nori.core.cfg['templates']):\n nori.setting_check_type(('templates', i), nori.core.MAPPING_TYPES)\n # bogus elements\n for k in template:\n if k not in T_KEYS:\n nori.err_exit(\n \"Warning: cfg['templates'][{0}][{1}] is set\\n\"\n \"(to {2}), but there is no such setting.\" .\n format(i, *map(nori.pps, [k, template[k]])),\n nori.core.exitvals['startup']['num']\n )\n # template name\n nori.setting_check_type(('templates', i, T_NAME_KEY),\n nori.core.STRING_TYPES)\n # multiple-valued value columns?\n nori.setting_check_type(('templates', i, T_MULTIPLE_KEY), bool)\n # source-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_S_QUERY_ARGS_KEY))\n # to-dest transform function\n nori.setting_check_callable(('templates', i, T_TO_D_FUNC_KEY),\n may_be_none=True)\n # source-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_S_NO_REPL_KEY), bool)\n # source-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_S_CHANGE_CB_KEY))\n # dest-DB query function arguments\n nori.setting_check_arg_tuple(('templates', i, T_D_QUERY_ARGS_KEY))\n # to-source transform function\n nori.setting_check_callable(('templates', i, T_TO_S_FUNC_KEY),\n may_be_none=True)\n # dest-DB don't-replicate flag\n nori.setting_check_type(('templates', i, T_D_NO_REPL_KEY), bool)\n # dest-DB change callbacks\n nori.setting_check_callbacks(('templates', i, T_D_CHANGE_CB_KEY))\n # key mode\n nori.setting_check_list(('templates', i, T_KEY_MODE_KEY),\n ['all', 'include', 'exclude'])\n if template[T_KEY_MODE_KEY] != 'all':\n # key list\n nori.setting_check_not_empty(('templates', i, T_KEY_LIST_KEY))\n\n # templates: 
query-function arguments\n for (sd, t_key, validator_key) in [\n ('s', T_S_QUERY_ARGS_KEY, 'source_query_validator'),\n ('d', T_D_QUERY_ARGS_KEY, 'dest_query_validator')\n ]:\n # args tuple\n args_idx = ('templates', i, t_key)\n args_t = template[t_key]\n # key_cv, value_cv (somewhat)\n for cv_str in ['key_cv', 'value_cv']:\n cv_idx = args_idx + (1, cv_str)\n nori.setting_check_not_empty(\n cv_idx, types=nori.core.MAIN_SEQUENCE_TYPES\n )\n cv_seq = args_t[1][cv_str]\n for j, cv in enumerate(cv_seq):\n nori.setting_check_length(cv_idx + (j, ), 2, 3,\n types=tuple)\n # the rest of the arguments\n nori.core.cfg[validator_key](sd, args_idx, args_t, i)\n\n # reporting settings\n nori.setting_check_list('report_order', ['template', 'keys'])\n # the rest are handled by nori.validate_email_config()",
"def setup_validation(self, client):\n raise NotImplementedError(\"Please fix me.\")",
"def setup_validation(self, client):\n raise NotImplementedError(\"Please fix me.\")",
"def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")",
"def setup_validation(self, client, *args, **keyword_args):\n raise NotImplementedError(\"Please fix me.\")",
"def validate(self, config_json):\n pass",
"def revalidate(self):\n *_, validation_error = validate_model(self.__class__, self.__dict__)\n if validation_error:\n raise validation_error\n self.validate_config()",
"def check(self) -> None:\n # validate training config\n super().check()",
"def add_validators():\n vc = VimageConfig(getattr(settings, CONFIG_NAME))\n vc.add_validators()",
"def test_valid_configuration(self):\n\n conf = [\n 'gasoline', '228i', 'model_luxury_line', 'silver', 'rims_384',\n 'tapistry_black', 'steptronic', 'smoker_package', 'tow_hook'\n ]\n\n attr_val_ids = self.get_attr_val_ids(conf)\n validation = self.cfg_tmpl.validate_configuration(attr_val_ids)\n self.assertTrue(validation, \"Valid configuration failed validation\")",
"def test_validate(self):\n pass",
"def validate(self):\n ...",
"def _validate(self, config):\n validator = Validator()\n try:\n results = config.validate(validator, preserve_errors=True)\n except ConfigObjError as e:\n raise ConfigError(e.message)\n if results is not True:\n error_msg = \"\"\n for (section_list, key, res) in flatten_errors(config, results):\n if key is not None:\n if res is False:\n msg = 'key \"%s\" in section \"%s\" is missing.'\n msg = msg % (key, \", \".join(section_list))\n else:\n msg = 'key \"%s\" in section \"%s\" failed validation: %s'\n msg = msg % (key, \", \".join(section_list), res)\n else:\n msg = 'section \"%s\" is missing' % \".\".join(section_list)\n error_msg += msg + \"\\n\"\n raise ConfigError(error_msg)\n return config",
"def test_update_hyperflex_node_config_policy(self):\n pass",
"def test_patch_hyperflex_node_config_policy(self):\n pass",
"def validate(self):\n for key, value in self._configurations.items():\n value.validate()",
"def validate(configuration_file):\n import jsonschema\n\n with open(configuration_file) as f:\n config = syaml.load(f)\n\n # Ensure we have a \"container\" attribute with sensible defaults set\n env_dict = ev.config_dict(config)\n env_dict.setdefault(\n \"container\", {\"format\": \"docker\", \"images\": {\"os\": \"ubuntu:18.04\", \"spack\": \"develop\"}}\n )\n env_dict[\"container\"].setdefault(\"format\", \"docker\")\n env_dict[\"container\"].setdefault(\"images\", {\"os\": \"ubuntu:18.04\", \"spack\": \"develop\"})\n\n # Remove attributes that are not needed / allowed in the\n # container recipe\n for subsection in (\"cdash\", \"gitlab_ci\", \"modules\"):\n if subsection in env_dict:\n msg = (\n 'the subsection \"{0}\" in \"{1}\" is not used when generating'\n \" container recipes and will be discarded\"\n )\n warnings.warn(msg.format(subsection, configuration_file))\n env_dict.pop(subsection)\n\n # Set the default value of the concretization strategy to unify and\n # warn if the user explicitly set another value\n env_dict.setdefault(\"concretizer\", {\"unify\": True})\n if not env_dict[\"concretizer\"][\"unify\"] is True:\n warnings.warn(\n '\"concretizer:unify\" is not set to \"true\", which means the '\n \"generated image may contain different variants of the same \"\n 'packages. Set to \"true\" to get a consistent set of packages.'\n )\n\n # Check if the install tree was explicitly set to a custom value and warn\n # that it will be overridden\n environment_config = env_dict.get(\"config\", {})\n if environment_config.get(\"install_tree\", None):\n msg = (\n 'the \"config:install_tree\" attribute has been set explicitly '\n \"and will be overridden in the container image\"\n )\n warnings.warn(msg)\n\n # Likewise for the view\n environment_view = env_dict.get(\"view\", None)\n if environment_view:\n msg = (\n 'the \"view\" attribute has been set explicitly '\n \"and will be overridden in the container image\"\n )\n warnings.warn(msg)\n\n jsonschema.validate(config, schema=env.schema)\n return config",
"def validate_settings(event):\n key = event.info['key']\n val = event.info['value']\n\n if key == 'minerva.geonames_folder':\n ModelImporter.model('folder').load(val, exc=True, force=True)\n event.preventDefault().stopPropagation()",
"def _check_config(self):",
"def validate_settings(self, settings):\n pass",
"def validate_config(self, changed):\n logger.debug(\"[%s] Validating config (Legacy path)\", self.name)\n if not self.to_validate(changed):\n return\n # Validate (Legacy Path)\n from noc.cm.engine import Engine\n\n engine = Engine(self)\n try:\n engine.check()\n except: # noqa\n logger.error(\"Failed to validate config for %s\", self.name)\n error_report()",
"def validate_config(self):\n reference = data_file(\"../config/template/minimum_aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/aiscalator.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"In Global Application Configuration file \"\n _validate_configs(self._app_conf, ref, msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._step_name:\n reference = data_file(\"../config/template/minimum_step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in step named \" + self._step_name\n _validate_configs(self._step,\n ref[\"steps\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)\n if self._dag_name:\n reference = data_file(\"../config/template/minimum_dag.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=True,\n type_mismatch_exception=True)\n reference = data_file(\"../config/template/step.conf\")\n ref = pyhocon.ConfigFactory.parse_file(reference)\n msg = \"in dag named \" + self._dag_name\n _validate_configs(self._dag,\n ref[\"dags\"][\"Untitled\"],\n msg,\n missing_exception=False,\n type_mismatch_exception=True)",
"def test_patch_hyperflex_vcenter_config_policy(self):\n pass",
"def test_validate_error_wrong_schema(tmp_config): # noqa # pylint: disable=W0621\n from canarieapi.api import APP # isort: skip # noqa\n\n APP.config.update({\n \"SERVICES\": {\"random\": \"bad\"},\n \"PLATFORM\": {\"invalid\": \"error\"},\n })\n\n with pytest.raises(jsonschema.ValidationError):\n validate_config_schema(False)",
"def test_validators():",
"def test_kyc_get_validation(self):\n pass"
] | [
"0.65538985",
"0.65538985",
"0.6428385",
"0.6422519",
"0.6317255",
"0.62712306",
"0.62712306",
"0.58824056",
"0.58824056",
"0.58192813",
"0.57234484",
"0.56826675",
"0.5652729",
"0.5652712",
"0.56522995",
"0.56318414",
"0.5625317",
"0.5567985",
"0.55444556",
"0.55250084",
"0.55087966",
"0.55057806",
"0.5477694",
"0.5474006",
"0.5467815",
"0.5459682",
"0.5457478",
"0.542644",
"0.54221934",
"0.5418544"
] | 0.7201769 | 0 |
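The connect_method choice in update_validation_section above depends only on whether an external network is available; the sketch below (ext_net_present and the network name are assumed placeholders) prints the resulting [validation] section:

```python
import configparser
import io

ext_net_present = True             # stand-in for self.ext_net
network_for_ssh = "tempest-net"    # stand-in for self.network.name or EXTERNAL_NETWORK

rconfig = configparser.RawConfigParser()
rconfig.add_section('validation')
rconfig.set('validation', 'connect_method',
            'floating' if ext_net_present else 'fixed')
rconfig.set('validation', 'network_for_ssh', network_for_ssh)

buf = io.StringIO()
rconfig.write(buf)
print(buf.getvalue())
# [validation]
# connect_method = floating
# network_for_ssh = tempest-net
```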
Update scenario section in tempest.conf | def update_scenario_section(self):
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
filename = getattr(
config.CONF, f'{self.case_name}_image', self.filename)
if not rconfig.has_section('scenario'):
rconfig.add_section('scenario')
rconfig.set('scenario', 'img_file', filename)
rconfig.set('scenario', 'img_disk_format', getattr(
config.CONF, f'{self.case_name}_image_format',
self.image_format))
extra_properties = self.extra_properties.copy()
if env.get('IMAGE_PROPERTIES'):
extra_properties.update(
functest_utils.convert_ini_to_dict(
env.get('IMAGE_PROPERTIES')))
extra_properties.update(
getattr(config.CONF, f'{self.case_name}_extra_properties', {}))
rconfig.set(
'scenario', 'img_properties',
functest_utils.convert_dict_to_ini(extra_properties))
with open(self.conf_file, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure(self, section):",
"def test_update_scenario(self):\n pass",
"def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def test_vault_update_vault_section(self):\n pass",
"def configure_test(self, test, config_json):\n pass",
"def test_update_hyperflex_node_config_policy(self):\n pass",
"def set_up(self):\n for section_name, section_body in self.sections.iteritems():\n scenario = Scenario(section_name, section_body)\n self.scenarios.append(scenario)",
"def configureScenario(out, templatepath, geometrypath, nx, ny, nz, sx, sy, sz,\n vtkpath, enable_timing, timingpath, enable_vtk, initial,\n bakspath):\n tree = ET.parse(templatepath)\n root = tree.getroot()\n\n for child in root.findall(\"parallel\"):\n child.attrib[\"numProcessorsX\"] = str(nx)\n child.attrib[\"numProcessorsY\"] = str(ny)\n child.attrib[\"numProcessorsZ\"] = str(nz)\n\n for child in root.findall(\"vtk\"):\n child.attrib[\"enabled\"] = str(enable_vtk).lower()\n child.text = os.path.join(vtkpath, \"vtk\")\n\n for child in root.findall(\"timing\"):\n child.attrib[\"enabled\"] = str(enable_timing).lower()\n child.text = timingpath\n else:\n node = ET.Element(\"timing\", {\"enabled\": str(enable_timing).lower()})\n node.text = timingpath\n root.append(node)\n\n if sx != None and sy != None:\n for child in root.findall(\"geometry\"):\n child.attrib[\"sizeX\"] = str(sx)\n child.attrib[\"sizeY\"] = str(sy)\n child.attrib[\"sizeZ\"] = str(sz)\n\n for child in root.findall(\"geometry\"):\n attrs = child.attrib\n\n if \"obstacle\" in attrs and os.path.isfile(attrs[\"obstacle\"]):\n shutil.copyfile(attrs[\"obstacle\"], geometrypath)\n child.attrib[\"obstacle\"] = geometrypath\n\n for child in root.findall(\"restart\"):\n attrs = child.attrib\n\n if \"in\" in attrs and os.path.isfile(attrs[\"in\"] + \".bak\"):\n shutil.copyfile(attrs[\"in\"] + \".bak\", initial + \".bak\")\n child.attrib[\"in\"] = initial\n\n if \"out\" in attrs:\n child.attrib[\"out\"] = bakspath + \"//\" + child.attrib[\"out\"]\n\n tree.write(out)",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TempestCommon.tempest_conf_yaml, encoding='utf-8') as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def test_patch_namespaced_deployment_config(self):\n pass",
"def test_patch_hyperflex_node_config_policy(self):\n pass",
"def test_update_hyperflex_ucsm_config_policy(self):\n pass",
"def configure_specie(self, specie):\r\n pass",
"def configure_step(self):\n pass",
"def test_update_deployment(self):\n pass",
"def update_feature(selfs, k, v, cfg_path):\n with open(cfg_path, 'r') as cfg:\n file_dict = yaml.safe_load(cfg)\n # overprint the entries with the new config_dict\n file_dict['{}'.format(k)] = v\n with open(cfg_path, 'w') as w_file:\n w_file.write(yaml.dump(file_dict))",
"def test_configuration_changes(self):\n config = serialization.load_file(join(EXAMPLES, 'complete.yml'))[0]\n s = simulation.from_config(config)\n for i in range(5):\n s.run_simulation(dry_run=True)\n nconfig = s.to_dict()\n del nconfig['topology']\n assert config == nconfig",
"def test_update_hyperflex_sys_config_policy(self):\n pass",
"def configure_step(self):\n\n pass",
"def update_tempest_conf_file(conf_file, rconfig):\n with open(TEMPEST_CONF_YAML) as yfile:\n conf_yaml = yaml.safe_load(yfile)\n if conf_yaml:\n sections = rconfig.sections()\n for section in conf_yaml:\n if section not in sections:\n rconfig.add_section(section)\n sub_conf = conf_yaml.get(section)\n for key, value in sub_conf.items():\n rconfig.set(section, key, value)\n\n with open(conf_file, 'wb') as config_file:\n rconfig.write(config_file)",
"def test_create_scenario1(self):\n pass",
"def test_patch_hyperflex_ucsm_config_policy(self):\n pass",
"def test_pytest_bdd_scenario(self):\n self.testdir.makefile(\n \".feature\",\n simple=_SIMPLE_SCENARIO,\n )\n py_file = self.testdir.makepyfile(\n \"\"\"\n from pytest_bdd import scenario, given, then, when\n\n @scenario(\"simple.feature\", \"Simple scenario\")\n def test_simple():\n pass\n\n BAR = None\n\n @given(\"I have a bar\")\n def bar():\n global BAR\n BAR = 1\n\n @when(\"I eat it\")\n def eat():\n global BAR\n BAR -= 1\n\n @then(\"I don't have a bar\")\n def check():\n assert BAR == 0\n \"\"\"\n )\n file_name = os.path.basename(py_file.strpath)\n self.inline_run(\"--ddtrace\", file_name)\n spans = self.pop_spans()\n\n assert len(spans) == 7\n assert spans[0].get_tag(\"component\") == \"pytest\"\n assert spans[0].get_tag(\"test.name\") == \"Simple scenario\"\n assert spans[0].span_type == \"test\"\n assert spans[1].resource == \"I have a bar\"\n assert spans[1].name == \"given\"\n assert spans[2].resource == \"I eat it\"\n assert spans[2].name == \"when\"\n assert spans[3].resource == \"I don't have a bar\"\n assert spans[3].name == \"then\"",
"def test_create_scenario(self):\n pass",
"def test_patch_namespaced_deployment_config_status(self):\n pass",
"def test_edit_configuration(self):\n configuration = copy.deepcopy(self.configuration)\n configuration['settings'] = {'DB_HOST': 'other_scale_db'}\n configuration['mounts'] = {\n 'dted': {\n 'type': 'host',\n 'host_path': '/some/new/path'\n }\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n json_data = {\n 'configuration': configuration,\n 'auto_update': False\n }\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})",
"async def test_setting_attribute_with_template(\n hass: HomeAssistant, mqtt_mock_entry: MqttMockHAClientGenerator\n) -> None:\n await help_test_setting_attribute_with_template(\n hass, mqtt_mock_entry, select.DOMAIN, DEFAULT_CONFIG\n )",
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_update_node_driveconfig(self):\n pass",
"def set_yaml_config(self) -> None:\n\n # LT-248: We can pick Artillery Phase configuration from conf file\n self.yaml_config = {\n \"config\": {\n \"target\": self.get_swagger_url(),\n \"processor\": f\"./{self.OUT_FILE}\",\n \"phases\": [\n {\n \"duration\": settings.DURATION or 1,\n \"arrivalRate\": settings.SPAWN_RATE or 1\n }\n ]\n },\n \"scenarios\": self.task_set.yaml_flow\n }"
] | [
"0.61347264",
"0.60817313",
"0.60120815",
"0.5875747",
"0.5773806",
"0.5770869",
"0.57243615",
"0.5709222",
"0.5686607",
"0.5598393",
"0.5580826",
"0.5557054",
"0.55527663",
"0.551544",
"0.5515208",
"0.54568624",
"0.54538155",
"0.5447713",
"0.5440486",
"0.5423029",
"0.54104966",
"0.53976095",
"0.538654",
"0.53864557",
"0.5381347",
"0.537818",
"0.5356381",
"0.5351695",
"0.5351011",
"0.5322219"
] | 0.6915758 | 0 |
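update_scenario_section above merges image properties from IMAGE_PROPERTIES and per-case overrides before writing them back as a single img_properties value. The functest_utils converters are not shown in this excerpt, so the helpers below are assumed minimal equivalents (the "key:value,key:value" wire format is an assumption) used only to illustrate the merge order:

```python
def convert_ini_to_dict(value):
    # assumed format: "hw_disk_bus:scsi,hw_scsi_model:virtio-scsi"
    return dict(item.split(':', 1) for item in value.split(',')) if value else {}

def convert_dict_to_ini(value):
    return ','.join(f"{key}:{val}" for key, val in value.items())

extra_properties = {'architecture': 'x86_64'}                      # hypothetical defaults
extra_properties.update(convert_ini_to_dict('hw_disk_bus:scsi'))   # from IMAGE_PROPERTIES
extra_properties.update({'hw_scsi_model': 'virtio-scsi'})          # per-case override
print(convert_dict_to_ini(extra_properties))
# architecture:x86_64,hw_disk_bus:scsi,hw_scsi_model:virtio-scsi
```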
Update dashboard section in tempest.conf | def update_dashboard_section(self):
rconfig = configparser.RawConfigParser()
rconfig.read(self.conf_file)
if env.get('DASHBOARD_URL'):
if not rconfig.has_section('dashboard'):
rconfig.add_section('dashboard')
rconfig.set('dashboard', 'dashboard_url', env.get('DASHBOARD_URL'))
else:
rconfig.set('service_available', 'horizon', False)
with open(self.conf_file, 'w', encoding='utf-8') as config_file:
rconfig.write(config_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dashboards_v2_update(self):\n pass",
"def dashboard():",
"def conf_update(self):\n pass",
"def configure(self, section):",
"def dashboard(self):\r\n return {}",
"def put_cloudwatch_dashboard(self):\n\n cloudwatch_config = self.provider_config[\"cloudwatch\"]\n dashboard_config = cloudwatch_config \\\n .get(CloudwatchConfigType.DASHBOARD.value, {})\n dashboard_name = dashboard_config.get(\"name\", self.cluster_name)\n widgets = self. \\\n CLOUDWATCH_CONFIG_TYPE_TO_CONFIG_VARIABLE_REPLACE_FUNC. \\\n get(CloudwatchConfigType.DASHBOARD.value)()\n\n # upload cloudwatch dashboard config to the SSM parameter store\n dashboard_config_ssm_param_name = self \\\n ._get_ssm_param_name(CloudwatchConfigType.DASHBOARD.value)\n self._put_ssm_param(widgets, dashboard_config_ssm_param_name)\n response = self.cloudwatch_client.put_dashboard(\n DashboardName=dashboard_name,\n DashboardBody=json.dumps({\n \"widgets\": widgets\n }))\n issue_count = len(response.get(\"DashboardValidationMessages\", []))\n if issue_count > 0:\n for issue in response.get(\"DashboardValidationMessages\"):\n logging.error(\"Error in dashboard config: {} - {}\".format(\n issue[\"Message\"], issue[\"DataPath\"]))\n raise Exception(\n \"Errors in dashboard configuration: {} issues raised\".format(\n issue_count))\n else:\n logger.info(\"Successfully put dashboard to cloudwatch console\")\n return response",
"def handle_panel_update(self, section_dict):",
"async def dashboard(request):\n return [\n {'name': 'application config', 'value': {k: str(v) for k, v in app.cfg}},\n {'name': 'request headers', 'value': dict(request.headers)},\n ]",
"def dashboard():\r\n return render_template('{}/dashboard.html'.format(MODULE_DIR))",
"def _replace_dashboard_config_variables(self):\n data = self._load_config_file(CloudwatchConfigType.DASHBOARD.value)\n widgets = []\n for item in data:\n self._replace_all_config_variables(\n item,\n None,\n self.cluster_name,\n self.provider_config[\"region\"],\n )\n for node_id in self.node_ids:\n item_out = copy.deepcopy(item)\n (item_out, modified_str_count) = \\\n self._replace_all_config_variables(\n item_out,\n str(node_id),\n None,\n None,\n )\n widgets.append(item_out)\n if not modified_str_count:\n break # no per-node dashboard widgets specified\n return widgets",
"def dashboard():\n return render_template(\"admin/dashboard.html\", title=\"Dashboard\")",
"def test_update_dashboard(self):\n os.unlink(self.dboard._path)\n self.dboard.update_dashboard()\n self.assertTrue(os.path.isfile(self.dboard._path))",
"def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_resource('fanstatic', 'mingus')\n return",
"def set_dash_layout_settings(self, values=None, user_info=None):\n if not user_info:\n user = users.get_current_user()\n if not user:\n return\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n except Exception as err:\n logging.exception(err)\n pass\n if user_info:\n if type(values) is not dict:\n # Assign values to the default admin template.\n values = {\n \"nav\": [\"app_management\", \"appscale_management\",\n \"debugging_monitoring\"],\n \"panel\": [\"app_console\", \"upload_app\", \"cloud_stats\",\n \"database_stats\",\n \"memcache_stats\"]\n }\n layout_settings = values\n lookup_dict = self.build_dict(user_info=user_info)\n layout_settings['nav'] = [{key: lookup_dict.get(key)} for key in\n layout_settings.get('nav') if\n key in lookup_dict]\n\n layout_settings['panel'] = [{key: lookup_dict.get(key)} for key in\n layout_settings.get('panel') if\n key in lookup_dict and (\n lookup_dict.get(key).get(\n 'is_admin_panel') ==\n user_info.is_user_cloud_admin\n or not lookup_dict.get(key).get(\n 'is_admin_panel'))]\n user_info.dash_layout_settings = layout_settings\n user_info.put()\n return user_info.dash_layout_settings\n return",
"def update_website_configuration():\n put('config/supervisor_website.conf', \n '/etc/supervisor/conf.d/gunicorn.conf', \n use_sudo=True)\n sudo('supervisorctl update')\n sudo('supervisorctl reload')",
"def config():\n if app.args.ui_mode == \"jinja\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": None,\n \"show\": False,\n \"text\": None,\n \"url\": None\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"jinja2\"\n },\n \"title\": \"RENDER\",\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"Render\",\n \"url\": \"/render\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": 'text'\n },\n \"title\": \"RESULT\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n elif app.args.ui_mode == \"schema\":\n ui_config = {\n \"p1\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\":\"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\",\n \"indentUnit\": 2,\n \"tabSize\": 2\n },\n \"title\": \"DATA\",\n \"inventory\": bool(app.args.inventory_source),\n \"b1\": {\n \"icon\": \"create\",\n \"show\": True,\n \"text\": \"schema\",\n \"url\": \"/schema\"\n }\n },\n \"p2\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"SCHEMA\",\n \"b1\": {\n \"icon\": \"check\",\n \"show\": True,\n \"text\": \"Validate\",\n \"url\": \"/validate\"\n }\n },\n \"p3\": {\n \"options\": {\n \"lineNumbers\": True,\n \"theme\": \"material\",\n \"lineWrapping\" : True,\n \"mode\": \"yaml\"\n },\n \"title\": \"VALIDATION SUCCESS/ERRORS\",\n \"b1\": {\n \"icon\": \"link\",\n \"show\": bool(app.args.url),\n \"text\": \"link\"\n }\n }\n }\n return jsonify(ui_config)",
"def updateSettingsUI(self):\n\n pass",
"def test_update_config_root(self):\n config_root = self._create_config_root()\n config_root_uuid = config_root['config-root']['uuid']\n updated_name = data_utils.rand_name('new_config_root')\n with self.override_role():\n self.config_client.update_config_root(\n config_root_uuid, display_name=updated_name)",
"def _on_config_changed(self, _):\n self._configure_pod()",
"def update_auth_section(self):\n rconfig = configparser.RawConfigParser()\n rconfig.read(self.conf_file)\n if not rconfig.has_section(\"auth\"):\n rconfig.add_section(\"auth\")\n if env.get(\"NEW_USER_ROLE\").lower() != \"member\":\n tempest_roles = []\n if rconfig.has_option(\"auth\", \"tempest_roles\"):\n tempest_roles = functest_utils.convert_ini_to_list(\n rconfig.get(\"auth\", \"tempest_roles\"))\n rconfig.set(\n 'auth', 'tempest_roles',\n functest_utils.convert_list_to_ini(\n [env.get(\"NEW_USER_ROLE\")] + tempest_roles))\n if not json.loads(env.get(\"USE_DYNAMIC_CREDENTIALS\").lower()):\n rconfig.set('auth', 'use_dynamic_credentials', False)\n account_file = os.path.join(\n getattr(config.CONF, 'dir_functest_data'), 'accounts.yaml')\n assert os.path.exists(\n account_file), f\"{account_file} doesn't exist\"\n rconfig.set('auth', 'test_accounts_file', account_file)\n if env.get('NO_TENANT_NETWORK').lower() == 'true':\n rconfig.set('auth', 'create_isolated_networks', False)\n with open(self.conf_file, 'w', encoding='utf-8') as config_file:\n rconfig.write(config_file)",
"def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def test_update_hyperflex_cluster_profile(self):\n pass",
"def write_config(self):\n cfg = {\n 'ALERT_API_KEY':self.api_key,\n 'APP_NAME':self.title,\n 'alertes':self.alertes\n }\n write_conf(self.CONF_FILE,cfg)",
"def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))",
"def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'syngenta')",
"def dashboard():\n return render_template('home/dashboard.html',title='SycliQ Dashboard')",
"def update(self):\n self.save_config_file()",
"def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)",
"def includeme(config):\n\n config.add_translation_dirs('kotti_dashboard:locale')\n config.add_static_view('static-kotti_dashboard', 'kotti_dashboard:static')\n\n config.scan(__name__)",
"def rebuild_dash_layout_settings_dict(self, email=None):\n if email is None:\n return {}\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n try:\n if user_info.dash_layout_settings:\n lookup_dict = self.build_dict(user_info=user_info)\n values = user_info.dash_layout_settings\n default_nav = [\"app_management\", \"appscale_management\",\n \"debugging_monitoring\"]\n\n nav_list = []\n for key_dict in values.get('nav'):\n for temp_key in key_dict:\n nav_list.append(temp_key)\n\n if set(nav_list) != set(default_nav):\n for key in default_nav:\n if nav_list.count(key) == 0:\n nav_list.append(key)\n\n default_panel = [\"app_console\", \"upload_app\", \"cloud_stats\",\n \"database_stats\", \"memcache_stats\"]\n\n panel_list = []\n for key_dict in values.get('panel'):\n for temp_key in key_dict:\n panel_list.append(temp_key)\n\n if set(panel_list) != set(default_panel):\n for key in default_panel:\n if panel_list.count(key) == 0:\n panel_list.append(key)\n\n values['nav'] = [{key: lookup_dict.get(key)}\n for key in nav_list if key in lookup_dict]\n\n new_panel_vals = []\n for key in panel_list:\n is_admin_panel = lookup_dict.get(key).get('is_admin_panel')\n if key in lookup_dict and (not is_admin_panel or\n is_admin_panel ==\n user_info.is_user_cloud_admin):\n new_panel_vals.append({key: lookup_dict.get(key)})\n\n values['panel'] = new_panel_vals\n user_info.dash_layout_settings = values\n user_info.put()\n return user_info.dash_layout_settings\n else:\n return self.set_dash_layout_settings(user_info=user_info)\n except Exception as err:\n logging.exception(err)\n return self.set_dash_layout_settings(user_info=user_info)\n except Exception as err:\n logging.exception(err)"
] | [
"0.6211464",
"0.6072598",
"0.5836697",
"0.57798123",
"0.5621353",
"0.5581383",
"0.5558267",
"0.547862",
"0.54553175",
"0.53960663",
"0.53905696",
"0.53771514",
"0.5368407",
"0.53436995",
"0.53193396",
"0.53101474",
"0.5291737",
"0.5288086",
"0.5276987",
"0.52725184",
"0.52718085",
"0.5240094",
"0.5225146",
"0.5202859",
"0.519413",
"0.5183507",
"0.51779395",
"0.5177287",
"0.51772225",
"0.51742375"
] | 0.7722257 | 0 |
Turns a waze linestring into a geojson linestring | def get_linestring(value):
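    # Waze stores the geometry as a 'line' list of {x, y} points; the whole
    # record is kept as the feature's properties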
line = value['line']
coords = [(x['x'], x['y']) for x in line]
return geojson.Feature(
geometry=geojson.LineString(coords),
properties=value
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lineToPolygon(geom):\n assert(geom[\"type\"] == \"LineString\")\n # LineString is only the exterior line of a polygon (no holes possible)\n return geojson.Polygon(coordinates=[geom[\"coordinates\"]], validate=True)",
"def parse_point(line):\n return json.loads(line)",
"def ways2geometry(overpass_result_object):\n tunnel_json = overpass_result_object.toJSON()\n # Read ['elements'] attribute into a df:\n df = pd.DataFrame.from_records(tunnel_json['elements'])\n df.rename(columns={'nodes': 'node_IDs'}, inplace=True)\n # Clean up the geometry column which contains the coordinates, but has 'lat', 'lon' strings etc.\n df['geometry'] = df['geometry'].astype(str)\n df['geometry'].replace({r\"{'lat': \": r'(',\n r\"'lon': \": '',\n r'}': r')'}, inplace=True, regex=True)\n # Convert string representations into a list of tuples of floats.\n df['geometry'] = [literal_eval(row) for row in df['geometry']]\n if not isinstance(df.geometry[1][1], tuple):\n raise ValueError(\"Geometry coordinates must be of <class 'tuple'>. Conversion failed.\")\n\n\n # Unpack the 'tags' into a dictionary. This way we avoid NaNs and just have unique dict for every way ID key.\n way_tags = {}\n for way in df[['id', 'tags']].itertuples():\n way_tags[way.id] = way.tags\n # Finally delete the 'tags' col (no longer needed). axis=1 specifies column, not row.\n df.drop(columns='tags', axis=1, inplace=True)\n\n # Construct a Geopandas gdf and enable the 'geometry' column.\n gdf = gpd.GeoDataFrame(df, geometry=df['geometry'].apply(lambda row: LineString(row)), crs='epsg:4326') # EPSG: 4326 is WGS84 (Lat and Long)\n # Flip the LineString coords as they are the wrong way around.\n gdf['geometry'] = gdf.geometry.map(lambda linestring: transform(lambda x, y: (y, x), linestring))\n gdf.set_crs(epsg='4326', inplace=True) # Set lon lat system again.\n\n return gdf, way_tags",
"def text_to_json(file):\n\n #--------------------------------------------------------------------------\n # First read in the data\n #--------------------------------------------------------------------------\n x = []\n y = []\n z = []\n isFile = False\n if isinstance(file, str):\n isFile = True\n file = open(file, 'rt')\n lines = file.readlines()\n else:\n lines = file.readlines()\n reference = ''\n for line in lines:\n sline = line.strip()\n if sline.startswith('#'):\n reference += sline\n continue\n if sline.startswith('>'):\n if len(x): # start of new line segment\n x.append(np.nan)\n y.append(np.nan)\n z.append(np.nan)\n continue\n else: # start of file\n continue\n if not len(sline.strip()):\n continue\n parts = sline.split()\n if len(parts) < 3:\n raise ShakeLibException(\n 'Rupture file %s has no depth values.' % file)\n y.append(float(parts[0]))\n x.append(float(parts[1]))\n z.append(float(parts[2]))\n if isFile:\n file.close()\n\n # Construct GeoJSON dictionary\n\n coords = []\n poly = []\n for lon, lat, dep in zip(x, y, z):\n if np.isnan(lon):\n coords.append(poly)\n poly = []\n else:\n poly.append([lon, lat, dep])\n if poly != []:\n coords.append(poly)\n\n d = {\n \"type\": \"FeatureCollection\",\n \"metadata\": {},\n \"features\": [\n {\n \"type\": \"Feature\",\n \"properties\": {\n \"rupture type\": \"rupture extent\",\n \"reference\": reference\n },\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [coords]\n }\n }\n ]\n }\n return d",
"def create_ogr_linestring_from_list(geom: list) -> ogr.Geometry:\n return ogr.CreateGeometryFromJson(json.dumps({\"type\": 'LineString', 'coordinates': geom}))",
"def line(points):\n return LineString(points)",
"def json2polygon(geojson_str):\n geojson_object = geojson.loads(geojson_str)\n return geometry.shape(geojson_object)",
"def polygon_from_str(line):\n # remove possible utf-8 BOM\n if line.startswith('\\xef\\xbb\\xbf'):\n line = line[3:]\n polygon_points = [float(o) for o in line.split(',')[:8]]\n polygon_points = np.array(polygon_points).reshape(4, 2)\n polygon = Polygon(polygon_points).convex_hull\n return polygon",
"def updateLine(self, objectId, points):\n objectId = GeometryReference(objectId, self)\n\n # This works with just the points and is is by no means efficient.\n data = self.geometry(objectId)\n\n if data['type'] != 'LineString':\n raise TypeError(\"The geoJSON object is not a line.\")\n\n data['coordinates'] = points\n\n request = urllib2.Request(self.baseUri + 'geometry/%d' % objectId.id,\n data=json.dumps(data))\n request.add_header('Content-Type', 'application/json')\n request.get_method = lambda: 'PUT'\n r = urllib2.urlopen(request)\n data = json.load(r)\n return data",
"def parse_line(obj):\n quadrilateral = []\n for point in obj['points']:\n quadrilateral += point\n xmin = min(quadrilateral[0::2])\n xmax = max(quadrilateral[0::2])\n\n ymin = min(quadrilateral[1::2])\n ymax = max(quadrilateral[1::2])\n if not (xmin < xmax and ymin < ymax):\n logging.warning(f\"skip: {obj}\")\n return None\n language = obj['language'].lower()\n legibility = 1 - int(obj['illegibility'])\n transcription = obj['transcription']\n if transcription == '###':\n transcription = ''\n legibility = 0\n language = ''\n\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [quadrilateral],\n 'attributes': {\n 'transcription': transcription,\n 'legible': legibility,\n 'language': language,\n }\n }\n return word_annotation",
"def polygon_from_str(line):\r\n polygon_points = [float(o) for o in line.split(',')[:8]]\r\n polygon_points = np.array(polygon_points).reshape(4, 2)\r\n polygon = Polygon(polygon_points).convex_hull\r\n return polygon",
"def make_map(filename, datadir):\n items = json.load(open(filename))\n geojson_items = []\n for item in items:\n geojson_items.append(get_linestring(item))\n with open(os.path.join(datadir, 'waze.geojson'), 'w') as outfile:\n geojson.dump(geojson.FeatureCollection(geojson_items), outfile)",
"def normalizeGeometry(geom):\n\t# Convert string GEOSGeometry object to python dict\n\tgeom = json.loads(geom)\n\n\t# Normalize longitude to range [-180, 180) using saw tooth function\n\tc = geom['coordinates'][0]\n\tgeom['coordinates'][0] = (c+180 - ( math.floor( (c+180)/360 ) )*360) - 180\n\n\t# Normalize latitude to range [-90, 90) using saw tooth function\n\tc = geom['coordinates'][1]\n\tgeom['coordinates'][1] = (c+90 - ( math.floor( (c+90)/180 ) )*180) - 90\n\n\t# Encode and return GEOSGeometry object\n\treturn GEOSGeometry(json.dumps(geom))",
"def __dump_linestring(obj, big_endian):\n wkb_string = b''\n\n if big_endian:\n wkb_string += BIG_ENDIAN\n else:\n wkb_string += LITTLE_ENDIAN\n\n coords = obj['coordinates']\n vertex = coords[0]\n # Infer the number of dimensions from the first vertex\n num_dims = len(vertex)\n if num_dims == 2:\n type_byte_str = __WKB['2D']['LineString']\n elif num_dims == 3:\n type_byte_str = __WKB['Z']['LineString']\n elif num_dims == 4:\n type_byte_str = __WKB['ZM']['LineString']\n else:\n pass\n # TODO: raise\n if not big_endian:\n # reverse the byte ordering for little endian\n type_byte_str = type_byte_str[::-1]\n wkb_string += type_byte_str\n\n if big_endian:\n byte_fmt = '>'\n else:\n byte_fmt = '<'\n byte_fmt += 'd' * num_dims\n\n for vertex in coords:\n wkb_string += struct.pack(byte_fmt, *vertex)\n\n return wkb_string",
"def get_geojson_feature(id, raw_bbox_string, properties_dict):\n coords = raw_bbox_string.split()\n \n # Tesseract uses ints, but allow floats\n for i, val in enumerate(coords):\n coords[i] = float(val)\n # bbox order = # x0 y0 x1 y1\n \n bbox_json_obj = geojson.Polygon([[\n (coords[0], coords[1]), \n (coords[0], coords[3]), \n (coords[2], coords[3]), \n (coords[2], coords[1]),\n (coords[0], coords[1])\n ]])\n return geojson.Feature(id, bbox_json_obj, properties=properties_dict)",
"def to2D(geometry):\n\n return LineString(np.column_stack(geometry.xy))",
"def interpolate_points(route_line, line_points):\n\n segment_size = 0.1 # value to break the entire route into 1/10 segments\n distance_along_line = 0.1 # start distance along line at the segment size\n\n # break up the line into 1/10 segments, iterate. We are ignoring the 0th\n # element as that's the start position and that's already stored\n segmented_points = [] # creating an empty list to store these points\n\n # hold all the waypoints and other data\n segmented_points.append({'data': {'waypoints': []}})\n\n # for our start points that the user defines, geocoded\n segmented_points[0]['data']['start'] = {}\n segmented_points[0]['data']['end'] = {}\n\n for i in range(1, 10): # excluding the start and the end points\n # Note: the output of interpolate is a Point data type\n # Return a point at the specified distance along a linear geometric object.\n point = route_line.interpolate(distance_along_line, normalized=True)\n print \"Point \", i, point\n\n # call the function that checks to see what geohash the line falls under\n # and if it is a high crime area\n # geohash_data is a dict: crime_index, total_crimes, lng, lat, geohash\n geohash_data = get_position_geohash([(point.x, point.y)])[0] # dict\n\n # set the is_high_crime variable value to false, for testing\n geohash_data['is_high_crime'] = False\n\n # extract the datapoints from the point datatype\n geohash_data['lat'] = point.x\n geohash_data['lng'] = point.y\n\n segmented_points.append(geohash_data) # append data on location\n distance_along_line += segment_size\n\n # also add the point A, point B latitude and longitude that the user gives\n # to the data that will be sent back to JS\n segmented_points[0]['data']['start'] = {\n 'lat': line_points[0][0],\n 'lng': line_points[0][1]\n }\n\n segmented_points[0]['data']['end'] = {\n 'lat': line_points[-1][0],\n 'lng': line_points[-1][1]\n }\n\n return segmented_points",
"def test_generalized_linestring_is_valid():\n road = query_row(db_conf, 'osm_roads', 7201)\n # geometry is not simple, but valid\n # check that geometry 'survives' simplification\n assert not road['geometry'].is_simple, road['geometry'].wkt\n assert road['geometry'].is_valid, road['geometry'].wkt\n assert road['geometry'].length > 1000000\n road = query_row(db_conf, 'osm_roads_gen0', 7201)\n # but simplified geometies are simple\n assert road['geometry'].is_valid, road['geometry'].wkt\n assert road['geometry'].length > 1000000\n road = query_row(db_conf, 'osm_roads_gen1', 7201)\n assert road['geometry'].is_valid, road['geometry'].wkt\n assert road['geometry'].length > 1000000",
"def _get_geometry(self, val):\n g = OGRGeometry(val)\n return json.loads(g.json)",
"def string_to_json_position(x):\n\n s = x.split(',')\n return {'lat': float(s[0]), 'lng': float(s[1])}",
"def linestring(\n table: Table,\n lat_a: str = \"latA\",\n lat_b: str = \"latB\",\n lon_a: str = \"lonA\",\n lon_b: str = \"lonB\",\n linestring_column: str = \"linestring\",\n error: str = \"22 -22\",\n) -> Table:\n for row in table:\n try:\n lla = f\"{row[lon_a]:4d} {row[lat_a]:4d}\"\n except IndexError:\n lla = error\n\n try:\n llb = f\"{row[lon_b]:4d} {row[lat_b]:4d}\"\n except IndexError:\n llb = error\n\n row[linestring_column] = f\"linestring({lla}, {llb})\"\n return table",
"def vegref2geojson( vegref, dagensverdi=False): \r\n \r\n \r\n vegstr = vvi2vegrefstring( vegref) \r\n \r\n \r\n fradato = vegref['ValidFrom'][0:10]\r\n tildato = vegref['ValidTo'][0:10]\r\n veglenkeid = vegref['ReflinkOID']\r\n veglenkeposisjon = round( float( vegref['Measure'] ), 8) \r\n \r\n X = float( vegref['RoadNetPosition']['X'] ) \r\n Y = float( vegref['RoadNetPosition']['Y'] ) \r\n coordinates = [X, Y]\r\n if 'Z' in vegref['RoadNetPosition']:\r\n coordinates.append( float( vegref['RoadNetPosition']['Z'] ) )\r\n \r\n geoj = { \"type\": \"Feature\",\r\n \"geometry\": {\r\n \"type\": \"Point\",\r\n \"coordinates\": coordinates\r\n },\r\n \"properties\": {\r\n \"vegref\" : vegstr, \r\n \"fradato\" : fradato, \r\n \"tildato\" : tildato,\r\n \"veglenkeid\" : veglenkeid, \r\n \"veglenkeposisjon\" : veglenkeposisjon\r\n }\r\n }\r\n \r\n if dagensverdi: \r\n params = { 'viewDate' : '2022-10-31', \r\n 'reflinkoid' : veglenkeid, \r\n 'rellen' : veglenkeposisjon } \r\n \r\n url = 'https://visveginfo-static.opentns.org/RoadInfoService/GetRoadReferenceForNVDBReference' \r\n r = requests.get( url, params=params) \r\n if r.ok and 'RoadReference' in r.text: \r\n data = xmltodict.parse( r.text ) \r\n if 'RoadCategory' in data['RoadReference'].keys(): \r\n geoj['properties']['dagensvegref'] = vvi2vegrefstring( data['RoadReference'] ) \r\n else: \r\n geoj['properties']['dagensvegref'] = '' \r\n else: \r\n geoj['properties']['dagensvegref'] = '' \r\n \r\n return geoj",
"def gpvtg_convert(line):\r\n gps = line.strip().split(',')\r\n #check data\r\n if gps[1] == '0.00': \r\n return\r\n #jsondata = {'Horizontal speed': gps[7] + ' kmph or ' + gps[5] + 'knots'}\r\n return []",
"def geojson2postgis(self, filepath, table_name, geo_type):\n map_data = gpd.GeoDataFrame.from_file(filepath)\n # Maybe you want to change link address\n link = \"postgresql://{0}:{1}@{3}:5432/{2}\".format(self.username, self.password, self.dbname, self.host)\n engine = create_engine(link, encoding='utf-8')\n map_data = self.dict_to_json(map_data)\n map_data['geometry'] = map_data['geometry'].apply(lambda x: WKTElement(x.wkt, 4326))\n # Maybe you want to change 'replace' to 'append' in the future\n map_data.to_sql(\n name=table_name,\n con=engine,\n if_exists='replace',\n dtype={'geometry': Geometry(geometry_type=geo_type, srid=4326)}\n )",
"def geomFromOutlineCoords(coords):\n if isinstance(coords, numpy.ndarray):\n coords = coords.tolist()\n geomDict = {'type':'Polygon', 'coordinates':[coords]}\n geom = ogr.CreateGeometryFromJson(repr(geomDict))\n return geom",
"def convertor(geometry, method=\"wgs2gcj\"):\n if geometry['type'] == 'Point':\n coords = geometry['coordinates']\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'LineString' or geometry['type'] == 'MutliPoint':\n coordinates = geometry['coordinates']\n for coords in coordinates:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'Polygon' or geometry['type'] == 'MultiLineString':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for coords in rings:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n elif geometry['type'] == 'MultiPolygon':\n coordinates = geometry['coordinates']\n for rings in coordinates:\n for lines in rings:\n for coords in lines:\n coords[0], coords[1] = methods[method](coords[0], coords[1])\n return geometry",
"def _parse_wkt(s):\n if s.startswith('SRID'):\n s = s[s.index(';') + 1:]\n return shapely.wkt.loads(s)",
"def parse_string_line(self, data_line):\n if data_line:\n data_line = data_line.rstrip()\n if data_line:\n if data_line[0] == '#':\n extraparams = json.loads(data_line[1:])\n if 'glyph_cap_line' in extraparams:\n self.__capline = extraparams['glyph_cap_line']\n if 'glyph_base_line' in extraparams:\n self.__baseline = extraparams['glyph_base_line']\n if 'glyph_bottom_line' in extraparams:\n self.__bottomline = extraparams['glyph_bottom_line']\n elif len(data_line) > 9:\n strokes = []\n xmin = xmax = ymin = ymax = None\n # individual strokes are stored separated by <space>+R\n # starting at col 11\n for s in split(data_line[10:], ' R'):\n if len(s):\n stroke = list(zip(map(self.__char2val, s[::2]), map(self.__char2val, s[1::2])))\n xmin = min(stroke + ([xmin] if xmin else []), key=lambda t: t[0])\n ymin = min(stroke + ([ymin] if ymin else []), key=lambda t: t[1])\n xmax = max(stroke + ([xmax] if xmax else []), key=lambda t: t[0])\n ymax = max(stroke + ([ymax] if ymax else []), key=lambda t: t[1])\n strokes.append(stroke)\n self.__charcode = int(data_line[0:5])\n self.__left_side = self.__char2val(data_line[8])\n self.__right_side = self.__char2val(data_line[9])\n self.__strokes = strokes\n self.__xmin, self.__ymin, self.__xmax, self.__ymax = (xmin[0], ymin[1], xmax[0], ymax[1]) if strokes else (0, 0, 0, 0)\n return True\n return False",
"def createLine(self, points):\n\n data = {\n \"type\": \"LineString\",\n \"coordinates\": points,\n }\n\n req = urllib2.Request(self.baseUri + 'geometry', data=json.dumps(data))\n r = urllib2.urlopen(req)\n data = json.load(r)\n return GeometryReference(data.get('databaseId'), self)",
"def rltn2poly(osm_container, relation):\n cltn = []\n for m in relation.members:\n if m.type == Way:\n way = osm_container.get_osm_way_by_id(m.member_id)\n ln = way2line(osm_container, way)\n cltn.append(ln)\n merged_line = linemerge(cltn)\n return shpgeo.Polygon(merged_line)"
] | [
"0.67091656",
"0.6261151",
"0.6204115",
"0.61178505",
"0.6099191",
"0.5865695",
"0.5834806",
"0.5778352",
"0.57694143",
"0.5755196",
"0.5703966",
"0.5692615",
"0.568841",
"0.56757134",
"0.5596205",
"0.5546677",
"0.55370474",
"0.54986805",
"0.54589975",
"0.54281527",
"0.5373826",
"0.53717273",
"0.53694713",
"0.536339",
"0.5350739",
"0.5347312",
"0.5346429",
"0.5341416",
"0.5327994",
"0.5325512"
] | 0.73890424 | 0 |
Given a dict with keys of segment id, and val a list of waze jams (for now, just jams), the properties of a road segment, and the total number of snapshots we're looking at, update the road segment's properties to include features | def get_features(waze_info, properties, num_snapshots):
# Waze feature list
# jam_percent - percentage of snapshots that have a jam on this segment
if properties['segment_id'] in waze_info:
# only count one jam per snapshot on a road
num_jams = len(set([x['properties']['snapshotId']
for x in waze_info[properties['segment_id']]]))
else:
num_jams = 0
# Turn into number between 0 and 100
properties.update(jam_percent=100*num_jams/num_snapshots)
properties.update(jam=1 if num_jams else 0)
# Other potential features
# Something with speeds in the jams
# Alerts, or certain kinds of alerts
# Look at alerts that are crashes, maybe ignore those jams?
# Might be interesting to look at crashes on segments as well
# but not as a feature for the model
return properties | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_segmentation_map(segmap, object_map):\n obj_pix = object_map != 0\n segmap[obj_pix] = object_map[obj_pix]\n return segmap",
"def map_segments(datadir, filename):\n items = json.load(open(filename))\n\n # Only look at jams for now\n items = [get_linestring(x) for x in items if x['eventType'] == 'jam']\n\n items = util.reproject_records(items)\n\n # Get the total number of snapshots in the waze data\n num_snapshots = max([x['properties']['snapshotId'] for x in items])\n\n osm_file = os.path.join(\n datadir,\n 'processed',\n 'maps',\n 'osm_elements.geojson'\n )\n\n road_segments, inters = util.get_roads_and_inters(osm_file)\n\n # Get roads_and_inters returns elements that have shapely geometry\n # In order to output the unchanged points back out at the end,\n # Need to convert to geojson\n # This is something that should be addressed\n inters = [{'properties': x['properties'], 'geometry': {\n 'type': 'Point',\n 'coordinates': [x['geometry'].x, x['geometry'].y]\n }} for x in inters]\n \n roads, roads_index = util.index_segments(\n road_segments, geojson=True, segment=True)\n road_buffers = []\n for road in roads:\n road_buffers.append(road[0].buffer(3))\n\n print(\"read in {} road segments\".format(len(roads)))\n\n waze_info = defaultdict(list)\n count = 0\n\n for item in items:\n count += 1\n\n if item['properties']['eventType'] == 'jam':\n for idx in roads_index.intersection(item['geometry'].bounds):\n segment = roads[idx]\n buff = road_buffers[idx]\n\n # But if the roads share a name,\n # increase buffer size, in case of a median segment\n # Waze does not appear to specify which direction\n if 'street' in item['properties'] and segment[1]['name'] and \\\n item['properties']['street'].split()[0] == segment[1]['name'].split()[0]:\n buff = segment[0].buffer(10)\n overlap = buff.intersection(item['geometry'])\n\n if not overlap.length or \\\n (overlap.length < 20 and segment[0].length > 20):\n # Skip segments with no overlap\n # or very short overlaps\n continue\n waze_info[segment[1]['segment_id']].append(item)\n # Add waze features\n # Also convert into format that util.prepare_geojson is expecting\n updated_roads = []\n roads_with_jams = []\n for road in road_segments:\n properties = get_features(\n waze_info,\n road.properties,\n num_snapshots\n )\n updated_roads.append({\n 'geometry': {\n 'coordinates': [x for x in road.geometry.coords],\n 'type': 'LineString'\n },\n 'properties': properties\n })\n if properties['segment_id'] in waze_info:\n roads_with_jams.append({\n 'geometry': {\n 'coordinates': [x for x in road.geometry.coords],\n 'type': 'LineString'\n },\n 'properties': properties\n })\n\n results = util.prepare_geojson(updated_roads + inters)\n\n with open(osm_file, 'w') as outfile:\n geojson.dump(results, outfile)\n\n jam_results = util.prepare_geojson(roads_with_jams)\n\n with open(os.path.join(\n datadir,\n 'processed',\n 'maps',\n 'jams.geojson'), 'w') as outfile:\n geojson.dump(jam_results, outfile)",
"def set_calculated_segments(self, total_lights, segments):\n self.set_segments(segments)\n self.set_lights_per_segment(int(total_lights / segments))",
"def test_updating_segment_criteria(self):\n pass",
"def addproperties_json(source, mortspd):\n with open(source, encoding=\"utf-8\",mode=\"r\") as f: # load boundaries\n boundaries = json.load(f)\n \n\n for regionBoundary in boundaries['features']: # get nb murdered by region\n del regionBoundary['properties']['admin1Pcod']\n del regionBoundary['properties']['admin1RefN']\n \n regionBoundary['properties']['Departement'] = regionBoundary['properties']['admin1Name']\n \n currentRegion = regionBoundary['properties']['Departement']\n if currentRegion in mortspd:\n regionBoundary['properties']['Morts'] = mortspd[currentRegion]\n \n else: \n regionBoundary['properties']['Morts'] = 0 \n continue\n return boundaries",
"def __test_all_segments_with_updates(self, arr, fnc, upd):\n segment_tree = SegmentTree(arr, fnc)\n for index, value in upd.items():\n arr[index] = value\n segment_tree.update(index, value)\n self.__test_segments_helper(segment_tree, fnc, arr)",
"def set_segments_to_value(arr, segments, value=0):\n for segment in segments:\n arr[segment[0]:segment[1]] = value",
"def speed_map_segs_to_geojson(seg_list):\n # Initialize a new GeoJSON object\n new_geojson = {\n 'type': 'FeatureCollection',\n 'features': []\n }\n\n # Dont work on the input list\n seg_list_copy = copy.deepcopy(seg_list)\n\n # Iterativley build the features of the new GeoJSON object\n for i, seg in enumerate(seg_list_copy):\n # Prepare the feature properties\n del seg['fromStop']\n del seg['toStop']\n\n # New attribute, can be used to identify segments\n seg['order'] = i\n\n # Prepare the feature geometry coordinates\n pathLocs = seg.pop('pathLocs')\n coords = [[p['lon'], p['lat']] for p in pathLocs]\n\n # Construct feature\n new_feature = {\n 'type': 'Feature',\n 'geometry': {'type': 'LineString', 'coordinates': coords},\n 'properties': seg\n }\n\n # Append feature to the list of features in GeoJSON object\n new_geojson['features'].append(new_feature)\n\n return new_geojson",
"def update(self,d:dict):\n for name,(value,n) in d.items():\n if n==0:\n continue\n self.meters[name].update(value,n)",
"def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))",
"def getsegs (bounds, split):\n segmentslist=bisect_rectange(split, bounds[0], bounds[1], bounds[2], bounds[3])\n count=1\n segpass=0\n \n #Get list of segment ids currently in database\n query=\"\"\"select seg_id from segment;\"\"\"\n df = pd.read_sql_query(query,con=engine)\n segids=set(df.seg_id)\n \n while count < len(segmentslist):\n try:\n for i in segmentslist:\n segments=getsegmentinfo(i)\n \n \n for seg in segments:\n #If running function several times for different splits, this ignores existing segments and prints a message\n if seg.id in segids: \n segpass+=1\n if (segpass % 10 == 0): \n print (\"{} segments already exist\".format(segpass))\n #Else this is a new segment, so get details from the strava and geocodio apis and save them to a dataframe and eventually to the database\n else:\n location = geocodio_client.reverse((seg.start_latlng[0], seg.start_latlng[1]))\n zipcode=location['results'][0]['address_components']['zip']\n \n newrow = {'seg_id' : seg.id,\n 'resource_state': seg.resource_state,\n 'climb_category':seg.climb_category,\n 'climb_category_desc':seg.climb_category_desc,\n 'average_grade':seg.avg_grade,\n 'elev_difference': str(seg.elev_difference).split()[0],\n 'distance': str(seg.distance).split()[0],\n 'name' : seg.name,\n 'start_lat' : seg.start_latlng[0],\n 'start_long' : seg.start_latlng[1],\n 'end_lat' : seg.end_latlng[0],\n 'end_long' : seg.end_latlng[1],\n 'points' : seg.points,\n 'starred':seg.starred,\n 'zipcode':zipcode\n }\n df=pd.DataFrame(newrow, index=[0])\n \n try:\n #Save dataframe to database\n df.to_sql('segment', engine,index=False,if_exists='append')\n except:\n pass\n\n #Prints message which keeps track of number of sub bounds completed \n if (count % 10) == 0:\n print (\"Getting segments in bound {} of {}\".format(count, len(segmentslist)))\n count+=1\n except Exception as inst:\n print (inst) \n return None",
"def test_updating_a_segment(self):\n pass",
"def update():\r\n hero_ids = []\r\n with open('json/heroes.json', 'r') as heroes:\r\n data = heroes.read()\r\n obj = json.loads(data)\r\n for i in obj['data']['constants']['heroes']:\r\n hero_ids.append(i['id'])\r\n for i, hero in enumerate(hero_ids):\r\n url = f\"https://api.stratz.com/api/v1/Hero/{hero}?rank=8\"\r\n r1, r2, r3, r4, r5 = 0, 0, 0, 0, 0\r\n safe, mid, off, roam = 0, 0, 0, 0\r\n r = requests.get(url=url, headers={\"Authorization\": f\"Bearer {TOKEN}\"})\r\n r_obj = r.json()\r\n total_matches = r_obj['heroes'][0]['pickBan']['pick']['matchCount']\r\n for j in r_obj['heroes'][0]['heroLaneDetail']:\r\n if j['laneId'] == 1:\r\n safe = j['matchCount'] / total_matches\r\n elif j['laneId'] == 2:\r\n mid = j['matchCount'] / total_matches\r\n elif j['laneId'] == 3:\r\n off = j['matchCount'] / total_matches\r\n else:\r\n roam = j['matchCount']\r\n for k in r_obj['heroes'][0]['heroRoleDetail']:\r\n if k['roleId'] == 0:\r\n core = k['matchCount'] / total_matches\r\n elif k['roleId'] == 1:\r\n support = k['matchCount'] / total_matches\r\n # safe lane core/hard support\r\n r1 = safe * core\r\n r5 = safe * support\r\n # offlane core/soft support\r\n r3 = off * core\r\n r4 = off * support\r\n # midlane core/roamer\r\n r2 = mid * core\r\n r4 += (mid * support)\r\n obj['data']['constants']['heroes'][i]['roles'] = [r1, r2, r3, r4, r5]\r\n print(f\"Roles for hero {hero} added successfully!\")\r\n time.sleep(1)\r\n with open('json/heroes.json', 'w') as heroes:\r\n json.dump(obj, heroes)",
"def _update_state_from_infos(self) -> None:\n # update the keys that is the integer label_value of the SegmentInfo\n self.infos = {\n si.label_value : si for si in self.infos.values()\n }\n return None\n\n # TODO Legacy branch\n # for idx, seginfo in enumerate(self.infos.values()):\n # prefix = f'Segment{idx}_'\n # self.metadata.update(\n # seginfo.to_dict(keystyle='slicer', prefix=prefix)\n # )",
"def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)",
"def enrich_params(self):\n\n self.params['nmaps'] = len(self.params['probes']) + np.sum(self.params['spins'] == 2)\n\n pass",
"def update_nets_with_segments(pcb_data: List[Dict[str, Any]], nets: List[Net]):\n segments = get_all_dicts_by_key(pcb_data, 'segment')\n for segment in segments:\n start: Coords = get_dict_by_key(segment['segment'], 'start')['start']\n start[1] = str(-1*float(start[1]))\n end: Coords = get_dict_by_key(segment['segment'], 'end')['end']\n end[1] = str(-1 * float(end[1]))\n width: str = get_dict_by_key(segment['segment'], 'width')['width']\n layer_data: str = get_dict_by_key(segment['segment'], 'layer')['layer']\n layers: List[Layer] = convert_to_layers(layer_data)\n new_segment: Segment = Segment(start=start, end=end, width=width, layers=layers)\n net_id: str = get_dict_by_key(segment['segment'], 'net')['net']\n for net in nets:\n if float(net.net_id) == float(net_id):\n net.segments.append(new_segment)",
"def add_property(path, key, value):\n with open(path) as fp:\n features = geojson.loads(fp.read())\n\n for feature in features.features:\n feature.properties[key] = value\n\n with open(path, 'w') as fp:\n fp.write(geojson.dumps(features))",
"def computeSegmentsAttributes(mergedSegments,updatedSpeed):\n \n mergedSegments=mergedSegments.assign(nonNullProp = updatedSpeed.notna().sum(axis=1)/updatedSpeed.columns.size)\n mergedSegments=mergedSegments.assign( edges = mergedSegments.nodes.apply(lambda x : np.array([x[0],x[-1]])))\n mergedSegments=mergedSegments.assign( cosHead = mergedSegments['loc'].apply(lambda x :np.cos(np.deg2rad(get_north_azimut([x['coordinates'][-2],x['coordinates'][-1]])))))\n mergedSegments=mergedSegments.assign( sinHead = mergedSegments['loc'].apply(lambda x :np.sin(np.deg2rad(get_north_azimut([x['coordinates'][-2],x['coordinates'][-1]])))))\n mergedSegments=mergedSegments.assign( cosTail = mergedSegments['loc'].apply(lambda x :np.cos(np.deg2rad(get_north_azimut([x['coordinates'][0],x['coordinates'][1]])))))\n mergedSegments=mergedSegments.assign( sinTail = mergedSegments['loc'].apply(lambda x :np.sin(np.deg2rad(get_north_azimut([x['coordinates'][0],x['coordinates'][1]])))))\n mergedSegments=mergedSegments.assign( head = mergedSegments.nodes.apply(lambda x : x[-1]))\n mergedSegments=mergedSegments.assign( tail = mergedSegments.nodes.apply(lambda x : x[0]))\n return mergedSegments",
"def _update_dicts(game, synergy, counter):\n radiant_win, radiant_heroes, dire_heroes = game[1], game[2], game[3]\n\n radiant_heroes = map(int, radiant_heroes.split(','))\n dire_heroes = map(int, dire_heroes.split(','))\n\n for i in range(5):\n for j in range(5):\n if i != j:\n synergy['games'][radiant_heroes[i] - 1, radiant_heroes[j] - 1] += 1\n synergy['games'][dire_heroes[i] - 1, dire_heroes[j] - 1] += 1\n\n if radiant_win:\n synergy['wins'][radiant_heroes[i] - 1, radiant_heroes[j] - 1] += 1\n else:\n synergy['wins'][dire_heroes[i] - 1, dire_heroes[j] - 1] += 1\n\n counter['games'][radiant_heroes[i] - 1, dire_heroes[j] - 1] += 1\n counter['games'][dire_heroes[i] - 1, radiant_heroes[j] - 1] += 1\n\n if radiant_win:\n counter['wins'][radiant_heroes[i] - 1, dire_heroes[j] - 1] += 1\n else:\n counter['wins'][dire_heroes[i] - 1, radiant_heroes[j] - 1] += 1",
"def update(self, identifier, new_feature):\n\n all_data = self._load()\n for i, feature in enumerate(all_data['features']):\n if self.id_field in feature:\n if feature[self.id_field] == identifier:\n new_feature['properties'][self.id_field] = identifier\n all_data['features'][i] = new_feature\n elif self.id_field in feature['properties']:\n if feature['properties'][self.id_field] == identifier:\n new_feature['properties'][self.id_field] = identifier\n all_data['features'][i] = new_feature\n with open(self.data, 'w') as dst:\n dst.write(json.dumps(all_data))",
"def set_properties(struct):",
"def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])",
"def _resize_seg(self, results):\n for key in results.get('seg_fields', []):\n if self.keep_ratio:\n gt_seg = mmcv.imrescale(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n else:\n gt_seg = mmcv.imresize(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n results[key] = gt_seg",
"def record_segments(mongodb):\n start_time = time.time()\n\n collection = mongodb[EVENTS_COL]\n # For incremental updates, retrieve only the events not processed yet.\n #entries = collection.find({\"processed\": 0}).limit(1000) #.batch_size(1000)\n entries = collection.find().limit(500000) #.batch_size(1000)\n print entries.count(), \"new events found\"\n data = process_segments(mongodb, list(entries))\n collection_seg = mongodb[SEGMENTS_COL]\n # collection.remove()\n results = {}\n for video_id in data:\n results[video_id] = {}\n for username in data[video_id]:\n # TOOD: in order to implement incremental updates,\n # we need to combine existing segment data with incoming ones.\n # Maybe not worth it. Segments are unlikely to be cut in the middle.\n # remove all existing (video, username) entries\n # collection2.remove({\"video_id\": video_id, \"user_id\": username})\n for segment in data[video_id][username][\"segments\"]:\n result = segment\n result[\"video_id\"] = video_id\n result[\"user_id\"] = username\n collection_seg.insert(result)\n results[video_id][username] = segment\n # Mark all as processed\n entries.rewind()\n for entry in entries:\n collection.update({\"_id\": entry[\"_id\"]}, {\"$set\": {\"processed\": 1}})\n # Make sure the collection is indexed.\n from pymongo import ASCENDING\n collection_seg.ensure_index(\n [(\"video_id\", ASCENDING), (\"user_id\", ASCENDING)])\n\n print sys._getframe().f_code.co_name, \"COMPLETED\", (time.time() - start_time), \"seconds\"\n # print results\n return results",
"def update(self,\n tier1_id,\n segment_id,\n segment_monitoring_profile_binding_map_id,\n segment_monitoring_profile_binding_map,\n ):\n return self._invoke('update',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n 'segment_monitoring_profile_binding_map_id': segment_monitoring_profile_binding_map_id,\n 'segment_monitoring_profile_binding_map': segment_monitoring_profile_binding_map,\n })",
"def update_route(self, vrpdata):\n self.distance = 0\n self.quantity = 0\n self.tourValid = False\n lastc = 0 # first entry is depot\n for c in self.route:\n self.distance += vrpdata.DistMatrix[lastc][c]\n self.quantity += vrpdata.CustDem[c]\n lastc = c\n self.distance += vrpdata.DistMatrix[lastc][0] # last entry is depot\n self.tourValid = (self.quantity <= vrpdata.MaxVehCap)",
"def update_dict(new,old):",
"def update(self, segment_value_id):\n segments = list()\n if segment_value_id and self._segment and any(segment['segment_value_id'] == segment_value_id for segment in self._segment_values):\n segments = [segment for segment in self._segment_values if segment['segment_value_id'] == segment_value_id]\n elif self._segment_values:\n segments = self._segment_values\n self._process_dataset_ids = None\n else:\n segments.append(0)\n #self._process_dataset_ids = None\n\n self._outer_conn = self._get_outer_connection()\n\n any_data_fetched = False\n chart_gen = ChartGenerator()\n if self._outer_conn:\n self._jfile.save_fetch_settings({'sql': self._data['data_fetch_command'],\n 'segment_id': self._data['segment_id'],\n 'source_database_connection_id': self._data['source_database_connection_id'],\n })\n\n # check if index chart is set\n index_chart = 0\n if self._charts and self._data['report_index_report_data_set_chart_id'] \\\n and any(chart['report_data_set_chart_id']==self._data['report_index_report_data_set_chart_id'] for chart in self._charts):\n index_chart = filter(lambda chart: chart['report_data_set_chart_id'] == self._data['report_index_report_data_set_chart_id'], self._charts)[0]\n # no index chart is set, use first chart\n if self._charts and not index_chart:\n index_chart = self._charts[0]\n \n\n for segment_value in segments:\n \n self._jfile.set_segment_value(segment_value)\n if segment_value:\n self._segment_value_id = segment_value['segment_value_id']\n self._segment_value = segment_value\n else:\n self._segment_value_id = 0\n self._segment_value = None\n \n self._filelocker = FileLock(\"%s%s/run_segment_%s\" % (self._path, self._id, self._segment_value_id), 0, 0)\n \n # try to lock run segment file lock\n if not self._filelocker.acquire():\n # if segment file is lock continue for next segment\n if self._logger:\n self._logger.info(\"Segment %s is locked. 
Skip it.\" % self._segment_value_id)\n continue\n \n \n if self._process_type == 'full_gen':\n self._clear_instances(self._segment_value_id)\n \n last_meas_time = self._get_last_meas_time()\n \n meas_times = self._get_meas_times(last_meas_time)\n \n update_columns = True\n \n\n if self._process_type == 'soft_gen':\n update_columns = False\n\n \n #any_segment_data_fetched = False\n last_instance = None\n last_generation_time = None\n #instance = None\n\n if meas_times:\n last_instance_id = None\n meas_time = None\n for meas_time in meas_times:\n #start = time.time()\n self._jfile.set_meas_time(meas_time)\n instance = self._get_instance(meas_time, segment_value, last_meas_time)\n all_data = dict()\n\n last_instance_id = None\n if instance:\n if self._process_type != 'soft_gen':\n last_instance = self._json_fetched_data\n last_meas_time = meas_time\n last_generation_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n #any_segment_data_fetched = True\n any_data_fetched = True\n\n # process data set\n data_set_instance = self._process_instance(instance, meas_time, update_columns, False, segment_value)\n last_instance_id = data_set_instance.instance_id\n\n # save raw data\n if self._process_type != 'soft_gen':\n if self._data['report_save_historical_instances_ind'] == 'Y':\n self._jfile.save_data_fetch_instance(\n {'instance': last_instance,\n 'meas_time': last_meas_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'generation_time': last_generation_time\n }, last_instance_id)\n else:\n self._jfile.save_data_fetch(\n {'instance': last_instance,\n 'meas_time': last_meas_time.strftime('%Y-%m-%d %H:%M:%S'),\n 'generation_time': last_generation_time\n })\n\n if data_set_instance:\n # process new row values for charts drawn by selected row values\n update_columns = False\n all_data[0] = data_set_instance.get_formatted_header_rows()\n\n # run all pivots\n for pivot in self._pivots:\n #data_set_pivot_instance = self._process_pivot(pivot, data_set_instance, segment_value)\n data_set_pivot_instance = self._process_pivot(pivot, data_set_instance)\n all_data[pivot['report_data_set_pivot_id']] = data_set_pivot_instance.get_formatted_header_rows()\n\n if self._process_type != 'soft_gen':\n self._populate_row_values(self._charts, all_data)\n\n #insert chart instances to db. 
no not save instances if no historical instances or processing soft get\n if self._data['report_save_historical_instances_ind'] == 'Y' and self._process_type != 'soft_gen':\n self._process_charts(last_instance_id)\n\n chart_gen.report_charts(self._id, self._segment_value_id, meas_time, last_instance_id, all_data, self._jfile, chart_id=0)\n \n# #create preview and thumbnail\n# index_chart = None\n#\n# # check if index chart is set\n# if self._charts and self._data['report_index_report_data_set_chart_id'] \\\n# and any(chart['report_data_set_chart_id']==self._data['report_index_report_data_set_chart_id'] for chart in self._charts):\n# index_chart = filter(lambda chart: chart['report_data_set_chart_id'] == self._data['report_index_report_data_set_chart_id'], self._charts)[0]\n#\n# # no index chart is set, use first chart\n# if self._charts and not index_chart:\n# index_chart = self._charts[0]\n\n# create_thumb_preview = True\n# print index_chart\n\n# if not index_chart:\n# # do not create thumbnail/preview if no charts available\n# create_thumb_preview = False\n# elif self._process_type == 'soft_gen':\n# # do not create thumbnail/preview if processing soft regeneration and current dataset instance id is not the last\n# if self._data['report_save_historical_instances_ind'] == 'Y' and not self._is_last_dataset_id(last_instance_id):\n# create_thumb_preview = False\n#\n#\n# if create_thumb_preview:\n# chart_gen.report_thumbnail(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart_id=index_chart['report_data_set_chart_id'])\n# chart_gen.report_preview(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart_id=index_chart['report_data_set_chart_id'])\n\n if self._process_type != 'soft_gen':\n self._update_last_meas_time(meas_time)\n\n #print \"it took\", time.time() - start, \"seconds.\"\n\n\n create_thumb_preview = True\n\n if last_instance_id:\n if not index_chart:\n # do not create thumbnail/preview if no charts available\n create_thumb_preview = False\n elif self._process_type == 'soft_gen':\n # do not create thumbnail/preview if processing soft regeneration and current dataset instance id is not the last\n if self._data['report_save_historical_instances_ind'] == 'Y' and not self._is_last_dataset_id(last_instance_id):\n create_thumb_preview = False\n\n\n if create_thumb_preview:\n chart_gen.report_thumbnail(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart_id=index_chart['report_data_set_chart_id'])\n chart_gen.report_preview(self._id, self._segment_value_id, meas_time, 0, all_data, self._jfile, chart_id=index_chart['report_data_set_chart_id'])\n\n \n # create current json files for historical instances\n if self._data['report_save_historical_instances_ind'] == 'Y' and last_instance_id and \\\n (self._process_type != 'soft_gen' or (self._process_type == 'soft_gen' and self._is_last_dataset_id(last_instance_id))):\n self._make_current_jfiles()\n\n self._make_meta()\n\n self._update_run_time()\n\n# if any_segment_data_fetched and self._process_type != 'soft_gen':\n# self._jfile.save_data_fetch({'instance': last_instance,\n# 'meas_time': last_meas_time.strftime('%Y-%m-%d %H:%M:%S'),\n# 'generation_time': last_generation_time\n# })\n\n # release run segment file lock\n self.unlock()\n \n \n if not any_data_fetched:\n return \"None of data was fetched\"\n else:\n raise Exception(\"No external db connection\")\n return ''",
"def update_properties(self, prop_dict):\n ft_dict = {ft.name: ft for ft in self.get_field_types()}\n for name, val in prop_dict.items():\n ft = ft_dict[name]\n if ft.is_parameter():\n key = \"value\"\n else:\n key = \"sample\"\n if issubclass(type(val), Sequence) and ft.array:\n self.set_field_value_array(name, None, [{key: v} for v in val])\n else:\n self.set_field_value(name, None, {key: val})"
] | [
"0.5607581",
"0.547822",
"0.5368693",
"0.5272165",
"0.51953465",
"0.5120909",
"0.51085174",
"0.5026395",
"0.4963226",
"0.4891959",
"0.48770952",
"0.48622277",
"0.48405787",
"0.48178878",
"0.48024377",
"0.4794791",
"0.47820604",
"0.47804672",
"0.47776642",
"0.47595677",
"0.47306454",
"0.46903366",
"0.46811673",
"0.46751052",
"0.4666915",
"0.46656713",
"0.46637192",
"0.46584022",
"0.46530986",
"0.46468526"
] | 0.5887457 | 0 |
Map a set of waze segment info (jams) onto segments drawn from the OSM map | def map_segments(datadir, filename):
items = json.load(open(filename))
# Only look at jams for now
items = [get_linestring(x) for x in items if x['eventType'] == 'jam']
items = util.reproject_records(items)
# Get the total number of snapshots in the waze data
num_snapshots = max([x['properties']['snapshotId'] for x in items])
osm_file = os.path.join(
datadir,
'processed',
'maps',
'osm_elements.geojson'
)
road_segments, inters = util.get_roads_and_inters(osm_file)
    # get_roads_and_inters returns elements that have shapely geometry
    # In order to output the unchanged points back out at the end,
    # they need to be converted back to geojson
    # This is something that should be addressed
inters = [{'properties': x['properties'], 'geometry': {
'type': 'Point',
'coordinates': [x['geometry'].x, x['geometry'].y]
}} for x in inters]
roads, roads_index = util.index_segments(
road_segments, geojson=True, segment=True)
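    # Pre-compute a small buffer around each road segment for the
    # overlap tests against jam geometries below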
road_buffers = []
for road in roads:
road_buffers.append(road[0].buffer(3))
print("read in {} road segments".format(len(roads)))
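    # Map each segment_id to the list of jams that overlap it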
waze_info = defaultdict(list)
count = 0
for item in items:
count += 1
if item['properties']['eventType'] == 'jam':
for idx in roads_index.intersection(item['geometry'].bounds):
segment = roads[idx]
buff = road_buffers[idx]
# But if the roads share a name,
# increase buffer size, in case of a median segment
# Waze does not appear to specify which direction
if 'street' in item['properties'] and segment[1]['name'] and \
item['properties']['street'].split()[0] == segment[1]['name'].split()[0]:
buff = segment[0].buffer(10)
overlap = buff.intersection(item['geometry'])
if not overlap.length or \
(overlap.length < 20 and segment[0].length > 20):
# Skip segments with no overlap
# or very short overlaps
continue
waze_info[segment[1]['segment_id']].append(item)
# Add waze features
# Also convert into format that util.prepare_geojson is expecting
updated_roads = []
roads_with_jams = []
for road in road_segments:
properties = get_features(
waze_info,
road.properties,
num_snapshots
)
updated_roads.append({
'geometry': {
'coordinates': [x for x in road.geometry.coords],
'type': 'LineString'
},
'properties': properties
})
if properties['segment_id'] in waze_info:
roads_with_jams.append({
'geometry': {
'coordinates': [x for x in road.geometry.coords],
'type': 'LineString'
},
'properties': properties
})
results = util.prepare_geojson(updated_roads + inters)
with open(osm_file, 'w') as outfile:
geojson.dump(results, outfile)
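    # Separately write out only the road segments that had at least one jam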
jam_results = util.prepare_geojson(roads_with_jams)
with open(os.path.join(
datadir,
'processed',
'maps',
'jams.geojson'), 'w') as outfile:
geojson.dump(jam_results, outfile) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def watershed_segment(M,xM=None,yM=None):\n\n if xM != None and yM != None:\n sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening\n sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding\n sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion\n ma,mi =(44245.21*xM*yM),(316.037*xM*yM) \n else:\n selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])\n selD = np.where(selD!=0,selD,1)\n \n sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])\n sel2D = np.where(sel2D!=0,sel2D,1)\n\n sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])\n sel3D = np.where(sel3D!=0,sel3D,1)\n\n\n sel = np.ones(selD) # for opening\n sel2 = np.ones(sel2D) # for local thresholding\n sel3 = np.ones(sel3D) # for erosion\n ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)\n\n # get a few points in the center of each blob\n \n # threshold\n bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))\n #& (M>=stats.scoreatpercentile(M.flatten(),80)))\n\n # open and erode\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)\n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n #M2 = rescaled(M,0,65000).astype(np.uint16)\n #newlabels = ndi.watershed_ift(M2,labels)\n newlabels = labels\n \n # get rid of groups unless they have the right number of pixels\n\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts)) \n old2new[(counts < int(mi)) | (counts > int(ma))] = 0\n newlabels = old2new[newlabels]\n\n return newlabels",
"def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()",
"def segment(self):\n\n #Run the marker selection GUI\n self.ps.startGUI()\n self.numSegments = self.ps.numSegments\n markerPoints = self.ps.result\n if(markerPoints == 0):\n print(\"No markers, exiting watershed...\")\n return False\n\n markers = np.zeros(self.imgShape, dtype = np.uint8)\n \n #Format the markers to matrix\n for i in range(0,len(markerPoints)):\n for j in range(0,len(markerPoints[i])):\n x = markerPoints[i][j][0]\n y = markerPoints[i][j][1]\n\n markers[x,y] = (i+1)\n\n watershed = markers.copy().astype(np.int32)\n self.segmentedImg = cv2.watershed(self.img,watershed)\n return self.segmentedImg",
"def segment(data):",
"def get_as_mb(segments):\n\n ref_x = 0.0\n ref_y = 0.0\n\n mbs = []\n \n for segment in segments:\n\n xa = segment['x']\n ya = segment['y']\n\n for x,y in zip(xa,ya):\n\n if x==0 and y==0:\n continue\n\n dx = x - ref_x\n dy = y - ref_y\n\n mb = \"\"\n\n if dx == 0:\n if dy >= 0:\n mb = 'n 0 e {}'.format(dy)\n else:\n mb = 's 0 w {}'.format(-dy)\n else:\n if dy == 0:\n if dx >= 0:\n mb = 'n 90 e {}'.format(dx)\n else:\n mb = 'n 90 w {}'.format(-dx)\n else:\n\n ang = math.degrees(math.atan(dx/dy))\n ang = round(ang,10)\n\n if dy >= 0:\n mb = \"n {}\".format(abs(ang))\n if dx >= 0:\n mb += \" e\"\n else:\n mb += \" w\"\n else:\n mb = \"s {}\".format(abs(ang))\n if dx >= 0:\n mb += \" e\"\n else:\n mb += \" w\"\n\n length = math.sqrt(dx**2 + dy**2)\n length = round(length,10)\n\n mb += \" {}\".format(length)\n \n ref_x = x\n ref_y = y\n\n mbs.append(mb)\n \n return mbs",
"def get_zone(text_reg):\n posi_zone = []\n gray_zone = []\n for txt in text_reg:\n x1, y1, x2, y2 = txt[0], txt[1], txt[2], txt[3]\n x3, y3, x4, y4 = txt[4], txt[5], txt[6], txt[7]\n line_1_2_len = np.sqrt(np.square(x1 - x2) + np.square(y1 - y2))\n line_1_4_len = np.sqrt(np.square(x1 - x4) + np.square(y1 - y4))\n if line_1_2_len <= line_1_4_len:\n # short side is line_1_2\n mid_point_1_2 = [(x1 + x2) / 2, (y1 + y2) / 2]\n mid_point_m_1 = [(x1 + mid_point_1_2[0]) / 2, (y1 + mid_point_1_2[1]) / 2]\n mid_point_m_2 = [(x2 + mid_point_1_2[0]) / 2, (y2 + mid_point_1_2[1]) / 2]\n\n mid_point_3_4 = [(x3 + x4) / 2, (y3 + y4) / 2]\n mid_point_m_3 = [(x3 + mid_point_3_4[0]) / 2, (y3 + mid_point_3_4[1]) / 2]\n mid_point_m_4 = [(x4 + mid_point_3_4[0]) / 2, (y4 + mid_point_3_4[1]) / 2]\n\n gray_zone.append([x1, y1, mid_point_m_1[0], mid_point_m_1[1], mid_point_m_4[0], mid_point_m_4[1], x4, y4])\n gray_zone.append([mid_point_m_2[0], mid_point_m_2[1], x2, y2, x3, y3, mid_point_m_3[0], mid_point_m_3[1]])\n posi_zone.append([mid_point_m_1[0], mid_point_m_1[1], mid_point_m_2[0], mid_point_m_2[1],\n mid_point_m_3[0], mid_point_m_3[1], mid_point_m_4[0], mid_point_m_4[1]])\n else:\n # short side is line_1_4\n mid_point_1_4 = [(x1 + x4) / 2, (y1 + y4) / 2]\n mid_point_m_1 = [(x1 + mid_point_1_4[0]) / 2, (y1 + mid_point_1_4[1]) / 2]\n mid_point_m_4 = [(x4 + mid_point_1_4[0]) / 2, (y4 + mid_point_1_4[1]) / 2]\n\n mid_point_2_3 = [(x2 + x3) / 2, (y2 + y3) / 2]\n mid_point_m_2 = [(x2 + mid_point_2_3[0]) / 2, (y2 + mid_point_2_3[1]) / 2]\n mid_point_m_3 = [(x3 + mid_point_2_3[0]) / 2, (y3 + mid_point_2_3[1]) / 2]\n gray_zone.append([x1, y1, x2, y2, mid_point_m_2[0], mid_point_m_2[1], mid_point_m_1[0], mid_point_m_1[1]])\n gray_zone.append([mid_point_m_4[0], mid_point_m_4[1], mid_point_m_3[0], mid_point_m_3[1], x3, y3, x4, y4])\n posi_zone.append([mid_point_m_1[0], mid_point_m_1[1], mid_point_m_2[0], mid_point_m_2[1],\n mid_point_m_3[0], mid_point_m_3[1], mid_point_m_4[0], mid_point_m_4[1]])\n\n return gray_zone, posi_zone",
"def visualise_from_joints(self, ax):\n\t\tax.set_xlim(-2,2)\n\t\tax.set_ylim(-2,2)\n\t\tax.set_zlim(-1,1)\n\n\t\tself.joint2pos()\n\t\tskeleton_points = self.skeleton_param['list_points']\n\t\tskeleton_segments = self.skeleton_param['list_segments']\n\n\t\tfor seg in skeleton_segments:\n\t\t\tpoint_ini = skeleton_segments[seg]['points'][0]\n\t\t\tx_ini = skeleton_points[point_ini]['position'][0]\n\t\t\ty_ini = skeleton_points[point_ini]['position'][1]\n\t\t\tz_ini = skeleton_points[point_ini]['position'][2]\n\n\t\t\tpoint_fin = skeleton_segments[seg]['points'][1]\n\t\t\tx_fin = skeleton_points[point_fin]['position'][0]\n\t\t\ty_fin = skeleton_points[point_fin]['position'][1]\n\t\t\tz_fin = skeleton_points[point_fin]['position'][2]\n\n\t\t\tax.plot([x_ini, x_fin],\t[y_ini, y_fin], [z_ini, z_fin], 'm')\n\n\n\t\treturn ax",
"def create_jointsmap(uv_coord, size):\r\n\r\n\t# define connections and colors of the bones\r\n\t# print(coords_hw[-1]) # this is center ( the 22nd point)\r\n\tcanvas = np.zeros((size, size, 3))\r\n\tbones = [\r\n\t\t((1, 2), THUMB_COLOR1),\r\n\t\t((2, 3), THUMB_COLOR2),\r\n\t\t((3, 4), THUMB_COLOR3),\r\n\r\n\t\t((5, 6), INDEX_COLOR1),\r\n\t\t((6, 7), INDEX_COLOR2),\r\n\t\t((7, 8), INDEX_COLOR3),\r\n\r\n\t\t((9, 10), MIDDLE_COLOR1),\r\n\t\t((10, 11), MIDDLE_COLOR2),\r\n\t\t((11, 12), MIDDLE_COLOR3),\r\n\r\n\t\t((13, 14), RING_COLOR1),\r\n\t\t((14, 15), RING_COLOR2),\r\n\t\t((15, 16), RING_COLOR3),\r\n\r\n\t\t((17, 18), PINKY_COLOR1),\r\n\t\t((18, 19), PINKY_COLOR2),\r\n\t\t((19, 20), PINKY_COLOR3)]\r\n\tpalm = []\r\n\tfor connection, _ in [((0, 1), []),\r\n\t\t\t\t\t\t ((1, 5), []),\r\n\t\t\t\t\t\t ((5, 9), []),\r\n\t\t\t\t\t\t ((9, 13), []),\r\n\t\t\t\t\t\t ((13, 17), []),\r\n\t\t\t\t\t\t ((17, 0), []), ]:\r\n\t\tcoord1 = uv_coord[connection[0]]\r\n\t\tpalm.append([int(coord1[0]), int(coord1[1])])\r\n\t# palm.append([int((coord1[0]-.5)* W_scale+ W_offset ), int(-(coord1[1]- .5)* H_scale+ H_offset)])\r\n\t# print(palm)\r\n\tcv2.fillConvexPoly(canvas, np.array([palm], dtype=np.int32), PALM_COLOR)\r\n\tfor connection, color in bones:\r\n\t\tcoord1 = uv_coord[connection[0]]\r\n\t\tcoord2 = uv_coord[connection[1]]\r\n\t\tcoords = np.stack([coord1, coord2])\r\n\t\t# 0.5, 0.5 is the center\r\n\t\tx = coords[:, 0]\r\n\t\ty = coords[:, 1]\r\n\t\tmX = x.mean()\r\n\t\tmY = y.mean()\r\n\t\tlength = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5\r\n\t\tangle = np.math.degrees(np.math.atan2(y[0] - y[1], x[0] - x[1]))\r\n\t\tpolygon = cv2.ellipse2Poly((int(mX), int(mY)), (int(length / 2), 16), int(angle), 0, 360, 1)\r\n\t\tcv2.fillConvexPoly(canvas, polygon, color)\r\n\treturn canvas",
"def make_male_m4_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from list.\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m = m2_info[2]\n\n m2_points = m2_info[3]\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI points. (dx, dy)\n Get base point to make AIKAKI shape. (ix, iy)\n \"\"\"\n # SHIGUCHI\n dx_U = m2_p0[0]\n dy_U = m2_p0[1]\n\n dx_L = m3_p1[0]\n dy_L = m3_p1[1]\n\n # AIKAKI\n tx = m4_p0[0]\n ty = (m4_p0[1] + m4_p1[1]) / 2\n\n \"\"\"\n 3 AIKAKI points\n \"\"\"\n y_k = z_m\n\n AIKAKI_offset = 0.2\n\n # male AIKAKI\n p = (tx, ty)\n p0 = (tx, ty - z_m / 2 + AIKAKI_offset / 2)\n p1 = (tx - x_m4 / 2, ty - z_m / 2 + AIKAKI_offset / 2)\n p2 = (tx - x_m4 / 2, ty + z_m / 2 - AIKAKI_offset / 2)\n p3 = (tx, ty + z_m / 2 - AIKAKI_offset / 2)\n male_AIKAKI_points = (p0, p1, p2, p3)\n\n # female AIKAKI\n p = (tx, ty)\n p0 = (tx - x_m4, ty + z_m / 2 - AIKAKI_offset / 2)\n p1 = (tx - x_m4 / 2, ty + z_m / 2 - AIKAKI_offset / 2)\n p2 = (tx - x_m4 / 2, ty - z_m / 2 + AIKAKI_offset / 2)\n p3 = (tx - x_m4, ty - z_m / 2 + AIKAKI_offset / 2)\n female_AIKAKI_points = (p0, p1, p2, p3)\n\n \"\"\"\n 4 Call approriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n\n elif SHIGUCHI_name == 'IRIWA':\n dx = dx_U\n dy = dy_U\n\n m_info = m2_info\n choice = 'UpperLeft'\n m2_KUMIKI_points1, m2_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_KUMIKI_points1)\n # rs.AddPolyline(m2_KUMIKI_points2)\n\n m2_KUMIKI_points2.reverse()\n\n dx = dx_L\n dy = dy_L\n\n m_info = m3_info\n choice = 'LowerLeft'\n m3_KUMIKI_points1, m3_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_KUMIKI_points1)\n # rs.AddPolyline(m3_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n\n elif SHIGUCHI_name == 'HAKO':\n pass\n\n else:\n sys.exit()\n\n \"\"\"\n 5 Get SEN information.\n \"\"\"\n SEN_info = get_m1_m4_SEN_info(tx, ty, m4_info, y_k)\n\n # upper shape\n upper_shape_upper, upper_shape_lower =\\\n m4_make_upper_shape_points_list(tx, ty, m4_info, SEN_info)\n\n upper_shape_upper_left_row = upper_shape_upper[0]\n upper_shape_upper_right_row = upper_shape_upper[1]\n\n upper_shape_lower_left_row = upper_shape_lower[0]\n upper_shape_lower_right_row = upper_shape_lower[1]\n\n # lower shape\n lower_shape_upper, lower_shape_lower =\\\n m4_make_lower_shape_points_list(tx, ty, m4_info, SEN_info)\n\n lower_shape_upper_left_row = lower_shape_upper[0]\n lower_shape_upper_right_row = lower_shape_upper[1]\n\n lower_shape_lower_left_row = lower_shape_lower[0]\n lower_shape_lower_right_row = lower_shape_lower[1]\n\n # middle shape\n middle_shape_upper, middle_shape_lower =\\\n m4_make_middle_shape_points_list(tx, ty, m4_info, SEN_info)\n\n middle_shape_upper_left_row = middle_shape_upper[0]\n middle_shape_upper_right_row = 
middle_shape_upper[1]\n\n middle_shape_lower_left_row = middle_shape_lower[0]\n middle_shape_lower_right_row = middle_shape_lower[1]\n\n \"\"\"\n 6 Extend list\n \"\"\"\n # Upper\n male_upper_m4 = []\n male_upper_m4.append(m4_p0)\n male_upper_m4.extend(upper_shape_lower_right_row)\n male_upper_m4.extend(male_AIKAKI_points)\n male_upper_m4.extend(upper_shape_upper_right_row)\n male_upper_m4.append(m4_p1)\n\n male_upper_m4.extend(m2_KUMIKI_points2)\n\n male_upper_m4.append(m4_p2)\n male_upper_m4.extend(upper_shape_upper_left_row)\n male_upper_m4.extend(upper_shape_lower_left_row)\n male_upper_m4.append(m4_p3)\n\n male_upper_m4.extend(m3_KUMIKI_points2)\n\n male_upper_m4.append(m4_p0)\n\n # rs.AddPolyline(male_upper_m4)\n\n # Middle\n male_middle_m4 = []\n male_middle_m4.append(m4_p0)\n male_middle_m4.extend(middle_shape_lower_right_row)\n male_middle_m4.extend(male_AIKAKI_points)\n male_middle_m4.extend(middle_shape_upper_right_row)\n male_middle_m4.append(m4_p1)\n\n male_middle_m4.extend(m2_KUMIKI_points2)\n\n male_middle_m4.append(m4_p2)\n male_middle_m4.extend(middle_shape_upper_left_row)\n male_middle_m4.extend(middle_shape_lower_left_row)\n male_middle_m4.append(m4_p3)\n\n male_middle_m4.extend(m3_KUMIKI_points2)\n\n male_middle_m4.append(m4_p0)\n\n # rs.AddPolyline(male_middle_m4)\n\n # Lower\n male_lower_m4 = []\n male_lower_m4.append(m4_p0)\n male_lower_m4.extend(lower_shape_lower_right_row)\n male_lower_m4.extend(male_AIKAKI_points)\n male_lower_m4.extend(lower_shape_upper_right_row)\n male_lower_m4.append(m4_p1)\n\n male_lower_m4.extend(m2_KUMIKI_points2)\n\n male_lower_m4.append(m4_p2)\n male_lower_m4.extend(lower_shape_upper_left_row)\n male_lower_m4.extend(lower_shape_lower_left_row)\n male_lower_m4.append(m4_p3)\n\n male_lower_m4.extend(m3_KUMIKI_points2)\n\n male_lower_m4.append(m4_p0)\n\n # rs.AddPolyline(male_lower_m4)\n\n\n m4_male_points_list = [male_upper_m4, male_middle_m4, male_lower_m4]\n\n return m4_male_points_list, SEN_info",
"def wallsAndGates(self, rooms: List[List[int]]) -> None:\n if rooms==[]: return\n xcord=len(rooms)\n ycord=len(rooms[0])\n indexstack=[(i,j) for i in range(len(rooms)) for j in range(len(rooms[0])) if rooms[i][j] == 0]\n direction=[(0,1),(1,0),(0,-1),(-1,0)]\n gatenum=1\n while indexstack != []:\n newindex=[]\n for item in indexstack:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if 0<=xpoint <len(rooms) and 0<=ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=gatenum\n newindex.append((xpoint,ypoint))\n indexstack=newindex\n gatenum+=1\n ''''\n for item in index_0:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=1\n index_1.append((xpoint,ypoint))\n for item in index_1:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=2\n index_2.append((xpoint,ypoint))\n for item in index_2:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <len(rooms) and ypoint<len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=3\n index_3.append((xpoint,ypoint))\n for item in index_3:\n for mapdir in direction:\n xpoint=item[0]+mapdir[0]\n ypoint=item[1]+mapdir[1]\n if xpoint <=len(rooms) and ypoint<=len(rooms[0]):\n if rooms[xpoint][ypoint]==pow(2,31)-1:\n rooms[xpoint][ypoint]=4\n #index_3.append((xpoint,ypoint))'''",
"def make_SHIGUCHI_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from m1_info, m2_info, m3_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m2 = m2_info[2]\n\n m2_points = m2_info[3]\n\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI\n m1 & m2 -> base point = m2_p3 = (dx_U_right, dy_U_right)\n m1 & m3 -> base point = m3_p2 = (dx_L_right, dy_L_right)\n\n m4 & m2 -> base point = m2_p0 = (dx_U_left, dy_U_left)\n m4 & m3 -> base point = m3_p1 = (dx_L_left, dy_L_left)\n \"\"\"\n dx_U_right = m2_p3[0]\n dy_U_right = m2_p3[1]\n\n dx_L_right = m3_p2[0]\n dy_L_right = m3_p2[1]\n\n dx_U_left = m2_p0[0]\n dy_U_left = m2_p0[1]\n\n dx_L_left = m3_p1[0]\n dy_L_left = m3_p1[1]\n\n \"\"\"\n 3 Call appropriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n elif SHIGUCHI_name == 'IRIWA':\n # Right side\n dx = dx_U_right\n dy = dy_U_right\n m_info = m2_info\n choice = 'UpperRight'\n m2_right_KUMIKI_points1, m2_right_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_right_KUMIKI_points1)\n # rs.AddPolyline(m2_right_KUMIKI_points2)\n\n dx = dx_L_right\n dy = dy_L_right\n m_info = m3_info\n choice = 'LowerRight'\n m3_right_KUMIKI_points1, m3_right_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_right_KUMIKI_points1)\n # rs.AddPolyline(m3_right_KUMIKI_points2)\n\n # Left side\n dx = dx_U_left\n dy = dy_U_left\n m_info = m2_info\n choice = 'UpperLeft'\n m2_left_KUMIKI_points1, m2_left_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_left_KUMIKI_points1)\n # rs.AddPolyline(m2_left_KUMIKI_points2)\n\n dx = dx_L_left\n dy = dy_L_left\n m_info = m3_info\n choice = 'LowerLeft'\n m3_left_KUMIKI_points1, m3_left_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_left_KUMIKI_points1)\n # rs.AddPolyline(m3_left_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n elif SHIGUCHI_name == 'HAKO':\n pass\n else:\n sys.exit()\n\n SHIGUCHI_list =\\\n [m2_right_KUMIKI_points1, m2_right_KUMIKI_points2,\\\n m3_right_KUMIKI_points1, m3_right_KUMIKI_points2,\\\n m2_left_KUMIKI_points1, m2_left_KUMIKI_points2,\\\n m3_left_KUMIKI_points1, m3_left_KUMIKI_points2]\n\n return SHIGUCHI_list",
"def m2_m3_make_middle_shape_points_list(dx, dy, m_info, SEN_info):\n \"\"\"\n 1 Get information from m_info & SEN_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n l_n = SEN_info[4]\n r_n = SEN_info[5]\n set = SEN_info[6]\n l_offset = SEN_info[7]\n r_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_m1_upper_row list\n middle_shape_m1_lower_row list\n\n middle_shape_m2_upper_row list\n middle_shape_m2_lower_row list\n \"\"\"\n # material1\n middle_shape_m1_upper_row = []\n middle_shape_m1_lower_row = []\n\n for i in range(l_n):\n # upper row\n ix = i * l_offset + set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4 = X_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p1, p2, p3, p4]\n middle_shape_m1_upper_row.extend((upper_points))\n\n for i in range(l_n - 1, -1, -1):\n # lower row\n ix = i * l_offset + set\n iy = t_sen + dy\n p0, p1, p2, p3, p4 = X_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p3, p4, p1, p2]\n middle_shape_m1_lower_row.extend(lower_points)\n\n # material2\n middle_shape_m2_upper_row = []\n middle_shape_m2_lower_row = []\n\n for i in range(r_n):\n # upper row\n ix = x_m - i * r_offset - set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4 = X_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p4, p3, p2, p1]\n middle_shape_m2_upper_row.extend((upper_points))\n\n for i in range(r_n - 1, -1, -1):\n # lower row\n ix = x_m - i * r_offset - set\n iy = t_sen + dy\n p0, p1, p2, p3, p4 = X_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p2, p1, p4, p3]\n middle_shape_m2_lower_row.extend(lower_points)\n\n middle_shape_m1 = [middle_shape_m1_upper_row, middle_shape_m1_lower_row]\n middle_shape_m2 = [middle_shape_m2_upper_row, middle_shape_m2_lower_row]\n\n return middle_shape_m1, middle_shape_m2",
"def watershed_segment_2(M,click_coords):\n \n # todo: choose these structures based on aspect ratio of M and input parameters\n sel = np.ones((4,10)) # for opening\n sel2 = np.ones((15,75)) # for local thresholding\n sel3 = np.ones((2,5)) # for erosion\n # get a few points in the center of each blob\n \n # threshold\n #bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)) & (M>=scoreatpercentile(M.flatten(),60)))\n \n score = stats.percentileofscore(M.flatten(),M[int(click_coords[0][1]),int(click_coords[0][0])])\n bw = (M>=stats.scoreatpercentile(M.flatten(),score))\n\n # open and erode\n #bools = sp.zeros((M.shape[0],M.shape[1]),int)\n #bools[int(click_coords[0]),int(click_coords[1])] = 1\n #blobs = sp.where(bools == 1,True,False)\n blobs = snm.binary_opening(bw,structure=sel)\n blobs = snm.binary_dilation(blobs,iterations=3)\n blobs = snm.binary_erosion(blobs,structure=sel3)\n \n \n # label\n labels,_ = ndi.label(blobs)\n labels[labels > 0] += 1\n #labels[0,0] = 1\n\n # rescale and cast to int16, then use watershed\n M2 = rescaled(M,0,65000).astype(np.uint16)\n newlabels = ndi.watershed_ift(M2,labels)\n \n # get rid of groups unless they have the right number of pixels\n counts = np.bincount(newlabels.flatten())\n old2new = np.arange(len(counts))\n old2new[(counts < 100) | (counts > 600)] = 0\n newlabels = old2new[newlabels]\n \n return newlabels",
"def jacking_calculations(dictionary, view):\n f_list = []\n s_list = []\n corners = ['Left Front', 'Right Front', 'Left Rear', 'Right Rear']\n for corner in corners:\n # Establishing Instant Center Left Front\n ic_direction, ic_point = plane_intersection_line(\n plane_equation(dictionary[corner]['Upper Fore'],\n dictionary[corner]['Upper Aft'],\n dictionary[corner]['Upper Out']),\n plane_equation(dictionary[corner]['Lower Fore'],\n dictionary[corner]['Lower Aft'],\n dictionary[corner]['Lower Out']),\n dictionary[corner]['Upper Fore'],\n dictionary[corner]['Lower Fore'])\n axis = plot_line(ic_direction, ic_point, np.linspace(0, 2, 2))\n # Establishing Side View Instant Center\n ic_xz = three_d_vector_plane_intersection((axis[0][0], axis[1][0], axis[2][0]),\n (axis[0][1], axis[1][1], axis[2][1]),\n dictionary[corner]['Wheel Center'],\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([1, 0, 0])),\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([0, 0, 1])))\n # Establishing Front View Instant Center\n ic_yz = three_d_vector_plane_intersection((axis[0][0], axis[1][0], axis[2][0]),\n (axis[0][1], axis[1][1], axis[2][1]),\n dictionary[corner]['Wheel Center'],\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([0, 1, 0])),\n np.add(np.array(dictionary[corner]\n ['Wheel Center']), np.array([0, 0, 1])))\n # Establishing Jacking Height\n y_val = dictionary['Performance Figures']['Center of Gravity'][1]\n cg_plane_points = [[1, y_val, 1], [-1, y_val, 4], [-3, y_val, 6]]\n wheel_center_ground = [(dictionary[corner]['Wheel Center'][0]),\n (dictionary[corner]['Wheel Center'][1]), 0]\n np.array(wheel_center_ground)\n jacking_height = three_d_vector_plane_intersection(wheel_center_ground,\n ic_yz, cg_plane_points[0], cg_plane_points[1],\n cg_plane_points[2])\n # Establishing Jacking Coefficient\n wc_jh = np.subtract(jacking_height, wheel_center_ground)\n jacking_coeff = -abs(wc_jh[2] / wc_jh[1])\n # Establishing Pitch Coefficient\n wc_icxz = np.subtract(ic_xz, dictionary[corner]['Wheel Center'])\n wc_cg = np.subtract(dictionary['Performance Figures']['Center of Gravity'],\n dictionary[corner]['Wheel Center'])\n pitch_coeff = (wc_icxz[2] / wc_icxz[0]) / (wc_cg[2] / wc_cg[0])\n if view == 'Front':\n f_list.append(jacking_coeff)\n elif view == 'Side':\n s_list.append(pitch_coeff)\n else:\n print 'Wtf, you want an isometric or something?'\n return\n if view == 'Front':\n return f_list\n elif view == 'Side':\n return s_list\n else:\n print 'view does not equal Front or Side'\n return",
"def m4_make_middle_shape_points_list(tx, ty, m4_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n - 1, -1, -1):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p3, p4, p1, p2]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n):\n # right row\n ix = tx - t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n):\n # left row\n ix = tx - (x_m4 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p3, p4, p1, p2]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n - 1, -1, -1):\n # right row\n ix = tx - t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p1, p2, p3, p4]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower",
"def make_male_m1_list(SHIGUCHI_name, m1_info, m2_info, m3_info, m4_info, offset):\n \"\"\"\n 1 Get information from list.\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n x_m2 = m2_info[0]\n y_m2 = m2_info[1]\n z_m = m2_info[2]\n\n m2_points = m2_info[3]\n m2_p0 = m2_points[0]\n m2_p1 = m2_points[1]\n m2_p2 = m2_points[2]\n m2_p3 = m2_points[3]\n\n x_m3 = m3_info[0]\n y_m3 = m3_info[1]\n z_m = m3_info[2]\n\n m3_points = m3_info[3]\n m3_p0 = m3_points[0]\n m3_p1 = m3_points[1]\n m3_p2 = m3_points[2]\n m3_p3 = m3_points[3]\n\n x_m4 = m4_info[0]\n y_m4 = m4_info[1]\n z_m = m4_info[2]\n\n m4_points = m4_info[3]\n m4_p0 = m4_points[0]\n m4_p1 = m4_points[1]\n m4_p2 = m4_points[2]\n m4_p3 = m4_points[3]\n\n \"\"\"\n 2 Get base point to make SHIGUCHI points. (dx, dy)\n Get base point to make AIKAKI shape. (ix, iy)\n \"\"\"\n # SHIGUCHI\n dx_U = m2_p3[0]\n dy_U = m2_p3[1]\n\n dx_L = m3_p2[0]\n dy_L = m3_p2[1]\n\n # AIKAKI\n tx = m1_p0[0]\n ty = (m1_p0[1] + m1_p1[1]) / 2\n\n \"\"\"\n 3 AIKAKI points\n \"\"\"\n y_k = z_m\n\n AIAKAKI_offset = 0.2\n\n # male AIKAKI\n p = (tx, ty)\n p0 = (tx, ty - z_m / 2 + AIAKAKI_offset / 2)\n p1 = (tx + x_m1 / 2, ty - z_m / 2 + AIAKAKI_offset / 2)\n p2 = (tx + x_m1 / 2, ty + z_m / 2 - AIAKAKI_offset / 2)\n p3 = (tx, ty + z_m / 2 - AIAKAKI_offset / 2)\n male_AIKAKI_points = (p0, p1, p2, p3)\n\n # female AIKAKI\n p = (tx, ty)\n p0 = (tx + x_m1, ty + z_m / 2 - AIAKAKI_offset / 2)\n p1 = (tx + x_m1 / 2, ty + z_m / 2 - AIAKAKI_offset / 2)\n p2 = (tx + x_m1 / 2, ty - z_m / 2 + AIAKAKI_offset / 2)\n p3 = (tx + x_m1, ty - z_m / 2 + AIAKAKI_offset / 2)\n female_AIKAKI_points = (p0, p1, p2, p3)\n\n \"\"\"\n 4 Call approriate function.\n \"\"\"\n if SHIGUCHI_name == 'TOME':\n pass\n\n elif SHIGUCHI_name == 'IRIWA':\n dx = dx_U\n dy = dy_U\n\n m_info = m2_info\n choice = 'UpperRight'\n m2_KUMIKI_points1, m2_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m2_KUMIKI_points1)\n # rs.AddPolyline(m2_KUMIKI_points2)\n\n m2_KUMIKI_points2.reverse()\n\n dx = dx_L\n dy = dy_L\n\n m_info = m3_info\n choice = 'LowerRight'\n m3_KUMIKI_points1, m3_KUMIKI_points2 = make_IRIWA_KUMIKI_points(dx, dy, m_info, choice, offset)\n # rs.AddPolyline(m3_KUMIKI_points1)\n # rs.AddPolyline(m3_KUMIKI_points2)\n\n elif SHIGUCHI_name == 'SANMAIKUMI':\n pass\n\n elif SHIGUCHI_name == 'AIKAKI':\n pass\n\n elif SHIGUCHI_name == 'HAKO':\n pass\n\n else:\n sys.exit()\n\n \"\"\"\n 5 Get SEN information.\n \"\"\"\n SEN_info = get_m1_m4_SEN_info(tx, ty, m1_info, y_k)\n\n # upper shape\n upper_shape_upper, upper_shape_lower =\\\n m1_make_upper_shape_points_list(tx, ty, m1_info, SEN_info)\n\n upper_shape_upper_left_row = upper_shape_upper[0]\n upper_shape_upper_right_row = upper_shape_upper[1]\n\n upper_shape_lower_left_row = upper_shape_lower[0]\n upper_shape_lower_right_row = upper_shape_lower[1]\n\n # lower shape\n lower_shape_upper, lower_shape_lower =\\\n m1_make_lower_shape_points_list(tx, ty, m1_info, SEN_info)\n\n lower_shape_upper_left_row = lower_shape_upper[0]\n lower_shape_upper_right_row = lower_shape_upper[1]\n\n lower_shape_lower_left_row = lower_shape_lower[0]\n lower_shape_lower_right_row = lower_shape_lower[1]\n\n # middle shape\n middle_shape_upper, middle_shape_lower =\\\n m1_make_middle_shape_points_list(tx, ty, m1_info, SEN_info)\n\n middle_shape_upper_left_row = middle_shape_upper[0]\n 
middle_shape_upper_right_row = middle_shape_upper[1]\n\n middle_shape_lower_left_row = middle_shape_lower[0]\n middle_shape_lower_right_row = middle_shape_lower[1]\n\n \"\"\"\n 6 Extend list\n \"\"\"\n # Upper\n male_upper_m1 = []\n male_upper_m1.append(m1_p0)\n male_upper_m1.extend(upper_shape_lower_left_row)\n male_upper_m1.extend(male_AIKAKI_points)\n male_upper_m1.extend(upper_shape_upper_left_row)\n male_upper_m1.append(m1_p1)\n\n male_upper_m1.extend(m2_KUMIKI_points2)\n\n male_upper_m1.append(m1_p2)\n male_upper_m1.extend(upper_shape_upper_right_row)\n male_upper_m1.extend(upper_shape_lower_right_row)\n male_upper_m1.append(m1_p3)\n\n male_upper_m1.extend(m3_KUMIKI_points2)\n\n male_upper_m1.append(m1_p0)\n\n # rs.AddPolyline(male_upper_m1)\n\n # Middle\n male_middle_m1 = []\n male_middle_m1.append(m1_p0)\n male_middle_m1.extend(middle_shape_lower_left_row)\n male_middle_m1.extend(male_AIKAKI_points)\n male_middle_m1.extend(middle_shape_upper_left_row)\n male_middle_m1.append(m1_p1)\n\n male_middle_m1.extend(m2_KUMIKI_points2)\n\n male_middle_m1.append(m1_p2)\n male_middle_m1.extend(middle_shape_upper_right_row)\n male_middle_m1.extend(middle_shape_lower_right_row)\n male_middle_m1.append(m1_p3)\n\n male_middle_m1.extend(m3_KUMIKI_points2)\n\n male_middle_m1.append(m1_p0)\n\n # rs.AddPolyline(male_middle_m1)\n\n # Lower\n male_lower_m1 = []\n male_lower_m1.append(m1_p0)\n male_lower_m1.extend(lower_shape_lower_left_row)\n male_lower_m1.extend(male_AIKAKI_points)\n male_lower_m1.extend(lower_shape_upper_left_row)\n male_lower_m1.append(m1_p1)\n\n male_lower_m1.extend(m2_KUMIKI_points2)\n\n male_lower_m1.append(m1_p2)\n male_lower_m1.extend(lower_shape_upper_right_row)\n male_lower_m1.extend(lower_shape_lower_right_row)\n male_lower_m1.append(m1_p3)\n\n male_lower_m1.extend(m3_KUMIKI_points2)\n\n male_lower_m1.append(m1_p0)\n\n # rs.AddPolyline(male_lower_m1)\n\n m1_male_points_list = [male_upper_m1, male_middle_m1, male_lower_m1]\n\n return m1_male_points_list, SEN_info",
"def m2_m3_make_upper_shape_points_list(dx, dy, m_info, SEN_info):\n \"\"\"\n 1 Get information from m_info & SEN_info.\n \"\"\"\n x_m = m_info[0]\n y_m = m_info[1]\n z_m = m_info[2]\n\n m_points = m_info[3]\n\n m_p0 = m_points[0]\n m_p1 = m_points[1]\n m_p2 = m_points[2]\n m_p3 = m_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n l_n = SEN_info[4]\n r_n = SEN_info[5]\n set = SEN_info[6]\n l_offset = SEN_info[7]\n r_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n upper_shape_left_upper_row list\n upper_shape_left_lower_row list\n\n upper_shape_right_upper_row list\n upper_shape_right_lower_row list\n \"\"\"\n # Leftside\n upper_shape_left_upper_row = []\n upper_shape_left_lower_row = []\n\n for i in range(l_n):\n # upper row\n ix = i * l_offset + set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p4, p3, p2, p1, p8, p7, p6, p5]\n upper_shape_left_upper_row.extend((upper_points))\n\n for i in range(l_n - 1, -1, -1):\n # lower row\n ix = i * l_offset + set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p8, p7, p6, p5, p4, p3, p2, p1]\n upper_shape_left_lower_row.extend(lower_points)\n\n # Rightside\n upper_shape_right_upper_row = []\n upper_shape_right_lower_row = []\n\n for i in range(r_n):\n # upper row\n ix = x_m - i * r_offset - set\n iy = y_m - t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n upper_points = [p5, p6, p7, p8, p1, p2, p3, p4]\n upper_shape_right_upper_row.extend((upper_points))\n\n for i in range(r_n - 1, -1, -1):\n # lower row\n ix = x_m - i * r_offset - set\n iy = t_sen + dy\n p0, p1, p2, p3, p4, p5, p6, p7, p8 = X_upper_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n lower_points = [p1, p2, p3, p4, p5, p6, p7, p8]\n upper_shape_right_lower_row.extend(lower_points)\n\n upper_shape_left = [upper_shape_left_upper_row, upper_shape_left_lower_row]\n upper_shape_right = [upper_shape_right_upper_row, upper_shape_right_lower_row]\n\n return upper_shape_left, upper_shape_right",
"def parse_annotations(Hinv, obsmat_txt):\n\n def to_image_frame(loc):\n \"\"\"\n Given H^-1 and (x, y, z) in world coordinates,\n returns (u, v, 1) in image frame coordinates.\n \"\"\"\n loc = np.dot(Hinv, loc) # to camera frame\n return loc / loc[2] # to pixels (from millimeters)\n\n mat = np.loadtxt(obsmat_txt)\n num_peds = int(np.max(mat[:, 1])) + 1\n peds = [np.array([]).reshape(0, 4) for _ in range(num_peds)] # maps ped ID -> (t,x,y,z) path\n\n num_frames = (mat[-1, 0] + 1).astype(\"int\")\n num_unique_frames = np.unique(mat[:, 0]).size\n recorded_frames = [-1] * num_unique_frames # maps timestep -> (first) frame\n peds_in_frame = [[] for _ in range(num_unique_frames)] # maps timestep -> ped IDs\n\n frame = 0\n time = -1\n blqk = False\n for row in mat:\n if row[0] != frame:\n frame = int(row[0])\n time += 1\n recorded_frames[time] = frame\n\n ped = int(row[1])\n\n peds_in_frame[time].append(ped)\n loc = np.array([row[2], row[4], 1])\n loc = to_image_frame(loc)\n loc = [time, loc[0], loc[1], loc[2]]\n peds[ped] = np.vstack((peds[ped], loc))\n\n return recorded_frames, peds_in_frame, peds",
"def consolidate_instances_all_way(self, stats, segmented_instances):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n #get all pixel labels in the segmented_instances mask\n segment_numbers = np.unique(segmented_instances)\n\n # remove the background label\n segment_numbers=segment_numbers[segment_numbers!=0]\n\n end_points = np.empty((len(segment_numbers),),dtype=np.object_)\n end_points.fill([])\n\n for curr_segment in segment_numbers:\n idx=[]\n i=curr_segment-1\n if curr_segment!=0:\n #Show all segments of curr_segment. Only useful to view results\n img[segmented_instances== curr_segment]= 255\n #get indeces of the segments for curr_segment\n idx = np.argwhere(segmented_instances == curr_segment)\n if len(idx>0):\n end_points[i]= self._get_end_points(segmented_instances, i, \\\n stats, idx)\n # add point markers and lines connecting each end point to centroid.\n # useful only to view results\n \"\"\"for pt_num, pt in enumerate(end_points[i]):\n cv2.circle(img, (pt[0],pt[1]), 3, 100, -1)\n cv2.line(img,(pt[0],pt[1]),\\\n (stats['centroid'][i,0], stats['centroid'][i,1]),150,2)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, 200, -1)\"\"\"\n #self.showme(img, 'line '+str(i))\n\n # cluster segments into stem instances\n cluster_mask, clustered_instances = self._cluster_segments_all_way(segmented_instances,\\\n segment_numbers, end_points, \\\n stats)\n\n #put all instances in one layer\n if len(cluster_mask)>0:\n single_layer_cluster_mask=np.zeros(cluster_mask[0].shape)\n for i in xrange(len(cluster_mask)):\n single_layer_cluster_mask[cluster_mask[i]>0]= i+1\n\n # self.showObjects(clustered_instances);\n return single_layer_cluster_mask, clustered_instances",
"def det2seg(cann, output_dir):\n\n if os.path.isdir(output_dir) is False:\n os.makedirs(output_dir, exist_ok=True)\n\n imids = cann.getImgIds()\n cats = cann.loadCats(cann.getCatIds())\n\n cat_colours = {0: (0, 0, 0)}\n\n # Set seed for palette colour\n np.random.seed(121)\n\n # Create category colourmap\n for c in cats:\n cat_colours[c['id']] = (np.random.randint(0,256), np.random.randint(0,256), np.random.randint(0,256))\n\n colour_map = np.array(list(cat_colours.values()))\n if colour_map.shape != (len(cats) + 1, 3):\n raise AssertionError(\"Incorrect shape of color map array\")\n\n for imid in tqdm(imids):\n img = cann.loadImgs(imid)\n if len(img) > 1:\n raise AssertionError(\"Multiple images with same id\")\n h, w = img[0]['height'], img[0]['width']\n name = img[0]['file_name']\n if name[-4:] != \".png\":\n name = name[:-4] + \".png\"\n im = np.zeros((h, w), dtype=np.uint8)\n annids = cann.getAnnIds(imgIds=[imid])\n if not annids:\n # No annotations\n res = Image.fromarray(im)\n res.save(os.path.join(output_dir, '{}'.format(name)))\n else:\n anns = cann.loadAnns(annids)\n for ann in anns:\n poly = ann['segmentation'][0]\n cat = ann['category_id']\n img = Image.new('L', (w, h))\n if len(poly) >= 6:\n ImageDraw.Draw(img).polygon(poly, fill=cat)\n else:\n continue\n mask = np.array(img)\n im = np.maximum(im, mask)\n res = Image.fromarray(im)\n res.putpalette(colour_map.astype(np.uint8))\n res.save(os.path.join(output_dir, '{}'.format(name)))",
"def Read_MapGen(filename,stats = False):\n from numpy import array\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_.readlines()]\n\n Shorelines = []\n segment = []\n for line in data:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(array(segment))\n segment = []\n else:\n segment.append(map(float,string.split(line)))\n if segment: Shorelines.append(array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = False\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n\n return Shorelines",
"def generatePolygons():",
"def getSegments(points):\n return _identifyStrokes(points)[1]",
"def create_spheres(self,depth_arr):\n\n\n '''\n depth_arr- depth image as numpy array\n '''\n\n try:\n #points=[nose,left_wrist,right,wrist,left_ankle,right ankle]\n points=[self.rpts[0],self.rpts[15],self.rpts[16],self.rpts[27],self.rpts[28]]\n self.spheres.points=[]\n self.spheres.header.frame_id = \"kinect_frame\"\n self.spheres.header.stamp= rospy.Time.now()\n \n self.spheres.id = 0\n self.spheres.action =Marker.ADD\n \n #points\n self.spheres.type = Marker.SPHERE_LIST\n self.spheres.color.r = 1.0\n self.spheres.color.a = 1.0\n \n self.spheres.scale.x = 0.08\n self.spheres.scale.y = 0.08\n self.spheres.scale.z = 0.01\n for p in points:\n depth_val=float(depth_arr[p[1], p[0]])\n pts_x,pts_y,pts_z=self.depth_to_xyz(p[0],p[1],depth_val)\n \n self.sphere_point=Point()\n self.sphere_point.x = pts_x\n self.sphere_point.y = pts_y\n self.sphere_point.z = pts_z\n self.spheres.points.append(self.sphere_point)\n \n except:\n pass",
"def consolidate_instances(self, stats, segmented_instances, idx_map):\n\n img = np.zeros(segmented_instances.shape).astype(np.uint8)\n\n labels = np.unique(segmented_instances)\n labels=labels[labels!=0]\n reverse_idx_map = np.zeros(len(idx_map)).astype(np.int)\n for l in labels:\n reverse_idx_map[idx_map[l]]=np.int(l)\n\n #calculate slope of line between centroids.\n # TO DO: make this more efficient.\n centroid_slopes = self._calc_centroid_slopes(segmented_instances, labels, stats, idx_map)\n seg_slopes = np.zeros(len(labels))\n #for each instance i\n for i in range(0, len(labels)):\n idx=[]\n curr_label = reverse_idx_map[i]\n if curr_label!=0:\n #Show all segments of curr_label\n img[segmented_instances== curr_label]= 255\n #calculate slope m of instance i\n idx = np.argwhere(segmented_instances == curr_label)\n if len(idx>0):\n max_y= max(idx[:,0])\n min_y= min(idx[:,0])\n x_for_max = idx[idx[:,0]==max_y, 1][0]\n x_for_min = idx[idx[:,0]==min_y, 1][0]\n if x_for_max < x_for_min:\n x1= x_for_max\n y1= max_y\n x2= x_for_min\n y2= min_y\n else:\n x1= x_for_min\n y1= min_y\n x2= x_for_max\n y2= max_y\n m = self._slope(x1,y1,x2,y2)\n seg_slopes[i]=m\n cv2.line(img,(x1, y1),(x2, y2),(0,100,0),4)\n cv2.circle(img, (stats['centroid'][i,0], stats['centroid'][i,1]), 3, (200, 0, 0), -1)\n #self.showme(img, 'line '+str(i))\n\n # cluster segments\n clusters, clustered_instances = self._cluster_segments(segmented_instances, centroid_slopes, seg_slopes, reverse_idx_map)\n #find the closest centroid to a line with slope m that starts at the instances centroid\n # self.showObjects(clustered_instances);\n return clusters, clustered_instances",
"def create_maps(self,data,tod,mjd,coords):\n features = np.log10(self.getFeatures(data))/np.log10(2)\n special_idx = np.where((features==16))[0]\n # This is for getting the stare data on more recent\n # calibration observations.\n point_data = self.get_point_data(data,special_idx)\n \n cel_maps = self.create_single_map(tod,\n coords['ra'],\n coords['dec'],\n self.source_positions['ra'][coords['sky_data_flag']],\n self.source_positions['dec'][coords['sky_data_flag']])\n az_maps = self.create_single_map(tod,\n coords['az'],\n coords['el'],\n self.source_positions['az'][coords['sky_data_flag']],\n self.source_positions['el'][coords['sky_data_flag']])\n cel_maps= self.average_maps(cel_maps)\n az_maps = self.average_maps(az_maps)\n xygrid = np.meshgrid((np.arange(self.Nx)+0.5)*self.dx - self.Nx*self.dx/2.,\n (np.arange(self.Ny)+0.5)*self.dy - self.Ny*self.dy/2.)\n \n \n cel_maps['xygrid']=xygrid\n cel_maps['StareCoords']= {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n az_maps['xygrid']=xygrid\n az_maps['StareCoords'] = {**point_data,'pa':np.nanmean(self.source_positions['pa'])}\n return cel_maps,az_maps",
"def _draw2djoints(ax, annots, links, alpha=1):\n colors = ['r', 'm', 'b', 'c', 'g']\n for finger_idx, finger_links in enumerate(links):\n for idx in range(len(finger_links) - 1):\n _draw2dseg(\n ax,\n annots,\n finger_links[idx],\n finger_links[idx + 1],\n c=colors[finger_idx],\n alpha=alpha)",
"def psana_geom_splitter(psf, returned_units='mm'):\n #geom = cspad.geometry(event)\n origin_64 = np.zeros((64, 3))\n FS_64 = np.zeros_like(origin_64)\n SS_64 = np.zeros_like(origin_64)\n\n origin_32, SS_32, FS_32 = map(np.array, zip(*psf))\n for i in range(32):\n # create the origins of each sub asic\n origin_A = origin_32[i]\n shift = 194. * 109.92 + (274.8 - 109.92) * 2.\n unit_f = FS_32[i] / np.linalg.norm(FS_32[i])\n origin_B = origin_A + unit_f * shift\n\n # save two sub-asics per each of the 32 actual asics\n idx_A = 2 * i\n idx_B = 2 * i + 1\n origin_64[idx_A] = origin_A\n origin_64[idx_B] = origin_B\n FS_64[idx_A] = FS_64[idx_B] = FS_32[i]\n SS_64[idx_A] = SS_64[idx_B] = SS_32[i]\n\n if returned_units == \"mm\": # dials convention\n return origin_64 / 1000., SS_64 / 1000., FS_64 / 1000.,\n elif returned_units == \"um\": # psgeom convention\n return origin_64, SS_64, FS_64\n elif returned_units == \"pixels\": # crystfel convention\n return origin_64 / 109.92, SS_64 / 109.92, FS_64 / 109.92",
"def m1_make_middle_shape_points_list(tx, ty, m1_info, SEN_info):\n \"\"\"\n 1 Get information from m1_info & SEN_info\n \"\"\"\n x_m1 = m1_info[0]\n y_m1 = m1_info[1]\n z_m = m1_info[2]\n\n m1_points = m1_info[3]\n\n m1_p0 = m1_points[0]\n m1_p1 = m1_points[1]\n m1_p2 = m1_points[2]\n m1_p3 = m1_points[3]\n\n w_sen = SEN_info[0]\n n_w_sen = SEN_info[1]\n h_sen = SEN_info[2]\n t_sen = SEN_info[3]\n u_n = SEN_info[4]\n l_n = SEN_info[5]\n set = SEN_info[6]\n u_offset = SEN_info[7]\n l_offset = SEN_info[8]\n\n \"\"\"\n 2 Make lists.\n middle_shape_upper_left_row list\n middle_shape_upper_right_row list\n\n middle_shape_lower_left_row list\n middle_shape_lower_right_row list\n \"\"\"\n # upper side\n middle_shape_upper_left_row = []\n middle_shape_upper_right_row = []\n\n for i in range(u_n):\n # left row\n ix = tx + t_sen\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_upper_left_row.extend((left_points))\n\n for i in range(u_n - 1, -1, -1):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty + (i * u_offset + set) + 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_upper_right_row.extend(right_points)\n\n # lower side\n middle_shape_lower_left_row = []\n middle_shape_lower_right_row = []\n\n for i in range(l_n - 1, -1, -1):\n # left row\n ix = tx + t_sen\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n left_points = [p2, p1, p4, p3]\n middle_shape_lower_left_row.extend((left_points))\n\n for i in range(l_n):\n # right row\n ix = tx + (x_m1 - t_sen)\n iy = ty - (i * l_offset + set) - 10\n\n p0, p1, p2, p3, p4 = Y_middle_shape_points(ix, iy, w_sen, t_sen, n_w_sen)\n right_points = [p4, p3, p2, p1]\n middle_shape_lower_right_row.extend(right_points)\n\n middle_shape_upper = [middle_shape_upper_left_row, middle_shape_upper_right_row]\n middle_shape_lower = [middle_shape_lower_left_row, middle_shape_lower_right_row]\n\n return middle_shape_upper, middle_shape_lower",
"def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A"
] | [
"0.55145633",
"0.5488799",
"0.544273",
"0.5393466",
"0.53590786",
"0.5282899",
"0.52790564",
"0.52568614",
"0.5226479",
"0.5168571",
"0.51627374",
"0.5147692",
"0.5122647",
"0.5085006",
"0.507419",
"0.5074113",
"0.5067044",
"0.5063809",
"0.50582534",
"0.50517637",
"0.50384414",
"0.5028394",
"0.50237644",
"0.50092053",
"0.50086504",
"0.50065273",
"0.49706173",
"0.4970019",
"0.49540222",
"0.49473184"
] | 0.67416376 | 0 |
Change self.O to the index of V | def trans_o(self):
temp_array = []
for j in range(self.O.shape[1]):
for i in range(self.V.shape[1]):
if self.V[0, i] == self.O[0, j]:
temp_array.append(i)
self.O = mat(temp_array) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_v_item(self, vindex, new_val):\n\n i = [((0, 0),),\n ((1, 1),),\n ((2, 2),),\n ([1, 2], [2, 1]),\n ([2, 0], [0, 2]),\n ([0, 1], [1, 0])]\n\n for j, k in i[vindex]:\n self[j, k] = new_val",
"def position(self, u, v):\n raise NotImplementedError",
"def other(self,idx):\n\n if idx == self.v.index:\n return self.w.index\n elif idx == self.w.index:\n return self.v.index\n else:\n print(\"Inconsistent edge vertex.\")",
"def v(self, v):\n self._v = v",
"def update_idx(self):\n self.idx = (self.F * self.FMUL +\n self.E * self.EMUL +\n self.Z * self.ZMUL +\n self.A * self.AMUL +\n self.B * self.BMUL )",
"def __setitem__(self, i, val):\n\t\tif i < self.n:\n\t\t\tself.v[i] = val",
"def set_idx(self, i, other, tensor_value):\n for k, v in self.variables.items():\n if k not in other.variables:\n self.variables[k][i] *= 0\n\n for k, v in other.variables.items():\n if k not in self.variables:\n self.variables[k] = np.zeros(tensor_value.shape)\n self.variables[k][i] = other.variables[k]",
"def index(self, value, i=0, j=None):\n # YOUR CODE HERE\n raise NotImplementedError()",
"def v(self, v):\n\n self._v = v",
"def v(self, v):\n\n self._v = v",
"def __setitem__(self, i, v):\n # The policy function can't be modified",
"def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)",
"def __getitem__(self, i):\n\t\tif i < self.n:\n\t\t\treturn self.v[i]",
"def update(self, v, r):\n pass",
"def __getitem__(self, index):\n if isinstance(index, slice):\n return Vetor(self.elem[index])\n else:\n return self.elem[index]",
"def LD_Vx_I(self, x):\n\t\tfor i in range(0, x + 1):\n\t\t\tself.V[i] = self.ram[self.I + i]",
"def v(self, v) :\n\t\ttry :\n\t\t\tself._v = v\n\t\texcept Exception as e:\n\t\t\traise e",
"def LD_I_Vx(self, x):\n\t\tfor i in range(0, x + 1):\n\t\t\tself.ram[self.I + i] = self.V[i]",
"def index(self, v):\n return self._bin_search_recursive(v, 0, self.__len__() - 1)",
"def atualizaVertice(self, v = []):\r\n\r\n #reseta as arestas para d0\r\n #for a in self.arestas:\r\n # a.peso = a.d0\r\n\r\n for vertice in v:\r\n for a in self.arestas:\r\n if (vertice.id == a.v1.id):\r\n #print (\"atualiza aresta\", a.id)\r\n if (a.v2.atualizado):\r\n a.peso = a.d2\r\n else:\r\n a.peso = a.d1\r\n\r\n\r\n if (vertice.id == a.v2.id):\r\n #print (\"atualiza aresta\", a.id)\r\n if (a.v1.atualizado):\r\n a.peso = a.d2\r\n else:\r\n a.peso = a.d1\r\n \r\n vertice.atualizado = True\r\n \r\n for vertice in v:\r\n vertice.atualizado = False",
"def __pos__(self) -> 'MultiVector':\n\n newValue = self.value + 0 # copy\n\n return self._newMV(newValue)",
"def ulist(M, pos,v):\n import copy\n list= copy.deepcopy(M)\n list[pos]=v\n return list",
"def C(self, u, v):\n pass",
"def reconstruct_input(self, ix):",
"def V(self):\n return self._V",
"def V(self):\n return self._V",
"def V(self):\n return self._V",
"def __init__(self,index):\n self.index=index",
"def setInternalIndex(self,ind):\n\t\tself.trMtrxNode_ind = ind",
"def order_v(self):\n return self._degree_v + 1"
] | [
"0.616067",
"0.5951123",
"0.5940856",
"0.59249765",
"0.589935",
"0.58980864",
"0.5833399",
"0.5831099",
"0.57763237",
"0.57763237",
"0.5732142",
"0.5729264",
"0.571432",
"0.5690176",
"0.5676093",
"0.56725025",
"0.5644405",
"0.56342834",
"0.5614423",
"0.55696946",
"0.5519098",
"0.5507883",
"0.5506215",
"0.5503665",
"0.5497862",
"0.5497862",
"0.5497862",
"0.5440639",
"0.543569",
"0.54310876"
] | 0.7162628 | 0 |
This method exists so that child classes can define additional object state checks before cloning (e.g., see ModelWrapperBase, which should not clone if the model-caching manager has already been set) | def additional_cloning_checks(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _try_clone_model(model):\n try:\n return copy.deepcopy(model)\n except Exception:\n warnings.warn(\n \"Failed to clone model. Model state might be mutated during verification.\"\n )\n return model",
"def sanitize_clone(self):\n pass",
"def clone(self):\n raise NotImplementedError",
"def _prepare(cls):\n # the dbmodel is either the proxy base or ourselves\n dbmodel = cls._meta.concrete_model if cls._meta.proxy else cls\n cls.__dbclass__ = dbmodel\n if not hasattr(dbmodel, \"__instance_cache__\"):\n # we store __instance_cache__ only on the dbmodel base\n dbmodel.__instance_cache__ = {}\n super()._prepare()",
"def _clone(self):\n #可见,这样可以将本类初始化参数全部赋给c对象,作为c的属性\n c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)\n c._sticky_filter = self._sticky_filter\n c._for_write = self._for_write\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n c._known_related_objects = self._known_related_objects\n c._iterable_class = self._iterable_class\n c._fields = self._fields\n return c",
"def _base_clone(self, queryset, klass=None, setup=False, **kwargs):\r\n cache_query = kwargs.get('_cache_query', getattr(self, '_cache_query', False))\r\n kwargs['_cache_query'] = cache_query\r\n if not hasattr(self, '_reversemapping'):\r\n self._reversemapping = {}\r\n\r\n if cache_query and isinstance(queryset, ValuesQuerySet):\r\n fields = kwargs.get('_fields', getattr(self,'_fields', ()))\r\n if fields:\r\n fields = list(fields)\r\n else:\r\n fields = [f.attname for f in self.model._meta.fields]\r\n \r\n for related_field in self._related_fields.keys():\r\n if related_field not in fields and self._is_valid_field(related_field):\r\n fields.append(related_field)\r\n setup = True\r\n kwargs['_fields'] = tuple(fields)\r\n \r\n if cache_query:\r\n reversemapping = {}\r\n for attname, related in self._get_reverse_relations(self.model):\r\n reversemapping[attname + '_cache'] = attname\r\n kwargs['_reversemapping'] = reversemapping\r\n if isinstance(queryset, ValuesQuerySet):\r\n parent_class = ValuesQuerySet\r\n else:\r\n parent_class = QuerySet\r\n clone = parent_class._clone(self, klass=klass, setup=setup, **kwargs)\r\n if not hasattr(clone, '_cache_query'):\r\n clone._cache_query = getattr(self, '_cache_query', False)\r\n if not hasattr(clone, '_reversemapping'):\r\n clone._reversemapping = getattr(self, '_reversemapping', {})\r\n if not hasattr(clone, '_target_maps'):\r\n clone._target_maps = getattr(self, '_target_maps', {})\r\n if not hasattr(clone, '_flush_fields'):\r\n clone._flush_fields = getattr(self, '_flush_fields', ())\r\n \r\n return clone",
"def _clone(self):\n c = self.__class__(\n model=self.model,\n query=self.query.chain(),\n using=self._db,\n hints=self._hints,\n )\n c._sticky_filter = self._sticky_filter\n c._for_write = self._for_write\n c._prefetch_related_lookups = self._prefetch_related_lookups[:]\n c._known_related_objects = self._known_related_objects\n c._iterable_class = self._iterable_class\n c._fields = self._fields\n return c",
"def clone(self):",
"def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()",
"def onClone(self):\n pass",
"def clone(self):\n return None",
"def __getstate__(self):\r\n\r\n d = copy.copy(self.__dict__)\r\n del d['_room_table_model'] # Do not save easily re-creatable table models\r\n del d['_socket_table_model'] # Do not save easily re-creatable table models\r\n del d['_fuse_table_model'] # Do not save easily re-creatable table models\r\n del d['_fuse_tree_item_model']\r\n del d['_invoker']\r\n\r\n return d",
"def copy(self):\n return super().copy()",
"def __getstate__(self):\n copy = self.__dict__.copy()\n copy['_workaround'] = None\n return copy",
"def __copy__(self):\n raise NotImplementedError",
"def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result",
"def is_clone(self):\n return not self.is_original()",
"def shallow_copy(self):\n # TODO: Rename this to __copy__()?\n raise NotImplementedError(\"shallow_copy is not implemented\")",
"def __deepcopy__(self, memo):\n cls = self.__class__\n result = cls.__new__(cls)\n memo[id(self)] = result\n for k, v in self.__dict__.items():\n if k not in ['viewer', 'automatic_rendering_callback']:\n setattr(result, k, copy.deepcopy(v, memo))\n else:\n setattr(result, k, None)\n return result",
"def make_mutable_REMEMBER_CLEANUP_FIRST(self):\n # UNSET the flag to make object immutable and hashable - need to do it in a roundabout way,\n # because the immutability prevents simply \"self.immutable = False\" from working!\n self.__dict__['immutable'] = False\n # but if I put __slots__ in, self.__dict__ won't exist any more... TODO Options for then:\n # setattr(self, 'immutable', False) - doesn't seem to work?\n # object.__setattr__(self, 'immutable', False) - does that work?",
"def test_deepcopy_removes_cached_values(self):\n foreign_object = Membership._meta.get_field(\"person\")\n # Trigger storage of cached_property into ForeignObject's __dict__.\n foreign_object.path_infos\n foreign_object.reverse_path_infos\n # The ForeignObjectRel doesn't have reverse_path_infos.\n foreign_object.remote_field.path_infos\n self.assertIn(\"path_infos\", foreign_object.__dict__)\n self.assertIn(\"reverse_path_infos\", foreign_object.__dict__)\n self.assertIn(\"path_infos\", foreign_object.remote_field.__dict__)\n # Cached value is removed via __getstate__() on ForeignObjectRel\n # because no __deepcopy__() method exists, so __reduce_ex__() is used.\n remote_field_copy = copy.deepcopy(foreign_object.remote_field)\n self.assertNotIn(\"path_infos\", remote_field_copy.__dict__)\n # Field.__deepcopy__() internally uses __copy__() on both the\n # ForeignObject and ForeignObjectRel, so all cached values are removed.\n foreign_object_copy = copy.deepcopy(foreign_object)\n self.assertNotIn(\"path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"reverse_path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"path_infos\", foreign_object_copy.remote_field.__dict__)",
"def copy(self):\r\n raise Exception, \"not implemented\"",
"def __deepcopy__(self, memodict=None):\n return self.copy()",
"def clone(self):\n clone = super(Property, self).clone()\n clone.fget = self.fget\n clone.fset = self.fset\n clone.cached = self.cached\n return clone",
"def make_immutable(self):\n # just set the flag to make object immutable and hashable\n self.immutable = True",
"def __deepcopy__(self, memo):\r\n new_inst = super().__deepcopy__(memo)\r\n new_inst.road_width = self.road_width\r\n new_inst.road_length = self.road_length\r\n new_inst.surface = self.surface\r\n \r\n return new_inst",
"def clone(self) -> Mutator:\n raise NotImplementedError",
"def clone(self):\n return _libsbml.ModelCreator_clone(self)",
"def test_copy_removes_direct_cached_values(self):\n foreign_object = Membership._meta.get_field(\"person\")\n # Trigger storage of cached_property into ForeignObject's __dict__.\n foreign_object.path_infos\n foreign_object.reverse_path_infos\n # The ForeignObjectRel doesn't have reverse_path_infos.\n foreign_object.remote_field.path_infos\n self.assertIn(\"path_infos\", foreign_object.__dict__)\n self.assertIn(\"reverse_path_infos\", foreign_object.__dict__)\n self.assertIn(\"path_infos\", foreign_object.remote_field.__dict__)\n # Cached value is removed via __getstate__() on ForeignObjectRel\n # because no __copy__() method exists, so __reduce_ex__() is used.\n remote_field_copy = copy.copy(foreign_object.remote_field)\n self.assertNotIn(\"path_infos\", remote_field_copy.__dict__)\n # Cached values are removed via __copy__() on ForeignObject for\n # consistency of behavior.\n foreign_object_copy = copy.copy(foreign_object)\n self.assertNotIn(\"path_infos\", foreign_object_copy.__dict__)\n self.assertNotIn(\"reverse_path_infos\", foreign_object_copy.__dict__)\n # ForeignObjectRel's remains because it's part of a shallow copy.\n self.assertIn(\"path_infos\", foreign_object_copy.remote_field.__dict__)",
"def __deepcopy__(self, others={}):\n miniMe = self.__class__.__new__(self.__class__)\n others[id(self)] = miniMe\n for key, val in self.__dict__.items():\n if id(val) in others:\n setattr(miniMe, key, others[id(val)])\n else:\n new = deepcopy(val, others)\n others[id(val)] = new\n setattr(miniMe, key, new)\n if miniMe.package:\n miniMe._addOurselvesToPackage(self.path)\n return miniMe"
] | [
"0.622249",
"0.6182846",
"0.6147965",
"0.61453974",
"0.60161626",
"0.60104495",
"0.5994128",
"0.5990812",
"0.5983475",
"0.5928679",
"0.5925357",
"0.59229016",
"0.58280325",
"0.581751",
"0.5797897",
"0.57970923",
"0.5795579",
"0.5782216",
"0.57797414",
"0.5779336",
"0.57745355",
"0.575398",
"0.57418525",
"0.573738",
"0.57050794",
"0.57020205",
"0.56693566",
"0.5642613",
"0.56158435",
"0.5599714"
] | 0.73478645 | 0 |
Saves the computed kernel for easy lookup as a .npz file | def create_kernel(ktype='sph-anarchy'):
kernel = get_kernel(ktype)
header = np.array([{'kernel': ktype, 'bins': kernsize}])
np.savez('kernel_{}.npz'.format(ktype), header=header, kernel=kernel)
print (header)
return kernel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', chip_ids=self.chip_ids, core_ids=self.core_ids, cx_ids=self.cx_ids)",
"def save(self, model_out_file):\n\t\tvariables_dict = {v.name: v for v in tf.global_variables()}\n\t\tvalues_dict = self.sess.run(variables_dict)\n\t\tnp.savez(open(model_out_file, 'wb'), **values_dict)",
"def save(self, filename):\n np.savez(temp_dir + '/' + filename + '.npz', core_ids=self.core_ids, cx_ids=self.cx_ids)",
"def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)",
"def save(self, path):\n np.savez_compressed(path, **self.model_dict)",
"def save_to_disk(self, filename='ens_state.nc'):\n self.to_netcdf(filename)",
"def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])",
"def save_intrinsics(self, save_dir):\n if not osp.isfile(\n osp.join(save_dir, 'intrinsics', 'intrinsics.npy')):\n np.save(osp.join(\n save_dir, 'intrinsics', 'intrinsics'), self.camera_model.K)",
"def save_spi3d(self):\n lut = self.generate_lut()\n file_path = os.path.join(self.output, self.name)\n file_io.save_file(lut, file_path)",
"def save_trained_model(self, filename):\n d = self.pack_npz()\n with open(filename, 'wb') as f:\n np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)",
"def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)",
"def write_kernel(w, k):\n w.writeln(\"void {k}(const Image<int>& in, Image<int>& out\".format(k=k.name))\n # write the tap signal in the function argument list\n for tapName in k.rtapNames:\n #tapType = k.edges[tapName].dtype\n #tapCType = dtypeMap[tapType]\n tapCType = getCType(k.edges[tapName])\n for indices in expand_range(k.edges[tapName].dim):\n w.writeln(\"\\t, {type} {sig}\".format(type=tapCType, sig=mangle((tapName, indices))))\n w.writeln(\")\")\n w.writeln(\"{\")\n w.indent()\n # TODO: insert size error checking into C code here\n\n w.writeln(\"for(int y = 0; y < in.height(); y++){\")\n w.indent()\n w.writeln(\"for(int x = 0; x < in.width(); x++){\")\n w.indent()\n\n \n # Grab the register declaration for the partial-pixel output and blow it into\n # the complete list of input registers\n startName = k.ppoutName\n #startType = k.edges[startName].dtype\n #startCType = dtypeMap[startType]\n startCType = getCType(k.edges[startName])\n for indices in expand_range(k.edges[startName].dim):\n # HACK: work with multi-channel or single-channel images\n z_idx = 0\n if len(indices) == 3:\n z_idx = indices[2]\n\n w.writeln(\"{type} {reg} = in(x+{xoff}, y+{yoff}, {z});\".format(\n type=startCType,\n reg=mangle((startName, indices)),\n xoff=(indices[0]-k.centroid[0]), \n yoff=(indices[1]-k.centroid[1]), z=z_idx))\n \n # Set up the constants\n for const in k.constants:\n # TODO: be careful here, because we need to be consistent with naming/indexing\n # TODO: handle int/float; infer datatype in parser\n w.writeln(\"const float {reg} = {val};\".format(reg=mangle((const[0], [0])), val=const[1]))\n \n w.writeln(\"\")\n\n\n #Special Register Examples for Reduce:\n #fix_17_0 pixel_out_pos[1:0] # Location of Reduce pixel in output image\n #fix_17_0 centroid_pos[1:0] # Location of Centroid in input image\n if \"centroid_pos\" in k.specialRegs:\n w.writeln(\"int centroid_pos_0 = x;\")\n w.writeln(\"int centroid_pos_1 = y;\")\n\n if \"pixel_out_pos\" in k.specialRegs:\n w.writeln(\"int pixel_out_pos_0 = x;\")\n w.writeln(\"int pixel_out_pos_1 = y;\")\n \n # Create a list of (name, index) tuples representing the valid (i.e., evaluated) signal\n validRegs = [(startName, i) for i in expand_range(k.edges[startName].dim)]\n validRegs += [(tapName, i) for tapName in k.rtapNames \n for i in expand_range(k.edges[tapName].dim)]\n validRegs += [(regName, i) for regName in k.specialRegs \n for i in expand_range(k.edges[regName].dim)]\n validRegs += [(c[0], [0]) for c in k.constants]\n \n # Make a copy of the list of operations which we can remove stuff from\n unprocessed = dict(k.ops)\n \n # Process all the operations\n while len(unprocessed) > 0:\n progress = False\n for opKey in unprocessed:\n op = k.ops[opKey]\n # Find an operation that can be evaluated\n if opOk(op, validRegs):\n #dtype = k.edges[op.result[0]].dtype\n #dtype = dtypeMap[dtype] # Look up the C-equivalent for this type\n dtype = getCType(k.edges[op.result[0]])\n # TODO: include integer/fraction width\n \n # TODO: error checking that we have the right number of operands - this should be done in the parser, actually\n # Evaluate it\n if op.name in ['max', 'min']:\n write_complex_op(w, op, dtype)\n elif op.name == \"sum\": \n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"mv\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"add\":\n w.writeln(\"{dtype} {dst} = 
{src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' + ', mangle(op.operands))))\n elif op.name == \"sub\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' - ', mangle(op.operands))))\n elif op.name == \"mult\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' * ', mangle(op.operands))))\n elif op.name == \"div\":\n w.writeln(\"{dtype} {dst} = {src};\".format(dtype=dtype, dst=mangle(op.result), src=str.join(' / ', mangle(op.operands))))\n\n elif op.name == \"lshift\":\n w.writeln(\"{dtype} {dst} = {op1} << {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"rshift\":\n w.writeln(\"{dtype} {dst} = {op1} >> {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"and\":\n w.writeln(\"{dtype} {dst} = {op1} & {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"or\":\n w.writeln(\"{dtype} {dst} = {op1} | {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"ne\":\n w.writeln(\"{dtype} {dst} = {op1} != {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"eq\":\n w.writeln(\"{dtype} {dst} = {op1} == {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lt\":\n w.writeln(\"{dtype} {dst} = {op1} < {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"lte\":\n w.writeln(\"{dtype} {dst} = {op1} <= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gt\":\n w.writeln(\"{dtype} {dst} = {op1} > {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"gte\":\n w.writeln(\"{dtype} {dst} = {op1} >= {op2};\".format(dtype=dtype, dst=mangle(op.result), op1=mangle(op.operands[0]), op2=mangle(op.operands[1])))\n elif op.name == \"not\":\n w.writeln(\"{dtype} {dst} = !{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"abs\":\n w.writeln(\"{dtype} {dst} = ({src} >= 0) ? {src} : (-{src});\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n elif op.name == \"inv\":\n w.writeln(\"{dtype} {dst} = -{src};\".format(dtype=dtype, dst=mangle(op.result), src=mangle(op.operands[0])))\n\n elif op.name == \"mux\":\n w.writeln(\"{dtype} {dst} = {cond} ? 
{op1} : {op2};\".format(dtype=dtype, dst=mangle(op.result), \\\n cond=mangle(op.operands[0]), op1=mangle(op.operands[1]), op2=mangle(op.operands[2])))\n else:\n print \"Unhandled operator \" + opKey\n \n validRegs.append(op.result)\n # Remove it from the list\n unprocessed.pop(opKey)\n progress = True\n break # We changed the list, so we gotta start over\n \n # If we went through the whole list without finding any ops to evaluate,\n # something is wrong and we need to give up.\n if progress is False:\n print \"Failed to evaluate some ops!\"\n for opKey in unprocessed:\n print \"\\t %s %s\" % (unprocessed[opKey].name, unprocessed[opKey].result)\n break\n \n for indices in expand_range(k.edges[k.sink].dim):\n #writeln('printf(\"result: %f\\\\n\", {reg});'.format(reg=mangle((k.sink, indices))))\n # TODO: make this handle depths other than 3\n w.writeln('out(x,y,{z}) = {reg};'.format(z=indices[0], reg=mangle((k.sink, indices))))\n\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"}\")\n w.unindent()\n w.writeln(\"} // END %s\" % k.name)\n w.writeln(\"\\n\")",
"def save(self, format='npz'):\n _path = os.getenv('STARTERLITE') + '/output/grf/%s.%s' % (self.fn, format)\n _wf_dict = {'grf': self.survey_maps, 'coords': self.survey_map_coords}\n np.savez(_path, **_wf_dict)",
"def save_group_KDE(self, model, groupname, outdir):\n\t\t#Load in necessary info for either data or noise depending on the passed model\n\t\tif model == 'Signal':\n\t\t\tdic = self.signal\n\t\telif model == 'Noise':\n\t\t\tdic = self.noise\n\t\telse:\n\t\t\t#Raise error here\n\t\t\tpass\n\t\t\n\t\t#Save binary files cotaining both KDE coordinates and KDE values\n\t\tnp.save('%s/%s_%s_KDE_coords.npy'%(outdir,groupname,model), dic[groupname]['KDE'][0])\n\t\tnp.save('%s/%s_%s_KDE_values.npy'%(outdir,groupname,model), dic[groupname]['KDE'][1])",
"def save_equilibrator_bin_data(self, npz_file_name):\n preprocess_dict = {'cids': self.params['cids']}\n for k, v in self.params.items():\n if k.find('preprocess_') != -1:\n preprocess_dict[k.replace('preprocess_', '')] = v\n np.savez_compressed(npz_file_name, **preprocess_dict)",
"def save_memory(self, filename):\n \n\n with open(filename + '/obses.npy', 'wb') as f:\n np.save(f, self.obses)\n \n with open(filename + '/actions.npy', 'wb') as f:\n np.save(f, self.actions)\n\n with open(filename + '/next_obses.npy', 'wb') as f:\n np.save(f, self.next_obses)\n \n with open(filename + '/rewards.npy', 'wb') as f:\n np.save(f, self.rewards)\n \n with open(filename + '/not_dones.npy', 'wb') as f:\n np.save(f, self.not_dones)\n \n with open(filename + '/not_dones_no_max.npy', 'wb') as f:\n np.save(f, self.not_dones_no_max)\n\n with open(filename + '/index.txt', 'w') as f:\n f.write(\"{}\".format(self.idx))\n\n print(\"save buffer to {}\".format(filename))",
"def save(self, path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n np.save(os.path.join(path, 'V.npy'), self.V.cpu().numpy())\n\n if self.W is not None:\n np.save(os.path.join(path, 'W.npy'), self.W.cpu().numpy())\n\n if self.vb is not None:\n np.save(os.path.join(path, 'v_bias.npy'), self.vb.cpu().numpy())\n\n if self.wb is not None:\n np.save(os.path.join(path, 'w_bias.npy'), self.wb.cpu().numpy())\n\n if self.dictionary is not None:\n self.dictionary.save(os.path.join(path, 'dictionary'))",
"def _write_kernel_files(parameters:RunParameters, periods:np.array,\n save_name:str, n_runs:int):\n\n execfile = '{0}.run_kernels'.format(save_name)\n\n max_angular_order = {\n 'Rayleigh': 5500,\n 'Love': 3500,\n }\n\n eigfiles = (['{}_{}.eig_fix'.format(save_name, run)\n for run in range(1, n_runs)])\n\n\n with open(execfile, 'w') as fid:\n fid.write(\"\"\"#!/bin/bash\n#\necho \"======================\" > {0}.log\necho \"Stripping MINEOS\" >> {0}.log\n#\n{1}/mineos_strip <<! >> {0}.log\n{0}.strip\n{2}\n{3}\n\n!\n#\necho \"======================\" > {0}.log\necho \"Done stripping, now calculating tables\" > {0}.log\n#\n{1}/mineos_table <<! >> {0}.log\n{0}.table\n40000\n0 {4:.1f}\n1 {5:.0f}\n{0}.q\n{0}.strip\n\n!\n#\necho \"======================\" > {0}.log\necho \"Creating branch file\" > {0}.log\n#\n{1}/plot_wk <<! >> {0}.log\ntable {0}.table_hdr\nsearch\n1 0.0 {4:.1f}\n99 0 0\nbranch\n\nquit\n!\n#\necho \"======================\" > {0}.log\necho \"Making frechet phV kernels binary\" > {0}.log\n#\nif [ -f \"{0}.cvfrechet\" ]; then rm {0}.cvfrechet; fi\n{1}/frechet_cv <<! >> {0}.log\n{6}\n{0}.table_hdr.branch\n{0}.cvfrechet\n{2}\n0\n{3}\n\n!\n#\necho \"======================\" > {0}.log\necho \"Writing phV kernel files for each period\" > {0}.log\n#\n \"\"\".format(\n save_name,\n parameters.bin_path,\n '{}_0.eig_fix'.format(save_name),\n '\\n'.join(eigfiles),\n 1000 / min(periods) + 0.1, # max freq. in mHz\n max_angular_order[parameters.Rayleigh_or_Love],\n parameters.qmod_path,\n ))\n\n # Need to loop through periods in executable\n for period in periods:\n with open(execfile, 'a') as fid:\n fid.write(\"\"\"{1}/draw_frechet_gv <<!\n{0}.cvfrechet\n{0}_cvfrechet_{2:.1f}s\n{2:.2f}\n!\n \"\"\".format(\n save_name,\n parameters.bin_path,\n period,\n ))\n\n\n return execfile",
"def _multiprocessing_save_sp(in_out_path):\n vp, np = in_out_path\n if not os.path.exists(np + '.npz'):\n save_compact_fft(\n np,\n embedding_fft(load_one_sp_embedding(vp))\n )\n return np",
"def save_npys(data, model_name, output_string):\n for k, v in data.iteritems():\n output = os.path.join(\n output_string,\n '%s_%s' % (model_name, k)\n )\n np.save(output, v)",
"def outputPulses(self,filename):\n np.save(filename,self.getData())\n return",
"def save(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n filename = filename# + '_Lx_' + str(self.Lx) + 'm_Ly_' + str(self.Ly) + 'm'\n self.path_filename = path + filename + '.pkl'\n f = open(self.path_filename, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()",
"def save(self, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n params = {\n 'model': self.__class__.__name__,\n 'elements': self.elements,\n 'r_cut': self.r_cut,\n 'fitted': self.gp.fitted,\n 'gp': {\n 'kernel': self.gp.kernel.kernel_name,\n 'n_train': self.gp.n_train,\n 'sigma': self.gp.kernel.theta[0],\n 'noise': self.gp.noise,\n 'r0': self.gp.kernel.theta[2]\n },\n 'grid': {\n 'r_min': self.grid_start,\n 'r_max': self.grid_end,\n 'r_num': self.grid_num,\n 'filename': {}\n } if self.grid else {}\n }\n\n gp_filename = \"GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy\".format(\n p=params)\n\n params['gp']['filename'] = gp_filename\n self.gp.save(path / gp_filename)\n\n for k, grid in self.grid.items():\n key = str(k)\n grid_filename = \"GRID_{}_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz\".format(\n key, p=params)\n params['grid']['filename'][key] = grid_filename\n grid.save(path / grid_filename)\n\n with open(path / \"MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params), 'w') as fp:\n json.dump(params, fp, indent=4, cls=NpEncoder)\n\n print(\"Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params))",
"def save_fc_inv(fc_inv, events, fbsfs, output):\n fc_inv_save = xr.DataArray(data=fc_inv, dims=('fbsfs', 'events'), coords={'fbsfs': fbsfs, 'events': events})\n fc_inv_save.to_netcdf(output)\n del fc_inv_save",
"def save_npz(save_dict={}, name='model.npz'):\n rename_dict = {}\n for k, value in enumerate(save_dict):\n rename_dict.update({'param'+str(k) : value.eval()})\n np.savez(name, **rename_dict)\n print('Model is saved to: %s' % name)",
"def save_result(self):\n np.save(os.path.join(self.outpath, self.image_name + '_run.npy'), {\n 'device' : u.get_gpu_name(),\n 'elapsed': u.sec2time(self.elapsed),\n 'outpath': self.outpath,\n 'history': self.history,\n 'mask' : self.mask,\n 'image' : self.img,\n 'output' : self.out_best,\n 'noise' : self.input_list,\n })\n \n # save the model\n if self.args.savemodel:\n torch.save(self.net.state_dict(),\n os.path.join(self.outpath, self.image_name + '_model.pth'))",
"def save(self, path):\n\n if not isinstance(path, Path):\n path = Path(path)\n\n params = {\n 'model': self.__class__.__name__,\n 'element': self.element,\n 'r_cut': self.r_cut,\n 'fitted': self.gp.fitted,\n 'gp': {\n 'kernel': self.gp.kernel.kernel_name,\n 'n_train': self.gp.n_train,\n 'sigma': self.gp.kernel.theta[0],\n 'noise': self.gp.noise,\n 'r0': self.gp.kernel.theta[2]\n },\n 'grid': {\n 'r_min': self.grid_start,\n 'r_max': self.grid_end,\n 'r_num': self.grid_num,\n 'filename': {}\n } if self.grid else {}\n }\n\n gp_filename = \"GP_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npy\".format(\n p=params)\n\n params['gp']['filename'] = gp_filename\n self.gp.save(path / gp_filename)\n\n if self.grid:\n grid_filename = 'GRID_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.npz'.format(\n p=params)\n\n params['grid']['filename'] = grid_filename\n self.grid.save(path / grid_filename)\n\n with open(path / 'MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json'.format(p=params), 'w') as fp:\n json.dump(params, fp, indent=4, cls=NpEncoder)\n\n print(\"Saved model with name: MODEL_ker_{p[gp][kernel]}_ntr_{p[gp][n_train]}.json\".format(p=params))",
"def serialize(self, data):\n buffer = io.BytesIO()\n scipy.sparse.save_npz(buffer, data)\n return buffer.getvalue()",
"def dump_npz(filename: str, obj, **kwargs):\n return np.savez(filename, obj)",
"def save_nn(self, networkname= 'nn'):\n np.save(f\"{networkname}_data.npy\", self.weights_and_biases)\n print(f\"Data saved to {networkname}_data.npy\")"
] | [
"0.6442163",
"0.6418604",
"0.6242592",
"0.62161463",
"0.6036001",
"0.59375066",
"0.5916903",
"0.5870925",
"0.5862356",
"0.58407557",
"0.582576",
"0.5815789",
"0.58025026",
"0.5755386",
"0.5704577",
"0.5699239",
"0.5672944",
"0.56612027",
"0.56331027",
"0.5628472",
"0.5596223",
"0.5559018",
"0.55585533",
"0.5558272",
"0.55389774",
"0.55382806",
"0.55301464",
"0.5518992",
"0.55164784",
"0.55139524"
] | 0.6803789 | 0 |
Custom save method to autoset the phs field. | def save(self, *args, **kwargs):
self.phs = self.set_phs()
super(Study, self).save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, *args, **kwargs):\n if not self.pkhash:\n self.pkhash = compute_hash(self.script)\n super(DataOpener, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)",
"def save(self):\n if self.pumping_test_form:\n self.pumping_test_form.save()\n if self.form:\n if self.pumping_test_form:\n self.form.instance.pumping_test = self.pumping_test_form.instance\n self.form.save()\n self.well.hydrogeology_parameter = self.form.instance",
"def save(self, *args, **kwargs):\n if not self.tracking_number:\n self.tracking_number = self._generate_tracking_number()\n super().save(*args, **kwargs)",
"def beforeSave(self):",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self, commit=True):\n instance = super().save(commit)\n if 'IsAHJOfficialOf' in self.changed_data:\n # The AHJs entered into the field.\n form_ahjs = self.cleaned_data['IsAHJOfficialOf']\n assign_ahj_official_status(instance, form_ahjs)\n return instance",
"def save(self):\n\n pass",
"def save(self):\n # TODO (Pierre): code",
"def save (self):\n pass",
"def save(self, *args, **kwargs):\n self.trait_flavor_name = self.set_trait_flavor_name()\n # Call the \"real\" save method.\n super(HarmonizedTrait, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n return",
"def save(self, *args, **kwargs):\n pass",
"def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()",
"def before_save(self):",
"def dummy():\n\t\t\tself.save()",
"def save(self) -> None:\n pass",
"def save(self) -> None:\n pass",
"def save(self) -> None:\n pass",
"def save(self, *args, **kwargs):\r\n\r\n if not self.trackerid:\r\n self.trackerid = generate_trackerid()\r\n super(Profile, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n\n if not self.trackerid:\n self.trackerid = generate_trackerid()\n super(Profile, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)",
"def save(self):\n # Makes sure that all required properties are available before persistence.\n for name, prop in fields(self, Property).items():\n if hasattr(prop, 'required') and prop.required:\n value = getattr(self, name)\n if prop.empty(value):\n raise BadValueError(\"Property: %s is required\" % name)\n \n Lisa.save(self)\n self.differ.commit()"
] | [
"0.6665393",
"0.6541349",
"0.6541349",
"0.6539829",
"0.63699436",
"0.6302961",
"0.627035",
"0.62583447",
"0.62583447",
"0.62583447",
"0.62583447",
"0.62583447",
"0.62433136",
"0.6223159",
"0.6214938",
"0.61958426",
"0.6168871",
"0.61671007",
"0.6163853",
"0.613194",
"0.61283",
"0.6124372",
"0.61148435",
"0.61148435",
"0.61148435",
"0.6111242",
"0.61059356",
"0.6090048",
"0.6090048",
"0.6045398"
] | 0.7240601 | 0 |
Automatically set phs from the study's accession number. Properly format the phs number for this study, so it's easier to get to in templates. | def set_phs(self):
return 'phs{:06}'.format(self.i_accession) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_spondaic(self, scansion: str) -> str:\n mark_list = string_utils.mark_list(scansion)\n vals = list(scansion.replace(\" \", \"\"))\n new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]\n corrected = \"\".join(new_vals)\n new_line = list(\" \" * len(scansion))\n for idx, car in enumerate(corrected):\n new_line[mark_list[idx]] = car\n return \"\".join(new_line)",
"def set_pno(self, pno):\n self.__pno = pno",
"def save(self, *args, **kwargs):\n self.phs = self.set_phs()\n super(Study, self).save(*args, **kwargs)",
"def phsen_maker(file):\n\n\tdata = {}\n\tfor line in file.readlines():\n\t\tkey, val = line.split(\"\t\")\n\t\tdata[key] = val\n\n\t# Set up CSV writing\n\twr = csv.writer(open(\"PHSEN_Cal_File.csv\", \"w\", newline=''))\n\n\tequivalent = {\"CC_ea434\" : \"Ea_434\", \"CC_eb434\" : \"Eb_434\", \"CC_ea578\" : \"Ea_578\", \"CC_eb578\": \"Eb_578\"} \n\tconstants = {\"CC_ind_off\": 0, \"CC_ind_slp\": 1, \"CC_psal\": 35}\n\tcsv_format = [\"CC_ea434\", \"CC_ea578\", \"CC_eb434\", \"CC_eb578\", \"CC_ind_off\", \"CC_ind_slp\", \"CC_psal\"]\n\tcsv_maker(wr, csv_format, data, constants, equivalent)",
"def psid(self, psid):\n\n self._psid = psid",
"def set_spouse(self, s, line_number=0):\n if isinstance(self.spouse, set):\n self.spouse = self.spouse | {s}\n self._spouse_lines = self._spouse_lines | {line_number}\n else:\n self.spouse = {s} if (s and s != 'NA') else 'NA'\n self._spouse_lines = {line_number}",
"def prshare(value):\n value = value or 0\n value = int(round(value * 50))\n return '{:05d}'.format(value)",
"def pyp_reports(request):\n student_id = int(get_from_matchdict('id', request.matchdict))\n\n pdf = get_from_matchdict('pdf', request.matchdict)\n check = request.params.get('check')\n if check and check.lower() == 'true':\n check = True\n else:\n check = False\n\n internal_check = request.params.get('internal_check')\n\n mb_user = request.session.get('mb_user', None)\n if not mb_user:\n # FIXME: Need to re-do it\n pass\n # api_token = request.params.get('api_token')\n # if not api_token or api_token != gns.config.managebac.api_token:\n # return HTTPForbidden()\n elif mb_user.type.startswith('Advisor') or mb_user.type == 'Account Admins':\n # let them in\n pass\n else:\n return HTTPForbidden()\n\n term_id = gns.config.managebac.current_term_id\n with DBSession() as session:\n try:\n rep_statement = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id,\n # PrimaryReport.homeroom_comment!=''\n )\n stu_statement = session.query(Students).filter_by(id=student_id)\n student = stu_statement.one()\n report = rep_statement.one()\n gns.tutorial(\"Got the target student\",edit=(stu_statement, '.sql'))\n gns.tutorial(\"Got Primary report with course information\", edit=(rep_statement, '.sql'))\n except NoResultFound:\n if pdf:\n # raw_input('no report entry for this student: {} with term_id {}'.format(student_id, term_id))\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n except MultipleResultsFound:\n print(\"Issue with database!\")\n raise HTTPInternalServerError(\"Issue with database!\")\n\n title = u\"IGB International School (June 2016): Student Report for {} {}\".format(student.first_name, student.last_name)\n\n # This bit is the only manual info that isn't on managebac\n uoi_table = {\n -1: {\n # ey sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"Playing and learning together enables us to come to new understandings.\"),\n 2: dict(title=\"Sharing The Planet\", central_idea=\"Our lives are interconnected with living things.\"),\n # ey sem 2\n 3: dict(title=\"How the World Works\", central_idea=\"Water is all around us and has many uses.\"),\n 4: dict(title=\"How We Express Ourselves\", central_idea=\"Stories inform, provoke us and provide enjoyment.\"),\n },\n 0: {\n # kg sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"We are part of a community who work, learn, and play together\"),\n 2: dict(title=\"How We Organise Ourselves\", central_idea=\"Communities create systems to fullfill a need.\"),\n 3: dict(title=\"Where We Are in Place and Time\", central_idea=\"Shelters look different and serve a purpose.\"),\n\n # kg sem 2\n 4: dict(title=\"Sharing the Planet\", central_idea=\"People's choices and actions impact the environment and their community.\"),\n 5: dict(title=\"How the World Works\", central_idea=\"Our body and man made resources help protect us from the natural environment.\"),\n 6: dict(title=\"How We Express Ourselves\", central_idea=\"An audience can be engaged through performance.\")\n },\n 1: {\n # gr1 sem 1\n 1: dict(title=\"How we organize ourselves\", central_idea=\"Humans use tools and strategies to understand and organise their environment.\"),\n 2: dict(title=\"Who We Are\", central_idea=\"Games provide us with opportunities to develop an understanding of ourselves and others.\"),\n 3: dict(title=\"How We Express Ourselves\", central_idea=\"Celebrations are an opportunity to reflect and 
appreciate cultures and beliefs.\"),\n # gr1 sem 2\n 4: dict(title=\"How the World Works\", central_idea=\"Machines make a difference to the way we live our lives.\"),\n 5: dict(title=\"Sharing the Planet\", central_idea=\"Water is essential to life and is a limited resource to many.\"),\n 6: dict(title=\"Where We Are in Place and Time\", central_idea=\"Clocks are a universal measurement tool of time that have had an impact in the past and the present.\"),\n },\n 2: {\n # gr2 sem 1\n 1: dict(title=\"Who We Are\", central_idea=\"With rights come responsibilities.\"),\n 2: dict(title=\"How We Express Ourselves\", central_idea=\"Cultures tell stories in different ways and for different reasons.\"),\n 3: dict(title=\"How We Organize Ourselves\", central_idea=\"Number system provide a common language we can use to make sense of the world.\"),\n # gr2 sem 2\n 4: dict(title=\"Sharing The Planet\", central_idea=\"Plants sustain life on earth and we have a responsible role to play\"),\n 5: dict(title=\"Where we are in Place and Time\", central_idea=\"Influence can change people and their environment.\"),\n 6: dict(title=\"How the World Works\", central_idea=\"Forces are a vital part of our survival.\"),\n },\n 3: {\n # gr3 sem 1\n 1: dict(title=\"How We Organise Ourselves\", central_idea=\"Communication connects people.\"),\n 2: dict(title=\"Sharing the Planet\", central_idea=\"People can conserve the world's resources through responsible behaviours\"),\n 3: dict(title=\"Where We are in Place and Time\", central_idea=\"Innovations from past civilizations have an influence on the present\"),\n # gr3 sem 2\n 4: dict(title=\"How the World Works\", central_idea=\"Safe structures are designed and built for purpose and consider the environment and materials.\"),\n 5: dict(title=\"Who We Are\", central_idea=\"Communication connects people and communities.\"),\n 6: dict(title=\"How We Express Ourselves\", central_idea=\"Nature can inspire people to express their creativity.\"),\n },\n 4: {\n # gr4 sem 1\n 1: dict(title=\"How We Express Ourselves\", central_idea=\"Media influences how we think and the choices we make.\"),\n 2: dict(title=\"Sharing the Planet\", central_idea=\"Organisms rely on one another to balance ecosystems.\"),\n 3: dict(title=\"How we Organise Ourselves\", central_idea=\"Societies establish systems for trade and commerce to meet needs and wants.\"),\n # gr4 sem 2\n 4: dict(title=\"Where We Are in Place and Time\", central_idea=\"The quest for understanding has led to exploration and discovery.\"),\n 5: dict(title=\"How The World Works\", central_idea=\"Earth has formed over time and is still changing.\"),\n 6: dict(title=\"Who We Are\", central_idea=\"People's beliefs influence their actions.\"),\n },\n 5: {\n # gr5 sem 1\n 1: dict(title=\"How we Organise Ourselves\", central_idea=\"All societies have rules and reasons for these rules.\"),\n 2: dict(title=\"Where We Are in Place and Time\", central_idea=\"Malaysia's cultural diversity has been shaped by its history.\"),\n 3: dict(title=\"How the World Works\", central_idea=\"Changes to matter can be of a chemical and/or physical nature.\"),\n # gr5 sem 2\n 4: dict(title=\"Sharing The Planet\", central_idea=\"The choices we make during moments of conflict affect our relationships\"),\n 5: dict(title=\"How We Express Ourselves: Exhibition\", central_idea=\"Artists seek to evoke an emotional response from their audience.\"),\n 6: dict(title=\"Who We Are\", central_idea=\"External and internal factors cause changes in our lives\"),\n },\n 
}\n\n chinese_teachers = {\n 10792613: [11203970, 10836999, 10912649, 10863230, 11544715, 11707916, 11609996, 11707918, 11708046, 10912651, 11707928, 11274137, 11707932, 11707934, 11204000, 11204641, 11204001, 11708067, 11270692, 11707940, 11204385, 11563304, 11204008, 11153068, 11573550, 11707952, 10882225, 11204017, 11707957, 10834618, 10866874, 11080380, 10893375, 11707840, 11190340, 10834630, 11611847, 10834633, 10834636, 11693517, 11707984, 11203923, 11707859, 10834645, 10834648, 10834649, 10834651, 11707870, 11182305, 11203938, 11200870, 10973671, 11707882, 11708014, 11203950, 11203952, 11708018, 11203954, 10882162, 11633398, 11707900, 11538429, 11124222, 11135103, 11737995, 11621139, 11707870, 10882159], # xiaopiong\n 11256632: [11204609, 10836994, 11707907, 11135108, 10836999, 11135112, 10837001, 11203979, 10865037, 11707924, 11621141, 11203988, 11204377, 11173915, 10913691, 11204637, 10856823, 11204383, 11204640, 11707939, 11204392, 11614634, 11364525, 10882226, 11204660, 11190071, 10834616, 10834617, 11464377, 10866873, 10866876, 10834621, 10834622, 10866877, 10856636, 11578945, 11611841, 10893379, 10834628, 10834625, 11611847, 10834635, 10834640, 10834642, 10834643, 11930324, 11707860, 11203926, 11707990, 11426392, 11502297, 11578839, 11707869, 11708005, 10834661, 11203946, 11324785, 11124210, 10863222, 11124215, 10856824, 11203961, 10856826, 11124219, 11204605, 11707902, 10986488], # nancy\n }\n\n students_chinese_teachers = {}\n\n for teacher_id, student_ids in chinese_teachers.items():\n with DBSession() as session:\n teacher = session.query(Teachers).filter_by(id=teacher_id).one()\n for this_student in student_ids:\n students_chinese_teachers[this_student] = teacher\n\n bahasa_teachers = {\n 10872708: [10908165, 10856828],\n }\n students_bahasa_teachers = {}\n for teacher_id, student_ids in bahasa_teachers.items():\n with DBSession() as session:\n teacher = session.query(Teachers).filter_by(id=teacher_id).one()\n for this_student in student_ids:\n students_bahasa_teachers[this_student] = teacher\n\n if 'Grade' in report.course.name or 'Kindergarten' in report.course.name:\n which_folder = 'grades'\n template = 'frontend:elem_reports/templates/student_pyp_report.pt'\n\n with DBSession() as session:\n try:\n rep_statement = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n options(joinedload('sections')).\\\n options(joinedload('sections.learning_outcomes')).\\\n options(joinedload('sections.teachers')).\\\n options(joinedload('sections.strands')).\\\n options(joinedload('teacher')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id\n )\n att_statement = session.query(Absences).filter_by(term_id=term_id, student_id=student_id)\n\n attendance = att_statement.one()\n report = rep_statement.one()\n\n gns.tutorial(\"Got K-5 report info with joined information\", edit=(rep_statement, '.sql'), banner=True)\n except NoResultFound:\n if pdf:\n # raw_input(\"No K-5 report entry\")\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n\n subject_rank = {\n 'language': 0,\n 'mathematics': 1,\n 'unit of inquiry 1': 2,\n 'unit of inquiry 2': 3,\n 'unit of inquiry 3': 4,\n 'unit of inquiry 4': 4.1,\n 'unit of inquiry 5': 4.2,\n 'unit of inquiry 6': 4.3,\n 'art': 5,\n 'music': 6,\n 'physical education': 7,\n 'bahasa melayu': 8,\n 'chinese': 9,\n 'host nation': 10,\n 'self-management': 10000\n }\n report.sections = sorted([section for section in report.sections if 
subject_rank.get(section.name.lower(), 10001) < 10000], key=lambda x: subject_rank.get(x.name.lower(), 1000))\n report.sections = [section for section in report.sections if section.comment]\n\n # Only output sections that have any data in them\n # Comment out during development\n # report.sections = [section for section in report.sections if section.comment]\n\n if 'Kindergarten' in report.course.grade:\n grade_norm = 0\n else:\n grade_norm = int(re.sub(\"[^0-9]\", \"\", report.course.grade))\n\n rotate_list = [0, 1, 2, 5, 9]\n pagination_list = [0, 1, 4, 7, 10]\n\n for section in report.sections:\n section.rank = subject_rank.get(section.name.lower())\n report.sections = [s for s in report.sections if s.rank not in [4.1, 4.2, 4.3]] # skip\n\n gns.tutorial(\"Formatting each subject area in this order: {}\".format(\", \".join([r.name for r in report.sections])), banner=True)\n for section in report.sections:\n # Substitute the correct Chinese teachers based on manual info above\n # Do first so all subsequent operations take place properly\n if section.rank == 9 and student.id in students_chinese_teachers:\n section.teachers = [students_chinese_teachers.get(student.id)]\n\n if section.rank == 8 and student.id in students_bahasa_teachers:\n # Host Nations? and Bahasa mixed up maybe?\n section.teachers = [students_bahasa_teachers.get(student.id)]\n\n section.append_uoi_table = section.rank == 4\n section.display_rotated = section.rank in rotate_list\n\n if section.rank in [2]:\n section.organization_header = 'Units of Inquiry'\n section.name_after = \"\"\n elif section.rank in [3, 4]:\n section.organization_header = 'skip'\n section.name_after = \"\"\n else:\n section.organization_header = section.name + ' (' + \" & \".join([s.first_name + ' ' + s.last_name for s in section.teachers]) + ')'\n section.name_after = \"\"\n\n # Set the unit title if it needs to be\n if section.rank in [2, 3, 4, 4.1, 4.2, 4.3]:\n which_uoi = int(re.sub(\"[^0-9]\", \"\", section.name))\n section.name = uoi_table.get(grade_norm)[which_uoi]['title']\n\n # Determine pagination\n if section.rank in pagination_list: # TODO What about more than two inquiry units?\n section.pagination = True\n else:\n section.pagination = False\n\n section.learning_outcomes = sorted(section.learning_outcomes, key=lambda x: x.which)\n\n # Standardize the headings\n if section.rank in [2, 3, 4, 4.1, 4.2, 4.3]:\n section.name = section.name.title()\n section.name_after = uoi_table.get(grade_norm)[which_uoi]['central_idea']\n\n en_dash = u'\\u2013'\n for outcome in section.learning_outcomes:\n\n if section.rank in [2, 3, 4]:\n # Unit of inquiry\n outcome.heading = \"\"\n\n elif section.rank not in [0, 1]:\n outcome.heading = \"\" # blank\n\n else:\n # If it's a subject that we care to keep the data, standardize the format:\n outcome.heading = outcome.heading.replace(en_dash, '-')\n match = re.match('(.*)-', outcome.heading)\n if match:\n outcome.heading = match.group(1).strip()\n\n # Evaluates and adds data to items\n old_heading = None\n for outcome in section.learning_outcomes:\n\n if outcome.heading != old_heading:\n # Mark that indicates we need to evaluate\n\n if section.rank in [0, 1]:\n # Determine the effort assigned by the teacher for this\n effort = [s.selection for s in section.strands if s.label_titled.startswith(outcome.heading)]\n effort = effort[0] if len(effort) == 1 else (effort[0] if len(set(effort)) == 1 else \"<?>\")\n else:\n effort = [s.selection for s in section.strands if s.selection]\n effort = effort[0] if 
len(set(effort)) == 1 else str(effort)\n outcome.effort = {'G': \"Good\", 'N': \"Needs Improvement\", 'O': \"Outstanding\"}.get(effort, None)\n\n if not outcome.effort and internal_check:\n # Raise a problem here\n raise ReportIncomplete('something') # FIXME: There is no report incomplete exception\n\n old_heading = outcome.heading\n\n if not outcome.selection and internal_check:\n raise ReportIncomplete('something')\n gns.tutorial(\"Completed formatting of {} section\".format(section.name))\n\n report.sections = [s for s in report.sections if s.rank not in [4.1, 4.2, 4.3]] # skip\n\n elif 'Early' in report.course.name:\n which_folder = 'early_years'\n template = 'frontend:elem_reports/templates/student_pyp_ey_report.pt'\n\n # 1/2: semeseter\n # 0/1: early years\n\n ey_report_indicators = {\n 1: {\n 0: [\n {'name': 'Listening & Speaking', 'content': 'Learners show an understanding of the value of speaking and listening to communicate. They are using language to name their environment, to get to know each other, to initiate and explore relationships, to question and inquire.'},\n {'name': 'Viewing & Presenting', 'content': 'Learners show an understanding that the world around them is full of visual language that conveys meaning. They are able to interpret and respond to visual texts. They are extending and using visual language in more purposeful ways.'},\n {'name': 'Reading & Writing', 'content': 'Learners show an understanding that print represents the real or the imagined world. They have a concept of a \"book\", and an awareness of some of its structural elements. They use visual cues to recall sounds and the words they are \"reading\" to construct meaning.'},\n ],\n 1: [\n {'name': 'Number', 'content': 'Learners will understand that numbers are used for many different purposes in the real world. They will develop an understanding of one-to-one correspondence, be able to count and use number words and numerals to represent quantities.'},\n {'name': 'Shape and Space', 'content': 'Learners will develop an understanding that shapes have characteristics that can be described and compared.'},\n {'name': 'Pattern', 'content': 'Learners will develop an understanding that patterns and sequences occur in everyday situations. They will be able to identify and extend patterns in various ways.'},\n {'name': 'Measurement', 'content': 'Learners will develop an understanding of how measurement involves the comparison of objects and ordering.They will be able to identify and compare attributes of real objects.'},\n {'name': 'Data', 'content': 'Learners will develop an understanding of how the collection and organization of information helps to make sense of the world. They will sort and label objects by attributes and discuss information represented in graphs including pictographs and tally marks.'}\n ]\n },\n 2: {\n 0: [\n {'name': 'Listening & Speaking', 'content': 'Learners will show an understanding of the value of speaking and listening to communicate. They will use language to name their environment, to get to know each other, to initiate and explore relationships, to question and inquire.'},\n {'name': 'Viewing & Presenting', 'content': 'Learners will show an understanding that the world around them is full of visual language that conveys meaning. They will interpret and respond to visual texts. 
They will be extending and using visual language in more purposeful ways.'},\n {'name': 'Reading & Writing', 'content': 'Learners will show an understanding that print represents the real or the imagined world. They will develop the concept of a “book”, and an awareness of some of its structural elements. They will use visual cues to recall sounds and the words they are “reading” to construct meaning.'},\n ],\n 1: [\n {'name': 'Number', 'content': 'Learners will understand that numbers are used for many different purposes in the real world. They will develop an understanding of one-to-one correspondence, be able to count and use number words and numerals to represent quantities.'},\n {'name': 'Shape and Space', 'content': 'Learners will understand and use common language to describe paths, regions and boundaries of their immediate environment.'},\n {'name': 'Pattern', 'content': 'Learners will understand that patterns and sequences occur in everyday situations. They will be able to identify, describe, extend and create patterns in various ways.'},\n {'name': 'Measurement', 'content': 'Learners will develop an understanding of how measurement involves the comparison of objects and the ordering and sequencing of events. They will be able to identify, compare and describe attributes of real objects as well as describe and sequence familiar events in their daily routine.'},\n {'name': 'Data', 'content': 'Learners will develop an understanding of how the collection and organization of information helps to make sense of the world. They will sort and label objects by attributes and discuss information represented in graphs including pictographs and tally marks. The learners will discuss chance in daily events.'},\n ],\n },\n }\n with DBSession() as session:\n try: \n report = session.query(PrimaryReport).\\\n options(joinedload('course')).\\\n options(joinedload('sections')).\\\n options(joinedload('sections.learning_outcomes')).\\\n options(joinedload('sections.teachers')).\\\n options(joinedload('teacher')).\\\n filter(\n PrimaryReport.term_id == term_id,\n PrimaryReport.student_id == student_id,\n ).one()\n student = session.query(Students).filter_by(id=student_id).one()\n attendance = session.query(Absences).filter_by(term_id=term_id, student_id=student_id).one()\n except NoResultFound:\n if pdf:\n raise HTTPNotFound()\n else:\n raise HTTPFound(location=request.route_url(\"student_pyp_report_no\", id=student_id))\n\n subject_rank = {\n 'self-management': -1,\n 'language': 0,\n 'mathematics': 1,\n 'unit of inquiry 1': 2,\n 'unit of inquiry 2': 3,\n 'unit of inquiry 3': 4,\n 'unit of inquiry 4': 4.1,\n 'unit of inquiry 5': 4.2,\n 'unit of inquiry 6': 4.3,\n 'art': 5,\n 'music': 6,\n 'physical education': 7,\n 'bahasa melayu': 8,\n 'chinese': 9,\n 'host nation': 10\n }\n\n report.sections = sorted([section for section in report.sections if subject_rank.get(section.name.lower()) < 10000], key=lambda x: subject_rank.get(x.name.lower(), 1000))\n # report.sections = report_sections\n # Filter out the un-needed units of inquiry\n # report.sections = [s for s in report.sections if s.rank <= 1 or (s.rank >= 4 and s.rank not in [4,4.1])]\n\n\n # Only output sections that have any data in them\n # Comment out during development\n # report.sections = [section for section in report.sections if section.comment and subject_rank.get(section.name.lower()) not in [2, 3]]\n\n grade_norm = -1\n\n pagination_list = [0, 3, 7, 10]\n\n for section in report.sections:\n\n section.rank = 
subject_rank.get(section.name.lower())\n\n if section.rank == -1:\n # blurb for self-management\n section.blurb = \"<i><p>Within the PYP, the approaches to learning skill of self management encompasses the development of gross and fine motor skills, spatial awareness, safety, healthy lifestyles, codes of behaviour and informed choices. </p><p>In an Early Years context these are reflected through the play based approach to teaching and learning. Reporting about self management in Early Years focuses on the whole child, stressing the importance of developing independence, social and emotional skills such as making relationships, managing feelings and behaviour, self confidence and self awareness. In addition the development of physical skills (moving and handling, health and self care) are highlighted as well. </p></i>\"\n else:\n section.blurb = \"\"\n\n if section.rank in [0, 1]: # Could be Lanugage & Maths, set up the report indicators\n ey = int('Early Years 1' in report.course.name) + 1\n section.report_indicators = ey_report_indicators[ey][section.rank] # change this to 2 later\n else:\n section.report_indicators = None\n\n # Substitute the correct Chinese teachers based on manual info above\n if section.rank == 9 and student.id in students_chinese_teachers:\n section.teachers = [students_chinese_teachers.get(student.id)]\n\n if section.rank in [999999]: # Turn this off\n section.organization_header = \"Units of Inquiry\"\n section.name_after = \"\"\n elif section.rank in [4, 4.1]:\n section.organization_header = 'skip'\n section.name_after = \"\"\n else:\n section.organization_header = None\n section.name_after = ' (' + \" & \".join([s.first_name + ' ' + s.last_name for s in section.teachers]) + ')'\n\n if section.rank in [2, 3, 4, 4.1, 4.2,4.3,4.4]:\n which_uoi = int(re.sub(\"[^0-9]\", \"\", section.name))\n section.name = uoi_table.get(grade_norm)[which_uoi]['title']\n section.name_after = \"\"\n\n # Determine pagination\n if section.rank in pagination_list: #TODO What about more than two inquiry units?\n section.pagination = True\n else:\n section.pagination = False\n\n if section.rank in [2, 3, 4, 4.1, 4.2,4.3,4.4]:\n section.name = section.name.title() \n section.name_after = uoi_table.get(grade_norm)[which_uoi]['central_idea']\n\n section.learning_outcomes = sorted(section.learning_outcomes, key=lambda x: x.which)\n\n # ey sections\n report.sections = [s for s in report.sections if s.rank not in [4, 4.1]]\n\n\n options={\n 'quiet': '',\n 'disable-javascript': '',\n 'encoding': 'utf-8',\n 'header-html': 'http://igbisportal.vagrant:6543/header-html',\n 'header-spacing': '5',\n\n\n 'footer-html': 'http://igbisportal.vagrant:6543/footer-html?student_id={}'.format(student.id),\n\n 'print-media-type': '',\n\n 'margin-left': '3mm',\n 'margin-right': '3mm',\n 'margin-bottom': '10mm'\n }\n\n\n if check:\n stu = student.first_nickname_last_studentid\n message = []\n for s in report.sections:\n if not s.teachers:\n message.append(\"No teacher assigned in {}\".format(s.name))\n #raise HTTPNotFound(\"##No teacher assigned for {} in {}##\".format(stu, s.name))\n if not s.comment:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} missing {} comment'.format(teachers, s.name))\n #raise HTTPNotFound('##{} missing {} comment for {}##'.format(teachers, s.name, stu))\n\n if s.learning_outcomes and not 'Early' in report.course.name:\n\n if s.overall_comment == 'N/A':\n for o in s.learning_outcomes:\n if hasattr(o, 'effort') and not o.effort:\n teachers = 
\",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter {} effort for {}'.format(teachers, o.heading, s.name))\n # raise HTTPNotFound()\n if not o.selection:\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter {} indication for {}'.format(teachers, o.heading, s.name))\n # raise HTTPNotFound('##{} did not enter indication for {} in {}##'.format(teachers, s.name, stu))\n\n elif s.overall_comment == '':\n teachers = \",\".join([t.username_handle for t in s.teachers])\n message.append('{} did not enter effort for single subject {}'.format(teachers, s.name)) \n\n if message:\n raise HTTPNotFound('##\\n({}) {}:\\n\\t{}##'.format(student.grade, student.first_nickname_last_studentid, \"\\n\\t\".join(message)))\n\n raise HTTPFound()\n\n with DBSession() as session:\n try:\n record = session.query(db.table.PrimaryReportLastUpdated).filter(db.table.PrimaryReportLastUpdated.student_id == student.id).one()\n last_updated = record.timestamp\n last_updated_date = last_updated.strftime(gns.config.reports.last_updated_format)\n except NoResultFound:\n last_updated_date = '<Unknown>'\n except MultipleResultsFound:\n last_updated_date = '<Internal DB Error: Multiple results found>'\n\n if pdf:\n result = render(template,\n dict(\n title=title,\n report=report,\n student=student,\n attendance=attendance,\n pdf=True,\n download_url=\"\",\n link_to_mb=\"\",\n last_updated=\"\",\n ),\n request=request)\n import pdfkit # import here because installation on server is hard\n\n prefix_file_name = '{}/pdf-downloads/{}/{}-Grade{}-{}-[{}]-'.format(\n gns.config.paths.home,\n which_folder,\n '55048',\n grade_norm,\n student.first_name + '-' + student.last_name,\n student.student_id\n )\n\n full_file = '{}({}).pdf'.format(prefix_file_name, last_updated_date)\n\n for _file in glob.glob(\"{}.*\".format(prefix_file_name)):\n # Remove any old stuff still lingering in there\n if _file != full_file:\n os.remove(_file)\n\n path = '{}/pdf-downloads/{}/{}-Grade{}-{}-[{}]-({}).pdf'.format(\n gns.config.paths.home,\n which_folder,\n '55048',\n grade_norm,\n student.first_name + '-' + student.last_name,\n student.student_id,\n last_updated_date\n )\n\n gns.tutorial(\"Sending to pdfkit, also saving to {path}\".format(path=path), edit=(result, '.pretty'), banner=True)\n try:\n pdffile = pdfkit.from_string(result, path, options=options) # render as HTML and return as a string\n except OSError as err:\n return HTTPInternalServerError(\"Problem with file? 
{}\".format(err))\n\n pdffile # not used\n if pdf.lower() == \"download\":\n content_type = \"application/octet-stream\"\n\n response = FileResponse(path, request=request, content_type=content_type)\n response.content_disposition = u\"attachment; filename={}.pdf\".format(title)\n return response\n\n else:\n content_type = \"application/pdf\"\n response = FileResponse(path, request=request, content_type=content_type, charset='utf-8')\n return response\n\n else:\n # Check when it was last updated\n\n if gns.tutorial_on:\n import pkg_resources\n package, filename = template.split(\":\")\n abspath = pkg_resources.resource_filename(*template.split(\":\"))\n from chameleon import PageTemplateFile\n template_file = PageTemplateFile(abspath)\n gns.tutorial(\"Loaded the template\", edit=(template_file.read(), '.html'), banner=True)\n result = render(template,\n dict(\n title=title,\n report=report,\n student=student,\n attendance=attendance,\n pdf=False,\n download_url=\"/students/{}/pyp_report/download/\".format(student.id),\n link_to_mb=\"https://igbis.managebac.com/classes/{}/pyp-gradebook/tasks/term_grades?student={}&term={}\".format(report.course.id, student.id, gns.config.managebac.current_term_id),\n last_updated=last_updated_date,\n ),\n request=request\n )\n response = Response(result)\n return response",
"def get_study_info(study_link):\n template = \"https://clinicaltrials.gov{}\"\n study_link = study_link.replace(' ', '+')\n return template.format(study_link)",
"def set_full_accession(self):\n return self.STUDY_VERSION_ACCESSION.format(self.study.phs, self.i_version, self.i_participant_set)",
"def Spiculation(self):\n s = self.spiculation\n assert s in range(1,6), \"Spiculation score out of bounds.\"\n return _char_to_word_[ s-1 ] + ' Spiculation'",
"def __init__(self, snps, phenotype):\n self.snps = snps\n self.phenotype = phenotype",
"def setup(self, ds: PetscDocStringImpl) -> None:\n subheading = 0\n self.items = {}\n\n def inspector(ds: PetscDocStringImpl, loc: SourceRange, line: str, verdict: Verdict) -> None:\n if verdict > 0:\n head, _, rest = line.partition(':')\n head = head.strip()\n assert head, f'No heading in PROSE section?\\n\\n{loc.formatted(num_context=5)}'\n if self.items.keys():\n nonlocal subheading\n subheading += 1\n start_line = loc.start.line\n self.items[subheading] = (\n (ds.make_source_range(head, line, start_line), head),\n [(ds.make_source_range(rest, line, start_line), rest)] if rest else []\n )\n elif line.strip():\n try:\n self.items[subheading][1].append((loc, line))\n except KeyError as ke:\n raise ParsingError from ke\n return\n\n super()._do_setup(ds, inspector)\n return",
"def displayPathtoPrincess(n, grid):\n solution = SavePrincess(grid)\n print \"{0}\".format(solution)",
"def add_snp(self,snp_line):\r\n #TODO: make sure there are no cases anymore where there are multiple primer sets\r\n parent = self.pcr_product[0].attributes.id\r\n snp_line.attributes.parent = parent\r\n self.snp.append(snp_line)",
"def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))",
"def _ps(score):\n #s = \"({0[0]:.3f}, {0[1]:.3f})\".format(score)\n s = \"{0:.3f}\".format(score)\n return s",
"def __str__(self):\n return \"%s, %02d, %s\" % (self.code, self.section, self.semester)",
"def setpmidInfo():\n DB = PT.DB\n for p in PT.proteins:\n f = DB[p]['PMID_link']\n #print f\n try:\n auth, tit = t.fetchPMIDSummary(f['text'])\n #print 'got info', tit\n except:\n print 'no pmid'\n try:\n f['authors'] = auth\n f['title'] = tit\n print auth, tit\n #print DB[p]['PMID_link']\n except:\n print 'no dict'\n \n return",
"def study_id(self, study_id):\n\n self._study_id = study_id",
"def study_id(self, study_id):\n\n self._study_id = study_id",
"def study_id(self, study_id):\n\n self._study_id = study_id",
"def study_code(self, study_code):\n\n self._study_code = study_code",
"def pSsnChanged(self):\n\t\tssn_widget = self.ui.findChild(QWidget, \"p_ssn\")\n\t\tssn = ssn_widget.toPlainText()\n\t\t\n\t\tif(len(ssn) == 11):\n\t\t\tp_name = self.ui.findChild(QWidget, \"p_name\")\n\t\t\tp_age = self.ui.findChild(QWidget, \"p_age\")\n\t\t\tp_length = self.ui.findChild(QWidget, \"p_length\")\n\t\t\t\n\t\t\t# Make database query with SSN and see if there's a match\n\t\t\t# --> update p_name, p_ssn, p_age, p_length\n\t\t\tQueryMatch = True\n\t\t\t\n\t\t\tif QueryMatch:\n\t\t\t\t# Test data\t\t\t\n\t\t\t\tif ssn == \"080290-123X\":\n\t\t\t\t\tp_name.setText(\"Tauno Testi\")\n\t\t\t\t\tp_age.setText(\"27\")\n\t\t\t\t\tp_length.setText(\"175 cm\")\n\t\t\t\telif ssn == \"120487-831C\":\n\t\t\t\t\tp_name.setText(\"Marjo Testelias\")\n\t\t\t\t\tp_age.setText(\"31\")\n\t\t\t\t\tp_length.setText(\"165 cm\")\n\t\t\t\t\n\t\t\t\tself.patient_ssn = ssn\n\t\t\t\tself.patient_chosen = True\n\t\t\telse:\n\t\t\t\t# no match, clear data and set flag to False\n\t\t\t\tp_name.setText(\"\")\n\t\t\t\tp_age.setText(\"\")\n\t\t\t\tp_length.setText(\"\")\n\t\t\t\tself.patient_chosen = False",
"def formatPitch(asciiPitch):\n pitch = asciiPitch.lower()\n\n # get octave value relative to 4\n octave = 4\n if \"'\" in pitch:\n numApostrophes = pitch.count(\"'\")\n octave += numApostrophes\n if octave >= 8:\n octave = 7\n pitch = pitch.replace(\"'\", \"\")\n elif \"-\" in pitch:\n numDashes = pitch.count(\"-\")\n octave -= numDashes\n if octave <= 0:\n octave = 1\n pitch = pitch.replace(\"-\", \"\")\n\n # i don't think pysynth likes e# or b#\n if pitch == \"e#\":\n pitch = \"f\"\n elif pitch == \"b#\":\n pitch = \"c\"\n\n # these shouldn't be a problem - test these\n if pitch.count(\"#\") > 1:\n pitch = pitch[0] + \"#\"\n elif pitch.count(\"b\") > 2:\n pitch = pitch[0] + \"b\"\n\n pitch += str(octave)\n return pitch",
"def add_snps(self, lineage_variants_file: Path):\n with lineage_variants_file.open() as lineage_file:\n for line in lineage_file:\n lineage_variant = line.rstrip().split(\"\\t\")\n position = int(lineage_variant[0])\n base = lineage_variant[1]\n\n self.snps[position] = base",
"def _fill_suport(self, suport_id, unused_subcase_id, model):\n suport_name = 'suport1_id=%i' % suport_id\n self.gui.create_alternate_vtk_grid(\n suport_name, color=RED_FLOAT, line_width=5, opacity=1., point_size=4,\n representation='point', is_visible=False)\n suport_nids = get_suport_node_ids(model, suport_id)\n msg = ', which is required by %r' % suport_name\n self._add_nastran_nodes_to_grid(suport_name, suport_nids, model, msg)\n return suport_name",
"def test_set_hs(self):\n s = State(substance=\"water\")\n s.hs = Q_(1061602.391543017, \"J/kg\"), Q_(3028.9867985920914, \"J/(kg*K)\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(373.1242958476843, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(101325.0, \"Pa\")) # type: ignore\n assert np.isclose(s.hs[0], Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.hs[1], Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.u, Q_(1013250, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(3028.9867985920914, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.4772010021515822, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1061602.391543017, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.28475636946248034, \"dimensionless\")) # type: ignore",
"def get_new_numberkey_for_soort(owner_proj, soort):\n if soort == 'userwijz':\n sel = owner_proj.rfcs\n elif soort == 'userprob':\n sel = owner_proj.probs\n elif soort == 'bevinding':\n sel = owner_proj.tbev\n else:\n return ''\n ny = str(datetime.date.today().year)\n h = ''\n try:\n last_id = sel.latest(\"datum_in\").nummer\n except ObjectDoesNotExist:\n pass\n else:\n yr, nr = last_id.split('-')\n if yr == ny:\n h = '-'.join((yr, '%04i' % (int(nr) + 1)))\n if h == '':\n h = '-'.join((ny, '0001'))\n return h",
"def __init__(self):\n segment_number = 2\n list_digits = 4\n super().__init__(4, segment_number, list_digits, default_val=\"0 \")\n self.set_credit(self.get_credit())"
] | [
"0.51352465",
"0.5065886",
"0.50378156",
"0.49864033",
"0.49230617",
"0.4835675",
"0.4805406",
"0.4756666",
"0.47290546",
"0.4711049",
"0.46981743",
"0.46366873",
"0.45931435",
"0.45776656",
"0.4569041",
"0.45661506",
"0.4560519",
"0.45518896",
"0.4550762",
"0.45448464",
"0.45448464",
"0.45448464",
"0.45381606",
"0.45320383",
"0.4530593",
"0.45287737",
"0.45179468",
"0.44998154",
"0.44990987",
"0.4498955"
] | 0.59272975 | 0 |
Gets the absolute URL of the detail page for a given Study instance. | def get_absolute_url(self):
return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_url(self):\n return ('publication_detail', (), {'slug': self.slug})",
"def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)",
"def details_url(self):\n if self._data.get('details_url'):\n path = self._data.get('details_url')\n try:\n path, hash_ = path.split('#')\n hash_ = '#' + hash_\n except ValueError:\n hash_ = ''\n return '{}?from_activity={}{}'.format(path, self._data.get('id'), hash_)",
"def get_absolute_url(self):\n return reverse('book_details', args=[str(self.id)])",
"def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())",
"def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)]) \n # Returns an URL that can be used to access a detail record for this model \n # (for this to work we will have to \n # -- Define a URL mapping that has the name 'book-detail' (name='book-detail')\n # -- Define an associated view.\n # -- Define an associated template.",
"def get_absolute_url(self):\n return reverse(\"jewelry_detail\", args = [str(self.id)])",
"def build_details_url(self, params={}):\n\n if 'url' in params:\n url = params['url']\n url += '?page=' + str(int(params['page'])) + '&sort=' + str(params['sort'])\n return url",
"def get_absolute_url(self):\n return reverse('injury-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n return reverse('performer-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\t\treturn reverse('source-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('patient-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('wine-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n path_components = {'slug': self.slug}\n return reverse('playlist-details-page', kwargs=path_components)",
"def get_absolute_url(self):\n return '/booking/%s/detail' % self.id",
"def get_provenance_url(uuid):\n return '{explore_url}/details/{uuid}'.format(explore_url=EXPLORE_URL, uuid=uuid)",
"def get_absolute_url(self):\n return reverse(\n \"variants:case-detail\",\n kwargs={\"project\": self.project.sodar_uuid, \"case\": self.sodar_uuid},\n )",
"def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n url = reverse('comicsite.views.page', args=[self.comicsite.short_name,self.title])\n return url",
"def get_absolute_url(self):\n return reverse('book-detail', kwargs={'slug': self.slug})",
"def get_absolute_url(self):\n return reverse('tournament-details', args=[self.uuid])",
"def get_absolute_url(self):\n return reverse('initiatives:detail', kwargs={'slug': self.slug})",
"def get_url(self) -> str:\n\n return self.__page_url",
"def get_absolute_url(self):\n return reverse_lazy('matterapps_detail', kwargs={'slug': self.slug,})",
"def get_absolute_url(self):\n return reverse('bl-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def instance_url(self) -> str:\n easypost_id = self.get(\"id\")\n if not easypost_id:\n raise Error(\"%s instance has invalid ID: %r\" % (type(self).__name__, easypost_id))\n return \"%s/%s\" % (self.class_url(), easypost_id)",
"def page_url(self):\n url = '/plaque/%s' % self.key.urlsafe()\n return url",
"def get_detail_URL(recipe_id):\n return reverse('recipeapp:recipe-detail', args=[recipe_id])"
] | [
"0.6810438",
"0.6689379",
"0.6567158",
"0.64550245",
"0.64396584",
"0.64053893",
"0.63963675",
"0.6392365",
"0.63474274",
"0.63005847",
"0.62952244",
"0.6280981",
"0.62611544",
"0.62428",
"0.62371224",
"0.6232598",
"0.62293047",
"0.6219801",
"0.62155926",
"0.6194746",
"0.61677283",
"0.6164772",
"0.6162665",
"0.6140584",
"0.6130346",
"0.6113918",
"0.6113918",
"0.6113506",
"0.6106742",
"0.61043555"
] | 0.7098282 | 0 |
Produce a url to initially populate checkboxes in the search page based on the study. | def get_search_url(self):
return reverse('trait_browser:source:studies:pk:traits:search', kwargs={'pk': self.pk}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dataset_search_url(self):\n return reverse('trait_browser:source:studies:pk:datasets:search', kwargs={'pk': self.pk})",
"def form_search_url(self):\r\n self.reformat_search_for_spaces()\r\n self.target_yt_search_url_str = self.prefix_of_search_url + self.yt_search_key + self.filter_url_portion",
"def test_study_source_get_search_url_response(self):\n this_study = factories.StudyFactory.create()\n url = this_study.get_search_url()\n response = self.client.get(url)\n # url should work\n self.assertEqual(response.status_code, 200)\n self.assertIsInstance(response.context['form'], forms.SourceTraitSearchForm)",
"def QAsearch():\n question = ''\n form = QuestionForm()\n question = form.question.data\n if form.validate_on_submit():\n return redirect(url_for('answer',word=question))\n return render_template(\n 'QAsearch.html',\n title = 'QAsearch Page',\n year = datetime.now().year,\n form = form,\n question = question\n )",
"def get_search_url(free_text_search):\n url = baseUrl + \"data/\"\n if not free_text_search:\n url += \"warehouse/\"\n url += \"search?\"\n return url",
"def page_body():\r\n st.header(\"Search\")\r\n st.subheader(\"Search For SMEs With A Few Different Options\")\r\n\r\n search_mode_selection = st.radio(\r\n help=\"Search For SMEs That Have Particular Connections, Titles, Or Names...\",\r\n label=\"Search By\",\r\n options=(SearchMode.Connection.value, SearchMode.JobTitle.value, SearchMode.Name.value),\r\n )\r\n\r\n search_form = st.form(key=\"search_form\", clear_on_submit=False)\r\n search_query = search_form.text_input(label=\"\", value=\"Search...\", max_chars=50)\r\n search_button = search_form.form_submit_button(label=\"Search\")\r\n\r\n if search_button:\r\n results = get_search_results(search_query, SearchMode[str(search_mode_selection).replace(\" \", \"\")])\r\n\r\n # Loop through the results returned from the database query\r\n for result in results:\r\n result_dict = result.to_dict() # Convert internally to a Python dict\r\n\r\n # dict keys here are actually database keys in Firestore. You would need to be signed in to see the proper values\r\n with st.expander(result_dict[\"name\"] + \" - \" + str(result_dict[\"age\"]) + \" years old\"):\r\n st.header(result_dict[\"name\"])\r\n st.write(result_dict[\"jobTitle\"])\r\n\r\n st.subheader(\"Personal Summary\")\r\n st.write(result_dict[\"personalSummary\"])\r\n\r\n if result_dict[\"companyName\"]:\r\n st.subheader(\"Works At\")\r\n st.write(result_dict[\"companyName\"])\r\n\r\n if result_dict[\"connections\"]:\r\n st.subheader(result_dict[\"name\"] + \"'s Connections\")\r\n st.write(\", \".join(result_dict[\"connections\"]))",
"def _build_url_exact(self, q: str, **kwargs: Dict) -> str:\n url = f\"{self._URL}?where=\"\n if kwargs.get('doi'):\n input_doi = kwargs.get('doi')\n url += f'''{{\"doi\":\"{input_doi}\"}}'''\n return url",
"def advanced_search():\n\n return render_template('Advanced_Search.html')",
"def genSearch(request):\n \n assert isinstance(request, HttpRequest)\n booklist=[]\n form = request.GET.copy();\n searchvalue =form['query']\n for k,v in get_valid_Books().items():\n if searchvalue.lower() in v.title.lower() or searchvalue.lower() in v.desc.lower() or searchvalue.lower() in v.a_id.name.lower():\n booklist.append(v)\n if booklist is None:\n clearfilter=\"False\"\n else:\n clearfilter=\"True\"\n\n return render(\n request,\n 'app/about.html',\n {\n 'title':'Books',\n 'books':booklist,\n 'clearfilter':clearfilter,\n 'year':datetime.now().year,\n }\n )",
"def _submit_url(self, request: Request) -> str:\n variables = [v.replace('/', '%2F') for v in request.variables]\n vars = ','.join(variables)\n return (\n f'https://{self.config.harmony_hostname}/{request.collection.id}'\n f'/ogc-api-coverages/1.0.0/collections/{vars}/coverage/rangeset'\n )",
"def search():\n student_to_find=request.args.get(\"student\", None)\n print(f\"A buscar: {student_to_find}\")\n student_list=search_student(student_to_find)\n return render_template(\"search.html\",student_list_result=student_list)",
"def contain_url(self):\n url = self.url\n\n d_month_year = self.get_date_year_month(self.depart_date)\n d_day = self.get_date_day(self.depart_date)\n if self.return_date == '':\n # If no return date is entered,\n # the 'search_type' parameter\n # is set to 'OW' (One Way).\n search_type = 'OW'\n parameters = self.get_parameters_string(\n search_type, d_month_year, d_day)\n else:\n # If a return date is entered,\n # the 'search_type' parameter\n # is set to 'RT' (Round Trip).\n search_type = 'RT'\n r_month_year = self.get_date_year_month(self.return_date)\n r_day = self.get_date_day(self.return_date)\n parameters = self.get_parameters_string(\n search_type, d_month_year, d_day,\n r_month_year, r_day)\n url = url + parameters\n return url",
"def hyperlink_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif 'UniProtKB Accession' in request.GET and request.GET['UniProtKB Accession'] or \\\n\t'Protein' in request.GET and request.GET['Protein'] or \\\n\t'Gene' in request.GET and request.GET['Gene'] or \\\n\t'Organism' in request.GET and request.GET['Organism'] or \\\n\t'Organismid' in request.GET and request.GET['Organismid'] or \\\n\t'SubCellular' in request.GET and request.GET['SubCellular'] or \\\n\t'Peptide Sequence' in request.GET and request.GET['Peptide Sequence'] or \\\n\t'Pathway Name' in request.GET and request.GET['Pathway Name'] or \\\n\t'Disease Name' in request.GET and request.GET['Disease Name'] or \\\n\t'Go ID' in request.GET and request.GET['Go ID'] or \\\n\t'Go Name' in request.GET and request.GET['Go Name'] or \\\n\t'Go Term' in request.GET and request.GET['Go Term'] or \\\n\t'AssayFdaApproveMark' in request.GET and request.GET['AssayFdaApproveMark']:\n\t\tuseruniprotkb =\"\"\n\t\tuserprotein =\"\"\n\t\tusergeneid =\"\"\n\t\tuserorg=\"\"\n\t\tuserorgid=\"\"\n\t\tusersubcell =\"\"\n\t\tuserpepseq =\"\"\n\t\tuserpathway =\"\"\n\t\tuserdis =\"\"\n\t\tusergoid =\"\"\n\t\tusergotn =\"\"\n\t\tusergot=\"\"\n\t\tuserassayfdaapprovemark=\"\"\n\t\tfinalsearhdata=''\n\t\ttry:\n\t\t\tuseruniprotkb = request.GET[\"UniProtKB Accession\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in useruniprotkb:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('|')\n\t\telse:\n\t\t\tuseruniprotkb=(useruniprotkb.strip()).split('\\\\n')\n\t\tuseruniprotkb=[(item.strip()).lower() for item in useruniprotkb]\n\t\tuseruniprotkb=map(str, useruniprotkb)\n\t\tuseruniprotkb=filter(None, useruniprotkb)\n\n\t\ttry:\n\t\t\tuserprotein = request.GET[\"Protein\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userprotein:\n\t\t\tuserprotein=(userprotein.strip()).split('|')\n\t\telse:\n\t\t\tuserprotein=(userprotein.strip()).split('\\\\n')\n\t\tuserprotein=[(item.strip()).lower() for item in userprotein]\n\t\tuserprotein=map(str, userprotein)\n\t\tuserprotein=filter(None, userprotein)\n\n\t\ttry:\n\t\t\tusergeneid = request.GET[\"Gene\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergeneid:\n\t\t\tusergeneid=(usergeneid.strip()).split('|')\n\t\telse:\n\t\t\tusergeneid=(usergeneid.strip()).split('\\\\n')\n\t\tusergeneid=[(item.strip()).lower() for item in usergeneid]\n\t\tusergeneid=map(str, usergeneid)\n\t\tusergeneid=filter(None, usergeneid)\n\n\t\ttry:\n\t\t\tuserorg = request.GET[\"Organism\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorg:\n\t\t\tuserorg=(userorg.strip()).split('|')\n\t\telse:\n\t\t\tuserorg=(userorg.strip()).split('\\\\n')\n\t\tuserorg=[(item.strip()).lower() for item in userorg]\n\t\tuserorg=map(str, userorg)\n\t\tuserorg=filter(None, userorg)\n\n\t\ttry:\n\t\t\tuserorgid = request.GET[\"Organismid\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userorgid:\n\t\t\tuserorgid=(userorgid.strip()).split('|')\n\t\telse:\n\t\t\tuserorgid=(userorgid.strip()).split('\\\\n')\n\t\tuserorgid=[(item.strip()).lower() for item in userorgid]\n\t\tuserorgid=map(str, userorgid)\n\t\tuserorgid=filter(None, userorgid)\n\n\t\ttry:\n\t\t\tusersubcell = request.GET[\"SubCellular\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in 
usersubcell:\n\t\t\tusersubcell=(usersubcell.strip()).split('|')\n\t\telse:\n\t\t\tusersubcell=(usersubcell.strip()).split('\\\\n')\n\t\tusersubcell=[(item.strip()).lower() for item in usersubcell]\n\t\tusersubcell=map(str, usersubcell)\n\t\tusersubcell=filter(None, usersubcell)\n\n\t\ttry:\n\t\t\tuserpepseq = request.GET[\"Peptide Sequence\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpepseq:\n\t\t\tuserpepseq=(userpepseq.strip()).split('|')\n\t\telse:\n\t\t\tuserpepseq=(userpepseq.strip()).split('\\\\n')\n\t\tuserpepseq=[(item.strip()).lower() for item in userpepseq]\n\t\tuserpepseq=map(str, userpepseq)\n\t\tuserpepseq=filter(None, userpepseq)\n\n\t\ttry:\n\t\t\tuserpathway = request.GET[\"Pathway Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userpathway:\n\t\t\tuserpathway=(userpathway.strip()).split('|')\n\t\telse:\n\t\t\tuserpathway=(userpathway.strip()).split('\\\\n')\n\t\tuserpathway=[(item.strip()).lower() for item in userpathway]\n\t\tuserpathway=map(str, userpathway)\n\t\tuserpathway=filter(None, userpathway)\n\n\t\ttry:\n\t\t\tuserdis = request.GET[\"Disease Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userdis:\n\t\t\tuserdis=(userdis.strip()).split('|')\n\t\telse:\n\t\t\tuserdis=(userdis.strip()).split('\\\\n')\n\t\tuserdis=[(item.strip()).lower() for item in userdis]\n\t\tuserdis=map(str, userdis)\n\t\tuserdis=filter(None, userdis)\n\n\t\ttry:\n\t\t\tusergoid = request.GET[\"Go ID\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergoid:\n\t\t\tusergoid=(usergoid.strip()).split('|')\n\t\telse:\n\t\t\tusergoid=(usergoid.strip()).split('\\\\n')\n\t\tusergoid=[(item.strip()).lower() for item in usergoid]\n\t\tusergoid=map(str, usergoid)\n\t\tusergoid=filter(None, usergoid)\n\n\t\ttry:\n\t\t\tusergotn = request.GET[\"Go Name\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergotn:\n\t\t\tusergotn=(usergotn.strip()).split('|')\n\t\telse:\n\t\t\tusergotn=(usergotn.strip()).split('\\\\n')\n\t\tusergotn=[(item.strip()).lower() for item in usergotn]\n\t\tusergotn=map(str, usergotn)\n\t\tusergotn=filter(None, usergotn)\n\n\t\ttry:\n\t\t\tusergot = request.GET[\"Go Term\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in usergot:\n\t\t\tusergot=(usergot.strip()).split('|')\n\t\telse:\n\t\t\tusergot=(usergot.strip()).split('\\\\n')\n\t\tusergot=[(item.strip()).lower() for item in usergot]\n\t\tusergot=map(str, usergot)\n\t\tusergot=filter(None, usergot)\n\n\t\ttry:\n\t\t\tuserassayfdaapprovemark = request.GET[\"AssayFdaApproveMark\"]\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\t\tif '|' in userassayfdaapprovemark:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('|')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\telse:\n\t\t\tuserassayfdaapprovemark=(userassayfdaapprovemark.strip()).split('\\\\n')\n\t\t\tuserassayfdaapprovemark=list(set(userassayfdaapprovemark))\n\t\tuserassayfdaapprovemark=[(item.strip()).lower() for item in userassayfdaapprovemark]\n\t\tuserassayfdaapprovemark=map(str, userassayfdaapprovemark)\n\t\tuserassayfdaapprovemark=filter(None, userassayfdaapprovemark)\n\n\t\tspquerylist =[]\n\t\tsearchtermlist=[]\n\n\t\tif len(useruniprotkb) >0:\n\t\t\tfinalsearhdata+='UniProtKB Accession:'+';'.join(useruniprotkb)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
useruniprotkb:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"UniProtKB Accession.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userprotein)> 0:\n\t\t\tfinalsearhdata+='Protein:'+';'.join(userprotein)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userprotein:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Protein.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergeneid) >0:\n\t\t\tfinalsearhdata+='Gene:'+';'.join(usergeneid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergeneid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Gene.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorg) > 0:\n\t\t\tfinalsearhdata+='Organism:'+';'.join(userorg)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorg:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userorgid) > 0:\n\t\t\tfinalsearhdata+='Organism ID:'+';'.join(userorgid)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userorgid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Organism ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usersubcell) >0:\n\t\t\tfinalsearhdata+='SubCellular:'+';'.join(usersubcell)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usersubcell:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"SubCellular.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpepseq) >0:\n\t\t\tfinalsearhdata+='Peptide Sequence:'+';'.join(userpepseq)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
userpepseq:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userpathway) >0:\n\t\t\tfinalsearhdata+='Pathway Name:'+';'.join(userpathway)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userpathway:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Pathway Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(userdis) >0:\n\t\t\tfinalsearhdata+='Disease Name:'+';'.join(userdis)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userdis:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Disease Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergoid) >0:\n\t\t\tfinalsearhdata+='Go ID:'+';'.join(usergoid)+' '\n\t\t\tsdict={}\n\t\t\tsdict[\"Go ID.ngram\"]=[i.split(' ')[0] for i in usergoid]\n\t\t\ttdict={}\n\t\t\ttdict[\"terms\"]=sdict\n\t\t\tsearchtermlist.append(tdict)\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergoid:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go ID.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]+={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergotn) >0:\n\t\t\tfinalsearhdata+='Go Name:'+';'.join(usergotn)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergotn:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Name.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tif len(usergot) > 0:\n\t\t\tfinalsearhdata+='Go Term:'+';'.join(usergot)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in usergot:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Go Term.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(userassayfdaapprovemark) > 
0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker::'+';'.join(userassayfdaapprovemark)+' '\n\t\t\tshouldlist=[]\n\t\t\tfor x in userassayfdaapprovemark:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\n\t\tif len(searchtermlist)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnameFIle=names.get_first_name()\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\t\t\t\t\t\n\t\t\t\t\tjfinaldata.append(jdic)\n\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata)\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1]))\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items())[:10])\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries']))\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', 
jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dumps(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tcontextindex={\n\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t}\n\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn render(request,'resultform.html',{'foundHits':foundHits})",
"def search(request):\n\n # get form data \n searchItem = request.GET.get(\"q\")\n # if searchItem is an exact match redirect to that page\n if (util.get_entry(searchItem) is not None):\n return HttpResponseRedirect(reverse(\"entry\", kwargs={\n \"title\": searchItem\n }))\n # add any pages with the string in it to results list \n else: \n results = []\n substring = False\n for title in util.list_entries():\n if searchItem.upper() in title.upper():\n results.append(title)\n if results:\n substring = True\n # return results\n return render(request, \"encyclopedia/search.html\", {\n \"searchItem\": searchItem,\n \"substring\": substring,\n \"results\": results\n })",
"def get_student_form():\n\n return render_template(\"students_search.html\")",
"def build_url(self):\n url = requests.utils.requote_uri(\n self.torrent_page + self.string_search)\n if self.page == '1337x':\n return(url + '/1/')\n elif self.page == 'limetorrents':\n return(url + '/')\n else:\n return(url)",
"def __on_click_begin(self):\n self.__refresh_search_results()\n if self.__cards and self.__study_params:\n study_set = StudySet(name=\"Study Query\", cards=self.__cards)\n self.__application.push_study_state(\n study_set,\n study_params=self.__study_params,\n scheduler_params=self.__scheduler_params)",
"def search_page(request):\n if request.method == \"GET\":\n page = request.GET.get('q')\n entries = util.list_entries()\n entries_set=set(entries)\n\n if page in entries_set:\n return render(request, \"encyclopedia/visit_entry.html\",{\n \"entry\": util.get_entry(page),\n \"title\": page\n })\n \n else:\n results = list(filter(lambda x: page in x, entries))\n return render(request, \"encyclopedia/search_page.html\",{\n \"results\": results\n })",
"def get_query_url(self, search_args):\n self._browser.open(\"http://poe.trade/\")\n # There are two forms, the second one is the search form\n # Both forms don't have names so we just know the 2nd one is the right one\n self._browser.form = list(self._browser.forms())[1]\n \n # Populate the forms with the stuff we want\n for form_name in search_args:\n control = self._browser.form.find_control(form_name)\n control.value = search_args[form_name]\n \n # By default we want people are are online and accepting buyouts\n buyout_control = self._browser.form.find_control(name=\"has_buyout\")\n online_control = self._browser.form.find_control(name=\"online\")\n buyout_control.value = [\"1\"]\n online_control.value = [\"x\"]\n \n search_response = self._browser.submit()\n return search_response.geturl()",
"def build_url(**kwargs):\n base_url = 'https://sfbay.craigslist.org/search/sby/apa?'\n\n query_params = {\n 'hasPic': '1',\n 'bundleDuplicates': '1',\n 'min_price': '1100',\n 'max_price': '1800',\n 'availabilityMode': '0',\n 'sale_date': 'all+dates',\n }\n\n # more query parameters passed, add them to the dict\n if kwargs:\n query_params.update(kwargs)\n\n return base_url + urllib.parse.urlencode(query_params)",
"def search_page():\n return render_template('page_query.html', search_label=g_search_type)",
"def _assemble_kw_url(self, keywords):\n search_params = self._build_param_request()\n include = self._build_field_request()\n\n keywords = '+'.join(keywords)\n request_url = \\\n self.nsf_api + 'keyword=' + keywords + include + search_params\n\n return request_url",
"def searchUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName",
"def get_clinicaltrial_url(search_term):\n template = \"https://clinicaltrials.gov/ct2/results?cond={}&Search=Apply&recrs=a&age_v=&gndr=&type=&rslt=\"\n search_term = search_term.replace(' ', '+')\n return template.format(search_term)",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })",
"def get_student_form():\n\n return render_template(\"student_search.html\")",
"def get_student_form():\n\n return render_template(\"student_search.html\")",
"def get_student_form():\n\n return render_template(\"student_search.html\")",
"def get_student_form():\n\n return render_template(\"student_search.html\")"
] | [
"0.5623152",
"0.5556928",
"0.55449784",
"0.5331433",
"0.52878296",
"0.52027786",
"0.5178475",
"0.51745033",
"0.51574606",
"0.51473963",
"0.5132062",
"0.51022416",
"0.51008046",
"0.5066496",
"0.50376517",
"0.5019615",
"0.501772",
"0.50154096",
"0.5013686",
"0.50117487",
"0.50070435",
"0.49997023",
"0.49762985",
"0.49643996",
"0.49605462",
"0.49571705",
"0.49558023",
"0.49558023",
"0.49558023",
"0.49558023"
] | 0.60279405 | 0 |
Produce a url to search datasets wtihin the study. | def get_dataset_search_url(self):
return reverse('trait_browser:source:studies:pk:datasets:search', kwargs={'pk': self.pk}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dataset_url(self, dataset: Dict) -> str:\n return f\"{self.site_url}/dataset/{dataset['name']}\"",
"def url(self) -> str:\n return self.DATASET_URLS[self.name]",
"def get_search_url(free_text_search):\n url = baseUrl + \"data/\"\n if not free_text_search:\n url += \"warehouse/\"\n url += \"search?\"\n return url",
"def url(self):\n scheme, netloc, path, query, fragment = six.moves.urllib.parse.urlsplit(self.baseurl)\n url = six.moves.urllib.parse.urlunsplit((\n scheme, netloc, path + '.dods',\n self.id + hyperslab(self.slice) + '&' +\n '&'.join(self.selection), fragment)).rstrip('&')\n\n return url",
"def get_search_url(self):\n return reverse('trait_browser:source:studies:pk:traits:search', kwargs={'pk': self.pk})",
"def get_url(self):\n # Replace erddapy get_download_url()\n # We need to replace it to better handle http responses with by-passing the _check_url_response\n # https://github.com/ioos/erddapy/blob/fa1f2c15304938cd0aa132946c22b0427fd61c81/erddapy/erddapy.py#L247\n\n # First part of the URL:\n protocol = self.erddap.protocol\n dataset_id = self.erddap.dataset_id\n response = self.erddap.response\n url = f\"{self.erddap.server}/{protocol}/{dataset_id}.{response}?\"\n\n # Add variables to retrieve:\n self.erddap.variables = (\n self._minimal_vlist\n ) # Define the list of variables to retrieve\n variables = self.erddap.variables\n variables = \",\".join(variables)\n url += f\"{variables}\"\n\n # Add constraints:\n self.define_constraints() # Define constraint to select this box of data (affect self.erddap.constraints)\n constraints = self.erddap.constraints\n _constraints = copy.copy(constraints)\n for k, v in _constraints.items():\n if k.startswith(\"time\"):\n _constraints.update({k: parse_dates(v)})\n _constraints = quote_string_constraints(_constraints)\n _constraints = \"\".join([f\"&{k}{v}\" for k, v in _constraints.items()])\n url += f\"{_constraints}\"\n\n # Last part:\n url += '&distinct()&orderBy(\"time,pres\")'\n return url",
"def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")",
"def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")",
"def get_study_data(self, soup, url):\n pass",
"def url_HITRANCIA():\n url=u\"https://hitran.org/data/CIA/\"\n return url",
"def _build_url_exact(self, q: str, **kwargs: Dict) -> str:\n url = f\"{self._URL}?where=\"\n if kwargs.get('doi'):\n input_doi = kwargs.get('doi')\n url += f'''{{\"doi\":\"{input_doi}\"}}'''\n return url",
"def get_url(self, dataset_code):\n module = None\n for qol_param in common.QOL_PARAMS:\n if dataset_code in common.QOL_PARAMS[qol_param]:\n module = common.QOL_PARAMS[qol_param][dataset_code]\n break\n\n url = self.__get_host(dataset_code)\n url = self.__apply_filters(url, common)\n if module is not None:\n url = self.__apply_filters(url, module)\n\n return url",
"def get_data_source_url(station=STATION_ID, metric=METRIC, hilo_only=True):\n date = \"{}{:02}{:02}\".format(now.tm_year, now.tm_mon, now.tm_mday)\n\n URL = \"https://api.tidesandcurrents.noaa.gov/api/prod/datagetter?format=json\"\n URL += \"&product=predictions\"\n URL += \"&interval=hilo\" if hilo_only else \"\"\n URL += \"&datum=mllw\" # MLLW = \"tides\"\n URL += \"&units=metric\" if metric else \"&units=english\"\n URL += \"&time_zone=lst_ldt\" if DST_ON else \"&time_zone=lst\"\n URL += \"&begin_date=\" + date\n URL += \"&end_date=\" + date\n URL += \"&station=\" + station\n\n return URL",
"def url(self):\n return url_search_posts(self.parameters, url_domain=self.url_domain)",
"def urlGenerator(self):\n \n # VERMONT #\n baseurl = 'https://www.vermontjudiciary.org'\n path = '/opinions-decisions'\n # from date\n param1 = 'facet_from_date=01/01'\n # to date\n param2 = 'facet_to_date=01/01/'\n # division\n param3 = 'f%5B0%5D=court_division_opinions_library%3A'\n # search by text\n param4 = 'search_api_fulltext='\n # page\n param5 = 'page='\n # generate list of URL\n listURL = []\n \n # list of divisions\n vt_court_division = {\"civil\": \"1\", \"supreme court\": \"7\", \"environmental\": \"3\", \"family\": \"4\", \"criminal\": \"2\"}\n # inputs\n from_year = 2000\n to_year = 2017\n endPages = 75 #0-74\n startPages = 0\n # make change to pull data from different division by changing division name below to any of the division in vt_court_vivision dict\n division = vt_court_division[\"environmental\"]\n # url generating\n for i in range(startPages, endPages):\n build_url = baseurl + path + '?' + param1 + str(from_year) + \"&\" + param2 + str(to_year) + \"&\" + param3 + division + param4 + \"&\" + param5 + str(i) + \"\"\n # append url to listUrl\n listURL.append(build_url)\n i += 1\n \n # return full list of URLs\n return listURL",
"def fetch_dataset_url_map(dataset):\n path = pl.Path(pl.Path(__file__).resolve().parent, YAML_FILE)\n yml = open_yml(path)[dataset]\n\n return dataset_details(\n dataset,\n yml['type'],\n BASE_URL + yml['url']['pre'] + \"/{}\" + yml['url']['post'],\n yml['f'],\n yml.get('col'),\n yml.get('val_col')\n )",
"def build_retrieve_url(\n ids, display, result=None, download=None, file=None, offset=None,\n length=None, subseq_range=None, expanded=False, header=False\n):\n url = baseUrl + \"data/view/\"\n url += ids\n check_display_option(display)\n url += \"&display=%s\" % (display)\n if result is not None:\n url += \"&result=%s\" % (result)\n if length is not None:\n check_length(length)\n url += \"&length=%s\" % (length)\n if offset is not None:\n url += \"&offset=%s\" % (offset)\n if subseq_range is not None:\n check_subseq_range(subseq_range)\n url += \"&range=%s\" % (subseq_range)\n url += \"&expanded=true\" if expanded else \"&expanded=false\"\n url += \"&header=true\" if header else \"&header=false\"\n if download is not None or file is not None:\n check_download_file_options(download, file)\n url += \"&download=%s\" % (download)\n return url",
"def create_search_url():\n\n search_url = 'http://newsapi.org/v2/everything?'\n\n # A date and optional time for the oldest article allowed. This should be in ISO 8601 format.\n oldest_article = get_oldest_article_date()\n \n payload = {\n \"q\":\"solar+energy+utility\",\n \"from\":oldest_article,\n \"sortBy\":\"relevancy\",\n \"pageSize\":100,\n \"apiKey\": os.environ['GOOGLE_NEWS_KEY']\n }\n\n\n return search_url, payload",
"def data_url(self):\n raise NotImplementedError",
"def data_url(self):\n raise NotImplementedError",
"def _build_url(self, service, resource_type, parameters={}):\n # authenticated dataselect queries have different target URL\n if self.user is not None:\n if service == \"dataselect\" and resource_type == \"query\":\n resource_type = \"queryauth\"\n return build_url(self.base_url, service, self.major_versions[service],\n resource_type, parameters,\n service_mappings=self._service_mappings,\n subpath=self.url_subpath)",
"def searchUrl(self):\n return self.lweBaseUrl + \\\n \"/collections/\" + self.collectionName",
"def get_url(phrase, prefix='all', start=0, max_results=10, sort_by='relevance', sort_order='descending'):\n base_url = 'http://export.arxiv.org/api/query?search_query='\n url = base_url + \\\n prefix+':'+phrase + \\\n '&start='+str(start) + \\\n '&max_results='+str(max_results) + \\\n '&sortBy='+sort_by + \\\n '&sortOrder='+sort_order\n return url",
"def full_text_doc_url(self):\n base_url = 'https://pic.datamade.us/chicago/document/'\n # base_url = 'http://127.0.0.1:5000/chicago/document/'\n \n if self.documents.filter(document_type='V').all():\n legistar_doc_url = self.documents.filter(document_type='V').first().document.url\n doc_url = '{0}?filename={2}&document_url={1}'.format(base_url, \n legistar_doc_url, \n self.identifier)\n return doc_url\n else:\n return None",
"def create_url(keyword, hits_limit, start_record, api_key):\n keyword = requests.utils.quote(keyword)\n url_base = (\"http://kulturarvsdata.se/ksamsok/api?x-api={api_key}\"\n \"&method=search&hitsPerPage={hits_limit}\"\n \"&startRecord={start_record}\"\n \"&query=serviceOrganization=RA%C3%84%20\"\n \"and%20serviceName=KMB%20\"\n \"and%20itemType=foto%20and%20mediaLicense=*%20\"\n \"and%20text={keyword}\")\n return url_base.format(api_key=api_key,\n hits_limit=hits_limit,\n start_record=start_record,\n keyword=keyword)",
"def contain_url(self):\n url = self.url\n\n d_month_year = self.get_date_year_month(self.depart_date)\n d_day = self.get_date_day(self.depart_date)\n if self.return_date == '':\n # If no return date is entered,\n # the 'search_type' parameter\n # is set to 'OW' (One Way).\n search_type = 'OW'\n parameters = self.get_parameters_string(\n search_type, d_month_year, d_day)\n else:\n # If a return date is entered,\n # the 'search_type' parameter\n # is set to 'RT' (Round Trip).\n search_type = 'RT'\n r_month_year = self.get_date_year_month(self.return_date)\n r_day = self.get_date_day(self.return_date)\n parameters = self.get_parameters_string(\n search_type, d_month_year, d_day,\n r_month_year, r_day)\n url = url + parameters\n return url",
"def solr_url(config):\n return _solr_core_url(config) + 'query'",
"def download(dataset_revision):\n return reverse('manageDatasets.download', 'microsites.urls',\n kwargs={'dataset_id': str(dataset_revision['dataset_id']), 'slug': dataset_revision['slug']})",
"def get_datafiles(self, url, survey_path):\n pass",
"def get_absolute_url(self):\n return reverse('trait_browser:source:datasets:detail', kwargs={'pk': self.pk})"
] | [
"0.7010941",
"0.66344947",
"0.6386537",
"0.6122667",
"0.60913324",
"0.60644984",
"0.5981221",
"0.5981221",
"0.5922729",
"0.58895314",
"0.5875229",
"0.58479536",
"0.5847596",
"0.5828358",
"0.5824677",
"0.5810934",
"0.5793076",
"0.5771518",
"0.57501584",
"0.57501584",
"0.57360697",
"0.5716842",
"0.5707884",
"0.5694435",
"0.56564784",
"0.5614354",
"0.56137913",
"0.56066453",
"0.560044",
"0.55937076"
] | 0.765159 | 0 |
Get html for study's name linking to study detail page. | def get_name_link_html(self):
url_text = "{{% url 'trait_browser:source:studies:pk:detail' pk={} %}} ".format(self.pk)
return URL_HTML.format(url=url_text, name=self.i_study_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def study():\n return render_template('study.html')",
"def get_study_info(study_link):\n template = \"https://clinicaltrials.gov{}\"\n study_link = study_link.replace(' ', '+')\n return template.format(study_link)",
"def get_study_name_from_id(self, study_id: int) -> str:\n raise NotImplementedError",
"def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html",
"def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n html = render_template('student_info.html',\n first=first,\n last=last,\n github=github)\n return html",
"def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html",
"def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html",
"def study_legacy():\n return render_template('study-legacy.html')",
"def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html",
"def GET_details(self, article):\r\n return DetailsPage(link = article).render()",
"def get_study_info(self,std_id):\n raise NotImplementedError",
"def get_student():\n\n github = request.args.get('github', 'jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n\n\n rows = hackbright.list_projects(github)\n\n return render_template (\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows\n )",
"def get_student():\n\n github = request.args.get('github')\n\n # print (\"aaaaaa\",hackbright.get_student_by_github(github))\n\n # if hackbright.get_student_by_github(github):\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # html = render_template(\"student_info.html\",\n # first = first,\n # last = last,\n # github=github)\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github)",
"def _get_name_relurl_and_desc(snippet_html):\n name_and_url_part, desc_part = snippet_html.find_all('p', 'snippet')\n name = name_and_url_part.get_text()\n relative_url = name_and_url_part.find('a').get('href')\n desc = desc_part.get_text()\n return name, relative_url, desc",
"def get_student():\n\n # github = \"jhacks\"\n github = request.args.get('github','jhacks')\n first, last, github = hackbright.get_student_by_github(github)\n return render_template(\"student_info.html\" , first=first, gorilla=last, giraffe=github)\n # return \"%s is the GitHub account for %s %s\" % (github, first, last)",
"def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)",
"def __str__(self):\n return self.page.get_title()",
"def get_study_data(self, soup, url):\n pass",
"def test_redirects_to_study_detail_page(self):\n study = factories.StudyFactory.create()\n # We need to create some datasets and traits so the detail page renders properly.\n source_traits = factories.SourceTraitFactory.create_batch(\n 10, source_dataset__source_study_version__i_is_deprecated=False,\n source_dataset__source_study_version__study=study)\n response = self.client.post(self.get_url(), {'object': study.pk})\n self.assertRedirects(response, reverse('trait_browser:source:studies:pk:detail', args=[study.pk]))",
"def survey_detail(request, survey_slug):\n if request.user.is_authenticated:\n if not request.user.groups.filter(name='Survey Creators').exists():\n raise Http404(\"Page not found\")\n else:\n raise Http404(\"Page not found\")\n\n survey = get_object_or_404(Survey, slug=survey_slug)\n my_surveys = Survey.objects.filter(author=request.user).order_by('title')\n\n if request.user == survey.author:\n return render(request,\n 'skip_logic/survey_detail.html',\n {'survey': survey, 'my_surveys': my_surveys,})\n else:\n raise Http404(\"Page not found\")",
"def get_info(self) -> str:\n return textwrap.dedent(\n \"\"\"\n <h1>Test page</h1>\n \"\"\"\n )",
"def get_name(self):\n return self.soup.find('div', id = 'zh-topic-title').h1\\\n .get_text(strip = True).encode(CODE)",
"def student_summary() -> str:\n db_path: str = \"810_startup.db\"\n\n try:\n db: sqlite3.Connection = sqlite3.connect(db_path)\n except sqlite3.OperationalError:\n return f'Error: Unable to open database at path {db_path}'\n else:\n query: str = \"select students.Name, students.CWID, grades.Course, grades.Grade, instructors.Name from students,grades,instructors where students.CWID=StudentCWID and InstructorCWID=instructors.CWID order by students.Name\"\n data: Dict[str, str] = [{'Name': name, 'CWID': cwid, 'Course': course, 'Grade': grade, 'Instructor': instructor} for name, cwid, course, grade, instructor in db.execute(query)]\n\n db.close()\n\n return render_template(\n 'students.html',\n title = 'Stevens Repository',\n table_title = 'Students Summary',\n students = data)",
"def getAdditionalDetails(self, soup):\n title_details = soup.find('div', id=\"titleDetails\")\n title_details = title_details.findAll('div', class_=\"txt-block\")\n return title_details",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n return render_template('student_info.html',\n first=first,\n last=last,\n github=github)\n \n\n #return \"{} is the GitHub account for {} {}\".format(github, first, last)",
"def title(self):\n return self.data.find(\n 'span', class_='briefResultsTitle'\n ).find(\n 'a'\n ).get_text()",
"def get(self):\n return orthanc.study(self.orthanc_id)",
"def test_correct_study_found_by_name(self):\n study_name = 'my_unlikely_study_name'\n study = factories.StudyFactory.create(i_study_name=study_name)\n url = self.get_url()\n response = self.client.get(url, {'q': study_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [study.i_accession])",
"def abstract_html(self, read_more_link=False):\n text, shortened = self.abstract_plaintext(include_shortened=True)\n context = {'text': text, 'shortened': shortened}\n if shortened and read_more_link:\n context['more_url'] = self.primary_url()\n return django_render('donations/includes/abstract.html', context)"
] | [
"0.68769675",
"0.6697346",
"0.61508894",
"0.60814106",
"0.60482645",
"0.6036819",
"0.588089",
"0.58306956",
"0.5795159",
"0.57281035",
"0.56913704",
"0.5666021",
"0.5664573",
"0.5647374",
"0.5640818",
"0.5606993",
"0.56017995",
"0.559533",
"0.5540547",
"0.5521373",
"0.5483745",
"0.54501754",
"0.5424516",
"0.5405152",
"0.5396126",
"0.53691334",
"0.5360877",
"0.53502893",
"0.5344078",
"0.5334552"
] | 0.8060419 | 0 |
Return a count of the number of tags for which current traits are tagged, but archived, in this study. | def get_archived_tags_count(self):
return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(
trait__source_dataset__source_study_version__study=self
).current().aggregate(
models.Count('tag', distinct=True))['tag__count'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('trait', distinct=True)\n )['trait__count']",
"def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()",
"def render_number_tagged_traits(self, record):\n return record.current_non_archived_traits.count()",
"def get_count(self):\n return len(self._tags)",
"def count_tags():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_tags()\r\n trans.commit()",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def test_context_data_excludes_archived_taggedtraits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n # Make fake tagged traits that all have the same tag.\n self.tagged_traits = TaggedTraitFactory.create_batch(\n 10, trait__source_dataset__source_study_version__study=self.study, tag=tag)\n archived_tagged_trait = self.tagged_traits[0]\n archived_tagged_trait.archive()\n archived_tagged_trait.refresh_from_db()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.non_archived().count())\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.all().count() - 1)",
"def tag_count(self, tag):\n return sum(self._out_counts.get(tag, {}).values())",
"def __len__(self):\n return len(self._tagged)",
"def getTagsNum(self):\r\n self.gettags()",
"def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts",
"def count_total_tags():\r\n total = TagMgr.count()\r\n stat = StatBookmark(attrib=TAG_CT, data=total)\r\n DBSession.add(stat)",
"def test_task_count_tags(self):\r\n tasks.count_tags()\r\n\r\n stat = StatBookmark.query.first()\r\n self.assertEqual(stat.attrib, stats.TAG_CT)\r\n self.assertEqual(stat.data, 4)",
"def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))",
"def count_tags(tags):\n counts = {}\n for tag_list in tags.values():\n for tag in tag_list:\n if tag in counts:\n counts[tag] += 1\n else:\n counts[tag] = 1\n return counts",
"def archived_tags(self):\n archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=archived_tagged_traits.values_list('tag__pk', flat=True))",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])",
"def test_has_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))",
"def test_has_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))",
"def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def count(self):\n\n raise NotImplementedError",
"def tag_counts(self, types=[]):\n if not types:\n types = self.tag_types\n for tag_type in types:\n print \"\\t%15s : %-10s\" % (tag_type, len(self.tag_dictionary[tag_type]))",
"def count_deleted(self):\n count = 0\n for _, e in self.contents.items():\n count = count + e.count_deleted()\n return count"
] | [
"0.85525626",
"0.8337307",
"0.808836",
"0.7484111",
"0.7208109",
"0.6977215",
"0.6822008",
"0.6755481",
"0.66966176",
"0.6484534",
"0.63340545",
"0.62938446",
"0.6283145",
"0.6265508",
"0.6221511",
"0.6198768",
"0.6086896",
"0.6075747",
"0.6005409",
"0.5969021",
"0.59272254",
"0.5906543",
"0.5906543",
"0.5877014",
"0.58637244",
"0.58637244",
"0.58637244",
"0.58383805",
"0.583629",
"0.57915974"
] | 0.84307426 | 1 |
Return a queryset of all of the current TaggedTraits from this study. | def get_all_tagged_traits(self):
return apps.get_model('tags', 'TaggedTrait').objects.filter(
trait__source_dataset__source_study_version__study=self,
).current() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def test_returns_all_studies_with_tagged_traits_for_multiple_tags(self):\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def studies(self):\n return self._study_queryset",
"def test_returns_all_studies_with_unreviewed_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def archived_tags(self):\n archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=archived_tagged_traits.values_list('tag__pk', flat=True))",
"def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all source traits and make 5 new ones, so there are only 5 for study 1.\n models.SourceTrait.objects.all().delete()\n self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def test_returns_all_studies_with_reviewed_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for (idx, study) in enumerate(self.studies):\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n if idx % 2 == 0:\n status = DCCReview.STATUS_CONFIRMED\n else:\n status = DCCReview.STATUS_FOLLOWUP\n DCCReviewFactory.create(tagged_trait=tmp, status=status)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def topics(self):\n # use get_model to avoid circular dependency\n topic_model = apps.get_model('tags', 'Topic')\n return topic_model.objects.filter(tag__in=self.tags.all()).distinct()",
"def test_returns_all_studies_with_archived_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_studies_with_unreviewed_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()",
"def test_returns_all_studies_with_reviewed_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for (idx, study) in enumerate(self.studies):\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n if idx % 2 == 0:\n status = DCCReview.STATUS_CONFIRMED\n else:\n status = DCCReview.STATUS_FOLLOWUP\n DCCReviewFactory.create(tagged_trait=tmp, status=status)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(tags__name=self.kwargs['tag_slug'])",
"def get_queryset(self):\n judge_qs = Judge.objects.filter(judge=self.request.user)\n return Contest.objects.filter(\n pk__in=judge_qs.values('contest'),\n publish_date__gte=timezone.now(),\n )",
"def obj_categories(self):\r\n return self._tags",
"def tags(self):\r\n return Tags(self)",
"def tags(self):\r\n return Tags(self)",
"def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']",
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def getTags(self,):\n\t\treturn self.tags;",
"def get_queryset(self):\n print(self.kwargs['collection'])\n collection_tags = Collection.objects.values_list('tags__name', flat=True)\n return Post.objects.filter(tags__name__in=collection_tags).distinct()",
"def queryset(self, request, queryset):\n for tag in get_resource_tags():\n if self.value() == tag[0]:\n return queryset.filter(tags__slug__iexact=tag[0])",
"def non_archived_tags(self):\n non_archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.non_archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=non_archived_tagged_traits.values_list('tag__pk', flat=True))",
"def get_tags(self):\n\n return self.tags",
"def test_does_not_return_studies_without_tagged_traits_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n other_study = self.studies[1]\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(other_study.pk, pks)"
] | [
"0.6514019",
"0.6437152",
"0.6416004",
"0.6301729",
"0.61813736",
"0.61287004",
"0.61267114",
"0.6058266",
"0.5956338",
"0.59383553",
"0.5782794",
"0.5782794",
"0.57523257",
"0.568733",
"0.5643809",
"0.5636289",
"0.55189633",
"0.5469623",
"0.5362374",
"0.53501254",
"0.53038394",
"0.53038394",
"0.5302501",
"0.5272108",
"0.52687424",
"0.52490187",
"0.5246478",
"0.52439755",
"0.524232",
"0.52298844"
] | 0.8149221 | 0 |
Return a queryset of the current archived TaggedTraits from this study. | def get_archived_tagged_traits(self):
return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(
trait__source_dataset__source_study_version__study=self
).current() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def archived_tags(self):\n archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=archived_tagged_traits.values_list('tag__pk', flat=True))",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_studies_with_archived_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def non_archived_tags(self):\n non_archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.non_archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=non_archived_tagged_traits.values_list('tag__pk', flat=True))",
"def get_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('trait', distinct=True)\n )['trait__count']",
"def test_returns_study_with_archived_tagged_trait_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(\n tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertIn(study.pk, pks)",
"def get_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('tag', distinct=True))['tag__count']",
"def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']",
"def test_context_data_excludes_archived_taggedtraits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n # Make fake tagged traits that all have the same tag.\n self.tagged_traits = TaggedTraitFactory.create_batch(\n 10, trait__source_dataset__source_study_version__study=self.study, tag=tag)\n archived_tagged_trait = self.tagged_traits[0]\n archived_tagged_trait.archive()\n archived_tagged_trait.refresh_from_db()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.non_archived().count())\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.all().count() - 1)",
"def studies(self):\n return self._study_queryset",
"def test_returns_all_studies_with_unreviewed_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def test_does_not_return_study_with_archived_tagged_trait_for_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(\n tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(study.pk, pks)",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_returns_all_studies_with_tagged_traits_for_multiple_tags(self):\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def test_returns_all_studies_with_reviewed_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for (idx, study) in enumerate(self.studies):\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n if idx % 2 == 0:\n status = DCCReview.STATUS_CONFIRMED\n else:\n status = DCCReview.STATUS_FOLLOWUP\n DCCReviewFactory.create(tagged_trait=tmp, status=status)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_queryset(self):\n return Entry.published.filter(tags__slug=self.kwargs['tag_slug'])",
"def get_queryset(self):\n judge_qs = Judge.objects.filter(judge=self.request.user)\n return Contest.objects.filter(\n pk__in=judge_qs.values('contest'),\n publish_date__gte=timezone.now(),\n )",
"def test_returns_all_studies_with_unreviewed_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(tags__name=self.kwargs['tag_slug'])",
"def get_queryset(self):\n return Strategy.objects.select_related('author').order_by('-pub_date')[:5]",
"def get_queryset(self):\n return Strategy.objects.select_related('author').order_by('-pub_date')[:5]",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all source traits and make 5 new ones, so there are only 5 for study 1.\n models.SourceTrait.objects.all().delete()\n self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def get_queryset(self):\n return Chapter.objects.filter(story=self.story).order_by(\"-start_date\")"
] | [
"0.77290195",
"0.75084317",
"0.72397757",
"0.71252674",
"0.65865415",
"0.64593077",
"0.64155614",
"0.62145716",
"0.61490464",
"0.60586834",
"0.6027033",
"0.58406144",
"0.5763332",
"0.56548536",
"0.5611018",
"0.5603707",
"0.5603707",
"0.5603707",
"0.55999005",
"0.5566254",
"0.5534818",
"0.53832793",
"0.53623366",
"0.5348254",
"0.53448856",
"0.53440917",
"0.53440917",
"0.5329881",
"0.5326044",
"0.52693343"
] | 0.8213264 | 0 |
Return a queryset of the current non-archived TaggedTraits from this study. | def get_non_archived_tagged_traits(self):
return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(
trait__source_dataset__source_study_version__study=self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def non_archived_tags(self):\n non_archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.non_archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=non_archived_tagged_traits.values_list('tag__pk', flat=True))",
"def archived_tags(self):\n archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=archived_tagged_traits.values_list('tag__pk', flat=True))",
"def test_returns_all_studies_with_archived_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']",
"def test_returns_all_studies_with_unreviewed_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_studies_with_unreviewed_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_context_data_excludes_archived_taggedtraits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n # Make fake tagged traits that all have the same tag.\n self.tagged_traits = TaggedTraitFactory.create_batch(\n 10, trait__source_dataset__source_study_version__study=self.study, tag=tag)\n archived_tagged_trait = self.tagged_traits[0]\n archived_tagged_trait.archive()\n archived_tagged_trait.refresh_from_db()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.non_archived().count())\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.all().count() - 1)",
"def studies(self):\n return self._study_queryset",
"def test_returns_all_studies_with_reviewed_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for (idx, study) in enumerate(self.studies):\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n if idx % 2 == 0:\n status = DCCReview.STATUS_CONFIRMED\n else:\n status = DCCReview.STATUS_FOLLOWUP\n DCCReviewFactory.create(tagged_trait=tmp, status=status)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def test_does_not_return_study_with_archived_tagged_trait_for_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(\n tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(study.pk, pks)",
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def get_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('trait', distinct=True)\n )['trait__count']",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def test_does_not_return_studies_without_tagged_traits_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n other_study = self.studies[1]\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(other_study.pk, pks)",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_has_no_archived_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n archived_tagged_trait = TaggedTraitFactory.create(trait=self.trait, archived=True)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertNotIn(archived_tagged_trait, [el[0] for el in context['tagged_traits_with_xs']])",
"def test_returns_study_with_archived_tagged_trait_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(\n tag=tag, trait__source_dataset__source_study_version__study=study, archived=True)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertIn(study.pk, pks)",
"def test_returns_all_studies_with_reviewed_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for (idx, study) in enumerate(self.studies):\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study, tag=tag)\n tagged_traits.append(tmp)\n if idx % 2 == 0:\n status = DCCReview.STATUS_CONFIRMED\n else:\n status = DCCReview.STATUS_FOLLOWUP\n DCCReviewFactory.create(tagged_trait=tmp, status=status)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_all_studies_without_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def test_returns_study_with_unreviewed_tagged_trait_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertIn(study.pk, pks)",
"def get_queryset(self):\n judge_qs = Judge.objects.filter(judge=self.request.user)\n return Contest.objects.filter(\n pk__in=judge_qs.values('contest'),\n publish_date__gte=timezone.now(),\n )",
"def get_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('tag', distinct=True))['tag__count']",
"def test_does_not_return_studies_with_unreviewed_tagged_traits_with_other_tag_for_given_tag(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n other_tag = TagFactory.create()\n other_study = self.studies[1]\n other_tagged_trait = TaggedTraitFactory.create(\n tag=other_tag, trait__source_dataset__source_study_version__study=other_study)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\"}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(other_study.pk, pks)",
"def test_does_not_return_studies_with_unreviewed_tagged_trait_with_other_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(tag=tag, trait__source_dataset__source_study_version__study=study)\n other_tag = TagFactory.create()\n other_study = self.studies[1]\n other_tagged_trait = TaggedTraitFactory.create(\n tag=other_tag, trait__source_dataset__source_study_version__study=other_study)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(other_study.pk, pks)",
"def test_returns_all_studies_with_tagged_traits_for_multiple_tags(self):\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))"
] | [
"0.775637",
"0.73766696",
"0.708502",
"0.68280214",
"0.6783516",
"0.6571693",
"0.6464671",
"0.6396007",
"0.63075525",
"0.611397",
"0.6077231",
"0.60213524",
"0.59917337",
"0.590065",
"0.5870306",
"0.58424973",
"0.5763946",
"0.57431793",
"0.56793123",
"0.56793123",
"0.56793123",
"0.5561857",
"0.55278844",
"0.5511613",
"0.54736805",
"0.5471956",
"0.54147184",
"0.5410344",
"0.54011595",
"0.53853613"
] | 0.82755035 | 0 |
Return the count of all current traits that have been tagged in this study. | def get_all_traits_tagged_count(self):
return SourceTrait.objects.filter(
source_dataset__source_study_version__study=self
).current().exclude(all_tags=None).count() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']",
"def get_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('trait', distinct=True)\n )['trait__count']",
"def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def get_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('tag', distinct=True))['tag__count']",
"def get_count(self):\n return len(self._tags)",
"def render_number_tagged_traits(self, record):\n return record.current_non_archived_traits.count()",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def count_indications(self) -> int:\n return self._count_model(Indication)",
"def count_all(self):\n return Counter(self._sequence)",
"def count(self):\n\n raise NotImplementedError",
"def count(self):\n return len([i for i in self.iteritems()])",
"def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts",
"def tag_counts(self, types=[]):\n if not types:\n types = self.tag_types\n for tag_type in types:\n print \"\\t%15s : %-10s\" % (tag_type, len(self.tag_dictionary[tag_type]))",
"def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()",
"def hits(self):\n return sum(self.labels.values())",
"def tag_count(self, tag):\n return sum(self._out_counts.get(tag, {}).values())",
"def counts(self):\n return sum(self.counter.values()), len(self.visited)",
"def counts(self):\n return sum(self.counter.values()), len(self.visited)",
"def getObservationCount(self):\r\n return self._s_obs",
"def getTagsNum(self):\r\n self.gettags()",
"def counts(self) -> dict:\n return Counter(self.sequence)",
"def __len__(self):\n return len(self._tagged)",
"def document_skill_counts(self, source_object: Dict):\n skill_counts = Counter()\n for candidate_skill in self.candidate_skills(source_object):\n skill_counts[self.nlp.lowercase_strip_punc(candidate_skill.skill_name).lstrip().rstrip()] += 1\n return skill_counts",
"def count(self):\n return len(self._components)",
"def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))",
"def counts(self):\n\n counts = defaultdict(int)\n\n for i, geom in zip(self.tree_ids, self.tree):\n point_int = list(self.sindex.intersection(geom.bounds))\n if point_int:\n counts[i] += len(point_int)\n\n return dict(counts)",
"def active_type_counts(self):\n names = self.visible()\n return {\n 'total': names.count(),\n 'personal': len([n for n in names if n.is_personal()]),\n 'organization': len([n for n in names if n.is_organization()]),\n 'event': len([n for n in names if n.is_event()]),\n 'software': len([n for n in names if n.is_software()]),\n 'building': len([n for n in names if n.is_building()])\n }",
"def count(self, cls=None):\n return len(self.all(cls))",
"def count(self):\n return sum(1 for _ in self)"
] | [
"0.7672161",
"0.7633912",
"0.7162639",
"0.7084544",
"0.67033195",
"0.66352296",
"0.6590511",
"0.64851606",
"0.6463744",
"0.63729537",
"0.6240862",
"0.6184167",
"0.6182421",
"0.60347986",
"0.60346127",
"0.6001969",
"0.6001855",
"0.5997859",
"0.5997859",
"0.5991083",
"0.5980368",
"0.5961597",
"0.5946219",
"0.59367746",
"0.5924454",
"0.5909945",
"0.5909271",
"0.5900527",
"0.58992654",
"0.5898491"
] | 0.85028684 | 0 |
Return the count of current traits that have been tagged (and the tag archived) in this study. | def get_archived_traits_tagged_count(self):
return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(
trait__source_dataset__source_study_version__study=self
).current().aggregate(
models.Count('trait', distinct=True)
)['trait__count'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_non_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self).aggregate(\n models.Count('trait', distinct=True))['trait__count']",
"def get_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('tag', distinct=True))['tag__count']",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()",
"def render_number_tagged_traits(self, record):\n return record.current_non_archived_traits.count()",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def get_count(self):\n return len(self._tags)",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def test_context_data_excludes_archived_taggedtraits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n # Make fake tagged traits that all have the same tag.\n self.tagged_traits = TaggedTraitFactory.create_batch(\n 10, trait__source_dataset__source_study_version__study=self.study, tag=tag)\n archived_tagged_trait = self.tagged_traits[0]\n archived_tagged_trait.archive()\n archived_tagged_trait.refresh_from_db()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.non_archived().count())\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.all().count() - 1)",
"def tag_count(self, tag):\n return sum(self._out_counts.get(tag, {}).values())",
"def __len__(self):\n return len(self._tagged)",
"def test_has_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))",
"def test_has_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))",
"def getTagsNum(self):\r\n self.gettags()",
"def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts",
"def count_indications(self) -> int:\n return self._count_model(Indication)",
"def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))",
"def count(self):\n\n raise NotImplementedError",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def count_tags():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_tags()\r\n trans.commit()",
"def tag_counts(self, types=[]):\n if not types:\n types = self.tag_types\n for tag_type in types:\n print \"\\t%15s : %-10s\" % (tag_type, len(self.tag_dictionary[tag_type]))",
"def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()",
"def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def count_all(self):\n return Counter(self._sequence)",
"def archived_tags(self):\n archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=archived_tagged_traits.values_list('tag__pk', flat=True))",
"def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])",
"def count(self):\n return len([i for i in self.iteritems()])",
"def active_count(self):\n cnt = 0\n for item in self[:]:\n if item.is_alive():\n cnt += 1\n else:\n self.remove(item)\n return cnt",
"def counts(self):\n return sum(self.counter.values()), len(self.visited)"
] | [
"0.8268371",
"0.80618906",
"0.79486185",
"0.75812364",
"0.7075274",
"0.6992174",
"0.69508713",
"0.66764146",
"0.6522477",
"0.6356951",
"0.63389266",
"0.62884736",
"0.6141169",
"0.6141169",
"0.6046133",
"0.5984525",
"0.5980168",
"0.5971963",
"0.5918252",
"0.5876502",
"0.5867922",
"0.579797",
"0.575619",
"0.57482505",
"0.57364684",
"0.5735281",
"0.5725159",
"0.5724879",
"0.57122874",
"0.56550556"
] | 0.85503507 | 0 |
Return the count of current traits that have been tagged (and the tag not archived) in this study. | def get_non_archived_traits_tagged_count(self):
return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(
trait__source_dataset__source_study_version__study=self).aggregate(
models.Count('trait', distinct=True))['trait__count'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def get_archived_traits_tagged_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('trait', distinct=True)\n )['trait__count']",
"def get_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('tag', distinct=True))['tag__count']",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def get_all_tags_count(self):\n return apps.get_model('tags', 'Tag').objects.filter(\n all_traits__source_dataset__source_study_version__study=self,\n all_traits__source_dataset__source_study_version__i_is_deprecated=False\n ).distinct().count()",
"def render_number_tagged_traits(self, record):\n return record.current_non_archived_traits.count()",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def get_count(self):\n return len(self._tags)",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def tag_count(self, tag):\n return sum(self._out_counts.get(tag, {}).values())",
"def test_context_data_excludes_archived_taggedtraits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n # Make fake tagged traits that all have the same tag.\n self.tagged_traits = TaggedTraitFactory.create_batch(\n 10, trait__source_dataset__source_study_version__study=self.study, tag=tag)\n archived_tagged_trait = self.tagged_traits[0]\n archived_tagged_trait.archive()\n archived_tagged_trait.refresh_from_db()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.non_archived().count())\n self.assertEqual(tag_count_row['tt_count'], TaggedTrait.objects.all().count() - 1)",
"def __len__(self):\n return len(self._tagged)",
"def test_has_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))",
"def test_has_tagged_traits(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))",
"def count_indications(self) -> int:\n return self._count_model(Indication)",
"def getTagsNum(self):\r\n self.gettags()",
"def count_tags(tag_events):\n tagged_lines = []\n for tag_event in tag_events:\n for tag in tag_event[1][\"tag\"][\"labels\"]:\n tagged_lines.append(tag)\n tag_counts = Counter(tagged_lines)\n return tag_counts",
"def count_by_tag(self, dataframe, tags):\r\n if tags and not dataframe['tags'].empty:\r\n data_to_return = []\r\n counter = 0\r\n for tag in tags:\r\n for datafield in dataframe['tags']:\r\n if tag in datafield:\r\n counter += 1\r\n data_to_return.append([tag, counter])\r\n counter = 0\r\n return pandas.DataFrame(data_to_return, columns=('TAG', 'TagCount'))",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def count(self):\n\n raise NotImplementedError",
"def get_count(self, tag: Text) -> int:\r\n sub_tags = tag.split(\"+\")\r\n return len([e for e in self.elements if all(t in e.tags for t in sub_tags)])",
"def test_context_data_no_taggedtraits(self):\n TaggedTrait.objects.all().delete()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertIn('study', context)\n self.assertEqual(context['study'], self.study)\n self.assertIn('tag_counts', context)\n self.assertEqual(len(context['tag_counts']), 0)\n # The button linking to this view shouldn't be present because study.get_non_archived_traits_tagged_count is 0.\n self.assertNotContains(response, self.get_url(self.study.pk))",
"def prepare_count_incidents(self, object):\n roles = object.actorrole_set.all()\n return Incident.objects.filter(actors_role__in=roles).count()",
"def tag_counts(self, types=[]):\n if not types:\n types = self.tag_types\n for tag_type in types:\n print \"\\t%15s : %-10s\" % (tag_type, len(self.tag_dictionary[tag_type]))",
"def count_tags():\r\n trans = transaction.begin()\r\n StatBookmarkMgr.count_total_tags()\r\n trans.commit()",
"def count(self):\n return len([i for i in self.iteritems()])",
"def count_correct_tags(self):\n correct_dict = {}\n for gold_tag, predict_tag in zip(self.golden_tags, self.predict_tags):\n if gold_tag == predict_tag:\n if gold_tag not in correct_dict:\n correct_dict[gold_tag] = 1\n else:\n correct_dict[gold_tag] += 1\n\n return correct_dict",
"def get_tags_count(self, m_id, u_id=None):\n query = \"\"\"\n select tags, count(tags) from tags\n where movie_id={movie_id} group by tags\n \"\"\".format(movie_id=m_id,)\n\n if u_id is not None:\n query = \"\"\"\n select tags, count(tags) from tags\n where movie_id={movie_id} and user_id={user_id}\n group by tags\n \"\"\".format(movie_id=m_id, user_id=u_id)\n\n res = self.db.execute(query).fetchall()\n\n tags_occured = dict()\n for row in res:\n tags_occured[row[0]] = row[1]\n\n # print(tags_occured)\n\n return tags_occured",
"def count_all(self):\n return Counter(self._sequence)",
"def active_count(self):\n cnt = 0\n for item in self[:]:\n if item.is_alive():\n cnt += 1\n else:\n self.remove(item)\n return cnt"
] | [
"0.823464",
"0.8140047",
"0.7480824",
"0.7419102",
"0.71749496",
"0.70208025",
"0.6762968",
"0.67440826",
"0.65510833",
"0.6394683",
"0.63454866",
"0.6240015",
"0.6186335",
"0.6186335",
"0.615093",
"0.615016",
"0.6122516",
"0.6041667",
"0.59720755",
"0.5931473",
"0.58819157",
"0.5865149",
"0.58316976",
"0.582143",
"0.57765836",
"0.5750976",
"0.57420886",
"0.570243",
"0.57021755",
"0.5688815"
] | 0.8264949 | 0 |
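The record above counts distinct tagged traits through the Django ORM. As an illustration only, the same distinct-count idea can be written in plain Python; the TaggedLink tuple, its fields, and the sample rows below are hypothetical stand-ins for the TaggedTrait queryset, not part of the dataset.

from collections import namedtuple

# Hypothetical flat stand-in for one TaggedTrait row: which trait it tags,
# whether the tagging is archived, and whether its study version is deprecated.
TaggedLink = namedtuple("TaggedLink", "trait_id tag archived deprecated")

def non_archived_traits_tagged_count(tagged_links):
    # Distinct traits carrying at least one current, non-archived tag.
    return len({link.trait_id
                for link in tagged_links
                if not link.archived and not link.deprecated})

links = [
    TaggedLink(1, "bmi", archived=False, deprecated=False),
    TaggedLink(1, "height", archived=False, deprecated=False),  # same trait, second tag
    TaggedLink(2, "bmi", archived=True, deprecated=False),      # archived: excluded
]
print(non_archived_traits_tagged_count(links))  # -> 1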
Return the most recent SourceStudyVersion linked to this study. | def get_latest_version(self):
try:
version = self.sourcestudyversion_set.filter(
i_is_deprecated=False
).order_by( # We can't use "latest" since it only accepts one field in Django 1.11.
'-i_version',
'-i_date_added'
).first()
except ObjectDoesNotExist:
return None
return version | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_latest_version(self):\n study = self.source_study_version.study\n current_study_version = self.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same dataset associated with the current study version.\n try:\n current_dataset = SourceDataset.objects.get(\n source_study_version=current_study_version,\n i_accession=self.i_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_dataset",
"def get_latest_version(self):\n current_study_version = self.source_dataset.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same trait associated with the current study version.\n try:\n current_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=current_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_trait",
"def sourceVersion(self):\n CraftCore.debug.trace(\"GitSource sourceVersion\")\n\n return self.__getCurrentRevision()",
"def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None",
"def get_previous_version(self):\n previous_study_version = self.source_dataset.source_study_version.get_previous_version()\n if previous_study_version is not None:\n try:\n previous_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=previous_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except SourceTrait.DoesNotExist:\n return None\n return previous_trait",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n # get the latest possible Version instance by using the workspace path\n if version is None:\n version = self.get_version_from_project_dir()\n\n return version",
"def get_previous_version(self):\n return self.get_previous_versions().first()",
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"def latest_upstream_version(self):\n return self.database.latest_upstream_version",
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def get_last_revision(self):\n return self.index.get_index_revision(self.name)",
"def get_latest_revision(self):\n revision_list = self.get_revision_list()\n if revision_list:\n return revision_list[-1]\n else:\n raise NoRevisionsExistError()",
"def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]",
"def last_revision(self):\n return self.revision_set.order_by(\"created_on\").last()",
"def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"",
"def sourceVersion(self):\n # we hope that the build target is equal to the version that is build\n return self.subinfo.buildTarget",
"def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version",
"def version(self):\n self._get_latest_content()\n return self._data.get('version', None)",
"def sourceVersion(self):\n craftDebug.trace(\"HgSource.sourceVersion called\")\n\n if self.enableHg:\n # open a temporary file - do not use generic tmpfile because this doesn't give a good file object with python\n with open(os.path.join(self.checkoutDir().replace('/', '\\\\'), \".crafthgtip.tmp\"), \"wb+\") as tempfile:\n # run the command\n utils.system(\"%s tip\" % self.hgExecutable, stdout=tempfile)\n # TODO: check return value for success\n tempfile.seek(os.SEEK_SET)\n\n # read the temporary file and grab the first line\n revision = tempfile.readline().replace(\"changeset:\", \"\").strip()\n\n os.remove(os.path.join(self.checkoutDir().replace('/', '\\\\'), \".crafthgtip.tmp\"))\n # always return True to not break something serious\n return revision",
"def revision(self):\n return self._revision",
"def revision(self):\n return self._revision",
"def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")",
"def get_version(self):\n return self.version",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def get_version_from_recent_files(self):\n # full_path = self.fusion_prefs[\"LastCompFile\"]\n # return self.get_version_from_full_path(full_path)\n\n version = None\n rfm = RecentFileManager()\n\n try:\n recent_files = rfm[self.name]\n except KeyError:\n logger.debug('no recent files')\n recent_files = None\n\n if recent_files is not None:\n for i in range(len(recent_files)):\n version = self.get_version_from_full_path(recent_files[i])\n if version is not None:\n break\n\n logger.debug(\"version from recent files is: %s\" % version)\n\n return version",
"def version(self):\r\n return self.version_guid",
"def getVersion(self):\n return _libsbml.SBase_getVersion(self)"
] | [
"0.7559727",
"0.7077481",
"0.6922376",
"0.68969524",
"0.6865629",
"0.6647644",
"0.6573123",
"0.64721805",
"0.6470273",
"0.6398663",
"0.6269095",
"0.6241272",
"0.62055796",
"0.61247975",
"0.61089414",
"0.61071545",
"0.61031723",
"0.6082287",
"0.60816973",
"0.60259306",
"0.59947133",
"0.59521556",
"0.59521556",
"0.59471893",
"0.59113765",
"0.5884781",
"0.5884781",
"0.5871893",
"0.5870807",
"0.58493507"
] | 0.7674174 | 0 |
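The ordering trick in the record above (order by '-i_version', '-i_date_added' and take the first non-deprecated row) can be sketched without the ORM as a max over a tuple key. The StudyVersion dataclass and sample dates below are hypothetical stand-ins for the real SourceStudyVersion model.

from dataclasses import dataclass
from datetime import datetime
from typing import List, Optional

@dataclass
class StudyVersion:          # hypothetical stand-in for SourceStudyVersion
    i_version: int
    i_date_added: datetime
    i_is_deprecated: bool = False

def latest_version(versions: List[StudyVersion]) -> Optional[StudyVersion]:
    # Newest non-deprecated version: highest version number, ties broken by date added.
    candidates = [v for v in versions if not v.i_is_deprecated]
    if not candidates:
        return None
    return max(candidates, key=lambda v: (v.i_version, v.i_date_added))

versions = [
    StudyVersion(1, datetime(2017, 1, 1)),
    StudyVersion(2, datetime(2018, 1, 1)),
    StudyVersion(2, datetime(2018, 6, 1), i_is_deprecated=True),  # deprecated: skipped
]
print(latest_version(versions).i_version)  # -> 2 (the 2018-01-01 entry)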
Return a dbGaP link to the page for the latest SourceStudyVersion. | def get_latest_version_link(self):
return self.get_latest_version().dbgap_link | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def version_link(self):\n release_link = url_for('data.data', selected_release=self.DATASET_RELEASE)\n return Markup(f\"<a href='{release_link}'>{self.DATASET_RELEASE}</a>\")",
"def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)",
"def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version",
"def get_latest_version(self):\n study = self.source_study_version.study\n current_study_version = self.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same dataset associated with the current study version.\n try:\n current_dataset = SourceDataset.objects.get(\n source_study_version=current_study_version,\n i_accession=self.i_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_dataset",
"def move_dbgap_link_to_study_version(apps, schema_editor):\n SourceStudyVersion = apps.get_model('trait_browser', 'SourceStudyVersion')\n for ssv in SourceStudyVersion.objects.all():\n ssv.dbgap_link = ssv.sourcedataset_set.first().sourcetrait_set.first().dbgap_study_link\n ssv.save()",
"def thousandg_link(variant_obj, build=None):\n dbsnp_id = variant_obj.get('dbsnp_id')\n build = build or 37\n\n if not dbsnp_id:\n return None\n\n if build == 37:\n url_template = (\"http://grch37.ensembl.org/Homo_sapiens/Variation/Explore\"\n \"?v={};vdb=variation\")\n else:\n url_template = (\"http://www.ensembl.org/Homo_sapiens/Variation/Explore\"\n \"?v={};vdb=variation\")\n\n return url_template.format(dbsnp_id)",
"def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()",
"def view_specific_paper_version():\n paper = db.paper(request.args(0))\n if paper is None:\n session.flash = T('No such paper')\n redirect(URL('default', 'index'))\n form = SQLFORM(db.paper, record=paper, readonly=True)\n all_versions_link = A('All versions', _href=URL('default', 'view_paper_versions', args=[paper.paper_id]))\n return dict(form=form,\n all_versions_link=all_versions_link)",
"def set_dbgap_link(self):\n return self.VARIABLE_URL.format(\n self.source_dataset.source_study_version.full_accession, self.i_dbgap_variable_accession)",
"def getDBReleaseVersion(dbh, jobPars):\n\n return dbh.getDBReleaseVersion(jobPars=jobPars)",
"def getfullURL(date):\n\tbase_url = \"https://www.gpo.gov/fdsys/pkg/CREC-\"+date+\"/pdf/CREC-\"+date+\".pdf\"\n\treturn base_url",
"def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None",
"def get_study_info(study_link):\n template = \"https://clinicaltrials.gov{}\"\n study_link = study_link.replace(' ', '+')\n return template.format(study_link)",
"def RefsPage(request):\n sources = models.ValueSource.objects.all()\n sorted_sources = sorted(sources, key=lambda s: s.citation)\n template_data = {\"sources\": sorted_sources}\n return render_to_response('data_refs.html', template_data)",
"def db_version():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/version', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)",
"def svn_fs_revision_link(*args):\r\n return _fs.svn_fs_revision_link(*args)",
"def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url",
"def getURL(date):\n\tbase_url = \"https://www.gpo.gov/fdsys/pkg/CREC-\"+date+\"/pdf/CREC-\"+date+\".pdf\"\n\tprint base_url",
"def human_version(self):\n return _('Latest Stable') if self.version == 'latest_stable' else 'OpenRAVE %s'%self.version",
"def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")",
"def remove_study_version_dbgap_link(apps, schema_editor):\n SourceStudyVersion = apps.get_model('trait_browser', 'SourceStudyVersion')\n for ssv in SourceStudyVersion.objects.all():\n ssv.dbgap_link = ''\n ssv.save()",
"def get_name_link_html(self):\n url_text = \"{{% url 'trait_browser:source:studies:pk:detail' pk={} %}} \".format(self.pk)\n return URL_HTML.format(url=url_text, name=self.i_study_name)",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None",
"def swegen_link(variant_obj):\n url_template = (\"https://swegen-exac.nbis.se/variant/{this[chromosome]}-\"\n \"{this[position]}-{this[reference]}-{this[alternative]}\")\n return url_template.format(this=variant_obj)",
"def get_open_source_link(self):\n return self.bot_data_file[\"open_source_link\"]",
"def url_for_version(self, version):\n url = \"https://ftp.acc.umu.se/pub/GNOME/sources/libsigc++\"\n ext = \".tar.gz\" if version < Version(\"2.2.10\") else \".tar.xz\"\n return url + \"/%s/libsigc++-%s%s\" % (version.up_to(2), version, ext)",
"def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200",
"def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)",
"def sourceVersion(self):\n CraftCore.debug.trace(\"GitSource sourceVersion\")\n\n return self.__getCurrentRevision()",
"def get_version():\n return about.get_version()"
] | [
"0.6841118",
"0.6463157",
"0.6224093",
"0.57410735",
"0.56654507",
"0.5629362",
"0.55557",
"0.5552489",
"0.5513178",
"0.5491778",
"0.54240173",
"0.53660846",
"0.5361276",
"0.53601146",
"0.5352483",
"0.53248584",
"0.52955514",
"0.5282903",
"0.5276562",
"0.5262418",
"0.5259679",
"0.52487236",
"0.5211086",
"0.5206835",
"0.5205544",
"0.5202324",
"0.51987225",
"0.51984835",
"0.51839095",
"0.5168674"
] | 0.6467567 | 1 |
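The one-liner in the record above only formats the latest version's full accession into a URL template. A hedged sketch of that composition follows; the template string is a placeholder, since the model's actual STUDY_VERSION_URL constant is not shown in this record.

# Placeholder template; the model's real STUDY_VERSION_URL may differ.
STUDY_VERSION_URL = "https://www.ncbi.nlm.nih.gov/projects/gap/cgi-bin/study.cgi?study_id={}"

def latest_version_link(accessions_newest_first):
    # Mirror of get_latest_version().dbgap_link: format the newest accession, if any.
    if not accessions_newest_first:
        return None
    return STUDY_VERSION_URL.format(accessions_newest_first[0])

print(latest_version_link(["phs000001.v3.p1", "phs000001.v2.p1"]))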
Automatically set full_accession from the study's phs value. | def set_full_accession(self):
return self.STUDY_VERSION_ACCESSION.format(self.study.phs, self.i_version, self.i_participant_set) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_full_accession(self):\n return self.DATASET_ACCESSION.format(\n self.i_accession, self.i_version, self.source_study_version.i_participant_set)",
"def set_full_accession(self):\n return self.VARIABLE_ACCESSION.format(\n self.i_dbgap_variable_accession, self.i_dbgap_variable_version,\n self.source_dataset.source_study_version.i_participant_set)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceStudyVersion, self).save(*args, **kwargs)",
"def set_fullname(self, value):\n self.fullname = value",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceTrait, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)",
"def set_fullname(self, value):\n raise NotImplementedError('set_fullname')",
"def access_resistance(self):\n return None",
"def change_study(self, study_prefix: Optional[str]):\n if study_prefix is None or study_prefix == \"\":\n self._state[\"study\"] = None\n else:\n info = self.user_info\n if info is None:\n raise ValueError(\n \"Cannot validate study ID because you are not logged in\"\n )\n ids = info.matching_study_ids(study_prefix)\n if len(ids) == 0:\n raise ValueError(\n f'Accessible study not found in your login data for study prefix \"{study_prefix}\"'\n )\n if len(ids) > 1:\n raise ValueError(\n f'Ambiguous study prefix \"{study_prefix}\". Matches: {\", \".join(ids)}'\n )\n study_id = ids[0]\n self._state[\"study\"] = study_id\n self._save()\n pass",
"def study(self, study):\n self.logger.debug(\"In 'study' setter.\")\n\n self._study = study",
"def policy_alias(self):",
"def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)",
"def _setDegreesPerAU(self, fullcircle):\n self._fullcircle = fullcircle\n self._degreesPerAU = 360/fullcircle\n if self._mode == \"standard\":\n self._angleOffset = 0\n else:\n self._angleOffset = fullcircle/4.",
"def access():",
"def access(self, value):\n self._access = value",
"def compute_access(field):\n bus_acc = get_wbgen(field, 'access_bus')\n dev_acc = get_wbgen(field, 'access_dev')\n abbrev = {'READ_WRITE': 'RW', 'READ_ONLY': 'RO', 'WRITE_ONLY': 'WO'}\n typ = get_wbgen(field, 'type')\n if bus_acc is None:\n bus_acc = {'PASS_THROUGH': 'WO', 'MONOSTABLE': 'WO',\n 'CONSTANT': 'RO'}.get(typ, 'RW')\n else:\n bus_acc = abbrev.get(bus_acc)\n if dev_acc is None:\n dev_acc = {'CONSTANT': 'WO'}.get(typ, 'RO')\n else:\n dev_acc = abbrev.get(dev_acc)\n field.h_access = '{}_{}'.format(bus_acc, dev_acc)",
"def mod_family_accession(family_accession):\n\n return family_accession[:family_accession.index('.')]",
"def set_occupant(self):\n\t\tself.occupant = 1",
"def save(self, *args, **kwargs):\n self.phs = self.set_phs()\n super(Study, self).save(*args, **kwargs)",
"def full_name(self, full_name):\n\n self._full_name = full_name",
"def full_name(self, full_name):\n\n self._full_name = full_name",
"def __set_full_path_of_file(self, value):\n self.full_path_of_file = value",
"def set_fullscale(self, ch_id: int, vfull: float) -> None:\n self.write(':channel{0}:range {1:.4g}'.format(ch_id, vfull))",
"def set_access_point(self, value: str) -> None:\n\n self.__requester.set_base_url(value)",
"def setField(self, data):\n\t\tview = self.view\n\t\tview.sbAbstraccion.setValue(data['sbAbstraccion'])",
"def _has_staff_access_to_descriptor(user, descriptor, course_key):\r\n return _has_staff_access_to_location(user, descriptor.location, course_key)",
"def load_student_full_courseload():\n return None",
"def setReferencePoseSlider(self, part, *args):\n\n percent = float(args[0]) * .01\n self.setPosePercentage(percent, part)",
"def take_test(exam, student):\n\n student.score = exam.administer()",
"def determine_sample_accession(\n self,\n experiment_accession: str,\n sample_source_name: str,\n sample_assay_name: str,\n filename: str,\n ) -> str:\n\n # It SEEMS like the filename often contains part or all of the\n # sample name so we first try to see if either field contains\n # the filename with the extension stripped off:\n if isinstance(filename, str):\n stripped_filename = \".\".join(filename.split(\".\")[:-1])\n if stripped_filename != \"\":\n if stripped_filename in sample_source_name:\n return experiment_accession + \"-\" + sample_source_name\n elif stripped_filename in sample_assay_name:\n return experiment_accession + \"-\" + sample_assay_name\n\n # Accessions don't have spaces in them, but sometimes these\n # fields do so next we try to see if one has spaces and the\n # other doesn't:\n source_has_spaces = \" \" in sample_source_name\n assay_has_spaces = \" \" in sample_assay_name\n if assay_has_spaces and not source_has_spaces:\n return experiment_accession + \"-\" + sample_source_name\n elif source_has_spaces and not assay_has_spaces:\n return experiment_accession + \"-\" + sample_assay_name\n\n # We're out of options so return the longest one.\n if len(sample_source_name) >= len(sample_assay_name):\n return experiment_accession + \"-\" + sample_source_name\n else:\n return experiment_accession + \"-\" + sample_assay_name"
] | [
"0.69272184",
"0.6779552",
"0.5763856",
"0.51321983",
"0.50852907",
"0.5062284",
"0.5008536",
"0.49571142",
"0.46755826",
"0.46588916",
"0.46557772",
"0.46504992",
"0.46206248",
"0.45762715",
"0.45750052",
"0.4568539",
"0.4559341",
"0.4549361",
"0.45215124",
"0.44915038",
"0.44915038",
"0.44564617",
"0.4447713",
"0.44297668",
"0.44164768",
"0.44149777",
"0.43921274",
"0.43879",
"0.43635997",
"0.4351833"
] | 0.72539365 | 0 |
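For illustration, the accession built above concatenates the study's phs number with the version and participant set. The template below is an assumption in the usual dbGaP style, standing in for the model's STUDY_VERSION_ACCESSION constant.

# Assumed template; the real constant is defined on the model.
STUDY_VERSION_ACCESSION = "phs{:06d}.v{}.p{}"

def full_accession(phs, version, participant_set):
    # Compose a study-version accession such as phs000007.v29.p10.
    return STUDY_VERSION_ACCESSION.format(phs, version, participant_set)

print(full_accession(7, 29, 10))  # -> phs000007.v29.p10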
Return an ordered queryset of previous versions. | def get_previous_versions(self):
return self.study.sourcestudyversion_set.filter(
i_version__lte=self.i_version,
i_date_added__lt=self.i_date_added
).order_by(
'-i_version',
'-i_date_added'
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_previous_version(self):\n return self.get_previous_versions().first()",
"def update_previous_all_versions():\n\n # get all the ids\n version_ids = m.meta.Session.query(distinct(tst.TestVersion.id)).filter_by(archived=False).\\\n join('methods').filter_by(short_name='Online').\\\n join('test','type').filter_by(short_name='RC').all()\n\n for version_id in version_ids:\n update_previous(version_id)",
"def previous(self):\n try:\n return self.filter(end_date__lt=self.current().start_date).latest()\n except self.model.DoesNotExist:\n return None",
"def get_versions(self):\n versions = TextVersion.objects.filter(text__exact=self).order_by('-created')\n # TODO: use new postgresql 8.4 row_number as extra select to do that\n #for index in xrange(len(versions)):\n # v = versions[index]\n # # version_number is 1-based\n # setattr(v, 'version_number', len(versions) - index)\n return versions",
"def reversed(self):\n return QuerySet(reversed(list(self)))",
"def filter_queryset(self, qs):\n qs = super(ReleaseViewSet, self).filter_queryset(qs)\n if getattr(self, 'order_queryset', False):\n return sorted(qs, key=models.Release.version_sort_key)\n return qs",
"def select_versions(self):\n return []",
"def pre_sort(self, qs):\n return qs",
"def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,\r\n **kwargs):\r\n return self._findAll(name, attrs, text, limit, self.previousGenerator,\r\n **kwargs)",
"def getPrevFragments(self):\n return self.prevFragments",
"def previous_pages(self):\n \n if self.start > 0:\n return self.all_pages[0:self.start]\n return",
"def get_versions(start='current'):\n start = check_version_str(start)\n versions = get_linked_versions(start)\n\n results = versions[:]\n while results:\n results = get_linked_versions(results[-1])\n print results\n if results:\n versions.extend(results)\n\n versions = [x for x in set(versions) if check_manual_exists(x)]\n return sort_versions(versions, reverse=True)",
"def svn_fs_history_prev(*args):\r\n return _fs.svn_fs_history_prev(*args)",
"def versionHistory(self):\n url = self.metaData().getLink(\"version-history\")\n assert url is not None\n\n header = self._baseHeader.copy()\n response = self._adapter.getRequest(url, header)\n\n return json.loads(response['Body'])",
"def get_versions(self):\n # They randomly use and don't use 'r' prefix so we have to sort\n # versions manually\n versions = list(self._get_github_tags())\n versions.sort(\n key=operator.attrgetter('base_version'),\n reverse=True,\n )\n return versions",
"def retrieve_recently_changed_orders(self, **kwargs):\n return self.client.execute(\"order/multi-get\", \"GET\", kwargs)",
"def findPrevious(self, name=None, attrs={}, text=None, **kwargs):\r\n return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)",
"def previous(self):\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index == 0:\n return None\n return posts_by_date[index - 1]",
"def previous(self):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator_previous(self)",
"def previous(self):\n\n\t\t# TODO Check if there's a more efficient way to do this\n\t\tlist = PollQuestion.objects.filter(id__lt = self.id, poll = self.poll)\n\t\tlist = list.order_by('-id')\n\n\t\tif len(list) < 1:\n\t\t\treturn None\n\n\t\treturn list[0]",
"def previous(self, rows: List[Row]) -> List[Row]:\n if not rows:\n return []\n input_row_index = self._get_row_index(rows[0])\n if input_row_index > 0:\n return [self.table_data[input_row_index - 1]]\n return []",
"def previous(self):\n return _libsbml.SwigPyIterator_previous(self)",
"def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None",
"def _order_changelog_versions(self, versions):\n\n return sorted(versions, key=LooseVersion)",
"def published_before(self) -> Type[QuerySet]:\n return Post.objects.filter(published__lt=self.published) if self.is_published() else Post.objects.none()",
"def getPrevious(self):\n return self.__previous__",
"def versions(self):\n return self._versions",
"def get_versions(self):\n raise NotImplementedError",
"def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))",
"def recent(self):\n return self.filter(\n start_date__lte=self.current().end_date + timezone.timedelta(days=1),\n end_date__gte=self.current().start_date - timezone.timedelta(days=1),\n )"
] | [
"0.6681544",
"0.65478915",
"0.6312672",
"0.6222891",
"0.6191354",
"0.61462396",
"0.60991406",
"0.5949714",
"0.5930343",
"0.59164745",
"0.58782697",
"0.587286",
"0.5844892",
"0.58411556",
"0.57994163",
"0.57472384",
"0.5726047",
"0.57105625",
"0.5678144",
"0.5673545",
"0.56406736",
"0.5609506",
"0.5600511",
"0.555056",
"0.5500987",
"0.5486326",
"0.5465468",
"0.54305774",
"0.5420744",
"0.5415501"
] | 0.8416572 | 0 |
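The queryset above keeps versions numbered at or below the current one that were added earlier, newest first. The same filter-and-sort reads like this in plain Python; Version and the sample dates are hypothetical stand-ins.

from collections import namedtuple
from datetime import date

Version = namedtuple("Version", "i_version i_date_added")  # hypothetical stand-in

def previous_versions(current, all_versions):
    # i_version <= current and added strictly earlier, ordered newest first.
    older = [v for v in all_versions
             if v.i_version <= current.i_version and v.i_date_added < current.i_date_added]
    return sorted(older, key=lambda v: (v.i_version, v.i_date_added), reverse=True)

v1 = Version(1, date(2016, 1, 1))
v2 = Version(2, date(2017, 1, 1))
v3 = Version(3, date(2018, 1, 1))
print(previous_versions(v3, [v1, v2, v3]))  # newest first: v2, then v1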
Return the previous version of this study. | def get_previous_version(self):
return self.get_previous_versions().first() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_previous_version(self):\n previous_study_version = self.source_dataset.source_study_version.get_previous_version()\n if previous_study_version is not None:\n try:\n previous_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=previous_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except SourceTrait.DoesNotExist:\n return None\n return previous_trait",
"def previous(self):\n return self.my_previous",
"def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None",
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def previous(self):\n\n pass",
"def getPrevious(self):\n return self.__previous__",
"def previous(self):\n try:\n return self.filter(end_date__lt=self.current().start_date).latest()\n except self.model.DoesNotExist:\n return None",
"def get_previous(self):\n return self._next_previous_helper('previous')",
"def restore_previous_ehr_version(self, ehr_record):\n return self.restore_ehr_version(ehr_record, ehr_record.version-1)[0]",
"def get_previous(self):\n return self.previous",
"def previous(self):\n return _osgAnimation.SwigPyIterator_previous(self)",
"def previous(self):\n return self._call_player_proxy('Prev', None)",
"def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))",
"def previous(self):\n return _libsbml.SwigPyIterator_previous(self)",
"def get_prev(self):\n return self.prev",
"def previous(self):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator_previous(self)",
"def previous(self):\n if self.currentframe > 0:\n return self.getframe(self.currentframe - 1)\n else:\n newobj = hdf5image()\n newobj.read(previous_filename(self.filename))\n return newobj",
"def prev(self):\n return self.__prev",
"def previous(self):\n return Reference(\":\".join(self.names[:-2]))",
"def previous(self) -> Optional[Chainable]:\n return None",
"def get_previous_step(self):\n return self.get_step_by_index(-2)",
"def previous(self):\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index == 0:\n return None\n return posts_by_date[index - 1]",
"def previous_date(self):\n yesterday = pendulum.yesterday('UTC')\n last_update = self.storage.last_update(self.feed)\n if not last_update or last_update < yesterday:\n last_update = yesterday\n return last_update",
"def getPreviousObservation(self):\n\n if (len(self.observationHistory) <= 1):\n return None\n\n return self.observationHistory[-2]",
"def previous(self):\n if self.currentframe > 0:\n return self.getframe(self.currentframe - 1)\n else:\n newobj = pixiimage()\n newobj.read(previous_filename(\n self.sequencefilename))\n return newobj",
"def getPrev(self):\n\t\t\treturn self.prev",
"def previous(self):\n if self.has_previous:\n previous_id = self.page - 1\n if self._cached_previous and self._cached_previous.id == previous_id:\n return self._cached_previous\n self._cached_previous = Page(previous_id)\n return self._cached_previous",
"def get_previous_observation(self):\n if len(self.observation_history) == 1:\n return None\n else:\n return self.observation_history[-2]",
"def previous(self):\n if self.cursor.pref:\n self.cursor = self.cursor.pref\n return self.cursor\n return None",
"def previous_step_result(self):\n return self._previous_step_result"
] | [
"0.76663053",
"0.7525812",
"0.7337205",
"0.7286422",
"0.72301817",
"0.71594524",
"0.7139116",
"0.7111315",
"0.710192",
"0.7072759",
"0.7060298",
"0.70384383",
"0.70250046",
"0.69853616",
"0.6982506",
"0.6953244",
"0.69062996",
"0.690354",
"0.6884494",
"0.68721044",
"0.68637425",
"0.68600905",
"0.68351716",
"0.68076617",
"0.680712",
"0.67293495",
"0.6699049",
"0.6689452",
"0.6676094",
"0.65781975"
] | 0.85586053 | 0 |
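The .first() call in the record above reduces to "head of the ordered list, or None"; a one-line sketch of that behaviour on an already-ordered list:

def previous_version(ordered_previous_versions):
    # Counterpart of QuerySet.first(): first element of the ordered list, or None if empty.
    return ordered_previous_versions[0] if ordered_previous_versions else None

print(previous_version([]))            # -> None
print(previous_version(["v2", "v1"]))  # -> v2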
Return a queryset of SourceTraits that are new in this version compared to past versions. | def get_new_sourcetraits(self):
previous_study_version = self.get_previous_version()
SourceTrait = apps.get_model('trait_browser', 'SourceTrait')
if previous_study_version is not None:
qs = SourceTrait.objects.filter(
source_dataset__source_study_version=self
)
# We can probably write this with a join to be more efficient.
previous_variable_accessions = SourceTrait.objects.filter(
source_dataset__source_study_version=previous_study_version
).values_list('i_dbgap_variable_accession', flat=True)
qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)
return qs
else:
return SourceTrait.objects.none() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def get_new_sourcedatasets(self):\n previous_study_version = self.get_previous_version()\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n if previous_study_version is not None:\n qs = SourceDataset.objects.filter(source_study_version=self)\n # We can probably write this with a join to be more efficient.\n previous_dataset_accessions = SourceDataset.objects.filter(\n source_study_version=previous_study_version\n ).values_list('i_accession', flat=True)\n qs = qs.exclude(i_accession__in=previous_dataset_accessions)\n return qs\n else:\n return SourceDataset.objects.none()",
"def test_no_deprecated_traits_in_queryset(self):\n # Create an older, deprecated version of an existing source trait.\n trait = self.source_traits[0]\n # Make a new copy of the source study version, and decrement the version number.\n ssv2 = copy(trait.source_dataset.source_study_version)\n ssv2.i_version -= 1\n ssv2.i_id += 1\n ssv2.i_is_deprecated = True\n ssv2.save()\n # Make a new copy of the dataset, linked to older ssv.\n ds2 = copy(trait.source_dataset)\n ds2.i_id += 1\n ds2.source_study_version = ssv2\n ds2.save()\n # Copy the source trait and link it to the older dataset.\n trait2 = copy(trait)\n trait2.source_dataset = ds2\n trait2.i_trait_id += 1\n trait2.save()\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url, {'q': trait.i_trait_name})\n pks = get_autocomplete_view_ids(response)\n self.assertIn(trait.pk, pks)\n self.assertNotIn(trait2.pk, pks)",
"def test_no_deprecated_traits_in_queryset(self):\n # Create an older, deprecated version of an existing source trait.\n trait = self.source_traits[0]\n # Make a new copy of the source study version, and decrement the version number.\n ssv2 = copy(trait.source_dataset.source_study_version)\n ssv2.i_version -= 1\n ssv2.i_id += 1\n ssv2.i_is_deprecated = True\n ssv2.save()\n # Make a new copy of the dataset, linked to older ssv.\n ds2 = copy(trait.source_dataset)\n ds2.i_id += 1\n ds2.source_study_version = ssv2\n ds2.save()\n # Copy the source trait and link it to the older dataset.\n trait2 = copy(trait)\n trait2.source_dataset = ds2\n trait2.i_trait_id += 1\n trait2.save()\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url, {'q': trait.i_trait_name})\n pks = get_autocomplete_view_ids(response)\n self.assertIn(trait.pk, pks)\n self.assertNotIn(trait2.pk, pks)",
"def test_does_not_compare_with_two_versions_ago(self): # noqa\n new_trait_2 = factories.SourceTraitFactory.create(\n source_dataset__source_study_version=self.study_version_2)\n new_trait_3 = factories.SourceTraitFactory.create(\n source_dataset__source_study_version=self.study_version_3,\n i_dbgap_variable_accession=new_trait_2.i_dbgap_variable_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n self.assertNotIn(new_trait_3, table.data)",
"def test_no_deprecated_traits_in_queryset(self):\n # Create an older, deprecated version of an existing source trait.\n trait = self.source_traits[0]\n # Make a new copy of the source study version, and decrement the version number.\n ssv2 = copy(trait.source_dataset.source_study_version)\n ssv2.i_version -= 1\n ssv2.i_id += 1\n ssv2.i_is_deprecated = True\n ssv2.save()\n # Make a new copy of the dataset, linked to older ssv.\n ds2 = copy(trait.source_dataset)\n ds2.i_id += 1\n ds2.source_study_version = ssv2\n ds2.save()\n # Copy the source trait and link it to the older dataset.\n trait2 = copy(trait)\n trait2.source_dataset = ds2\n trait2.i_trait_id += 1\n trait2.save()\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url, {'q': trait2.i_dbgap_variable_accession})\n pks = get_autocomplete_view_ids(response)\n self.assertIn(self.source_traits[0].pk, pks)\n self.assertNotIn(trait2.pk, pks)",
"def test_no_deprecated_traits_in_table(self):\n deprecated_traits = factories.SourceTraitFactory.create_batch(\n 10, source_dataset__source_study_version__i_is_deprecated=True,\n source_dataset__source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n for trait in deprecated_traits:\n self.assertNotIn(trait, table.data)\n for trait in self.source_traits:\n self.assertIn(trait, table.data)",
"def test_no_deprecated_traits_in_table(self):\n deprecated_traits = factories.SourceTraitFactory.create_batch(\n 10, source_dataset__source_study_version__i_is_deprecated=True)\n response = self.client.get(self.get_url())\n context = response.context\n table = context['source_trait_table']\n for trait in deprecated_traits:\n self.assertNotIn(trait, table.data)\n for trait in self.source_traits:\n self.assertIn(trait, table.data)",
"def test_no_deprecated_traits_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source dataset and increment it. Link it to the new ssv.\n source_dataset2 = copy(self.source_dataset)\n source_dataset2.i_id += 1\n source_dataset2.source_study_version = source_study_version2\n source_dataset2.save()\n # Copy the source traits and link them to the new source dataset.\n source_traits2 = []\n for trait in self.source_traits:\n st2 = copy(trait)\n st2.source_dataset = source_dataset2\n st2.i_trait_id = trait.i_trait_id + len(self.source_traits)\n st2.save()\n source_traits2.append(st2)\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertNotIn(trait.i_trait_id, returned_pks)",
"def test_no_deprecated_traits_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source dataset and increment it. Link it to the new ssv.\n source_dataset2 = copy(self.source_dataset)\n source_dataset2.i_id += 1\n source_dataset2.source_study_version = source_study_version2\n source_dataset2.save()\n # Copy the source traits and link them to the new source dataset.\n source_traits2 = []\n for trait in self.source_traits:\n st2 = copy(trait)\n st2.source_dataset = source_dataset2\n st2.i_trait_id = trait.i_trait_id + len(self.source_traits)\n st2.save()\n source_traits2.append(st2)\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertNotIn(trait.i_trait_id, returned_pks)",
"def test_no_deprecated_traits_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source dataset and increment it. Link it to the new ssv.\n source_dataset2 = copy(self.source_dataset)\n source_dataset2.i_id += 1\n source_dataset2.source_study_version = source_study_version2\n source_dataset2.save()\n # Copy the source traits and link them to the new source dataset.\n source_traits2 = []\n for trait in self.source_traits:\n st2 = copy(trait)\n st2.source_dataset = source_dataset2\n st2.i_trait_id = trait.i_trait_id + len(self.source_traits)\n st2.save()\n source_traits2.append(st2)\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertNotIn(trait.i_trait_id, returned_pks)",
"def test_no_deprecated_traits_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source dataset and increment it. Link it to the new ssv.\n source_dataset2 = copy(self.source_dataset)\n source_dataset2.i_id += 1\n source_dataset2.source_study_version = source_study_version2\n source_dataset2.save()\n # Copy the source traits and link them to the new source dataset.\n source_traits2 = []\n for trait in self.source_traits:\n st2 = copy(trait)\n st2.source_dataset = source_dataset2\n st2.i_trait_id = trait.i_trait_id + len(self.source_traits)\n st2.save()\n source_traits2.append(st2)\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertNotIn(trait.i_trait_id, returned_pks)",
"def test_no_deprecated_traits_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source dataset and increment it. Link it to the new ssv.\n source_dataset2 = copy(self.source_dataset)\n source_dataset2.i_id += 1\n source_dataset2.source_study_version = source_study_version2\n source_dataset2.save()\n # Copy the source traits and link them to the new source dataset.\n source_traits2 = []\n for trait in self.source_traits:\n st2 = copy(trait)\n st2.source_dataset = source_dataset2\n st2.i_trait_id = trait.i_trait_id + len(self.source_traits)\n st2.save()\n source_traits2.append(st2)\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertNotIn(trait.i_trait_id, returned_pks)",
"def test_no_deprecated_traits_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source dataset and increment it. Link it to the new ssv.\n source_dataset2 = copy(self.source_dataset)\n source_dataset2.i_id += 1\n source_dataset2.source_study_version = source_study_version2\n source_dataset2.save()\n # Copy the source traits and link them to the new source dataset.\n source_traits2 = []\n for trait in self.source_traits:\n st2 = copy(trait)\n st2.source_dataset = source_dataset2\n st2.i_trait_id = trait.i_trait_id + len(self.source_traits)\n st2.save()\n source_traits2.append(st2)\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertNotIn(trait.i_trait_id, returned_pks)",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def test_no_deprecated_traits_in_table(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n for trait in self.source_traits_v1:\n self.assertNotIn(trait, table.data)\n for trait in self.source_traits_v2:\n self.assertNotIn(trait, table.data)",
"def test_no_deprecated_traits_with_same_version_number(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n current_study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_version=5)\n old_study_version = factories.SourceStudyVersionFactory.create(\n study=self.study, i_version=current_study_version.i_version, i_is_deprecated=True)\n current_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)\n old_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)\n current_tagged_trait = TaggedTraitFactory.create(trait=current_trait, tag=tag)\n old_tagged_trait = TaggedTraitFactory.create(trait=old_trait, tag=tag)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], 1)",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def test_no_previous_study_version(self):\n self.study_version_1.delete()\n self.study_version_2.delete()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n self.assertEqual(len(table.data), 0)\n for trait in self.source_traits_v3:\n self.assertNotIn(trait, table.data)",
"def test_no_deprecated_traits_in_queryset(self):\n # Create an older, deprecated version of an existing source trait.\n trait = self.harmonized_traits[0]\n # Make a new copy of the harmonized_trait_set_version, and decrement the version number.\n htsv2 = copy(trait.harmonized_trait_set_version)\n htsv2.i_version -= 1\n htsv2.i_id += 1\n htsv2.i_is_deprecated = True\n htsv2.save()\n # Note that the new htsv is still liknked to the existing h. trait set.\n # Copy the harmonized trait and link it to the older htsv.\n trait2 = copy(trait)\n trait2.harmonized_trait_set_version = htsv2\n trait2.i_trait_id += 1\n trait2.save()\n # Get results from the autocomplete view and make sure only the new version is found.\n url = self.get_url()\n response = self.client.get(url, {'q': trait.i_trait_name})\n pks = get_autocomplete_view_ids(response)\n self.assertIn(trait.pk, pks)\n self.assertNotIn(trait2.pk, pks)",
"def test_does_not_compare_with_two_versions_ago(self): # noqa\n new_dataset_2 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_2)\n new_dataset_3 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_3,\n i_accession=new_dataset_2.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(new_dataset_3, table.data)",
"def test_no_deprecated_traits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n current_study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_version=5)\n old_study_version = factories.SourceStudyVersionFactory.create(\n study=self.study, i_version=4, i_is_deprecated=True)\n current_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)\n old_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)\n current_tagged_trait = TaggedTraitFactory.create(trait=current_trait, tag=tag)\n old_tagged_trait = TaggedTraitFactory.create(trait=old_trait, tag=tag)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], 1)",
"def test_no_deprecated_traits_in_table(self):\n deprecated_datasets = factories.SourceDatasetFactory.create_batch(\n 3, source_study_version__i_is_deprecated=True, source_study_version__study=self.study)\n for ds in deprecated_datasets:\n factories.SourceTraitFactory.create_batch(5, source_dataset=ds)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in deprecated_datasets:\n self.assertNotIn(dataset, table.data)\n for dataset in self.datasets:\n self.assertIn(dataset, table.data)",
"def test_no_updated_traits(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n for trait in self.source_traits_v3:\n self.assertNotIn(trait, table.data)",
"def test_context_deprecated_trait_with_two_new_versions(self):\n study = factories.StudyFactory.create()\n source_study_version1 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=1)\n source_study_version2 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=2)\n source_study_version3 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=False, i_version=3)\n source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)\n source_dataset2 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version2,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n source_dataset3 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version3,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n trait1 = factories.SourceTraitFactory.create(source_dataset=source_dataset1)\n trait2 = factories.SourceTraitFactory.create(\n source_dataset=source_dataset2,\n i_detected_type=trait1.i_detected_type,\n i_dbgap_type=trait1.i_dbgap_type,\n i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,\n i_dbgap_variable_version=trait1.i_dbgap_variable_version,\n i_dbgap_comment=trait1.i_dbgap_comment,\n i_dbgap_unit=trait1.i_dbgap_unit,\n i_n_records=trait1.i_n_records,\n i_n_missing=trait1.i_n_missing,\n i_is_unique_key=trait1.i_is_unique_key,\n i_are_values_truncated=trait1.i_are_values_truncated\n )\n trait3 = factories.SourceTraitFactory.create(\n source_dataset=source_dataset3,\n i_detected_type=trait1.i_detected_type,\n i_dbgap_type=trait1.i_dbgap_type,\n i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,\n i_dbgap_variable_version=trait1.i_dbgap_variable_version,\n i_dbgap_comment=trait1.i_dbgap_comment,\n i_dbgap_unit=trait1.i_dbgap_unit,\n i_n_records=trait1.i_n_records,\n i_n_missing=trait1.i_n_missing,\n i_is_unique_key=trait1.i_is_unique_key,\n i_are_values_truncated=trait1.i_are_values_truncated\n )\n response = self.client.get(self.get_url(trait1.pk))\n context = response.context\n self.assertTrue(context['is_deprecated'])\n self.assertFalse(context['show_removed_text'])\n self.assertEqual(context['new_version_link'], trait3.get_absolute_url())\n self.assertContains(response, context['new_version_link'])\n self.assertNotContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"removed_deprecated_trait\">')\n self.assertContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"updated_deprecated_trait\">')",
"def test_no_deprecated_traits_in_table(self):\n # Set the ssv for three datasets to deprecated.\n for ds in self.datasets[1:3]:\n ssv = ds.source_study_version\n ssv.i_is_deprecated = True\n ssv.save()\n response = self.client.get(self.get_url())\n context = response.context\n table = context['source_dataset_table']\n for ds in self.datasets:\n if ds.source_study_version.i_is_deprecated:\n self.assertNotIn(ds, table.data)\n else:\n self.assertIn(ds, table.data)",
"def get_source_studies(self):\n return list(set([trait.source_dataset.source_study_version.study for trait in self.get_all_source_traits()]))",
"def test_context_deprecated_trait_with_no_newer_version(self):\n source_study_version1 = self.trait.source_dataset.source_study_version\n source_study_version1.i_is_deprecated = True\n source_study_version1.save()\n source_study_version2 = factories.SourceStudyVersionFactory.create(\n study=source_study_version1.study,\n i_is_deprecated=False,\n i_version=source_study_version1.i_version + 1\n )\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertTrue(context['is_deprecated'])\n self.assertTrue(context['show_removed_text'])\n self.assertIsNone(context['new_version_link'])\n self.assertContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"removed_deprecated_trait\">')\n self.assertNotContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"updated_deprecated_trait\">')",
"def apply_previous_tags(self, user):\n previous_study_version = self.get_previous_version()\n if previous_study_version is not None:\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n TaggedTrait = apps.get_model('tags', 'TaggedTrait')\n DCCReview = apps.get_model('tags', 'DCCReview')\n StudyResponse = apps.get_model('tags', 'StudyResponse')\n # Get the set of TaggedTraits from the previous study version.\n previous_tagged_traits = TaggedTrait.objects.non_archived().filter(\n trait__source_dataset__source_study_version=previous_study_version\n )\n # Raise an error if any of the previous taggedtraits have incomplete reviews.\n unreviewed_q = Q(dcc_review__isnull=True)\n no_response_q = Q(dcc_review__status=DCCReview.STATUS_FOLLOWUP) &\\\n Q(dcc_review__study_response__isnull=True) &\\\n Q(dcc_review__dcc_decision__isnull=True)\n no_decision_q = Q(dcc_review__status=DCCReview.STATUS_FOLLOWUP) &\\\n Q(dcc_review__study_response__status=StudyResponse.STATUS_DISAGREE) &\\\n Q(dcc_review__dcc_decision__isnull=True)\n incomplete_review_tagged_traits = previous_tagged_traits.filter(\n unreviewed_q | no_response_q | no_decision_q\n )\n if incomplete_review_tagged_traits.count() > 0:\n raise ValueError(INCOMPLETE_REVIEW_ERROR.format(''))\n # Get the set of variable accession numbers in the previous version that have tags applied them.\n previous_accessions_with_tags = previous_tagged_traits.values(\n trait_pk=F('trait__pk'),\n trait_accession=F('trait__i_dbgap_variable_accession')\n ).annotate(\n tt_count=Count('pk')\n ).filter(\n tt_count__gt=0\n ).values_list(\n 'trait_accession',\n flat=True\n ).distinct()\n traits_to_tag = SourceTrait.objects.filter(\n source_dataset__source_study_version=self,\n i_dbgap_variable_accession__in=previous_accessions_with_tags\n )\n for trait in traits_to_tag:\n trait.apply_previous_tags(user)"
] | [
"0.6562757",
"0.6290495",
"0.6281095",
"0.6281095",
"0.6224384",
"0.61902857",
"0.6117538",
"0.60669696",
"0.59755343",
"0.59755343",
"0.59755343",
"0.59755343",
"0.59755343",
"0.59755343",
"0.59033275",
"0.58881015",
"0.5698667",
"0.5668605",
"0.5618832",
"0.5610577",
"0.56091547",
"0.55634785",
"0.5542879",
"0.55196947",
"0.5511635",
"0.5505614",
"0.5481006",
"0.54006636",
"0.53402466",
"0.5308406"
] | 0.7811303 | 0 |
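The test snippets in the list above all hinge on excluding rows whose source study version is deprecated; a minimal sketch of a queryset helper that centralizes that exclusion (the class name and its attachment to the model are assumptions, not a confirmed API from the source):

    from django.db import models

    class CurrentQuerySet(models.QuerySet):
        def current(self):
            # Keep only rows tied to a non-deprecated study version.
            return self.exclude(source_study_version__i_is_deprecated=True)

    # Assumed attachment on the model:
    #   objects = CurrentQuerySet.as_manager()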
Return a queryset of SourceDatasets that are new in this version compared to past versions. | def get_new_sourcedatasets(self):
previous_study_version = self.get_previous_version()
SourceDataset = apps.get_model('trait_browser', 'SourceDataset')
if previous_study_version is not None:
qs = SourceDataset.objects.filter(source_study_version=self)
# We can probably write this with a join to be more efficient.
previous_dataset_accessions = SourceDataset.objects.filter(
source_study_version=previous_study_version
).values_list('i_accession', flat=True)
qs = qs.exclude(i_accession__in=previous_dataset_accessions)
return qs
else:
return SourceDataset.objects.none() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def test_no_deprecated_datasets_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source datasets and increment their versions. Link it to the new ssv.\n datasets2 = []\n for dataset in self.source_datasets:\n d2 = copy(dataset)\n d2.source_study_version = source_study_version2\n d2.i_id = dataset.i_id + len(self.source_datasets)\n d2.save()\n datasets2.append(d2)\n # Get results from the autocomplete view and make sure only the new versions are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(datasets2))\n for dataset in datasets2:\n self.assertIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertNotIn(dataset.i_id, returned_pks)",
"def test_no_deprecated_datasets_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source datasets and increment their versions. Link it to the new ssv.\n datasets2 = []\n for dataset in self.source_datasets:\n d2 = copy(dataset)\n d2.source_study_version = source_study_version2\n d2.i_id = dataset.i_id + len(self.source_datasets)\n d2.save()\n datasets2.append(d2)\n # Get results from the autocomplete view and make sure only the new versions are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(datasets2))\n for dataset in datasets2:\n self.assertIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertNotIn(dataset.i_id, returned_pks)",
"def test_no_deprecated_datasets_in_queryset(self):\n # Copy the source study version and increment it.\n source_study_version2 = copy(self.source_study_version)\n source_study_version2.i_version += 1\n source_study_version2.i_id += 1\n source_study_version2.save()\n # Make the old ssv deprecated.\n self.source_study_version.i_is_deprecated = True\n self.source_study_version.save()\n # Copy the source datasets and increment their versions. Link it to the new ssv.\n datasets2 = []\n for dataset in self.source_datasets:\n d2 = copy(dataset)\n d2.source_study_version = source_study_version2\n d2.i_id = dataset.i_id + len(self.source_datasets)\n d2.save()\n datasets2.append(d2)\n # Get results from the autocomplete view and make sure only the new versions are found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), len(datasets2))\n for dataset in datasets2:\n self.assertIn(dataset.i_id, returned_pks)\n for dataset in self.source_datasets:\n self.assertNotIn(dataset.i_id, returned_pks)",
"def test_does_not_compare_with_two_versions_ago(self): # noqa\n new_dataset_2 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_2)\n new_dataset_3 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_3,\n i_accession=new_dataset_2.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(new_dataset_3, table.data)",
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def _update_modified_since(self, timestamp):\n new_data_sources = [\n source\n for provider in self.data_source_providers\n for source in provider.get_data_sources_modified_since(timestamp)\n ]\n filtered_data_sources = self.get_filtered_configs(new_data_sources)\n invalid_data_sources = {ds._id for ds in new_data_sources} - {ds._id for ds in filtered_data_sources}\n self._add_data_sources_to_table_adapters(filtered_data_sources, invalid_data_sources)",
"def test_no_deprecated_datasets_in_queryset(self):\n models.SourceDataset.objects.all().delete()\n dataset_1 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)\n dataset_2 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=False)\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset_2.pk])",
"def test_no_deprecated_datasets_in_queryset(self):\n models.SourceDataset.objects.all().delete()\n dataset_1 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)\n dataset_2 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=False)\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset_2.pk])",
"def test_no_deprecated_datasets_in_queryset(self):\n models.SourceDataset.objects.all().delete()\n dataset_1 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=True)\n dataset_2 = factories.SourceDatasetFactory.create(source_study_version__i_is_deprecated=False)\n url = self.get_url()\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset_2.pk])",
"def select_versions(self):\n return []",
"def test_no_previous_study_version(self):\n self.study_version_1.delete()\n self.study_version_2.delete()\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertEqual(len(table.data), 0)\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, table.data)",
"def test_no_updated_datasets(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in self.datasets_v3:\n self.assertNotIn(dataset, table.data)",
"def test_no_deprecated_datasets_in_table(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for dataset in self.datasets_v1:\n self.assertNotIn(dataset, table.data)\n for dataset in self.datasets_v2:\n self.assertNotIn(dataset, table.data)",
"def test_context_deprecated_dataset_with_newer_version(self):\n study = factories.StudyFactory.create()\n source_study_version1 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=1)\n source_study_version2 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=False, i_version=2)\n source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)\n source_dataset2 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version2,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n response = self.client.get(self.get_url(source_dataset1.pk))\n context = response.context\n self.assertTrue(context['is_deprecated'])\n self.assertFalse(context['show_removed_text'])\n self.assertEqual(context['new_version_link'], source_dataset2.get_absolute_url())\n self.assertContains(response, context['new_version_link'])\n self.assertNotContains(\n response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"removed_deprecated_dataset\">')\n self.assertContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"updated_deprecated_dataset\">')",
"def get_queryset(self):\n self.object = self.get_object()\n return self.object.sticker_set.all().order_by('-modification_date')",
"def test_no_removed_datasets(self):\n removed_dataset_1 = factories.SourceDatasetFactory.create(source_study_version=self.study_version_1)\n removed_dataset_2 = factories.SourceDatasetFactory.create(\n source_study_version=self.study_version_2, i_accession=removed_dataset_1.i_accession)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n self.assertNotIn(removed_dataset_1, table.data)\n self.assertNotIn(removed_dataset_2, table.data)\n self.assertEqual(len(table.data), 0)",
"def test_context_deprecated_dataset_with_no_newer_version(self):\n source_study_version1 = self.dataset.source_study_version\n source_study_version1.i_is_deprecated = True\n source_study_version1.save()\n source_study_version2 = factories.SourceStudyVersionFactory.create(\n study=source_study_version1.study,\n i_is_deprecated=False,\n i_version=source_study_version1.i_version + 1\n )\n response = self.client.get(self.get_url(self.dataset.pk))\n context = response.context\n self.assertTrue(context['is_deprecated'])\n self.assertTrue(context['show_removed_text'])\n self.assertIsNone(context['new_version_link'])\n self.assertContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"removed_deprecated_dataset\">')\n self.assertNotContains(\n response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"updated_deprecated_dataset\">')",
"def changesets(self):\r\n return changesets.Changesets(self)",
"def determine_changed_sources(self, other: DevJarSignature) -> set[str]:\n res = {}\n all_keys = set(self.modified_sources.keys()) | set(other.modified_sources.keys())\n for key in all_keys:\n if modified_sources.get(key) != other.get(key):\n res.add(key)\n if not res:\n assert self.changed_sources == other.changed_sources\n return res",
"def get_queryset(self):\n self.object = self.get_object()\n return self.object.sticker_set.filter(sprint__isnull=True).order_by(\n '-modification_date'\n )",
"def get_diffuse_sources(self, src_sel):\n extended = self._select_and_freeze(self.extended_sources, src_sel)\n for s in extended: # this seems redundant, but was necessary\n s.model.free[:] = False if src_sel.frozen(s) else s.free[:]\n sources.validate(s,self.nside, None)\n s.smodel = s.model\n \n return self.get_global_sources(src_sel.skydir()), extended",
"def compare_with_old_data_query(self):\n raise NotImplementedError",
"def test_context_deprecated_dataset_with_two_new_versions(self):\n study = factories.StudyFactory.create()\n source_study_version1 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=1)\n source_study_version2 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=2)\n source_study_version3 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=False, i_version=3)\n source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)\n source_dataset2 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version2,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n source_dataset3 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version3,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n response = self.client.get(self.get_url(source_dataset1.pk))\n context = response.context\n self.assertTrue(context['is_deprecated'])\n self.assertFalse(context['show_removed_text'])\n self.assertEqual(context['new_version_link'], source_dataset3.get_absolute_url())\n self.assertContains(response, context['new_version_link'])\n self.assertNotContains(\n response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"removed_deprecated_dataset\">')\n self.assertContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"updated_deprecated_dataset\">')",
"def test_includes_two_new_datasets(self):\n new_datasets = factories.SourceDatasetFactory.create_batch(2, source_study_version=self.study_version_3)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_dataset_table']\n for new_dataset in new_datasets:\n self.assertIn(new_dataset, table.data)",
"def list_all_dataset_versions(self):\n assert self.dataset_id, 'dataset_id required!'\n return self._datasets_request('GET', dataset_id=self.dataset_id, versions_request=True)",
"def diff_sets(self):\n self.difference = self.urls_from_json - self.urls_from_datastore",
"def _deleted_sources(self):\r\n # We compute the list lazily.\r\n if self._lazy_deleted_sources is None:\r\n with self.context.new_workunit('find-deleted-sources'):\r\n if os.path.exists(self._analysis_file):\r\n products = self._analysis_parser.parse_products_from_path(self._analysis_file)\r\n buildroot = get_buildroot()\r\n old_sources = products.keys() # Absolute paths.\r\n self._lazy_deleted_sources = [os.path.relpath(src, buildroot) for src in old_sources\r\n if not os.path.exists(src)]\r\n else:\r\n self._lazy_deleted_sources = []\r\n return self._lazy_deleted_sources",
"def get_queryset(self):\n\t\treturn Fishery.objects.filter(updated_date__lte=timezone.now())",
"def find_months_needing_update(\n self,\n product_name: str,\n only_those_newer_than: datetime,\n ) -> Iterable[Tuple[date, int]]:\n dataset_type = self.get_dataset_type(product_name)\n\n # Find the most-recently updated datasets and group them by month.\n return sorted(\n (month.date(), count)\n for month, count in self._engine.execute(\n select(\n [\n func.date_trunc(\n \"month\", datetime_expression(dataset_type.metadata_type)\n ).label(\"month\"),\n func.count(),\n ]\n )\n .where(ODC_DATASET.c.dataset_type_ref == dataset_type.id)\n .where(dataset_changed_expression() > only_those_newer_than)\n .group_by(\"month\")\n .order_by(\"month\")\n )\n )"
] | [
"0.6738969",
"0.6347704",
"0.6347704",
"0.6347704",
"0.6267281",
"0.6221294",
"0.6144477",
"0.58199567",
"0.58199567",
"0.58199567",
"0.55291677",
"0.5501097",
"0.5491006",
"0.5466397",
"0.5387165",
"0.5362838",
"0.5351967",
"0.5326075",
"0.5303191",
"0.52874905",
"0.528596",
"0.5254557",
"0.52523917",
"0.5226349",
"0.52182853",
"0.5209019",
"0.5205359",
"0.51771575",
"0.5175765",
"0.51069736"
] | 0.7908077 | 0 |
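The get_new_sourcedatasets snippet above notes that the i_accession__in exclusion could be written as a join; a sketch of that variant using a correlated Exists subquery (the standalone function and its arguments are illustrative, with model and field names assumed to match the snippet):

    from django.db.models import Exists, OuterRef

    def new_datasets(SourceDataset, current_version, previous_version):
        if previous_version is None:
            return SourceDataset.objects.none()
        seen_before = SourceDataset.objects.filter(
            source_study_version=previous_version,
            i_accession=OuterRef('i_accession'),
        )
        return (
            SourceDataset.objects
            .filter(source_study_version=current_version)
            .annotate(seen=Exists(seen_before))
            .filter(seen=False)
        )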
Apply tags from traits in the previous version of this Study to traits from this version. | def apply_previous_tags(self, user):
previous_study_version = self.get_previous_version()
if previous_study_version is not None:
SourceTrait = apps.get_model('trait_browser', 'SourceTrait')
TaggedTrait = apps.get_model('tags', 'TaggedTrait')
DCCReview = apps.get_model('tags', 'DCCReview')
StudyResponse = apps.get_model('tags', 'StudyResponse')
# Get the set of TaggedTraits from the previous study version.
previous_tagged_traits = TaggedTrait.objects.non_archived().filter(
trait__source_dataset__source_study_version=previous_study_version
)
# Raise an error if any of the previous taggedtraits have incomplete reviews.
unreviewed_q = Q(dcc_review__isnull=True)
no_response_q = Q(dcc_review__status=DCCReview.STATUS_FOLLOWUP) &\
Q(dcc_review__study_response__isnull=True) &\
Q(dcc_review__dcc_decision__isnull=True)
no_decision_q = Q(dcc_review__status=DCCReview.STATUS_FOLLOWUP) &\
Q(dcc_review__study_response__status=StudyResponse.STATUS_DISAGREE) &\
Q(dcc_review__dcc_decision__isnull=True)
incomplete_review_tagged_traits = previous_tagged_traits.filter(
unreviewed_q | no_response_q | no_decision_q
)
if incomplete_review_tagged_traits.count() > 0:
raise ValueError(INCOMPLETE_REVIEW_ERROR.format(''))
            # Get the set of variable accession numbers in the previous version that have tags applied to them.
previous_accessions_with_tags = previous_tagged_traits.values(
trait_pk=F('trait__pk'),
trait_accession=F('trait__i_dbgap_variable_accession')
).annotate(
tt_count=Count('pk')
).filter(
tt_count__gt=0
).values_list(
'trait_accession',
flat=True
).distinct()
traits_to_tag = SourceTrait.objects.filter(
source_dataset__source_study_version=self,
i_dbgap_variable_accession__in=previous_accessions_with_tags
)
for trait in traits_to_tag:
trait.apply_previous_tags(user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_previous_tags(self, creator):\n TaggedTrait = apps.get_model('tags', 'TaggedTrait')\n DCCReview = apps.get_model('tags', 'DCCReview')\n StudyResponse = apps.get_model('tags', 'StudyResponse')\n previous_trait = self.get_previous_version()\n if previous_trait is not None:\n for old_tagged_trait in previous_trait.all_taggedtraits.non_archived():\n # Raise an error if the review of the previous trait is incomplete.\n # Check for unreviewed\n if not hasattr(old_tagged_trait, 'dcc_review'):\n raise ValueError(INCOMPLETE_REVIEW_ERROR.format(' (unreviewed)'))\n elif old_tagged_trait.dcc_review.status == DCCReview.STATUS_FOLLOWUP:\n if hasattr(old_tagged_trait.dcc_review, 'study_response'):\n # Check for missing DCCDecision after disagree StudyResponse.\n if old_tagged_trait.dcc_review.study_response.status == StudyResponse.STATUS_DISAGREE \\\n and not hasattr(old_tagged_trait.dcc_review, 'dcc_decision'):\n raise ValueError(INCOMPLETE_REVIEW_ERROR.format(\n ' (no decision after disagree study response)'))\n else:\n # Check for missing StudyResponse and DCCDecision\n if not hasattr(old_tagged_trait.dcc_review, 'dcc_decision'):\n raise ValueError(INCOMPLETE_REVIEW_ERROR.format(\n ' (no response or decision after followup review)'))\n try:\n # Check if it already exists.\n self.all_taggedtraits.non_archived().get(tag=old_tagged_trait.tag)\n except TaggedTrait.DoesNotExist:\n # Create a new TaggedTrait.\n new_tagged_trait = TaggedTrait(\n tag=old_tagged_trait.tag, trait=self, creator=creator, previous_tagged_trait=old_tagged_trait)\n new_tagged_trait.full_clean()\n new_tagged_trait.save()\n # Create a DCCReview with confirmed status.\n dcc_review = DCCReview(\n tagged_trait=new_tagged_trait, status=DCCReview.STATUS_CONFIRMED, creator=creator)\n dcc_review.full_clean()\n dcc_review.save()",
"def test_no_deprecated_traits_with_same_version_number(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n current_study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_version=5)\n old_study_version = factories.SourceStudyVersionFactory.create(\n study=self.study, i_version=current_study_version.i_version, i_is_deprecated=True)\n current_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)\n old_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)\n current_tagged_trait = TaggedTraitFactory.create(trait=current_trait, tag=tag)\n old_tagged_trait = TaggedTraitFactory.create(trait=old_trait, tag=tag)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], 1)",
"def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()",
"def test_no_deprecated_traits(self):\n TaggedTrait.objects.all().delete()\n tag = TagFactory.create()\n current_study_version = factories.SourceStudyVersionFactory.create(study=self.study, i_version=5)\n old_study_version = factories.SourceStudyVersionFactory.create(\n study=self.study, i_version=4, i_is_deprecated=True)\n current_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=current_study_version)\n old_trait = factories.SourceTraitFactory.create(source_dataset__source_study_version=old_study_version)\n current_tagged_trait = TaggedTraitFactory.create(trait=current_trait, tag=tag)\n old_tagged_trait = TaggedTraitFactory.create(trait=old_trait, tag=tag)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n tag_count_row = context['tag_counts'][0]\n self.assertEqual(tag_count_row['tt_count'], 1)",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def setTags(self,newtags):\n\t\tself.tags = newtags;",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def _transform_known_tags(self):\n self.missing_known_tags = []\n\n for k, tf in self._known_tags.items():\n v = self.tags.get(k, [])\n if not v:\n self.missing_known_tags.append(k)\n continue\n\n if len(v) > 1:\n raise Exception(f\"multiple instances of tag {k}\")\n\n setattr(self, k, v[0])",
"def tags():",
"def tags_changed(self, tags):\n pass",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def add_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags.union(set(tags))\n return cp",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all source traits and make 5 new ones, so there are only 5 for study 1.\n models.SourceTrait.objects.all().delete()\n self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def test_context_deprecated_trait_with_two_new_versions(self):\n study = factories.StudyFactory.create()\n source_study_version1 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=1)\n source_study_version2 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=True, i_version=2)\n source_study_version3 = factories.SourceStudyVersionFactory.create(\n study=study, i_is_deprecated=False, i_version=3)\n source_dataset1 = factories.SourceDatasetFactory.create(source_study_version=source_study_version1)\n source_dataset2 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version2,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n source_dataset3 = factories.SourceDatasetFactory.create(\n source_study_version=source_study_version3,\n i_accession=source_dataset1.i_accession,\n i_version=source_dataset1.i_version,\n i_is_subject_file=source_dataset1.i_is_subject_file,\n i_study_subject_column=source_dataset1.i_study_subject_column,\n i_dbgap_description=source_dataset1.i_dbgap_description\n )\n trait1 = factories.SourceTraitFactory.create(source_dataset=source_dataset1)\n trait2 = factories.SourceTraitFactory.create(\n source_dataset=source_dataset2,\n i_detected_type=trait1.i_detected_type,\n i_dbgap_type=trait1.i_dbgap_type,\n i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,\n i_dbgap_variable_version=trait1.i_dbgap_variable_version,\n i_dbgap_comment=trait1.i_dbgap_comment,\n i_dbgap_unit=trait1.i_dbgap_unit,\n i_n_records=trait1.i_n_records,\n i_n_missing=trait1.i_n_missing,\n i_is_unique_key=trait1.i_is_unique_key,\n i_are_values_truncated=trait1.i_are_values_truncated\n )\n trait3 = factories.SourceTraitFactory.create(\n source_dataset=source_dataset3,\n i_detected_type=trait1.i_detected_type,\n i_dbgap_type=trait1.i_dbgap_type,\n i_dbgap_variable_accession=trait1.i_dbgap_variable_accession,\n i_dbgap_variable_version=trait1.i_dbgap_variable_version,\n i_dbgap_comment=trait1.i_dbgap_comment,\n i_dbgap_unit=trait1.i_dbgap_unit,\n i_n_records=trait1.i_n_records,\n i_n_missing=trait1.i_n_missing,\n i_is_unique_key=trait1.i_is_unique_key,\n i_are_values_truncated=trait1.i_are_values_truncated\n )\n response = self.client.get(self.get_url(trait1.pk))\n context = response.context\n self.assertTrue(context['is_deprecated'])\n self.assertFalse(context['show_removed_text'])\n self.assertEqual(context['new_version_link'], trait3.get_absolute_url())\n self.assertContains(response, context['new_version_link'])\n self.assertNotContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"removed_deprecated_trait\">')\n self.assertContains(response, '<div class=\"alert alert-danger\" role=\"alert\" id=\"updated_deprecated_trait\">')",
"def add_tags(event):\n\n add_tags_from_presets()",
"def convert_all_tags(self):\n self.ratings = self.tag_converter.convert_ratings()\n self.categories = self.tag_converter.convert_categories()\n self.classes = self.tag_converter.convert_classes()\n\n old_characters = self.sql.read_table_to_dict(self.working_original, \"characters\")\n self.characters = self._convert_characters(old_characters)",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def tags(self, tags):\n self._tags = tags",
"def remove_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags - set(tags)\n return cp",
"def reset_traits ( self, traits = None, **metadata ):\n unresetable = []\n if traits is None:\n traits = self.trait_names( **metadata )\n for name in traits:\n try:\n delattr( self, name )\n except AttributeError:\n unresetable.append( name )\n return unresetable",
"def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)",
"def test_does_not_return_study_with_deprecated_tagged_trait_for_given_tag_with_only(self):\n tag = TagFactory.create()\n study = self.studies[0]\n tagged_trait = TaggedTraitFactory.create(\n tag=tag, trait__source_dataset__source_study_version__study=study,\n trait__source_dataset__source_study_version__i_is_deprecated=True)\n get_data = {'q': '', 'forward': ['{\"tag\":\"' + str(tag.pk) + '\",' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertNotIn(study.pk, pks)",
"def test_returns_all_studies_with_tagged_traits_for_multiple_tags(self):\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def add_traits(self, **traits):\n super().add_traits(**traits)\n for name, trait in traits.items():\n if trait.get_metadata('sync'):\n self.keys.append(name)\n self.send_state(name)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)"
] | [
"0.67002356",
"0.5798329",
"0.5789118",
"0.5662857",
"0.5602511",
"0.5436931",
"0.53949434",
"0.53218657",
"0.53102833",
"0.52779347",
"0.5261458",
"0.51905525",
"0.5164367",
"0.5156515",
"0.5149465",
"0.51158303",
"0.50836796",
"0.50706464",
"0.5012632",
"0.49430275",
"0.49430275",
"0.49430275",
"0.4939927",
"0.49303246",
"0.49178135",
"0.48987475",
"0.48916516",
"0.488279",
"0.48569843",
"0.48569843"
] | 0.7317276 | 0 |
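Since apply_previous_tags raises ValueError when the previous study version has incompletely reviewed tagged traits, and otherwise creates new tags trait by trait, a caller would normally run it inside a transaction; a hypothetical usage sketch (the wrapper function is not from the source):

    from django.db import transaction

    def carry_tags_forward(new_study_version, user):
        # A ValueError aborts the block, and atomic() guarantees no partial
        # set of newly created tagged traits is left behind.
        with transaction.atomic():
            new_study_version.apply_previous_tags(user)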
Custom save method to autoset full_accession and dbgap_link. | def save(self, *args, **kwargs):
self.full_accession = self.set_full_accession()
self.dbgap_link = self.set_dbgap_link()
super(SourceDataset, self).save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceTrait, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceStudyVersion, self).save(*args, **kwargs)",
"def save(self, db):\n pass",
"def save_db(self) -> None:",
"def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None):\n # If the short url wasn't specified\n if not self.short_url:\n # We pass the model instance that is being saved\n self.short_url = create_shortened_url(self)\n\n super().save(force_insert, force_update, using, update_fields)",
"def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )",
"def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)",
"def save():",
"def save(self, *args, **kwargs):\n pass",
"def _save(self):\n for attrib in self.attribs:\n setattr(self, attrib, getattr(self.obj, attrib))",
"def save_without_setting_canon(self, *args, **kwargs):\n super(DocumentSetFieldEntry, self).save(*args, **kwargs)",
"def save(self):\n self.db.commit()",
"def save():\n pass",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self):\n self.__db.commit()",
"def post_save_access_attempt(self, instance, **kwargs):",
"def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()",
"def save(self):\n\n pass",
"def db_for_write(self, model, **hints):\n return None",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self, obj):",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs):\n return"
] | [
"0.7209021",
"0.71559155",
"0.6393143",
"0.6363915",
"0.6306047",
"0.62788814",
"0.5810558",
"0.57993186",
"0.5722098",
"0.57087",
"0.570662",
"0.56933665",
"0.567312",
"0.5658324",
"0.5658324",
"0.56490225",
"0.56343424",
"0.56319344",
"0.56160986",
"0.56087613",
"0.5595003",
"0.5595003",
"0.5595003",
"0.5595003",
"0.5595003",
"0.5594704",
"0.558285",
"0.558285",
"0.558285",
"0.55825764"
] | 0.73582214 | 0 |
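One caveat about the save() override above: Django's QuerySet.update() bypasses Model.save(), so the derived fields are only refreshed through instance saves; a short illustrative sketch (object names are hypothetical):

    # Bulk update skips save(), leaving full_accession / dbgap_link stale:
    SourceDataset.objects.filter(pk=dataset.pk).update(i_version=3)

    # An instance save re-runs set_full_accession() and set_dbgap_link():
    dataset.i_version = 3
    dataset.save()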
Gets the absolute URL of the detail page for a given SourceDataset instance. | def get_absolute_url(self):
return reverse('trait_browser:source:datasets:detail', kwargs={'pk': self.pk}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_url(self):\n\t\treturn reverse('source-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return ('publication_detail', (), {'slug': self.slug})",
"def details_url(self):\n if self._data.get('details_url'):\n path = self._data.get('details_url')\n try:\n path, hash_ = path.split('#')\n hash_ = '#' + hash_\n except ValueError:\n hash_ = ''\n return '{}?from_activity={}{}'.format(path, self._data.get('id'), hash_)",
"def get_absolute_url(self):\n return reverse('report', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('book_details', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('bl-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('trait_browser:source:traits:detail', kwargs={'pk': self.pk})",
"def build_details_url(self, params={}):\n\n if 'url' in params:\n url = params['url']\n url += '?page=' + str(int(params['page'])) + '&sort=' + str(params['sort'])\n return url",
"def get_absolute_url(self):\n return reverse('patient-detail', args=[str(self.id)])",
"def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())",
"def get_absolute_url(self):\n\n return reverse('performer-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('csv-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)]) \n # Returns an URL that can be used to access a detail record for this model \n # (for this to work we will have to \n # -- Define a URL mapping that has the name 'book-detail' (name='book-detail')\n # -- Define an associated view.\n # -- Define an associated template.",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse(\"jewelry_detail\", args = [str(self.id)])",
"def url(self) -> str:\n return self.DATASET_URLS[self.name]",
"def get_absolute_url(self):\n return reverse('properties:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n\n return reverse('caretaker-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('relation-detail', args=[str(self.id)])",
"def get_absolute_url(self) -> str:\n return reverse(\"cv_detail\", kwargs={\"pk\": self.pk})",
"def get_absolute_url(self):\n return reverse('structured-name-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('injury-detail', args=[str(self.id)])",
"def get_dataset_search_url(self):\n return reverse('trait_browser:source:studies:pk:datasets:search', kwargs={'pk': self.pk})",
"def get_full_url(self):\n full_url = home_page + self.source_link\n return full_url",
"def get_absolute_url(self):\n return ('project_detail', (), {\n 'name': self.title\n })",
"def get_url(self) -> str:\n\n return self.__page_url",
"def get_absolute_url(self):\n\n url = reverse('comicsite.views.page', args=[self.comicsite.short_name,self.title])\n return url"
] | [
"0.7320892",
"0.71635264",
"0.6769658",
"0.66936725",
"0.6665065",
"0.66157633",
"0.66050607",
"0.658594",
"0.65774614",
"0.65592086",
"0.6530993",
"0.6519312",
"0.6510253",
"0.65062296",
"0.65050864",
"0.64783823",
"0.64783823",
"0.6441861",
"0.6411554",
"0.6410512",
"0.64024705",
"0.6401186",
"0.63942915",
"0.6383769",
"0.6374953",
"0.6364216",
"0.6356463",
"0.6355419",
"0.6339379",
"0.6324928"
] | 0.75452507 | 0 |
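The detail URL above is resolved with Django's reverse(); a quick usage sketch of the same namespaced route outside the model (the pk value is illustrative):

    from django.urls import reverse

    url = reverse('trait_browser:source:datasets:detail', kwargs={'pk': 42})
    # In a template the method is typically consumed as:
    #   <a href="{{ dataset.get_absolute_url }}">{{ dataset }}</a>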
Automatically set full_accession from the dataset's dbGaP identifiers. | def set_full_accession(self):
return self.DATASET_ACCESSION.format(
self.i_accession, self.i_version, self.source_study_version.i_participant_set) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_full_accession(self):\n return self.VARIABLE_ACCESSION.format(\n self.i_dbgap_variable_accession, self.i_dbgap_variable_version,\n self.source_dataset.source_study_version.i_participant_set)",
"def set_full_accession(self):\n return self.STUDY_VERSION_ACCESSION.format(self.study.phs, self.i_version, self.i_participant_set)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)",
"def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceTrait, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceStudyVersion, self).save(*args, **kwargs)",
"def set_dbgap_link(self):\n return self.VARIABLE_URL.format(\n self.source_dataset.source_study_version.full_accession, self.i_dbgap_variable_accession)",
"def update_gpdbid_file(array):\n \n standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())\n\n # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace\n writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())",
"def alias_grfn_vars(self, src_fullid: str, tgt_fullid: str):\n self.fullid_to_grfn_id[src_fullid] = self.fullid_to_grfn_id[tgt_fullid]",
"def mod_family_accession(family_accession):\n\n return family_accession[:family_accession.index('.')]",
"def set_auto_dc_offset(self, *args, **kwargs):\n return _uhd_swig.usrp_source_set_auto_dc_offset(self, *args, **kwargs)",
"def default_global_location(database):\n\n for dataset in get_many(database, *[equals(\"location\", None)]):\n dataset[\"location\"] = \"GLO\"\n return database",
"def gnomad_genomes_af(self):\n af = [gnomad_genomes.af for gnomad_genomes in self.gnomad_genomes]\n return af[0] if af else None",
"def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)",
"def generate_submission_accessions_data(submission_id=str()):\n\n columns = list()\n data_set = list()\n\n try:\n repository = Submission().get_repository_type(submission_id=submission_id)\n except Exception as e:\n Logger().exception(e)\n return dict(dataSet=data_set,\n columns=columns,\n message=\"Could not retrieve repository type\"\n )\n\n try:\n submission_record = Submission().get_collection_handle().find_one({'_id': ObjectId(submission_id)},\n {\"accessions\": 1})\n except Exception as e:\n Logger().exception(e)\n return dict(dataSet=data_set,\n columns=columns,\n message=\"Could not retrieve submission record\"\n )\n\n accessions = submission_record.get(\"accessions\", dict())\n\n if accessions:\n # -----------COLLATE ACCESSIONS FOR ENA SEQUENCE READS----------\n if repository == \"ena\":\n columns = [{\"title\": \"Accession\"}, {\"title\": \"Alias\"},\n {\"title\": \"Comment\"}, {\"title\": \"Type\"}]\n\n for key, value in accessions.items():\n if isinstance(value, dict): # single accession instance expected\n data_set.append(\n [value[\"accession\"], value[\"alias\"], str(), key])\n elif isinstance(value, list): # multiple accession instances expected\n for v in value:\n if key == \"sample\":\n data_set.append(\n [v[\"sample_accession\"], v[\"sample_alias\"], v[\"biosample_accession\"], key])\n else:\n data_set.append(\n [v[\"accession\"], v[\"alias\"], str(), key])\n\n elif repository == \"ena-ant\":\n # -----------COLLATE ACCESSIONS FOR ENA ANNOTATIONS----------\n columns = [{\"title\": \"Accession\"}, {\"title\": \"Alias\"},\n {\"title\": \"Comment\"}, {\"title\": \"Type\"}]\n\n for key, value in accessions.items():\n if isinstance(value, dict): # single accession instance expected\n data_set.append(\n [value[\"accession\"], value[\"alias\"], str(), key])\n elif isinstance(value, list): # multiple accession instances expected\n for v in value:\n if key == \"sample\":\n try:\n data_set.append(\n [v[\"sample_accession\"], v[\"sample_alias\"], v[\"biosample_accession\"], key])\n except:\n pass\n else:\n try:\n data_set.append(\n [v[\"accession\"], v[\"alias\"], str(), key])\n except:\n pass\n\n elif repository == \"figshare\":\n # -----------COLLATE ACCESSIONS FOR FIGSHARE REPO----------\n columns = [{\"title\": \"Accession\"}, {\"title\": \"Alias\"},\n {\"title\": \"Comment\"}, {\"title\": \"Type\"}]\n\n for idx, value in enumerate(accessions):\n data_set.append([value, \"Figshare File: \" +\n str(idx + 1), str(), str()])\n\n elif repository == \"dataverse\":\n # -----------COLLATE ACCESSIONS FOR DATAVERSE REPO----------\n columns = [{\"title\": \"DOI\"}, {\"title\": \"Dataverse\"}, {\"title\": \"Dataverse Alias\"},\n {\"title\": \"Dataset Title\"}]\n\n data_set.append(\n [accessions.get(\"dataset_doi\", str()), accessions.get(\"dataverse_title\", str()),\n accessions.get(\"dataverse_alias\", str()),\n accessions.get(\"dataset_title\", str())]\n )\n\n elif repository == \"dspace\":\n columns = [{\"title\": \"Description\"}, {\"title\": \"Format\"}, {\"title\": \"Filesize\"}, {\"title\": \"Retrieve Link\"},\n {\"title\": \"Metadata Link\"}]\n for a in accessions:\n link_ref = a[\"dspace_instance\"] + a[\"link\"]\n meta_link = '<a target=\"_blank\" href=\"' + \\\n a[\"meta_url\"] + '\">' + a[\"meta_url\"] + '</a>'\n retrieve_link = '<a href=\"' + link_ref + '/retrieve\">' + link_ref + '</a>'\n data_set.append(\n [a[\"description\"], a[\"format\"], (hurrysize(a[\"sizeBytes\"])),\n retrieve_link,\n meta_link]\n )\n\n elif repository == \"ckan\":\n columns = [{\"title\": \"Title\"}, 
{\"title\": \"Metadata Link\"}, {\n \"title\": \"Resource Link\"}, {\"title\": \"Name\"}]\n retrieve_link = '<a target=\"_blank\" href=\"' + accessions[\"url\"] + '/dataset/' + accessions[\n \"dataset_name\"] + '\">' + accessions[\"url\"] + '/dataset/' + accessions[\"dataset_name\"] + '</a>'\n meta_link = '<a target=\"_blank\" href=\"' + accessions[\"repo_url\"] + 'package_show?id=' + accessions[\n 'dataset_id'] + '\">' + 'Show Metadata' + '</a>'\n data_set.append(\n [accessions[\"dataset_title\"], meta_link,\n retrieve_link, accessions[\"dataset_name\"]]\n )\n\n return_dict = dict(dataSet=data_set,\n columns=columns,\n repository=repository\n )\n\n return return_dict",
"def idpac(self):\n return self._idpac",
"def to_index(self, full: bool = False):\n if not full:\n self.load()\n ds = self.data.argo.point2profile()\n df = (\n ds.drop_vars(set(ds.data_vars) - set([\"PLATFORM_NUMBER\"]))\n .drop_dims(\"N_LEVELS\")\n .to_dataframe()\n )\n df = (\n df.reset_index()\n .rename(\n columns={\n \"PLATFORM_NUMBER\": \"wmo\",\n \"LONGITUDE\": \"longitude\",\n \"LATITUDE\": \"latitude\",\n \"TIME\": \"date\",\n }\n )\n .drop(columns=\"N_PROF\")\n )\n df = df[[\"date\", \"latitude\", \"longitude\", \"wmo\"]]\n\n else:\n # Instantiate and load an IndexFetcher:\n index_loader = ArgoIndexFetcher(mode=self._mode,\n src=self._src,\n ds=self._dataset_id,\n **self.fetcher_kwargs)\n if self._AccessPoint == 'float':\n index_loader.float(self._AccessPoint_data['wmo']).load()\n if self._AccessPoint == 'profile':\n index_loader.profile(self._AccessPoint_data['wmo'], self._AccessPoint_data['cyc']).load()\n if self._AccessPoint == 'region':\n # Convert data box to index box (remove depth info):\n index_box = self._AccessPoint_data['box'].copy()\n del index_box[4:6]\n index_loader.region(index_box).load()\n df = index_loader.index\n\n if self._loaded and self._mode == 'standard' and len(self._index) != len(df):\n warnings.warn(\"Loading a full index in 'standard' user mode may lead to more profiles in the \"\n \"index than reported in data.\")\n\n # Possibly replace the light index with the full version:\n if not self._loaded or self._request == self.__repr__():\n self._index = df\n\n return df",
"def move_dbgap_link_to_dataset(apps, schema_editor):\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n for dataset in SourceDataset.objects.all():\n dataset.dbgap_link = dataset.sourcetrait_set.first().dbgap_dataset_link\n dataset.save()",
"def select_first_organism(cazy_data, gbk_accessions, replaced_taxa_logger):\n for accession in tqdm(gbk_accessions, desc='Selecting the first retrieved organism'):\n selected_kingdom = list(cazy_data[accession]['taxonomy'])[0].kingdom\n selected_organism = list(cazy_data[accession]['taxonomy'])[0].organism\n\n for tax_tuple in list(cazy_data[accession]['taxonomy'])[1:]:\n replaced_taxa_logger.warning(\n f\"{accession}\\t\"\n f\"SELECTED: {selected_kingdom} -- {selected_organism}\"\n f\"\\tREPLACED: {tax_tuple.kingdom}: {tax_tuple.organism}\"\n )\n\n cazy_data[accession][\"kingdom\"] = selected_kingdom\n cazy_data[accession][\"organism\"] = selected_organism\n\n return cazy_data",
"def policy_alias(self):",
"def _cmd_access(args):\n access_arr = access.do_access(args.fa_fname, args.exclude, args.min_gap_size)\n tabio.write(access_arr, args.output, \"bed3\")",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def set_db_id(self):\n if self._id is None:\n db = self._core.get_db()\n self._id = db.get_seq_next('OPE_GEN')\n return self._id",
"def nucl_acid_ext(self, nucl_acid_ext):\n self.logger.debug(\"In 'nucl_acid_ext' setter.\")\n\n self._nucl_acid_ext = nucl_acid_ext",
"def writeProteinAccessions( self ):\n\n self.logger.info( 'writeProteinAccessions: START' )\n\n self.logger.info( 'writeProteinAccessions: insert file will be proteinAccessionsInsert.psql' )\n\n proteinAccessionFile = self.openInsertFile( 'proteinAccessionsInsert.psql')\n\n for proteinIdentification, proteinIdRelationalDatabase in self.proteinsInserted.iteritems():\n accessionId = self.accessionsInserted[ proteinIdentification ]\n self.writeFile( proteinAccessionFile, 'protein_accessions', [ str(proteinIdRelationalDatabase), str(accessionId) ] )\n\n\n self.logger.info( 'writeProteinAccessions: DONE' )",
"def set_perm(\n self, mapper: Mapper, connection: Connection, target: \"BaseDatasource\"\n ) -> None:\n try:\n target_get_perm = target.get_perm()\n except DatasetInvalidPermissionEvaluationException:\n logger.warning(\"Dataset has no database refusing to set permission\")\n return\n link_table = target.__table__\n if target.perm != target_get_perm:\n connection.execute(\n link_table.update()\n .where(link_table.c.id == target.id)\n .values(perm=target_get_perm)\n )\n target.perm = target_get_perm\n\n if (\n hasattr(target, \"schema_perm\")\n and target.schema_perm != target.get_schema_perm()\n ):\n connection.execute(\n link_table.update()\n .where(link_table.c.id == target.id)\n .values(schema_perm=target.get_schema_perm())\n )\n target.schema_perm = target.get_schema_perm()\n\n pvm_names = []\n if target.__tablename__ in {\"dbs\", \"clusters\"}:\n pvm_names.append((\"database_access\", target_get_perm))\n else:\n pvm_names.append((\"datasource_access\", target_get_perm))\n if target.schema:\n pvm_names.append((\"schema_access\", target.get_schema_perm()))\n\n # TODO(bogdan): modify slice permissions as well.\n for permission_name, view_menu_name in pvm_names:\n permission = self.find_permission(permission_name)\n view_menu = self.find_view_menu(view_menu_name)\n pv = None\n\n if not permission:\n permission_table = (\n self.permission_model.__table__ # pylint: disable=no-member\n )\n connection.execute(\n permission_table.insert().values(name=permission_name)\n )\n permission = self.find_permission(permission_name)\n self.on_permission_after_insert(mapper, connection, permission)\n if not view_menu:\n view_menu_table = (\n self.viewmenu_model.__table__ # pylint: disable=no-member\n )\n connection.execute(view_menu_table.insert().values(name=view_menu_name))\n view_menu = self.find_view_menu(view_menu_name)\n self.on_view_menu_after_insert(mapper, connection, view_menu)\n\n if permission and view_menu:\n pv = (\n self.get_session.query(self.permissionview_model)\n .filter_by(permission=permission, view_menu=view_menu)\n .first()\n )\n if not pv and permission and view_menu:\n permission_view_table = (\n self.permissionview_model.__table__ # pylint: disable=no-member\n )\n connection.execute(\n permission_view_table.insert().values(\n permission_id=permission.id, view_menu_id=view_menu.id\n )\n )\n permission = self.find_permission_view_menu(\n permission_name, view_menu_name\n )\n self.on_permission_view_after_insert(mapper, connection, permission)",
"def identify_primary_reference_datasets(conn, log):\n\n primary_ref = {}\n\n primary_ref['refimg_id_ip'] = phot_db.find_primary_reference_image_for_field(conn)\n\n query = 'SELECT facility, filter, software FROM reference_images WHERE refimg_id=\"'+str(primary_ref['refimg_id_ip'])+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n\n primary_ref['facility_id'] = t['facility'][0]\n primary_ref['software_id'] = t['software'][0]\n\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"ip\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref['ip'] = t['filter_id'][0]\n\n for f in ['rp', 'gp']:\n query = 'SELECT filter_id, filter_name FROM filters WHERE filter_name=\"'+f+'\"'\n t = phot_db.query_to_astropy_table(conn, query, args=())\n primary_ref[f] = t['filter_id'][0]\n\n query = 'SELECT refimg_id FROM reference_images WHERE facility=\"'+str(primary_ref['facility_id'])+\\\n '\" AND software=\"'+str(primary_ref['software_id'])+\\\n '\" AND filter=\"'+str(t['filter_id'][0])+'\"'\n qs = phot_db.query_to_astropy_table(conn, query, args=())\n\n if len(qs) > 0:\n primary_ref['refimg_id_'+f] = qs['refimg_id'][0]\n else:\n log.info('WARNING: Database contains no primary reference image data in filter '+f)\n\n log.info('Identified the primary reference datasets for this field as:')\n for key, value in primary_ref.items():\n log.info(str(key)+' = '+str(value))\n\n return primary_ref",
"def cafa4_mapping() -> pd.DataFrame:\n # List of the paths considered in the function\n paths = [\n \"cafa4.tar.gz\",\n \"CAFA4-export/TargetFiles/sp_species.9606.tfa\"\n ]\n if not any(os.path.exists(path) for path in paths):\n # Downloading the url to the given path\n download(\n url=\"https://www.biofunctionprediction.org/cafa-targets/CAFA4-export.tgz\",\n path=paths[0]\n )\n # Extracting the acquire\n shutil.unpack_archive(paths[0], \".\")\n # Delete the archived file\n os.remove(paths[0])\n # Parse the file and retrieve the IDs from the fasta file\n f = open(paths[1], \"r\")\n df = pd.DataFrame(\n (\n line[1:-1].split(\" \")\n for line in f.readlines()\n if line.startswith(\">\")\n ),\n columns=[\n \"cafa4_id\",\n \"uniprot_id\"\n ]\n )\n f.close()\n # Return the obtained IDs\n return df",
"def set_purged(*args):\n return _ida_frame.set_purged(*args)",
"def get_CG_id(gid, conn):\n\n get_CG = ('SELECT DISTINCT dx.accession '\n 'FROM feature f, feature_dbxref fd, db, dbxref dx '\n 'WHERE f.feature_id = fd.feature_id AND fd.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'FlyBase Annotation IDs\\' AND '\n 'dx.accession NOT LIKE \\'%%-%%\\' AND fd.is_current = \\'t\\' AND f.uniquename = %s')\n CG_id = connect(get_CG,gid,conn)\n return(CG_id)"
] | [
"0.69682556",
"0.6225129",
"0.5945919",
"0.54449517",
"0.5160533",
"0.5116956",
"0.5025976",
"0.49088228",
"0.47938767",
"0.47933",
"0.4758362",
"0.47100648",
"0.4704993",
"0.46769676",
"0.4670057",
"0.46583503",
"0.46069586",
"0.46018344",
"0.45937777",
"0.45649543",
"0.4514244",
"0.4508002",
"0.4486649",
"0.44831064",
"0.44762844",
"0.44680777",
"0.44563785",
"0.44379377",
"0.44372433",
"0.44052467"
] | 0.72782624 | 0 |
Automatically set dbgap_link from dbGaP identifier information. | def set_dbgap_link(self):
return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_dbgap_link(self):\n return self.VARIABLE_URL.format(\n self.source_dataset.source_study_version.full_accession, self.i_dbgap_variable_accession)",
"def update_gpdbid_file(array):\n \n standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())\n\n # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace\n writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())",
"def move_dbgap_link_to_dataset(apps, schema_editor):\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n for dataset in SourceDataset.objects.all():\n dataset.dbgap_link = dataset.sourcetrait_set.first().dbgap_dataset_link\n dataset.save()",
"def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)",
"def set_db_id(self):\n if self._id is None:\n db = self._core.get_db()\n self._id = db.get_seq_next('OPE_GEN')\n return self._id",
"def generate_link_attr(d: Dict):\n d.update({\"link\": urljoin(\"https://vdb-kasf1i23nr1kl2j4.rapid7.com/v1/content/\", d.get(\"identifier\"))})",
"def _add_bridge_db_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding BridgeDB identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n\n for uniq_id in tqdm.tqdm(map_dict, total=len(map_dict)):\n parts = uniq_id.split(':')\n db = parts[0]\n uid = parts[-1]\n\n if db in constants.BRIDGEDB_MAP:\n # list of other DBs to query from\n q_dbs = constants.BRIDGEDB_MAP[db]\n for q_db in q_dbs:\n try:\n r = r_session.get(\n 'http://webservice.bridgedb.org/Human/xrefs/{}/{}?dataSource={}'.format(\n constants.BRIDGEDB_KEYS[db],\n uid,\n constants.BRIDGEDB_KEYS[q_db]\n )\n )\n except Exception as x:\n print(\"%s: %s\" % (uniq_id, x.__class__.__name__))\n continue\n\n result = r.text\n if len(result) > 0:\n add_ids = [line.split('\\t')[0] for line in result.split('\\n')[:-1]]\n new_ids = ['{}:{}'.format(q_db, i) for i in add_ids if i.isalnum()]\n for n_id in new_ids:\n new_id = '{}:{}'.format(q_db, n_id)\n map_dict[uniq_id].add(new_id)\n\n time.sleep(0.5)\n\n return map_dict",
"def move_dbgap_link_to_study_version(apps, schema_editor):\n SourceStudyVersion = apps.get_model('trait_browser', 'SourceStudyVersion')\n for ssv in SourceStudyVersion.objects.all():\n ssv.dbgap_link = ssv.sourcedataset_set.first().sourcetrait_set.first().dbgap_study_link\n ssv.save()",
"def update_link_id(self, data):\n\n self.data[data['project_name']]['nodes'][data['first']]['ports'][data['first_port']]['link_id'] = data['link_id']\n self.data[data['project_name']]['nodes'][data['second']]['ports'][data['second_port']]['link_id'] = data['link_id']",
"def get_CG_id(gid, conn):\n\n get_CG = ('SELECT DISTINCT dx.accession '\n 'FROM feature f, feature_dbxref fd, db, dbxref dx '\n 'WHERE f.feature_id = fd.feature_id AND fd.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'FlyBase Annotation IDs\\' AND '\n 'dx.accession NOT LIKE \\'%%-%%\\' AND fd.is_current = \\'t\\' AND f.uniquename = %s')\n CG_id = connect(get_CG,gid,conn)\n return(CG_id)",
"def init_linkage():\n for case in AutoCase.objects.all():\n case.autolink()\n case.save()",
"def writeGpDbidFile(directory, dbid, logger=None):\n d = GpDbidFile(directory, logger=logger)\n d.dbid = dbid\n d.write_gp_dbid()",
"def _adjust_connection_URL(self, text):\n dbname = self.options.db\n parts = text.split('/')\n\n # Preserve the quotes if present\n if parts[-1].endswith(\"'\"):\n dbname += \"'\"\n\n parts[-1] = dbname\n return '/'.join(parts)",
"def set_platform_gs_prefix(self, gs_url):\n self.buildurl_gs_prefix = gs_url # pragma: no cover",
"def gbr_dl(self, gbr_dl):\n\n self._gbr_dl = gbr_dl",
"def gnomad_link(variant_obj):\n url_template = (\"http://gnomad.broadinstitute.org/variant/{this[chromosome]}-\"\n \"{this[position]}-{this[reference]}-{this[alternative]}\")\n return url_template.format(this=variant_obj)",
"def remove_dataset_dbgap_link(apps, schema_editor):\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n for dataset in SourceDataset.objects.all():\n dataset.dbgap_link = ''\n dataset.save()",
"def dbgap_server():\n settings = {\n 'app_id': __name__,\n 'api_bases': ['https://dbgap-api.ncbi.nlm.nih.gov/fhir/x1']\n }\n return DispatchingFHIRClient(settings=settings)",
"def connect(dbapi_connection, connection_record):\n connection_record.info['pid'] = os.getpid()",
"def setDB(dbname):\n global DBNAME\n DBNAME = dbname",
"def save_pgsql_conf(self, db):\n hookenv.log(\n \"Checking related DB information before saving PostgreSQL configuration\",\n hookenv.DEBUG,\n )\n if db:\n hookenv.log(\"Saving related PostgreSQL database config\", hookenv.DEBUG)\n self.kv.set(\"pgsql_host\", db.master.host)\n self.kv.set(\"pgsql_port\", db.master.port)\n self.kv.set(\"pgsql_db\", db.master.dbname)\n self.kv.set(\"pgsql_user\", db.master.user)\n self.kv.set(\"pgsql_pass\", db.master.password)\n self.kv.flush()",
"def set_deafult_gw(self, args):\n\n gw_ip = ip_address(args.ip)\n gw_info = UplinkGatewayInfo()\n gw_info.update_ip(str(gw_ip))\n print(\"set Default gw IP to %s\" % gw_info.get_gw_ip())",
"def db_name(self, db_name):\n\n self._db_name = db_name",
"def _process_dbxref(self):\n\n raw = '/'.join((self.rawdir, 'dbxref'))\n logger.info(\"processing dbxrefs\")\n line_counter = 0\n\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (dbxref_id, db_id, accession, version, description, url) = line\n # dbxref_id\tdb_id\taccession\tversion\tdescription\turl\n # 1\t2\tSO:0000000\t\"\"\n\n db_ids = { # the databases to fetch\n 50: 'PMID', # pubmed\n 68: 'RO', # obo-rel\n 71: 'FBdv', # FBdv\n 74: 'FBbt', # FBbt\n # 28:, # genbank\n 30: 'OMIM', # MIM\n # 38, # ncbi\n 75: 'ISBN', # ISBN\n 46: 'PMID', # PUBMED\n 51: 'ISBN', # isbn\n 52: 'SO', # so\n # 76, # http\n 77: 'PMID', # PMID\n 80: 'FBcv', # FBcv\n # 95, # MEDLINE\n 98: 'REACT', # Reactome\n 103: 'CHEBI', # Chebi\n 102: 'MESH', # MeSH\n 106: 'OMIM', # OMIM\n 105: 'KEGG-path', # KEGG pathway\n 107: 'DOI', # doi\n 108: 'CL', # CL\n 114: 'CHEBI', # CHEBI\n 115: 'KEGG', # KEGG\n 116: 'PubChem', # PubChem\n # 120, # MA???\n 3: 'GO', # GO\n 4: 'FlyBase', # FlyBase\n # 126, # URL\n 128: 'PATO', # PATO\n # 131, # IMG\n 2: 'SO', # SO\n 136: 'MESH', # MESH\n 139: 'CARO', # CARO\n 140: 'NCBITaxon', # NCBITaxon\n # 151, # MP ???\n 161: 'DOI', # doi\n 36: 'BDGP', # BDGP\n # 55, # DGRC\n # 54, # DRSC\n # 169, # Transgenic RNAi project???\n 231: 'RO', # RO ???\n 180: 'NCBIGene', # entrezgene\n # 192, # Bloomington stock center\n 197: 'UBERON', # Uberon\n 212: 'ENSEMBL', # Ensembl\n # 129, # GenomeRNAi\n 275: 'PMID', # PubMed\n 286: 'PMID', # pmid\n 264: 'HGNC',\n # 265: 'OMIM', # OMIM_Gene\n 266: 'OMIM', # OMIM_Phenotype\n 300: 'DOID', # DOID\n 302: 'MESH', # MSH\n 347: 'PMID', # Pubmed\n }\n\n if accession.strip() != '' and int(db_id) in db_ids:\n # scrub some identifiers here\n m = re.match(\n r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):',\n accession)\n if m:\n accession = re.sub(m.group(1)+r'\\:', '', accession)\n elif re.match(\n r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)',\n accession):\n continue\n elif re.match(r'\\:', accession): # starts with a colon\n accession = re.sub(r'\\:', '', accession)\n elif re.search(r'\\s', accession):\n # skip anything with a space\n # logger.debug(\n # 'dbxref %s accession has a space: %s',\n # dbxref_id, accession)\n continue\n\n if re.match(r'http', accession):\n did = accession.strip()\n else:\n prefix = db_ids.get(int(db_id))\n did = ':'.join((prefix, accession.strip()))\n if re.search(r'\\:', accession) and prefix != 'DOI':\n logger.warning(\n 'id %s may be malformed; skipping', did)\n\n self.dbxrefs[dbxref_id] = {db_id: did}\n\n elif url != '':\n self.dbxrefs[dbxref_id] = {db_id: url.strip()}\n else:\n continue\n\n # the following are some special cases that we scrub\n if int(db_id) == 2 \\\n and accession.strip() == 'transgenic_transposon':\n # transgenic_transposable_element\n self.dbxrefs[dbxref_id] = {db_id: 'SO:0000796'}\n\n line_counter += 1\n\n return",
"def __write_link_node_info_db(self, link_node_name, link_node):\n if \"crate_id\" not in link_node:\n return\n if \"lc1_node_id\" not in link_node:\n return\n slot = 2\n if \"analog_slot\" in link_node: \n slot = link_node[\"analog_slot\"]\n path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, link_node[\"cpu_name\"], link_node[\"crate_id\"], slot)\n\n macros={\"P\":link_node['app_prefix'],\n \"MPS_LINK_NODE_SIOC\":link_node['sioc'],\n \"MPS_LINK_NODE_ID\":link_node['lc1_node_id'],\n \"MPS_LINK_NODE_TYPE\":str(self.__link_node_type_to_number(link_node['type'])),\n \"MPS_CONFIG_VERSION\":self.config_version,\n \"MPS_CRATE_LOCATION\":link_node['physical'],\n \"MPS_CPU_NAME\":link_node['cpu_name']}\n self.__write_epics_db(path=path, template_name=\"link_node_info.template\", macros=macros)",
"def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c",
"def thousandg_link(variant_obj, build=None):\n dbsnp_id = variant_obj.get('dbsnp_id')\n build = build or 37\n\n if not dbsnp_id:\n return None\n\n if build == 37:\n url_template = (\"http://grch37.ensembl.org/Homo_sapiens/Variation/Explore\"\n \"?v={};vdb=variation\")\n else:\n url_template = (\"http://www.ensembl.org/Homo_sapiens/Variation/Explore\"\n \"?v={};vdb=variation\")\n\n return url_template.format(dbsnp_id)",
"def cal_guid(self):\n return 'setup' + str(self.id) + '@lnldb'",
"def change_adp(self, network: str):\r\n self.ip = network\r\n self.adp = self.ipv4_adp[network]\r\n self.mac = self.ipv4_mac[network].replace('-', ':')\r\n # print(self.adp, self.ip, self.mac)\r",
"def link_protein(self, protein):\n if self.protein is None:\n self.protein = protein\n protein.link_gene(self)"
] | [
"0.59333754",
"0.5608507",
"0.55640787",
"0.5525023",
"0.5480624",
"0.5438552",
"0.5386511",
"0.52137035",
"0.5121944",
"0.49963725",
"0.49718344",
"0.49320933",
"0.4931212",
"0.49076504",
"0.4907363",
"0.48347703",
"0.48316193",
"0.4791243",
"0.4774549",
"0.47308743",
"0.4696292",
"0.4659212",
"0.46402574",
"0.4599474",
"0.45938677",
"0.45796216",
"0.45794648",
"0.4577518",
"0.4552418",
"0.45456272"
] | 0.56734145 | 1 |
Get HTML for the dataset name linked to the dataset's detail page, with the description as a popover. | def get_name_link_html(self, max_popover_words=80):
if not self.i_dbgap_description:
description = '—'
else:
description = Truncator(self.i_dbgap_description).words(max_popover_words)
return POPOVER_URL_HTML.format(url=self.get_absolute_url(), popover=description,
name=self.dataset_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_dataset_details(name, analyst):\n\n template = None\n allowed_sources = user_sources(analyst)\n dataset_object = Dataset.objects(name = name,\n source__name__in=allowed_sources).first()\n if not dataset_object:\n error = (\"Either no data exists for this dataset\"\n \" or you do not have permission to view it.\")\n template = \"error.html\"\n args = {'error': error}\n return template, args\n\n dataset_object.sanitize_sources(username=\"%s\" % analyst,\n sources=allowed_sources)\n\n # remove pending notifications for user\n remove_user_from_notification(\"%s\" % analyst, dataset_object.id, 'Dataset')\n\n # subscription\n subscription = {\n 'type': 'Dataset',\n 'id': dataset_object.id,\n 'subscribed': is_user_subscribed(\"%s\" % analyst,\n 'Dataset',\n dataset_object.id),\n }\n\n #objects\n objects = dataset_object.sort_objects()\n\n #relationships\n relationships = dataset_object.sort_relationships(\"%s\" % analyst, meta=True)\n\n # relationship\n relationship = {\n 'type': 'Datset',\n 'value': dataset_object.id\n }\n\n #comments\n comments = {'comments': dataset_object.get_comments(),\n 'url_key':dataset_object.name}\n\n # favorites\n favorite = is_user_favorite(\"%s\" % analyst, 'Dataset', dataset_object.id)\n\n # services\n service_list = get_supported_services('Dataset')\n\n # analysis results\n service_results = dataset_object.get_analysis_results()\n\n args = {'dataset': dataset_object,\n 'objects': objects,\n 'relationships': relationships,\n 'comments': comments,\n 'favorite': favorite,\n 'relationship': relationship,\n 'subscription': subscription,\n 'name': dataset_object.name,\n 'service_list': service_list,\n 'service_results': service_results}\n\n return template, args",
"def get_template_tag(self):\n return \"{% dataset \" + self.cleantitle + \" %}\"",
"def get_name_link_html(self, max_popover_words=80):\n url_text = \"{{% url 'trait_browser:harmonized:traits:detail' pk={} %}} \".format(\n self.harmonized_trait_set_version.pk)\n if not self.i_description:\n description = '—'\n else:\n description = Truncator(self.i_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=url_text, popover=description, name=self.trait_flavor_name)",
"def get_name_link_html(self):\n url_text = \"{{% url 'trait_browser:source:studies:pk:detail' pk={} %}} \".format(self.pk)\n return URL_HTML.format(url=url_text, name=self.i_study_name)",
"def __str__(self):\n return self.df_description",
"def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)",
"def get_name_link_html(self, max_popover_words=80):\n if not self.i_description:\n description = '—'\n else:\n description = Truncator(self.i_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=self.get_absolute_url(), popover=description,\n name=self.i_trait_name)",
"def get_description(self, field_name='DESCRIPTION'):\n return self.get_html(field_name)",
"def get_description(obj):\n if not isinstance(obj.data, dict):\n return \"No description found.\"\n abstract = \"\"\n authors = []\n categories = []\n final_identifiers = []\n\n # Get identifiers\n dois = get_value(obj.data, \"dois.value\", [])\n if dois:\n final_identifiers.extend(dois)\n\n system_no = get_value(obj.data, \"external_system_numbers.value\", [])\n if system_no:\n final_identifiers.extend(system_no)\n\n # Get subject categories, adding main one first. Order matters here.\n record_categories = get_value(obj.data, \"arxiv_eprints.categories\", []) + \\\n get_value(obj.data, \"subject_terms.term\", [])\n for category_list in record_categories:\n if isinstance(category_list, list):\n categories.extend(category_list)\n else:\n categories.append(category_list)\n categories = list(OrderedDict.fromkeys(categories)) # Unique only\n abstract = get_value(obj.data, \"abstracts.value\", [\"\"])[0]\n authors = obj.data.get(\"authors\", [])\n return render_template('inspire_workflows/styles/harvesting_record.html',\n object=obj,\n authors=authors,\n categories=categories,\n abstract=abstract,\n identifiers=final_identifiers)",
"def build_html_description_string(self, dataset_metadata_dict, variable_attributes, point_data):\n\n logger.debug(variable_attributes)\n\n description_string = '<![CDATA['\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey Name', dataset_metadata_dict['dataset_title'])\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Survey ID', dataset_metadata_dict['ga_survey_id'])\n\n logger.debug(\"self.point_field_list: {}\".format(self.point_field_list))\n for field in self.point_field_list:\n # skip obsno, lat, long in field_list\n if field in ['obsno', 'latitude', 'longitude']:\n continue\n \n logger.debug(field)\n logger.debug(point_data[field])\n if variable_attributes[field].get('units'):\n description_string = description_string + '<p><b>{0}: </b>{1} {2}</p>'.format(\n variable_attributes[field].get('long_name'),\n point_data[field],\n variable_attributes[field].get('units'))\n else:\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format(\n variable_attributes[field].get('long_name'),\n point_data[field])\n\n if dataset_metadata_dict['dataset_link']:\n description_string = description_string + '<p><b>{0}: </b>{1}</p>'.format('Link to dataset', str(\n dataset_metadata_dict['dataset_link']))\n \n description_string = description_string + ']]>'\n return description_string",
"def render_name(self, record):\n return format_html(\n \"\"\"<a href=\"#\" class=\"js-view-edit\"\n data-toggle=\"tooltip\" data-url=\"{0}\"\n title=\"{1}\">{2}</a>\"\"\",\n reverse('table:view_edit', kwargs={'pk': record['id']}),\n _('Change the columns present in the view'),\n record['name'],\n )",
"def get_description(self):",
"def get_description(self):\n try:\n long_desc = self.__data[\"descriptions\"][\"MM - \" + self.__name][\"text\"].replace(\"<p>\", \"\").split('</p>')[0]\n return long_desc\n except:\n return None",
"def description(self):\r\n if \"description\" in self.data:\r\n return self.data[\"description\"]\r\n return None",
"def description(self) -> str:\n return self.data['description']",
"def description(self):\n return self.visual_desc",
"def DescriptiveName(self):\r\n\t\treturn self._get_attribute('descriptiveName')",
"def url(self) -> str:\n return self.DATASET_URLS[self.name]",
"def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")",
"def short_description(self):\n return self.name",
"def description(self):\n return self.data[\"attributes\"][\"description\"]",
"def get_description(self) -> str:\n pass",
"def html_data_table(self):\n return \"XXX\"",
"def get_description(self):\n pass",
"def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore",
"def get_details_title(mat_dict):\n title = \"# Detail section for {} (COF {}) v{}\".format(mat_dict['name_conventional'], mat_dict['mat_id'],\n mat_dict['workflow_version'])\n return title",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description",
"def get_description(self):\n return self.description"
] | [
"0.6402002",
"0.6215334",
"0.6196248",
"0.6154834",
"0.5915565",
"0.58727175",
"0.5843847",
"0.5768029",
"0.57574415",
"0.57549495",
"0.57196856",
"0.57194483",
"0.5701436",
"0.56974375",
"0.5693088",
"0.5690996",
"0.5678522",
"0.5666621",
"0.5646503",
"0.56364393",
"0.5629016",
"0.56125176",
"0.56035256",
"0.55957925",
"0.5591015",
"0.5583076",
"0.5580875",
"0.5580875",
"0.5580875",
"0.5580875"
] | 0.67517895 | 0 |
Find the most recent version of this dataset. | def get_latest_version(self):
study = self.source_study_version.study
current_study_version = self.source_study_version.study.get_latest_version()
if current_study_version is None:
return None
# Find the same dataset associated with the current study version.
try:
current_dataset = SourceDataset.objects.get(
source_study_version=current_study_version,
i_accession=self.i_accession
)
except ObjectDoesNotExist:
return None
return current_dataset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version",
"def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n # get the latest possible Version instance by using the workspace path\n if version is None:\n version = self.get_version_from_project_dir()\n\n return version",
"def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None",
"def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)",
"def get_latest_version(self, name):\n return self.filter(name=name).order_by('schema_version').last()",
"def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None",
"def get_latest(self, name):\n return self._scalar_history.get_latest(name)[1]",
"def latest_data(self):\n if self._data:\n return self._data[0]\n return None",
"def get_latest_version(self):\n current_study_version = self.source_dataset.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same trait associated with the current study version.\n try:\n current_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=current_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_trait",
"def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]",
"def get_latest_saved(self):\n doc = (get_latest_released_app_doc(self.domain, self._id)\n or get_latest_build_doc(self.domain, self._id))\n return self.__class__.wrap(doc) if doc else None",
"def get_last_revision(self):\n return self.index.get_index_revision(self.name)",
"def latest(self):\n return self._latest",
"def get_latest(self, name):\n return self._scalar_history.get_latest(name)",
"def last_revision(self):\n return self.revision_set.order_by(\"created_on\").last()",
"def get_version_from_recent_files(self):\n # full_path = self.fusion_prefs[\"LastCompFile\"]\n # return self.get_version_from_full_path(full_path)\n\n version = None\n rfm = RecentFileManager()\n\n try:\n recent_files = rfm[self.name]\n except KeyError:\n logger.debug('no recent files')\n recent_files = None\n\n if recent_files is not None:\n for i in range(len(recent_files)):\n version = self.get_version_from_full_path(recent_files[i])\n if version is not None:\n break\n\n logger.debug(\"version from recent files is: %s\" % version)\n\n return version",
"def load_latest_save(self, device=None):\n return torch.load(str(self.previous_saves()[-1].absolute()), map_location=device)",
"def latest(self):\n return self.journal_data[self.latest_id]",
"def get_latest_version(db_path):\n\t\t\n\t\t# create a file system and return latest version\n\t\treturn VersionedFile(db_path).get_latest_version()",
"def get_latest_revision(self):\n revision_list = self.get_revision_list()\n if revision_list:\n return revision_list[-1]\n else:\n raise NoRevisionsExistError()",
"def latest_data(self):\n if self._data:\n return self._data\n return None",
"def version(self):\n self._get_latest_content()\n return self._data.get('version', None)",
"def get_latest(self) -> tuple:\n raise NotImplementedError",
"def latest(self):\n return self.series.tail(1)[0]",
"def last(self):\n rows = sorted(self, key=lambda x: x.date)\n return rows[-1]",
"def get_latest_model():\n return get_models()[-1]",
"def get_latest_benchmark():\n\n benchmark_paths = glob.glob(\"./.benchmarks/*/*.json\")\n dates = [\n \"\".join(_b.split(\"/\")[-1].split(\"_\")[2:4]) for _b in benchmark_paths\n ]\n benchmarks = {date: value for date, value in zip(dates, benchmark_paths)}\n\n dates.sort()\n latest = dates[-1]\n benchmark_latest = benchmarks[latest]\n\n return benchmark_latest",
"def get_previous_version(self):\n return self.get_previous_versions().first()",
"def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None"
] | [
"0.72924036",
"0.7165979",
"0.7094863",
"0.689178",
"0.68007094",
"0.67894995",
"0.6763486",
"0.67255384",
"0.67026746",
"0.6646939",
"0.66431236",
"0.66066355",
"0.6586733",
"0.658399",
"0.6573558",
"0.6563115",
"0.6465068",
"0.64497256",
"0.6445124",
"0.64347154",
"0.640898",
"0.6402666",
"0.6345631",
"0.6243336",
"0.6223911",
"0.6209436",
"0.6207515",
"0.6197313",
"0.61589617",
"0.61508745"
] | 0.789287 | 0 |
Gets a list of trait_flavor_names for harmonized traits in this trait set version. | def get_trait_names(self):
return self.harmonizedtrait_set.values_list('trait_flavor_name', flat=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_trait_flavor_name(self):\n return '{}_{}'.format(self.i_trait_name, self.harmonized_trait_set_version.harmonized_trait_set.i_flavor)",
"def all_trait_names ( self ):\n return self.__class_traits__.keys()",
"def trait_names ( self, **metadata ):\n return self.traits( **metadata ).keys()",
"def list_flavors(cls):\n return cls.dbdriver.list_flavors()",
"def get_flavors_white_list(self):\n return self._sanitize(CONF.powervc.flavor_white_list)",
"def flavors(self, **query):\n return self._list(_flavor.Flavor, **query)",
"def display_flavors(self):\n for flavor in self.flavors:\n print(f\"- {flavor}\")",
"def get_flavors_black_list(self):\n return self._sanitize(CONF.powervc.flavor_black_list)",
"def copyable_trait_names ( self, **metadata ):\n return self.trait_names( **metadata )",
"def featureNames(self):\n return [feature.name for feature in self.features]",
"def class_trait_names ( cls, **metadata ):\n return cls.class_traits( **metadata ).keys()",
"def flavors(self, details=True):\n flv = _flavor.FlavorDetail if details else _flavor.Flavor\n return list(self._list(flv, paginated=True))",
"def getFeatureClassNames(self):\n return self.featureClasses.keys()",
"def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)",
"def editable_traits ( self ):\n names = self.trait_names( type = _is_not_event )\n names.sort()\n return names",
"def factory_names(self):\n return list(self._class_name_class_dict.keys())",
"def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))",
"def monomer_names(self):\n output = set()\n for item in self.monomers():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def get_feature_names(self):\n if isinstance(self.featurizers, list):\n return [self.feature_name]\n return self.featurizers(\"get feature names\")",
"def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)",
"def get_all_traits(schema_obj):\n\n traits = []\n for vendor in schema_obj.vendor_list:\n for trait in vendor.trait_list:\n traits.append(trait)\n return traits",
"def names(self):\n return list(item.name for item in self.mechanisms)",
"def get_hero_list(self):\n out_list = []\n for key, _ in self._heroes.items():\n out_list.append(key)\n out_list.sort()\n return out_list",
"def flavors(self, **kwargs):\n if kwargs is None:\n result = self.get_list(self.cloudman.compute.flavors(),\n kind=\"flavor\")\n if \"name\" in kwargs:\n result = self.flavor(name=kwargs['name'])\n\n else:\n result = self.get_list(self.cloudman.compute.flavors(**kwargs),\n kind=\"flavor\")\n\n return result",
"def get_feature_names(self):\n return [self.__class__.__name__]",
"def show_flavors():\n return get_flavors()",
"def flavors(self, **kwargs):\n raise NotImplementedError",
"def speciesNames(self):\n nsp = self.nSpecies()\n return map(self.speciesName,range(nsp))",
"def get_hd_types(self):\r\n return self._arm.get_hd_types()",
"def get_all_habits(self):\n return self.habits"
] | [
"0.67714554",
"0.6423445",
"0.62995416",
"0.6120722",
"0.60873955",
"0.5738161",
"0.5715752",
"0.57114977",
"0.5705768",
"0.56979",
"0.56829286",
"0.564801",
"0.5582354",
"0.55700886",
"0.5551664",
"0.5513089",
"0.54857224",
"0.54564124",
"0.5443423",
"0.5440488",
"0.54381365",
"0.54342645",
"0.54241055",
"0.5393772",
"0.5382477",
"0.5376634",
"0.5369453",
"0.53667367",
"0.53632677",
"0.5315514"
] | 0.8556329 | 0 |
Gets the absolute URL of the detail page for a given HarmonizedTraitSet instance. | def get_absolute_url(self):
return reverse('trait_browser:harmonized:traits:detail', kwargs={'pk': self.pk}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_url(self):\n return self.harmonized_trait_set_version.get_absolute_url()",
"def get_absolute_url(self):\n return reverse('trait_browser:source:traits:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('trait_browser:source:datasets:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('teacher-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('teacher-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse_lazy('matterapps_detail', kwargs={'slug': self.slug,})",
"def get_absolute_url(self):\n return reverse('tournament-details', args=[self.uuid])",
"def get_absolute_url(self):\n return reverse('tour-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n return reverse('performer-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return ('publication_detail', (), {'slug': self.slug})",
"def get_absolute_url(self):\n return reverse('patient-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('relation-detail', args=[str(self.id)])",
"def detail_url(reteta_id):\n return reverse('reteta:reteta-detail', args=[reteta_id])",
"def get_absolute_url(self):\n return reverse('bleedinfo-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n return reverse('caretaker-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n return reverse('kid-detail', args=[str(self.id)])",
"def get_absolute_url(self):\r\n return \"{0}page1/\".format(self.get_short_url())",
"def get_absolute_url(self):\n return reverse('bl-detail', args=[str(self.id)])",
"def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)",
"def get_absolute_url(self):\n return reverse(\"jewelry_detail\", args = [str(self.id)])",
"def get_absolute_url(self):\n return reverse('properties:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n\n url = reverse('comicsite.views.page', args=[self.comicsite.short_name,self.title])\n return url",
"def get_absolute_url(self):\n # TODO not implemented yet\n return self.slug",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('articulo-detalle', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('articulo-detalle', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('post-detail', args=[str(self.slug)])",
"def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])"
] | [
"0.72508293",
"0.70623827",
"0.6548072",
"0.64680207",
"0.59867966",
"0.59867966",
"0.59520835",
"0.59289813",
"0.592246",
"0.5918289",
"0.58953965",
"0.58434725",
"0.5838052",
"0.5814749",
"0.57711065",
"0.57669204",
"0.57653147",
"0.575038",
"0.57430536",
"0.5742923",
"0.572632",
"0.57137394",
"0.57021755",
"0.5698646",
"0.56911474",
"0.56911474",
"0.56790537",
"0.56790537",
"0.5670592",
"0.5667114"
] | 0.7418204 | 0 |
Get a queryset of all the SourceTrait components for this harmonization unit (age, batch, or source). | def get_all_source_traits(self):
return self.component_source_traits.all() | self.component_batch_traits.all() | self.component_age_traits.all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def get_source_studies(self):\n return list(set([trait.source_dataset.source_study_version.study for trait in self.get_all_source_traits()]))",
"def get_all_techniques(src, source_name, tactic=None):\n filters = [\n Filter(\"type\", \"=\", \"attack-pattern\"),\n Filter(\"external_references.source_name\", \"=\", source_name),\n ]\n if tactic:\n filters.append(Filter('kill_chain_phases.phase_name', '=', tactic))\n\n results = src.query(filters)\n return remove_deprecated(results)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all source traits and make 5 new ones, so there are only 5 for study 1.\n models.SourceTrait.objects.all().delete()\n self.source_traits = factories.SourceTraitFactory.create_batch(5, source_dataset=self.source_dataset)\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def test_returns_all_traits_with_two_taggable_studies(self):\n # Delete all but five source traits, so that there are 5 from each study.\n models.SourceTrait.objects.exclude(i_dbgap_variable_accession__in=TEST_PHVS[:5]).delete()\n self.source_traits = list(models.SourceTrait.objects.all())\n study2 = factories.StudyFactory.create()\n self.user.profile.taggable_studies.add(study2)\n source_traits2 = factories.SourceTraitFactory.create_batch(\n 5, source_dataset__source_study_version__study=study2)\n # Get results from the autocomplete view and make sure only the correct study is found.\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n returned_pks = get_autocomplete_view_ids(response)\n # Make sure that there's only one page of results.\n self.assertTrue(models.SourceTrait.objects.all().count() <= 10)\n self.assertEqual(len(returned_pks), len(self.source_traits + source_traits2))\n for trait in source_traits2:\n self.assertIn(trait.i_trait_id, returned_pks)\n for trait in self.source_traits:\n self.assertIn(trait.i_trait_id, returned_pks)",
"def get_all_traits_tagged_count(self):\n return SourceTrait.objects.filter(\n source_dataset__source_study_version__study=self\n ).current().exclude(all_tags=None).count()",
"def get_source_query(self) -> QuerySet:\n raise NotImplementedError",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_returns_all_traits(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([trait.pk for trait in self.source_traits]), sorted(pks))",
"def test_context_data_only_finds_results_in_requested_study(self):\n trait = factories.SourceTraitFactory.create(\n i_description='lorem ipsum',\n source_dataset__source_study_version__study=self.study)\n factories.SourceTraitFactory.create(i_description='lorem ipsum')\n get = {'description': 'lorem'}\n response = self.client.get(self.get_url(self.study.pk), get)\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)\n self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])",
"def test_no_deprecated_traits_in_table(self):\n deprecated_traits = factories.SourceTraitFactory.create_batch(\n 10, source_dataset__source_study_version__i_is_deprecated=True,\n source_dataset__source_study_version__study=self.study)\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n for trait in deprecated_traits:\n self.assertNotIn(trait, table.data)\n for trait in self.source_traits:\n self.assertIn(trait, table.data)",
"def test_no_deprecated_traits_in_table(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n table = context['source_trait_table']\n for trait in self.source_traits_v1:\n self.assertNotIn(trait, table.data)\n for trait in self.source_traits_v2:\n self.assertNotIn(trait, table.data)",
"def get_queryset(self):\n return Objective.objects.filter(perspective__description='Learning and Capacity').order_by('code')",
"def by_source(self, source):\n return self.filter(source_object=source)",
"def get_components(self,filt):\n comps = [self.components[i] for i in xrange(len(self.header)) if filt == self.header[i]]\n return comps",
"def get_queryset(self):\n return Objective.objects.order_by('perspective')",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def get_queryset(self):\n return Objective.objects.filter(perspective__description='Customer').order_by('code')",
"def get_all_traits(schema_obj):\n\n traits = []\n for vendor in schema_obj.vendor_list:\n for trait in vendor.trait_list:\n traits.append(trait)\n return traits",
"def test_context_data_with_valid_search_and_trait_name(self):\n trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', i_trait_name='dolor')\n factories.SourceTraitFactory.create(i_description='lorem other', i_trait_name='tempor')\n response = self.client.get(self.get_url(), {'description': 'lorem', 'name': 'dolor'})\n qs = searches.search_source_traits(description='lorem', name='dolor')\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)\n self.assertQuerysetEqual(qs, [repr(x) for x in context['results_table'].data])",
"def get_queryset(self):\n\n\t\t# Initially set the returned objects to be all sentences\n\t\tqueryset = Sentence.objects.all()\n\n\t\t# Access the request params\n\t\tplayername = self.request.query_params.get('playername', None)\n\n\t\t# If a player name is specified ---> Set the filter\n\t\tif playername is not None:\n\t\t\tqueryset = queryset.filter(player_name=playername)\n\n\t\t# Return the appropriate queryset\n\t\treturn queryset"
] | [
"0.6352468",
"0.60128826",
"0.57998437",
"0.56801885",
"0.5599534",
"0.5557096",
"0.5557096",
"0.52780604",
"0.52433515",
"0.5181923",
"0.5181923",
"0.5181923",
"0.5181923",
"0.5181923",
"0.5181923",
"0.5181923",
"0.5181923",
"0.5181923",
"0.49282494",
"0.49022794",
"0.49019882",
"0.48713282",
"0.48710924",
"0.485786",
"0.48437932",
"0.4824199",
"0.48236135",
"0.48194945",
"0.4805713",
"0.48028955"
] | 0.71494865 | 0 |
Get a list containing all of the studies linked to component traits for this unit. | def get_source_studies(self):
return list(set([trait.source_dataset.source_study_version.study for trait in self.get_all_source_traits()])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def studies(self):\n return self._study_queryset",
"def get_all_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.filter(\n trait__source_dataset__source_study_version__study=self,\n ).current()",
"def orthanc_studies(self):\n return [orthanc.study(x.orthanc_id) for x in self.studies]",
"def components(self):\r\n return list(self._components)",
"def get_students(self) -> List['Student']:\n return self.students.values()",
"def get_studies(self, subj_id, modality=None, unique=False):\n\n url = 'studies?' + self._login_code + \\\n '&projectCode=' + self.proj_code + '&subjectNo=' + subj_id\n output = self._send_request(url)\n\n # Split at '\\n'\n stud_list = output.split('\\n')\n # Remove any empty entries!\n stud_list = [x for x in stud_list if x]\n\n if modality:\n for ii, study in enumerate(stud_list):\n url = 'modalities?' + self._login_code + \\\n '&projectCode=' + self.proj_code + '&subjectNo=' + \\\n subj_id + '&study=' + study\n output = self._send_request(url).split('\\n')\n\n if modality in output:\n if unique:\n return([study, ]) # always return a list\n else:\n stud_list[ii] = None\n\n # In Py3, filter returns an iterable object, but here we want list\n stud_list = list(filter(None, stud_list))\n\n return(stud_list)",
"def getListOfSpecies(self):\n return self.model.getListOfSpecies()",
"def list_components(self) -> Dict[str, Any]:\n return self._manager.list_components()",
"def get_all_source_traits(self):\n return self.component_source_traits.all() | self.component_batch_traits.all() | self.component_age_traits.all()",
"def get_component_html(self, harmonization_unit):\n source = [tr.get_name_link_html() for tr in (\n self.component_source_traits.all() & harmonization_unit.component_source_traits.all())]\n harmonized_trait_set_versions = [trait_set_version for trait_set_version in (\n self.component_harmonized_trait_set_versions.all() &\n harmonization_unit.component_harmonized_trait_set_versions.all())]\n harmonized = [tr.get_name_link_html() for trait_set in harmonized_trait_set_versions\n for tr in trait_set.harmonizedtrait_set.all()\n if not tr.i_is_unique_key]\n component_html = ''\n if len(source) > 0:\n trait_list = '\\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in source])\n component_html += INLINE_LIST_HTML.format(\n list_title='Component study variables for {}'.format(self.trait_flavor_name),\n list_elements=trait_list)\n if len(harmonized) > 0:\n trait_list = '\\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in harmonized])\n component_html += '\\n' + INLINE_LIST_HTML.format(\n list_title='Component harmonized variables for {}'.format(self.trait_flavor_name),\n list_elements=trait_list)\n return component_html",
"def get_list_of_students(self):\n return self._students",
"def get_session_researcher_study_ids():\n session_researcher = get_session_researcher()\n if session_researcher.site_admin:\n return Study.objects.exclude(deleted=True).values_list(\"id\", flat=True)\n else:\n return session_researcher.study_relations.filter(study__deleted=False).values_list(\"study__id\", flat=True)",
"def sections_list(self, only_graded=False):\n return [section for section in self.sections(only_graded=only_graded)]",
"def getEMPStudyList(self):\n try:\n studies = []\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('qiime_assets.get_emp_study_list', [results])\n for row in results:\n # study_id, sample_id, sample_name, project_name, study_title, email, sample_count, metadata_complete,\n # study_score, sample_score, s.number_samples_promised, s.number_samples_in_freezer, \n # s.principal_investigator\n studies.append((row[0], row[1], row[2], row[3], row[4], row[5],\n row[6], row[7], row[8], row[9], row[10], row[11], row[12]))\n return studies\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)",
"def iter_components(self):\n return self.components.values()",
"def students(self):\n\t\treturn self.grade_set.all().distinct()",
"def getSpeciesList(self):\n\n return self.speciesList",
"def get_sensors(self):\n sensors = set()\n for er in self.exercise_recordings:\n for sensor in er.sensors:\n if sensor not in sensors:\n sensors.add(sensor)\n return list(sensors)",
"def species(self):\n return [node.species for node in self]",
"def subunits(self):\n\n return [get_target_by_id(id_) for id_ in self._subunit_ids]",
"def components(self):\n return self.__components",
"def components(self):\n return self.__components",
"def get_components_list(self):\n\n components_list = self.driver.find_elements(*BasePageLocators.LIST_COMPONENS)\n return components_list",
"def components(self):\n return self._components",
"def components(self):\n return self._components",
"def get_species_list(self, obj):\n child = self.child if self.child else self.get_child_return(obj)\n s_list = child.get_species_list() if not obj.has_question else None\n\n return s_list",
"def getEssentialList(self):\n return self.essentials",
"def components(self):\n return [Equity(t, self) for t in self.component_tickers]",
"def categories(self):\n return self.r.smembers(self._categories_key)",
"def subjects(self):\n if not self._subjects:\n self._subjects = [subject_factory(s, workspace=self, samples=self.samples) for s in self._get_entities(self.subject_property_name)]\n return self._subjects"
] | [
"0.6521775",
"0.631228",
"0.6173921",
"0.5707282",
"0.5669657",
"0.5585801",
"0.557777",
"0.5572094",
"0.5565817",
"0.5506995",
"0.5487372",
"0.54773843",
"0.5472401",
"0.5445943",
"0.5405772",
"0.53850967",
"0.537402",
"0.53681386",
"0.53642213",
"0.5360461",
"0.53498906",
"0.53498906",
"0.53441125",
"0.53306603",
"0.53306603",
"0.53148335",
"0.5303525",
"0.5286475",
"0.5254463",
"0.5237495"
] | 0.66928583 | 0 |
Get HTML for a panel of component traits for the harmonization unit. Includes an inline list of included studies if applicable. | def get_component_html(self):
study_list = '\n'.join([study.get_name_link_html() for study in self.get_source_studies()])
age_list = '\n'.join([trait.get_name_link_html() for trait in self.component_age_traits.all()])
component_html = '\n'.join([
trait.get_component_html(harmonization_unit=self) for trait in self.harmonizedtrait_set.all()])
panel_body = []
if len(study_list) > 0:
study_html = INLINE_LIST_HTML.format(list_title='Included studies', list_elements=study_list)
panel_body.append(study_html)
if len(age_list) > 0:
age_html = INLINE_LIST_HTML.format(list_title='Component age variables', list_elements=age_list)
panel_body.append(age_html)
panel_body.append(component_html)
panel_body = '\n'.join(panel_body)
unit_panel = PANEL_HTML.format(panel_title='Harmonization unit: {}'.format(self.i_tag), panel_body=panel_body)
return unit_panel | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_component_html(self, harmonization_unit):\n source = [tr.get_name_link_html() for tr in (\n self.component_source_traits.all() & harmonization_unit.component_source_traits.all())]\n harmonized_trait_set_versions = [trait_set_version for trait_set_version in (\n self.component_harmonized_trait_set_versions.all() &\n harmonization_unit.component_harmonized_trait_set_versions.all())]\n harmonized = [tr.get_name_link_html() for trait_set in harmonized_trait_set_versions\n for tr in trait_set.harmonizedtrait_set.all()\n if not tr.i_is_unique_key]\n component_html = ''\n if len(source) > 0:\n trait_list = '\\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in source])\n component_html += INLINE_LIST_HTML.format(\n list_title='Component study variables for {}'.format(self.trait_flavor_name),\n list_elements=trait_list)\n if len(harmonized) > 0:\n trait_list = '\\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in harmonized])\n component_html += '\\n' + INLINE_LIST_HTML.format(\n list_title='Component harmonized variables for {}'.format(self.trait_flavor_name),\n list_elements=trait_list)\n return component_html",
"def get_component_html(self):\n return '\\n'.join([hunit.get_component_html() for hunit in self.harmonizationunit_set.all()])",
"def get_html(self):\r\n if self.debug == 'True':\r\n # Reset the user vote, for debugging only!\r\n self.user_voted = False\r\n if self.hints == {}:\r\n # Force self.hints to be written into the database. (When an xmodule is initialized,\r\n # fields are not added to the db until explicitly changed at least once.)\r\n self.hints = {}\r\n\r\n try:\r\n child = self.get_display_items()[0]\r\n out = child.render('student_view').content\r\n # The event listener uses the ajax url to find the child.\r\n child_id = child.id\r\n except IndexError:\r\n out = u\"Error in loading crowdsourced hinter - can't find child problem.\"\r\n child_id = ''\r\n\r\n # Wrap the module in a <section>. This lets us pass data attributes to the javascript.\r\n out += u'<section class=\"crowdsource-wrapper\" data-url=\"{ajax_url}\" data-child-id=\"{child_id}\"> </section>'.format(\r\n ajax_url=self.runtime.ajax_url,\r\n child_id=child_id\r\n )\r\n\r\n return out",
"def study():\n return render_template('study.html')",
"def _get_section_scores_html(self):\n ctx_data = {'section_scores': self.student_section_scores}\n\n html = loader.render_django_template(\n 'templates/xblock_jupyter_graded/section_scores.html',\n ctx_data\n )\n\n return html",
"def workbench_scenarios():\n return [\n (\"Oppia Embedding\",\n \"\"\"<vertical_demo>\n <oppia oppiaid=\"0\" src=\"https://www.oppia.org\" width=\"700\" />\n </vertical_demo>\n \"\"\"),\n ]",
"def workbench_scenarios():\n return [\n (\"SummaryXBlock\",\n \"\"\"<summary/>\n \"\"\"),\n (\"Multiple SummaryXBlock\",\n \"\"\"<vertical_demo>\n <summary/>\n <summary/>\n <summary/>\n </vertical_demo>\n \"\"\"),\n ]",
"def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content",
"def get_html(self) -> List[ComponentMeta]:\n return [Div(id=\"additions\")]",
"def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content",
"def workbench_scenarios():\n return [\n (\"HL rubric text XBlock\",\n \"\"\"<hl_rubric_text/>\n \"\"\"),\n\n ]",
"def content_to_html(self):\n if self.title != \"\":\n string_title = html_tag(\n plain_to_html(self.title), self.title, self.proc\n )\n string_title = html_heading(string_title, self.level)\n else:\n string_title = html_heading(html_line(\"1\"), self.level)\n\n if self.level == 1: # it's not a sub-analysis\n string_title = html_line_before(string_title, \"5\")\n\n # We render all our content before all our subsections to stop any of\n # our content looking like it belongs to the subsection.\n string_content = \"\".join(self.content)\n for section in self.subsections:\n string_content += section.content_to_html()\n\n return string_title + string_content",
"def get_wells_info(self):\n prod_info = self.read_wells_include_file(\n rel_path='INCLUDE/Produtores.inc')\n prod_info['well_type'] = 'prod'\n inj_info = self.read_wells_include_file(\n rel_path='INCLUDE/Injetores.inc')\n inj_info['well_type'] = 'inj'\n return pd.concat([prod_info, inj_info], ignore_index=True)",
"def _repr_html_(self) -> str:\n html_template = \"\"\"\n <script src=\"{webcomponents_js}\"></script>\n <link rel=\"import\" href=\"{facets_html}\">\n <facets-dive id=\"dive_elem\" height=\"{height}\"></facets-dive>\n <script>\n document.querySelector(\"#dive_elem\").data = {data};\n </script>\"\"\"\n html = html_template.format(\n facets_html=FACETS_DEPENDENCIES['facets_html'],\n webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],\n data=self._data.to_json(orient='records'),\n height=self.height,\n )\n return html",
"def studies(self):\n return self._study_queryset",
"def study_legacy():\n return render_template('study-legacy.html')",
"def get_challenge_html(self):\r\n\r\n context = {\r\n 'top_scores': self.puzzle_leaders()}\r\n\r\n return self.system.render_template('folditchallenge.html', context)",
"def embed_components(self, reg = re.compile('([\\t ]*)-(frag|unit) \"([_\\w]+)\"')):\n contents = self.content\n extension ='haml'\n\n while True:\n\n component = reg.search(contents)\n\n if not component: break\n else:\n\n _indent, _unit_type, _unit_name = component.groups();\n\n unit_indn = _indent.replace('\\t', ' '* 4)\n unit_type = 'fragments' if _unit_type == 'frag' else 'components'\n unit_name = '.'.join((_unit_name, extension))\n\n templates_path = root(self.origin, 'templates')\n\n unit_file = os.path.join(templates_path, unit_type, unit_name)\n\n with open(unit_file, 'r') as reader: raw_unit = reader.read()\n\n haml_component = HamlComponent(self.origin, raw_unit, unit_type, unit_name)\n ress_keeper, contents = haml_component.package_ress(contents)\n\n\n\n for frag_block in ress_keeper: # js/css\n self.res_keeper[frag_block] = self.res_keeper.get('frag_block','') + ress_keeper[frag_block]\n for frag_block in self.res_keeper:\n _dir = 'style' if frag_block == 'css' else 'style'\n tgt = os.path.join(self.static_path, _dir, '.'.join(self.name, frag_block))\n with open(tgt, self.save_flag[frag_block]) as pen: pen.write(self.res_keeper[frag_block])\n\n unit = '\\n'.join([str(unit_indn) + line for line in haml_component.content.split('\\n')])\n\n contents = contents.replace('%s-%s \"%s\"'%(_indent, _unit_type, _unit_name), unit, 1)\n\n ## next case need recalc contents len before and after `ress_keeper, contents = haml_component.package_ress(contents)`\n ## and will work just for add in header (before -frag/unit tag). Too tricky\n # start, end, endpos = component.start(), component.end(), component.endpos\n # contents = contents[0:start] + unit + contents[end: endpos]\n\n return contents",
"def workbench_scenarios():\n return [\n (\"Discussion XBlock\",\n \"\"\"<vertical_demo>\n <discussion-forum/>\n </vertical_demo>\n \"\"\"),\n ]",
"def workbench_scenarios():\n return [\n (\"simstudentXBlock\",\n \"\"\"<vertical_demo>\n <simstudent/>\n </vertical_demo>\n \"\"\"),\n ]",
"def get_basicpuzzles_html(self):\r\n goal_level = '{0}-{1}'.format(\r\n self.required_level,\r\n self.required_sublevel)\r\n\r\n context = {\r\n 'due': self.due,\r\n 'success': self.is_complete(),\r\n 'goal_level': goal_level,\r\n 'completed': self.completed_puzzles(),\r\n }\r\n return self.system.render_template('folditbasic.html', context)",
"def html_of_unit(quant):\n return quant.dimensionality.html",
"def update_output_div(input_value):\n file = str(input_value).split(\"C:\\\\fakepath\\\\\")[-1]\n trial = pd.read_csv(file)\n trial[\"spans\"] = trial.spans.apply(literal_eval)\n _html = [html_to_dash(display_toxics(trial.spans[index], trial.text[index])) \n for index in range(0, trial.shape[0])]\n return html.P(_html)",
"def __str__(self):\n if len(self.lTraits_) == 0:\n return \"Aucun trait.\"\n str = u\"Liste de tous les traits : \"\n for trait in self.lTraits_:\n str = str + trait + \",\"\n return str",
"def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)",
"def retrieve_web_panel(panel_id: int, confidences: str = '01234'):\n import pandas as pd\n confidences = ''.join(sorted(confidences))\n reply = requests.get(f'https://panelapp.genomicsengland.co.uk/panels/{panel_id}/download/{confidences}/')\n table_handle = io.StringIO(reply.text)\n return pd.read_csv(table_handle, sep='\\t')",
"def get_overview_string(self, mission):\n\n s = self.get_pool_overview_string(mission) + \"\\n\\n\"\n s += self.get_job_overview_string(mission) + \"\\n\\n\"\n s += self.get_storage_container_overview_string(mission)\n\n return s",
"def display_panel(\n Y: pd.DataFrame,\n X: pd.DataFrame,\n regression_type: str = \"OLS\",\n entity_effects: bool = False,\n time_effects: bool = False,\n export: str = \"\",\n sheet_name: Optional[str] = None,\n):\n model = regression_model.get_regressions_results(\n Y,\n X,\n regression_type,\n entity_effects,\n time_effects,\n )\n if regression_type != \"OLS\":\n console.print(model)\n\n if export:\n results_as_html = model.summary.tables[1].as_html()\n df = pd.read_html(results_as_html, header=0, index_col=0)[0]\n dependent = Y.columns[0]\n export_data(\n export,\n os.path.dirname(os.path.abspath(__file__)),\n f\"{dependent}_{regression_type}_regression\",\n df,\n sheet_name,\n )\n\n return model",
"def get_html(self):\r\n goal_level = '{0}-{1}'.format(\r\n self.required_level,\r\n self.required_sublevel)\r\n\r\n showbasic = (self.show_basic_score.lower() == \"true\")\r\n showleader = (self.show_leaderboard.lower() == \"true\")\r\n\r\n context = {\r\n 'due': self.due,\r\n 'success': self.is_complete(),\r\n 'goal_level': goal_level,\r\n 'completed': self.completed_puzzles(),\r\n 'top_scores': self.puzzle_leaders(),\r\n 'show_basic': showbasic,\r\n 'show_leader': showleader,\r\n 'folditbasic': self.get_basicpuzzles_html(),\r\n 'folditchallenge': self.get_challenge_html()\r\n }\r\n\r\n return self.system.render_template('foldit.html', context)",
"def _get_markup(self):\n return make_soup(self.driver.find_element_by_id(\"contestDetailTable\").get_attribute(\"innerHTML\"))"
] | [
"0.73980016",
"0.71138966",
"0.5560296",
"0.5343086",
"0.5249588",
"0.5139348",
"0.51112926",
"0.5068411",
"0.5066079",
"0.5065291",
"0.4935733",
"0.48982418",
"0.48671803",
"0.48227435",
"0.4816911",
"0.47910064",
"0.47818005",
"0.47653103",
"0.47544286",
"0.47376806",
"0.46822572",
"0.46663603",
"0.46582198",
"0.46531588",
"0.46485758",
"0.46471432",
"0.46441877",
"0.46125",
"0.459898",
"0.45923126"
] | 0.8093886 | 0 |
Custom save method to autoset full_accession and dbgap_link. | def save(self, *args, **kwargs):
self.full_accession = self.set_full_accession()
self.dbgap_link = self.set_dbgap_link()
super(SourceTrait, self).save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceStudyVersion, self).save(*args, **kwargs)",
"def save(self, db):\n pass",
"def save_db(self) -> None:",
"def save(self,\n force_insert=False,\n force_update=False,\n using=None,\n update_fields=None):\n # If the short url wasn't specified\n if not self.short_url:\n # We pass the model instance that is being saved\n self.short_url = create_shortened_url(self)\n\n super().save(force_insert, force_update, using, update_fields)",
"def save(self, db):\n db.query(\n \"INSERT INTO fellows (name, accomodation)\\\n VALUES(:name, :accomodation)\",\n name=self.name, accomodation=self.wants_accomodation\n )",
"def save(self, *args, **kwargs):\n super(self.__class__, self).save(*args, **kwargs)",
"def save():",
"def save(self, *args, **kwargs):\n pass",
"def _save(self):\n for attrib in self.attribs:\n setattr(self, attrib, getattr(self.obj, attrib))",
"def save_without_setting_canon(self, *args, **kwargs):\n super(DocumentSetFieldEntry, self).save(*args, **kwargs)",
"def save(self):\n self.db.commit()",
"def save():\n pass",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n super().save(*args, **kwargs)",
"def save(self):\n self.__db.commit()",
"def post_save_access_attempt(self, instance, **kwargs):",
"def save(self, *args, **kwargs):\n\n if self.id:\n firstcreation = False\n else:\n firstcreation = True\n\n #common save functionality for all models\n self._save_base()\n self.save_default(firstcreation)\n super(ComicSiteModel,self).save()",
"def save(self):\n\n pass",
"def db_for_write(self, model, **hints):\n return None",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self):\n pass",
"def save(self, obj):",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs) -> None:\n pass",
"def save(self, *args, **kwargs):\n return"
] | [
"0.73582214",
"0.71559155",
"0.6393143",
"0.6363915",
"0.6306047",
"0.62788814",
"0.5810558",
"0.57993186",
"0.5722098",
"0.57087",
"0.570662",
"0.56933665",
"0.567312",
"0.5658324",
"0.5658324",
"0.56490225",
"0.56343424",
"0.56319344",
"0.56160986",
"0.56087613",
"0.5595003",
"0.5595003",
"0.5595003",
"0.5595003",
"0.5595003",
"0.5594704",
"0.558285",
"0.558285",
"0.558285",
"0.55825764"
] | 0.7209021 | 1 |
Automatically set full_accession from the variable's dbGaP identifiers. | def set_full_accession(self):
return self.VARIABLE_ACCESSION.format(
self.i_dbgap_variable_accession, self.i_dbgap_variable_version,
self.source_dataset.source_study_version.i_participant_set) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_full_accession(self):\n return self.DATASET_ACCESSION.format(\n self.i_accession, self.i_version, self.source_study_version.i_participant_set)",
"def set_full_accession(self):\n return self.STUDY_VERSION_ACCESSION.format(self.study.phs, self.i_version, self.i_participant_set)",
"def set_dbgap_link(self):\n return self.VARIABLE_URL.format(\n self.source_dataset.source_study_version.full_accession, self.i_dbgap_variable_accession)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceDataset, self).save(*args, **kwargs)",
"def mod_family_accession(family_accession):\n\n return family_accession[:family_accession.index('.')]",
"def alias_grfn_vars(self, src_fullid: str, tgt_fullid: str):\n self.fullid_to_grfn_id[src_fullid] = self.fullid_to_grfn_id[tgt_fullid]",
"def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceStudyVersion, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.full_accession = self.set_full_accession()\n self.dbgap_link = self.set_dbgap_link()\n super(SourceTrait, self).save(*args, **kwargs)",
"def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)",
"def policy_alias(self):",
"def update_gpdbid_file(array):\n \n standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())\n\n # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace\n writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())",
"def default_global_location(database):\n\n for dataset in get_many(database, *[equals(\"location\", None)]):\n dataset[\"location\"] = \"GLO\"\n return database",
"def get_CG_id(gid, conn):\n\n get_CG = ('SELECT DISTINCT dx.accession '\n 'FROM feature f, feature_dbxref fd, db, dbxref dx '\n 'WHERE f.feature_id = fd.feature_id AND fd.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'FlyBase Annotation IDs\\' AND '\n 'dx.accession NOT LIKE \\'%%-%%\\' AND fd.is_current = \\'t\\' AND f.uniquename = %s')\n CG_id = connect(get_CG,gid,conn)\n return(CG_id)",
"def gnomad_genomes_af(self):\n af = [gnomad_genomes.af for gnomad_genomes in self.gnomad_genomes]\n return af[0] if af else None",
"def set_fullname(self, value):\n self.fullname = value",
"def compute_access(field):\n bus_acc = get_wbgen(field, 'access_bus')\n dev_acc = get_wbgen(field, 'access_dev')\n abbrev = {'READ_WRITE': 'RW', 'READ_ONLY': 'RO', 'WRITE_ONLY': 'WO'}\n typ = get_wbgen(field, 'type')\n if bus_acc is None:\n bus_acc = {'PASS_THROUGH': 'WO', 'MONOSTABLE': 'WO',\n 'CONSTANT': 'RO'}.get(typ, 'RW')\n else:\n bus_acc = abbrev.get(bus_acc)\n if dev_acc is None:\n dev_acc = {'CONSTANT': 'WO'}.get(typ, 'RO')\n else:\n dev_acc = abbrev.get(dev_acc)\n field.h_access = '{}_{}'.format(bus_acc, dev_acc)",
"def idpac(self):\n return self._idpac",
"def test_by_accession_geo_platform_accession_get(self):\n pass",
"def access():",
"def set_gadm(uid, gid):\n g.db.execute('update into user_group (gadm) values (1) where id_user == ? and id_group == ?',\n [uid, gid])",
"def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname",
"def writeProteinAccessions( self ):\n\n self.logger.info( 'writeProteinAccessions: START' )\n\n self.logger.info( 'writeProteinAccessions: insert file will be proteinAccessionsInsert.psql' )\n\n proteinAccessionFile = self.openInsertFile( 'proteinAccessionsInsert.psql')\n\n for proteinIdentification, proteinIdRelationalDatabase in self.proteinsInserted.iteritems():\n accessionId = self.accessionsInserted[ proteinIdentification ]\n self.writeFile( proteinAccessionFile, 'protein_accessions', [ str(proteinIdRelationalDatabase), str(accessionId) ] )\n\n\n self.logger.info( 'writeProteinAccessions: DONE' )",
"def set_fullname(self, value):\n raise NotImplementedError('set_fullname')",
"def init_auto_alias(self):\n for alias,cmd in self.auto_alias:\n self.alias_table[alias] = (0,cmd)",
"def _get_id_ac_string(accession: str, gene: str, sequence_len: int) -> str:\n id_str = \"ID {GENE:<24}{REVIEW:<18}{AA_COUNT} AA.\\n\".format(\n GENE=gene,\n REVIEW=\"Unreviewed;\",\n AA_COUNT=sequence_len\n )\n acc_str = \"AC {};\".format(accession)\n return id_str + acc_str",
"def set_purged(*args):\n return _ida_frame.set_purged(*args)",
"def _populate_oid_attid(self):\n self.hash_oid_name = {}\n res = self.search(expression=\"objectClass=attributeSchema\",\n controls=[\"search_options:1:2\"],\n attrs=[\"attributeID\",\n \"lDAPDisplayName\"])\n if len(res) > 0:\n for e in res:\n strDisplay = str(e.get(\"lDAPDisplayName\"))\n self.hash_oid_name[str(e.get(\"attributeID\"))] = strDisplay",
"def gnomad_exomes_af(self):\n af = [gnomad_exomes.af for gnomad_exomes in self.gnomad_exomes]\n return af[0] if af else None",
"def select_first_organism(cazy_data, gbk_accessions, replaced_taxa_logger):\n for accession in tqdm(gbk_accessions, desc='Selecting the first retrieved organism'):\n selected_kingdom = list(cazy_data[accession]['taxonomy'])[0].kingdom\n selected_organism = list(cazy_data[accession]['taxonomy'])[0].organism\n\n for tax_tuple in list(cazy_data[accession]['taxonomy'])[1:]:\n replaced_taxa_logger.warning(\n f\"{accession}\\t\"\n f\"SELECTED: {selected_kingdom} -- {selected_organism}\"\n f\"\\tREPLACED: {tax_tuple.kingdom}: {tax_tuple.organism}\"\n )\n\n cazy_data[accession][\"kingdom\"] = selected_kingdom\n cazy_data[accession][\"organism\"] = selected_organism\n\n return cazy_data"
] | [
"0.66088516",
"0.6157562",
"0.53482604",
"0.5188747",
"0.51160794",
"0.5022504",
"0.50135124",
"0.49394724",
"0.49343747",
"0.48210892",
"0.47820124",
"0.4719237",
"0.4669664",
"0.4625832",
"0.46101683",
"0.4575124",
"0.45396692",
"0.45073032",
"0.45045888",
"0.4492379",
"0.44751117",
"0.44720042",
"0.44489115",
"0.44182348",
"0.44137624",
"0.44106635",
"0.43978727",
"0.438448",
"0.4379721",
"0.43771723"
] | 0.722903 | 0 |
Automatically set dbgap_link from dbGaP identifier information. | def set_dbgap_link(self):
return self.VARIABLE_URL.format(
self.source_dataset.source_study_version.full_accession, self.i_dbgap_variable_accession) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_dbgap_link(self):\n return self.DATASET_URL.format(self.source_study_version.full_accession, self.i_accession)",
"def update_gpdbid_file(array):\n \n standby_datadir = os.path.normpath(array.standbyMaster.getSegmentDataDirectory())\n\n # MPP-13245, use single mechanism to manage gp_dbid file instead of ad-hoc replace\n writeGpDbidFile(standby_datadir, 1, get_logger_if_verbose())",
"def move_dbgap_link_to_dataset(apps, schema_editor):\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n for dataset in SourceDataset.objects.all():\n dataset.dbgap_link = dataset.sourcetrait_set.first().dbgap_dataset_link\n dataset.save()",
"def set_dbgap_link(self):\n return self.STUDY_VERSION_URL.format(self.full_accession)",
"def set_db_id(self):\n if self._id is None:\n db = self._core.get_db()\n self._id = db.get_seq_next('OPE_GEN')\n return self._id",
"def generate_link_attr(d: Dict):\n d.update({\"link\": urljoin(\"https://vdb-kasf1i23nr1kl2j4.rapid7.com/v1/content/\", d.get(\"identifier\"))})",
"def _add_bridge_db_identifiers(map_dict) -> dict:\n sys.stdout.write(\"Adding BridgeDB identifiers...\\n\")\n r_session = base_utils.requests_retry_session()\n\n for uniq_id in tqdm.tqdm(map_dict, total=len(map_dict)):\n parts = uniq_id.split(':')\n db = parts[0]\n uid = parts[-1]\n\n if db in constants.BRIDGEDB_MAP:\n # list of other DBs to query from\n q_dbs = constants.BRIDGEDB_MAP[db]\n for q_db in q_dbs:\n try:\n r = r_session.get(\n 'http://webservice.bridgedb.org/Human/xrefs/{}/{}?dataSource={}'.format(\n constants.BRIDGEDB_KEYS[db],\n uid,\n constants.BRIDGEDB_KEYS[q_db]\n )\n )\n except Exception as x:\n print(\"%s: %s\" % (uniq_id, x.__class__.__name__))\n continue\n\n result = r.text\n if len(result) > 0:\n add_ids = [line.split('\\t')[0] for line in result.split('\\n')[:-1]]\n new_ids = ['{}:{}'.format(q_db, i) for i in add_ids if i.isalnum()]\n for n_id in new_ids:\n new_id = '{}:{}'.format(q_db, n_id)\n map_dict[uniq_id].add(new_id)\n\n time.sleep(0.5)\n\n return map_dict",
"def move_dbgap_link_to_study_version(apps, schema_editor):\n SourceStudyVersion = apps.get_model('trait_browser', 'SourceStudyVersion')\n for ssv in SourceStudyVersion.objects.all():\n ssv.dbgap_link = ssv.sourcedataset_set.first().sourcetrait_set.first().dbgap_study_link\n ssv.save()",
"def update_link_id(self, data):\n\n self.data[data['project_name']]['nodes'][data['first']]['ports'][data['first_port']]['link_id'] = data['link_id']\n self.data[data['project_name']]['nodes'][data['second']]['ports'][data['second_port']]['link_id'] = data['link_id']",
"def get_CG_id(gid, conn):\n\n get_CG = ('SELECT DISTINCT dx.accession '\n 'FROM feature f, feature_dbxref fd, db, dbxref dx '\n 'WHERE f.feature_id = fd.feature_id AND fd.dbxref_id = dx.dbxref_id '\n 'AND dx.db_id = db.db_id AND db.name = \\'FlyBase Annotation IDs\\' AND '\n 'dx.accession NOT LIKE \\'%%-%%\\' AND fd.is_current = \\'t\\' AND f.uniquename = %s')\n CG_id = connect(get_CG,gid,conn)\n return(CG_id)",
"def init_linkage():\n for case in AutoCase.objects.all():\n case.autolink()\n case.save()",
"def writeGpDbidFile(directory, dbid, logger=None):\n d = GpDbidFile(directory, logger=logger)\n d.dbid = dbid\n d.write_gp_dbid()",
"def _adjust_connection_URL(self, text):\n dbname = self.options.db\n parts = text.split('/')\n\n # Preserve the quotes if present\n if parts[-1].endswith(\"'\"):\n dbname += \"'\"\n\n parts[-1] = dbname\n return '/'.join(parts)",
"def set_platform_gs_prefix(self, gs_url):\n self.buildurl_gs_prefix = gs_url # pragma: no cover",
"def gbr_dl(self, gbr_dl):\n\n self._gbr_dl = gbr_dl",
"def gnomad_link(variant_obj):\n url_template = (\"http://gnomad.broadinstitute.org/variant/{this[chromosome]}-\"\n \"{this[position]}-{this[reference]}-{this[alternative]}\")\n return url_template.format(this=variant_obj)",
"def remove_dataset_dbgap_link(apps, schema_editor):\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n for dataset in SourceDataset.objects.all():\n dataset.dbgap_link = ''\n dataset.save()",
"def dbgap_server():\n settings = {\n 'app_id': __name__,\n 'api_bases': ['https://dbgap-api.ncbi.nlm.nih.gov/fhir/x1']\n }\n return DispatchingFHIRClient(settings=settings)",
"def connect(dbapi_connection, connection_record):\n connection_record.info['pid'] = os.getpid()",
"def setDB(dbname):\n global DBNAME\n DBNAME = dbname",
"def save_pgsql_conf(self, db):\n hookenv.log(\n \"Checking related DB information before saving PostgreSQL configuration\",\n hookenv.DEBUG,\n )\n if db:\n hookenv.log(\"Saving related PostgreSQL database config\", hookenv.DEBUG)\n self.kv.set(\"pgsql_host\", db.master.host)\n self.kv.set(\"pgsql_port\", db.master.port)\n self.kv.set(\"pgsql_db\", db.master.dbname)\n self.kv.set(\"pgsql_user\", db.master.user)\n self.kv.set(\"pgsql_pass\", db.master.password)\n self.kv.flush()",
"def set_deafult_gw(self, args):\n\n gw_ip = ip_address(args.ip)\n gw_info = UplinkGatewayInfo()\n gw_info.update_ip(str(gw_ip))\n print(\"set Default gw IP to %s\" % gw_info.get_gw_ip())",
"def db_name(self, db_name):\n\n self._db_name = db_name",
"def _process_dbxref(self):\n\n raw = '/'.join((self.rawdir, 'dbxref'))\n logger.info(\"processing dbxrefs\")\n line_counter = 0\n\n with open(raw, 'r') as f:\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n f.readline() # read the header row; skip\n for line in filereader:\n (dbxref_id, db_id, accession, version, description, url) = line\n # dbxref_id\tdb_id\taccession\tversion\tdescription\turl\n # 1\t2\tSO:0000000\t\"\"\n\n db_ids = { # the databases to fetch\n 50: 'PMID', # pubmed\n 68: 'RO', # obo-rel\n 71: 'FBdv', # FBdv\n 74: 'FBbt', # FBbt\n # 28:, # genbank\n 30: 'OMIM', # MIM\n # 38, # ncbi\n 75: 'ISBN', # ISBN\n 46: 'PMID', # PUBMED\n 51: 'ISBN', # isbn\n 52: 'SO', # so\n # 76, # http\n 77: 'PMID', # PMID\n 80: 'FBcv', # FBcv\n # 95, # MEDLINE\n 98: 'REACT', # Reactome\n 103: 'CHEBI', # Chebi\n 102: 'MESH', # MeSH\n 106: 'OMIM', # OMIM\n 105: 'KEGG-path', # KEGG pathway\n 107: 'DOI', # doi\n 108: 'CL', # CL\n 114: 'CHEBI', # CHEBI\n 115: 'KEGG', # KEGG\n 116: 'PubChem', # PubChem\n # 120, # MA???\n 3: 'GO', # GO\n 4: 'FlyBase', # FlyBase\n # 126, # URL\n 128: 'PATO', # PATO\n # 131, # IMG\n 2: 'SO', # SO\n 136: 'MESH', # MESH\n 139: 'CARO', # CARO\n 140: 'NCBITaxon', # NCBITaxon\n # 151, # MP ???\n 161: 'DOI', # doi\n 36: 'BDGP', # BDGP\n # 55, # DGRC\n # 54, # DRSC\n # 169, # Transgenic RNAi project???\n 231: 'RO', # RO ???\n 180: 'NCBIGene', # entrezgene\n # 192, # Bloomington stock center\n 197: 'UBERON', # Uberon\n 212: 'ENSEMBL', # Ensembl\n # 129, # GenomeRNAi\n 275: 'PMID', # PubMed\n 286: 'PMID', # pmid\n 264: 'HGNC',\n # 265: 'OMIM', # OMIM_Gene\n 266: 'OMIM', # OMIM_Phenotype\n 300: 'DOID', # DOID\n 302: 'MESH', # MSH\n 347: 'PMID', # Pubmed\n }\n\n if accession.strip() != '' and int(db_id) in db_ids:\n # scrub some identifiers here\n m = re.match(\n r'(doi|SO|GO|FBcv|FBbt_root|FBdv|FBgn|FBdv_root|FlyBase|FBbt):',\n accession)\n if m:\n accession = re.sub(m.group(1)+r'\\:', '', accession)\n elif re.match(\n r'(FlyBase miscellaneous CV|cell_lineprop|relationship type|FBgn$)',\n accession):\n continue\n elif re.match(r'\\:', accession): # starts with a colon\n accession = re.sub(r'\\:', '', accession)\n elif re.search(r'\\s', accession):\n # skip anything with a space\n # logger.debug(\n # 'dbxref %s accession has a space: %s',\n # dbxref_id, accession)\n continue\n\n if re.match(r'http', accession):\n did = accession.strip()\n else:\n prefix = db_ids.get(int(db_id))\n did = ':'.join((prefix, accession.strip()))\n if re.search(r'\\:', accession) and prefix != 'DOI':\n logger.warning(\n 'id %s may be malformed; skipping', did)\n\n self.dbxrefs[dbxref_id] = {db_id: did}\n\n elif url != '':\n self.dbxrefs[dbxref_id] = {db_id: url.strip()}\n else:\n continue\n\n # the following are some special cases that we scrub\n if int(db_id) == 2 \\\n and accession.strip() == 'transgenic_transposon':\n # transgenic_transposable_element\n self.dbxrefs[dbxref_id] = {db_id: 'SO:0000796'}\n\n line_counter += 1\n\n return",
"def __write_link_node_info_db(self, link_node_name, link_node):\n if \"crate_id\" not in link_node:\n return\n if \"lc1_node_id\" not in link_node:\n return\n slot = 2\n if \"analog_slot\" in link_node: \n slot = link_node[\"analog_slot\"]\n path = '{}app_db/{}/{:04}/{:02}/'.format(self.dest_path, link_node[\"cpu_name\"], link_node[\"crate_id\"], slot)\n\n macros={\"P\":link_node['app_prefix'],\n \"MPS_LINK_NODE_SIOC\":link_node['sioc'],\n \"MPS_LINK_NODE_ID\":link_node['lc1_node_id'],\n \"MPS_LINK_NODE_TYPE\":str(self.__link_node_type_to_number(link_node['type'])),\n \"MPS_CONFIG_VERSION\":self.config_version,\n \"MPS_CRATE_LOCATION\":link_node['physical'],\n \"MPS_CPU_NAME\":link_node['cpu_name']}\n self.__write_epics_db(path=path, template_name=\"link_node_info.template\", macros=macros)",
"def draw_relation_graph(database_name, table_name, primary_key, group_name) -> Graph:\n\n nodes = []\n links = []\n disease_list = get_icd_diseasegroup_diseaseinfo(database_name, table_name, primary_key, group_name)[1]\n disease_list = disease_list.split(',')\n # print(disease_list)\n\n for disease in disease_list:\n disease_node = {\n \"name\": disease,\n \"symbolSize\": 50\n }\n\n if disease_node not in nodes:\n nodes.append(disease_node)\n\n gene_list = get_mesh_disease_info(database_name, 'mesh_gene', disease, 'DISEASE_ID')[1]\n gene_list = gene_list.split(',')\n for gene in gene_list:\n gene_node = {\n 'name': gene,\n 'symbolSize': 10\n }\n\n if gene_node not in nodes:\n nodes.append(gene_node)\n\n for gene in gene_list:\n links.append({\"source\": disease, \"target\": gene})\n\n print(nodes)\n print(links)\n\n c = (\n Graph(init_opts=opts.InitOpts(width=\"1440px\", height=\"900px\")).add(\"\", nodes, links, repulsion=3000)\n .set_global_opts(title_opts=opts.TitleOpts(title=\"gene-disease association network\"))\n )\n\n return c",
"def thousandg_link(variant_obj, build=None):\n dbsnp_id = variant_obj.get('dbsnp_id')\n build = build or 37\n\n if not dbsnp_id:\n return None\n\n if build == 37:\n url_template = (\"http://grch37.ensembl.org/Homo_sapiens/Variation/Explore\"\n \"?v={};vdb=variation\")\n else:\n url_template = (\"http://www.ensembl.org/Homo_sapiens/Variation/Explore\"\n \"?v={};vdb=variation\")\n\n return url_template.format(dbsnp_id)",
"def cal_guid(self):\n return 'setup' + str(self.id) + '@lnldb'",
"def change_adp(self, network: str):\r\n self.ip = network\r\n self.adp = self.ipv4_adp[network]\r\n self.mac = self.ipv4_mac[network].replace('-', ':')\r\n # print(self.adp, self.ip, self.mac)\r",
"def link_protein(self, protein):\n if self.protein is None:\n self.protein = protein\n protein.link_gene(self)"
] | [
"0.56734145",
"0.5608507",
"0.55640787",
"0.5525023",
"0.5480624",
"0.5438552",
"0.5386511",
"0.52137035",
"0.5121944",
"0.49963725",
"0.49718344",
"0.49320933",
"0.4931212",
"0.49076504",
"0.4907363",
"0.48347703",
"0.48316193",
"0.4791243",
"0.4774549",
"0.47308743",
"0.4696292",
"0.4659212",
"0.46402574",
"0.4599474",
"0.45938677",
"0.45796216",
"0.45794648",
"0.4577518",
"0.4552418",
"0.45456272"
] | 0.59333754 | 0 |
Gets the absolute URL of the detail page for a given SourceTrait instance. | def get_absolute_url(self):
return reverse('trait_browser:source:traits:detail', kwargs={'pk': self.pk}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_absolute_url(self):\n\t\treturn reverse('source-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('trait_browser:source:studies:pk:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('trait_browser:harmonized:traits:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('trait_browser:source:datasets:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n return reverse('tour-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return self.harmonized_trait_set_version.get_absolute_url()",
"def get_absolute_url(self):\n return reverse('tournament-details', args=[self.uuid])",
"def get_absolute_url(self):\n return ('publication_detail', (), {'slug': self.slug})",
"def get_absolute_url(self):\n return reverse('texture_detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('book-detail', args=[str(self.id)]) \n # Returns an URL that can be used to access a detail record for this model \n # (for this to work we will have to \n # -- Define a URL mapping that has the name 'book-detail' (name='book-detail')\n # -- Define an associated view.\n # -- Define an associated template.",
"def get_absolute_url(self):\n return reverse('relation-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse(\"jewelry_detail\", args = [str(self.id)])",
"def get_absolute_url(self):\n return reverse('book_details', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('bl-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('model-detail-view', args=[str(self.id)])",
"def exam_url(self, obj):\n request = self.context.get(\"request\")\n return reverse(\"exam-detail\", args=[obj.id], request=request)",
"def get_absolute_url(self):\n return reverse('injury-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('patient-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('blogger-detail', args=[str(self.id)])",
"def get_absolute_url(self) -> str:\n return reverse(\"cv_detail\", kwargs={\"pk\": self.pk})",
"def details_url(self):\n if self._data.get('details_url'):\n path = self._data.get('details_url')\n try:\n path, hash_ = path.split('#')\n hash_ = '#' + hash_\n except ValueError:\n hash_ = ''\n return '{}?from_activity={}{}'.format(path, self._data.get('id'), hash_)",
"def get_absolute_url(self):\n return ('member_detail', [self.pk])",
"def get_absolute_url(self):\n return reverse(\n \"variants:case-detail\",\n kwargs={\"project\": self.project.sodar_uuid, \"case\": self.sodar_uuid},\n )",
"def get_absolute_url(self):\n return reverse('report', args=[str(self.id)])",
"def get_absolute_url(self):\n return reverse('bleedinfo-detail', args=[str(self.id)])",
"def get_absolute_url(self):\n\n url = reverse('comicsite.views.page', args=[self.comicsite.short_name,self.title])\n return url",
"def get_absolute_url(self):\n return reverse(\"cars:detail\", kwargs={\"slug\": self.slug})",
"def get_absolute_url(self):\n return reverse('properties:detail', kwargs={'pk': self.pk})",
"def get_absolute_url(self):\n\n return reverse('caretaker-detail', args=[str(self.id)])"
] | [
"0.7119291",
"0.7097896",
"0.6950301",
"0.6894759",
"0.6431965",
"0.63921607",
"0.636551",
"0.635845",
"0.62975645",
"0.62486935",
"0.6246613",
"0.62231576",
"0.6212547",
"0.6189846",
"0.6181484",
"0.6181484",
"0.61728066",
"0.6160209",
"0.6159289",
"0.615591",
"0.6154543",
"0.61252743",
"0.6123792",
"0.61045706",
"0.61006993",
"0.6094829",
"0.60915446",
"0.60839707",
"0.6076166",
"0.60659164"
] | 0.7710777 | 0 |
Return queryset of archived tags linked to this trait. | def archived_tags(self):
archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)
return apps.get_model('tags', 'Tag').objects.filter(
pk__in=archived_tagged_traits.values_list('tag__pk', flat=True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def non_archived_tags(self):\n non_archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.non_archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=non_archived_tagged_traits.values_list('tag__pk', flat=True))",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(tags__name=self.kwargs['tag_slug'])",
"def get_queryset(self):\n return Entry.published.filter(tags__slug=self.kwargs['tag_slug'])",
"def get_queryset(self):\n return Item.objects.filter(owner=self.request.user).order_by('-created').prefetch_related('tags')",
"def get_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current().aggregate(\n models.Count('tag', distinct=True))['tag__count']",
"def tags(self):\n return self.__tags[:]",
"def get_tags(self):\n\n base_url = self.get_parent().url\n tags = self.tags.all()\n\n for tag in tags:\n tag.url = f\"{base_url}tags/{tag.slug}/\"\n\n return tags",
"def get_tags(self):\n\n return self.tags",
"def get_tags(self):\n return self.tags",
"def queryset(self, request, queryset):\n for tag in get_resource_tags():\n if self.value() == tag[0]:\n return queryset.filter(tags__slug__iexact=tag[0])",
"def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def tagged(self, tag_slug):\n return self.filter(tag__slug=tag_slug)",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def get_all_tags_list(cls):\n all_tags_list = []\n # obj_list = cls.objects.filter(status=0).order_by('-update_time')\n obj_list = Article.objects.all()\n for obj in obj_list:\n all_tags_list = all_tags_list + obj.tags_list()\n # for tag in obj.tags.split(','):\n # all_tags_list.append(tag)\n return all_tags_list",
"def get_queryset(self):\n qs = AllowedTag.objects.filter(enabled=True)\n if self.q:\n qs = qs.filter(name__istartswith=self.q)\n return qs.order_by('name')",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def get_tags_list(*args, **kwargs):\n return Tag.objects.active()",
"def getTags(self,):\n\t\treturn self.tags;",
"def tags(self):\r\n return Tags(self)",
"def tags(self):\r\n return Tags(self)",
"def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys.prefetch_related('tags')"
] | [
"0.7533448",
"0.74357784",
"0.6842906",
"0.6513351",
"0.6450949",
"0.63617426",
"0.6330386",
"0.630018",
"0.6280889",
"0.62478477",
"0.6202606",
"0.610521",
"0.60856485",
"0.6033715",
"0.60267216",
"0.60267216",
"0.60267216",
"0.60267216",
"0.60267216",
"0.60267216",
"0.60267216",
"0.60267216",
"0.6025483",
"0.59669596",
"0.59611857",
"0.59611857",
"0.59567845",
"0.5943983",
"0.5943983",
"0.59384227"
] | 0.8736017 | 0 |
Return queryset of non-archived tags linked to this trait. | def non_archived_tags(self):
non_archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.non_archived().filter(trait=self)
return apps.get_model('tags', 'Tag').objects.filter(
pk__in=non_archived_tagged_traits.values_list('tag__pk', flat=True)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_non_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self)",
"def archived_tags(self):\n archived_tagged_traits = apps.get_model('tags', 'TaggedTrait').objects.archived().filter(trait=self)\n return apps.get_model('tags', 'Tag').objects.filter(\n pk__in=archived_tagged_traits.values_list('tag__pk', flat=True))",
"def tags(self):\n return self.__tags[:]",
"def get_non_archived_tags_count(self):\n return apps.get_model('tags', 'TaggedTrait').objects.current().non_archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).aggregate(\n models.Count('tag', distinct=True)\n )['tag__count']",
"def get_archived_tagged_traits(self):\n return apps.get_model('tags', 'TaggedTrait').objects.archived().filter(\n trait__source_dataset__source_study_version__study=self\n ).current()",
"def non_hidden(self):\n return self.filter(hidden=False)",
"def non_hidden(self):\n return self.filter(hidden=False)",
"def get_queryset(self):\n queryset = super().get_queryset()\n return queryset.filter(tags__name=self.kwargs['tag_slug'])",
"def get_tags(self):\n\n return self.tags",
"def get_tags(self):\n return self.tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\n return self._tags",
"def tags(self):\r\n return Tags(self)",
"def tags(self):\r\n return Tags(self)",
"def tags(self):\n return self._changeset.get('tags', None)",
"def tags(self):\n tag_docs = self.tag_data\n tags = set([x[\"tag\"] for x in tag_docs])\n # remove the \"thawed\" tag\n tags.discard(\"thawed\")\n return tags",
"def test_returns_all_studies_with_archived_tagged_traits_without_given_tag_with_only(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': '', 'forward': ['{' + self.only_arg + '}']}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))",
"def get_unlabelled_documents_queryset(self):\n queryset = self.get_queryset()\n\n # Retrieve labelled IDs\n labelled_ids = self.get_labelled_documents_queryset()\\\n .values_list('document_id', flat=True)\n\n return queryset.exclude(pk__in=labelled_ids)",
"def get_tags(self) -> Set[Text]:\r\n return {tag for tag in self.tags}",
"def getTags(self,):\n\t\treturn self.tags;",
"def excluded(cls):\n return []",
"def get_unassigned_tags(**kwargs):\n return Tags.get_unassigned_tags(**kwargs)",
"def tags(self) -> List[Tag]:\n return self._tags",
"def get_queryset(self):\n return Item.objects.filter(owner=self.request.user).order_by('-created').prefetch_related('tags')"
] | [
"0.80429226",
"0.7348492",
"0.6551791",
"0.6469715",
"0.64208716",
"0.63318914",
"0.63318914",
"0.6299178",
"0.6227882",
"0.6208177",
"0.6100119",
"0.6100119",
"0.6100119",
"0.6100119",
"0.6100119",
"0.6100119",
"0.6100119",
"0.6100119",
"0.60505265",
"0.60505265",
"0.6050489",
"0.60406506",
"0.6031302",
"0.6023241",
"0.5988851",
"0.5978925",
"0.5952155",
"0.59381056",
"0.5926847",
"0.5925236"
] | 0.8711702 | 0 |
Get html for the trait name linked to the trait's detail page, with description as popover. | def get_name_link_html(self, max_popover_words=80):
if not self.i_description:
description = '—'
else:
description = Truncator(self.i_description).words(max_popover_words)
return POPOVER_URL_HTML.format(url=self.get_absolute_url(), popover=description,
name=self.i_trait_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_name_link_html(self, max_popover_words=80):\n url_text = \"{{% url 'trait_browser:harmonized:traits:detail' pk={} %}} \".format(\n self.harmonized_trait_set_version.pk)\n if not self.i_description:\n description = '—'\n else:\n description = Truncator(self.i_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=url_text, popover=description, name=self.trait_flavor_name)",
"def get_name_link_html(self):\n url_text = \"{{% url 'trait_browser:source:studies:pk:detail' pk={} %}} \".format(self.pk)\n return URL_HTML.format(url=url_text, name=self.i_study_name)",
"def __str__(self):\n return '{trait_name} ({phv}): dataset {pht}'.format(trait_name=self.i_trait_name,\n phv=self.full_accession,\n pht=self.source_dataset.full_accession)",
"def get_details_title(mat_dict):\n title = \"# Detail section for {} (COF {}) v{}\".format(mat_dict['name_conventional'], mat_dict['mat_id'],\n mat_dict['workflow_version'])\n return title",
"def get_name_link_html(self, max_popover_words=80):\n if not self.i_dbgap_description:\n description = '—'\n else:\n description = Truncator(self.i_dbgap_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=self.get_absolute_url(), popover=description,\n name=self.dataset_name)",
"def __str__(self):\n return \"{}\".format(self.eTrait_)",
"def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)",
"def __html__(self) -> str:\n location_string = self.location.string if self.location else None\n components = [self.name, self.owner, location_string]\n return ', '.join([component for component in components if component])",
"def brief(self):\n return self.name",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health) + ' magic is: ' + str(self.__magic)).title()\n print(description)",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health)).title()\n print(description)",
"def get_description(self):\n return self['contact_name']",
"def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"",
"def __str__(self):\n return self.page.get_title()",
"def get_absolute_url(self):\n return reverse('trait_browser:harmonized:traits:detail', kwargs={'pk': self.pk})",
"def __str__(self):\n if len(self.lTraits_) == 0:\n return \"Aucun trait.\"\n str = u\"Liste de tous les traits : \"\n for trait in self.lTraits_:\n str = str + trait + \",\"\n return str",
"def __repr__(self):\n return f\"<Tutor {self.first_name.title()} {self.last_name.title()}>\"",
"def get_description(self):",
"def __html__(self) -> str:\n components = [\n self.attributee_html,\n self.linked_title if self.title else 'untitled document',\n self.date.string if self.date else '',\n self.descriptive_phrase,\n f'archived in {self.collection}' if self.collection else '',\n ]\n return self.components_to_html(components)",
"def get_details(self):\n print(self.name)\n print(10 * \"-\" + \"\\n\")\n print(self.description)\n for direction in self.linked_rooms:\n room = self.linked_rooms[direction]\n print(\"The \" + room.get_name() + \" is \" + direction)\n print(\"\\n\")",
"def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string",
"def __str__(self):\n return self.piece_behavior.summary",
"def get_text(self, course):\r\n return views.render_accordion(\r\n self.request, course, course.get_children()[0].scope_ids.usage_id.to_deprecated_string(), None, None\r\n )",
"def get_info(self) -> str:\n return textwrap.dedent(\n \"\"\"\n <h1>Test page</h1>\n \"\"\"\n )",
"def DescriptiveName(self):\r\n\t\treturn self._get_attribute('descriptiveName')",
"def desc(self):\n return LandCell.desc(self) + \"; plant=\" + str(self.plant)",
"def _repr_html_(self):\n return \"<td><b>{0}</b></td><td>{1}</td>\".format(self.id, self.title)",
"def print_traits ( self, show_help = False, **metadata ):\n\n if len( metadata ) > 0:\n names = self.trait_names( **metadata )\n else:\n names = self.trait_names( type = _is_not_event )\n if len( names ) == 0:\n print ''\n return\n\n result = []\n pad = max( [ len( x ) for x in names ] ) + 1\n maxval = 78 - pad\n names.sort()\n\n for name in names:\n try:\n value = repr( getattr( self, name ) ).replace( '\\n', '\\\\n' )\n if len( value ) > maxval:\n value = '%s...%s' % ( value[: (maxval - 2) / 2 ],\n value[ -((maxval - 3) / 2): ] )\n except:\n value = '<undefined>'\n lname = (name + ':').ljust( pad )\n if show_help:\n result.append( '%s %s\\n The value must be %s.' % (\n lname, value, self.base_trait( name ).setter.info() ) )\n else:\n result.append( '%s %s' % ( lname, value ) )\n\n print '\\n'.join( result )",
"def display(self):\n return self.name",
"def get_html(self):\r\n context = {\r\n 'course_key': self.runtime.course_id,\r\n 'display_name': self.display_name_with_default,\r\n 'tag': self.instructor_tags,\r\n 'source': self.source,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self.content,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user_email, self.annotation_token_secret),\r\n }\r\n return self.system.render_template('textannotation.html', context)"
] | [
"0.7173213",
"0.5908587",
"0.58575296",
"0.58464485",
"0.57854205",
"0.5763104",
"0.5562001",
"0.55412966",
"0.5497627",
"0.54678494",
"0.5463515",
"0.5406294",
"0.5376284",
"0.5343716",
"0.5327474",
"0.5324101",
"0.5305742",
"0.5296317",
"0.5279121",
"0.5275096",
"0.52688044",
"0.52396035",
"0.5232313",
"0.5214975",
"0.520701",
"0.51848656",
"0.5176148",
"0.51390845",
"0.513339",
"0.5123403"
] | 0.64885026 | 1 |
Return the most recent version of a trait. | def get_latest_version(self):
current_study_version = self.source_dataset.source_study_version.study.get_latest_version()
if current_study_version is None:
return None
# Find the same trait associated with the current study version.
try:
current_trait = SourceTrait.objects.get(
source_dataset__source_study_version=current_study_version,
i_dbgap_variable_accession=self.i_dbgap_variable_accession
)
except ObjectDoesNotExist:
return None
return current_trait | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)",
"def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version",
"def get_previous_version(self):\n previous_study_version = self.source_dataset.source_study_version.get_previous_version()\n if previous_study_version is not None:\n try:\n previous_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=previous_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except SourceTrait.DoesNotExist:\n return None\n return previous_trait",
"def get_latest_version(self, name):\n return self.filter(name=name).order_by('schema_version').last()",
"def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n # get the latest possible Version instance by using the workspace path\n if version is None:\n version = self.get_version_from_project_dir()\n\n return version",
"def get_latest_model():\n return get_models()[-1]",
"def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]",
"def version(self):\n self._get_latest_content()\n return self._data.get('version', None)",
"def get_version(self):\n pass",
"def get_latest_saved(self):\n doc = (get_latest_released_app_doc(self.domain, self._id)\n or get_latest_build_doc(self.domain, self._id))\n return self.__class__.wrap(doc) if doc else None",
"def last_revision(self):\n return self.revision_set.order_by(\"created_on\").last()",
"def get_version():\n return magpy.get_version()",
"def get_last_revision(self):\n return self.index.get_index_revision(self.name)",
"def latest(self):\n return self._latest",
"def get_version(self):\n return self.version",
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def _get_version(self):",
"def latest_version(self) -> AwesomeVersion | None:\n return self.sys_updater.version_cli",
"def get(self):\n return self._version",
"def get_latest_tf_version(include_prerelease: bool = False) -> str:\n return get_available_tf_versions(include_prerelease)[0]",
"def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None",
"def get_default_version(self):\n # latest is a special case where we don't have to check if it exists\n if self.default_version == 'latest':\n return self.default_version\n # check if the default_version exists\n version_qs = self.versions.filter(\n slug=self.default_version,\n active=True\n )\n if version_qs.exists():\n return self.default_version\n return 'latest'",
"def GetInterfaceRevision():\n return 1",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"",
"def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version",
"def latest_ref(self):"
] | [
"0.6343139",
"0.6289052",
"0.61462194",
"0.605551",
"0.6044234",
"0.5944283",
"0.59256196",
"0.589782",
"0.578745",
"0.5734667",
"0.56906104",
"0.5669337",
"0.5609379",
"0.5605236",
"0.55892533",
"0.5564086",
"0.555119",
"0.5531066",
"0.5506513",
"0.5503882",
"0.549832",
"0.5480174",
"0.5449505",
"0.54410994",
"0.5430568",
"0.5405595",
"0.5405595",
"0.5405457",
"0.54002786",
"0.53991735"
] | 0.7126002 | 0 |
Returns the version of this SourceTrait from the previous study version. | def get_previous_version(self):
previous_study_version = self.source_dataset.source_study_version.get_previous_version()
if previous_study_version is not None:
try:
previous_trait = SourceTrait.objects.get(
source_dataset__source_study_version=previous_study_version,
i_dbgap_variable_accession=self.i_dbgap_variable_accession
)
except SourceTrait.DoesNotExist:
return None
return previous_trait | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_latest_version(self):\n current_study_version = self.source_dataset.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same trait associated with the current study version.\n try:\n current_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=current_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_trait",
"def get_previous_version(self):\n return self.get_previous_versions().first()",
"def sourceVersion(self):\n CraftCore.debug.trace(\"GitSource sourceVersion\")\n\n return self.__getCurrentRevision()",
"def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version",
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def get_latest_version(self):\n study = self.source_study_version.study\n current_study_version = self.source_study_version.study.get_latest_version()\n if current_study_version is None:\n return None\n # Find the same dataset associated with the current study version.\n try:\n current_dataset = SourceDataset.objects.get(\n source_study_version=current_study_version,\n i_accession=self.i_accession\n )\n except ObjectDoesNotExist:\n return None\n return current_dataset",
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def get_version(self):\n pass",
"def sourceVersion(self):\n # we hope that the build target is equal to the version that is build\n return self.subinfo.buildTarget",
"def get_version(self):\n return self.version",
"def version(self):\n return table_step.__version__",
"def getVersion(self):\n return _libsbml.SBase_getVersion(self)",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version",
"def version(self):\n raise NotImplementedError",
"def version(self):\n raise NotImplementedError",
"def version(self):\n raise NotImplementedError",
"def version(self):\n raise NotImplementedError",
"def Version(self) -> _n_0_t_12:",
"def Version(self) -> _n_0_t_12:",
"def sourceVersion(self):\n craftDebug.trace(\"HgSource.sourceVersion called\")\n\n if self.enableHg:\n # open a temporary file - do not use generic tmpfile because this doesn't give a good file object with python\n with open(os.path.join(self.checkoutDir().replace('/', '\\\\'), \".crafthgtip.tmp\"), \"wb+\") as tempfile:\n # run the command\n utils.system(\"%s tip\" % self.hgExecutable, stdout=tempfile)\n # TODO: check return value for success\n tempfile.seek(os.SEEK_SET)\n\n # read the temporary file and grab the first line\n revision = tempfile.readline().replace(\"changeset:\", \"\").strip()\n\n os.remove(os.path.join(self.checkoutDir().replace('/', '\\\\'), \".crafthgtip.tmp\"))\n # always return True to not break something serious\n return revision",
"def get_version(self):\r\n\r\n return self.versions[0].number",
"def _get_version(self):",
"def getversion(self):\n return self.__version",
"def restore_previous_ehr_version(self, ehr_record):\n return self.restore_ehr_version(ehr_record, ehr_record.version-1)[0]",
"def get_version(self):\n return 0",
"def version(self):",
"def version(self):\n if not self._version:\n self._version = self._get_version()\n\n return self._version",
"def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version"
] | [
"0.7203941",
"0.7022413",
"0.6539211",
"0.6528101",
"0.64088005",
"0.64069474",
"0.6406507",
"0.6177494",
"0.6143733",
"0.6135707",
"0.6052708",
"0.60357195",
"0.5994113",
"0.5994113",
"0.5989174",
"0.5963759",
"0.5963759",
"0.5963759",
"0.5963759",
"0.5931803",
"0.5931803",
"0.59005",
"0.59002566",
"0.5899564",
"0.5878624",
"0.58591646",
"0.58573055",
"0.58519787",
"0.58377534",
"0.5828993"
] | 0.84758323 | 0 |
Apply tags from the previous version of this SourceTrait to this version. | def apply_previous_tags(self, creator):
TaggedTrait = apps.get_model('tags', 'TaggedTrait')
DCCReview = apps.get_model('tags', 'DCCReview')
StudyResponse = apps.get_model('tags', 'StudyResponse')
previous_trait = self.get_previous_version()
if previous_trait is not None:
for old_tagged_trait in previous_trait.all_taggedtraits.non_archived():
# Raise an error if the review of the previous trait is incomplete.
# Check for unreviewed
if not hasattr(old_tagged_trait, 'dcc_review'):
raise ValueError(INCOMPLETE_REVIEW_ERROR.format(' (unreviewed)'))
elif old_tagged_trait.dcc_review.status == DCCReview.STATUS_FOLLOWUP:
if hasattr(old_tagged_trait.dcc_review, 'study_response'):
# Check for missing DCCDecision after disagree StudyResponse.
if old_tagged_trait.dcc_review.study_response.status == StudyResponse.STATUS_DISAGREE \
and not hasattr(old_tagged_trait.dcc_review, 'dcc_decision'):
raise ValueError(INCOMPLETE_REVIEW_ERROR.format(
' (no decision after disagree study response)'))
else:
# Check for missing StudyResponse and DCCDecision
if not hasattr(old_tagged_trait.dcc_review, 'dcc_decision'):
raise ValueError(INCOMPLETE_REVIEW_ERROR.format(
' (no response or decision after followup review)'))
try:
# Check if it already exists.
self.all_taggedtraits.non_archived().get(tag=old_tagged_trait.tag)
except TaggedTrait.DoesNotExist:
# Create a new TaggedTrait.
new_tagged_trait = TaggedTrait(
tag=old_tagged_trait.tag, trait=self, creator=creator, previous_tagged_trait=old_tagged_trait)
new_tagged_trait.full_clean()
new_tagged_trait.save()
# Create a DCCReview with confirmed status.
dcc_review = DCCReview(
tagged_trait=new_tagged_trait, status=DCCReview.STATUS_CONFIRMED, creator=creator)
dcc_review.full_clean()
dcc_review.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply_previous_tags(self, user):\n previous_study_version = self.get_previous_version()\n if previous_study_version is not None:\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n TaggedTrait = apps.get_model('tags', 'TaggedTrait')\n DCCReview = apps.get_model('tags', 'DCCReview')\n StudyResponse = apps.get_model('tags', 'StudyResponse')\n # Get the set of TaggedTraits from the previous study version.\n previous_tagged_traits = TaggedTrait.objects.non_archived().filter(\n trait__source_dataset__source_study_version=previous_study_version\n )\n # Raise an error if any of the previous taggedtraits have incomplete reviews.\n unreviewed_q = Q(dcc_review__isnull=True)\n no_response_q = Q(dcc_review__status=DCCReview.STATUS_FOLLOWUP) &\\\n Q(dcc_review__study_response__isnull=True) &\\\n Q(dcc_review__dcc_decision__isnull=True)\n no_decision_q = Q(dcc_review__status=DCCReview.STATUS_FOLLOWUP) &\\\n Q(dcc_review__study_response__status=StudyResponse.STATUS_DISAGREE) &\\\n Q(dcc_review__dcc_decision__isnull=True)\n incomplete_review_tagged_traits = previous_tagged_traits.filter(\n unreviewed_q | no_response_q | no_decision_q\n )\n if incomplete_review_tagged_traits.count() > 0:\n raise ValueError(INCOMPLETE_REVIEW_ERROR.format(''))\n # Get the set of variable accession numbers in the previous version that have tags applied them.\n previous_accessions_with_tags = previous_tagged_traits.values(\n trait_pk=F('trait__pk'),\n trait_accession=F('trait__i_dbgap_variable_accession')\n ).annotate(\n tt_count=Count('pk')\n ).filter(\n tt_count__gt=0\n ).values_list(\n 'trait_accession',\n flat=True\n ).distinct()\n traits_to_tag = SourceTrait.objects.filter(\n source_dataset__source_study_version=self,\n i_dbgap_variable_accession__in=previous_accessions_with_tags\n )\n for trait in traits_to_tag:\n trait.apply_previous_tags(user)",
"def get_source_tags(self):\n raise NotImplementedError(\"\")",
"def get_previous_version(self):\n previous_study_version = self.source_dataset.source_study_version.get_previous_version()\n if previous_study_version is not None:\n try:\n previous_trait = SourceTrait.objects.get(\n source_dataset__source_study_version=previous_study_version,\n i_dbgap_variable_accession=self.i_dbgap_variable_accession\n )\n except SourceTrait.DoesNotExist:\n return None\n return previous_trait",
"def tags_changed(self, tags):\n pass",
"def get_source_tags(self):\n return self._get_norm_tags(self.tag_manager.source_tags)",
"def setTags(self,newtags):\n\t\tself.tags = newtags;",
"def __get_previous_tags__(self, tags):\n if len(self.tags) == 0:\n return None, None\n if self.index == 1:\n return BEGIN, tags[self.index-1]\n elif self.index == 0:\n return BEGIN, BEGIN\n else:\n return tags[self.index-2], tags[self.index-1]",
"def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )",
"def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()",
"def remove_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags - set(tags)\n return cp",
"def pre_revert(self):",
"def add_tags(event):\n\n add_tags_from_presets()",
"def add_tags(self, tags):\n cp = self.copy()\n cp.tags = cp.tags.union(set(tags))\n return cp",
"def add_disabled_source_tag(self, source_tag=None):\n if source_tag is not None:\n self.source_tags_of_disabled_sources.append(source_tag)",
"def apply_prev(self, id, prev):\n self.apply_tactic(id, tactic.apply_prev(), prevs=[prev])",
"def set_tags(self, tags):\r\n current_tags = set(self.tag_names())\r\n updated_tags = set(tags)\r\n removed_tags = current_tags.difference(updated_tags)\r\n new_tags = updated_tags.difference(current_tags)\r\n \r\n for tag in new_tags:\r\n self.add_tag(tag)\r\n \r\n for tag in removed_tags:\r\n self.remove_tag(tag)",
"def get_source_tags(self):\n return ['en:' + self.tag_manager.normalize_tag_wtokenization(t, self.tries['en'], prefixed=False) for t in self.tag_manager.unprefixed_source_tags]",
"def get_new_sourcetraits(self):\n previous_study_version = self.get_previous_version()\n SourceTrait = apps.get_model('trait_browser', 'SourceTrait')\n if previous_study_version is not None:\n qs = SourceTrait.objects.filter(\n source_dataset__source_study_version=self\n )\n # We can probably write this with a join to be more efficient.\n previous_variable_accessions = SourceTrait.objects.filter(\n source_dataset__source_study_version=previous_study_version\n ).values_list('i_dbgap_variable_accession', flat=True)\n qs = qs.exclude(i_dbgap_variable_accession__in=previous_variable_accessions)\n return qs\n else:\n return SourceTrait.objects.none()",
"def apply_tags(self, tags):\n for tag_name in tags:\n tag = tag_name.strip().lower()\n self.tags.append(DBSession.merge(Tag(tag)))",
"def append_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.update(tags)\n self.tags.update(tags)",
"def _transform_known_tags(self):\n self.missing_known_tags = []\n\n for k, tf in self._known_tags.items():\n v = self.tags.get(k, [])\n if not v:\n self.missing_known_tags.append(k)\n continue\n\n if len(v) > 1:\n raise Exception(f\"multiple instances of tag {k}\")\n\n setattr(self, k, v[0])",
"def remove_tags(self, tags):\n\n tags = H.to_list(tags)\n # self._tags.difference_update(tags)\n self.tags.difference_update(tags)",
"def undo(self) :\n \n raise NotImplementedError()",
"def replace_version(self, source_version, target_version):\n raise NotImplementedError(\"replace_version is not implemented\")",
"def upgrade(self, old_version, new_version):\n pass",
"def tidy_tags(self, tags):\n tags = tags.split()\n # add target tag if not a calibrator\n if not any(\"cal\" in tag for tag in tags):\n if \"target\" not in tags:\n tags.append(\"target\")\n return \" \".join(tags)",
"def update(self, src, labels): # real signature unknown; restored from __doc__\n pass",
"def previous(self):\n\n pass",
"def set_previous(self, previous_layer):\n super().set_previous(previous_layer)\n self.set_output_shape()\n self.initialize_filter()",
"def update_tags(self, tags, **kwargs):\n request = RequestMiddleware.get_request()\n is_admin = request.user and request.user.is_admin\n # Keep all tags that start with pf: because they are reserved.\n preserved = [tag for tag in self.tags if tag.startswith('pf:')]\n if is_admin:\n remove = [tag[1:] for tag in tags if tag.startswith('-pf:')]\n preserved = [tag for tag in preserved if tag not in remove]\n\n # Filter out new tags that are invalid or reserved.\n accepted = [tag for tag in tags\n if TAG_REGEX_COMPILED.match(tag)\n and (is_admin or not tag.startswith('pf:'))]\n # Limit the number of tags per entity.\n if len(accepted + preserved) > settings.MAX_TAGS_PER_ENTITY:\n accepted = accepted[:settings.MAX_TAGS_PER_ENTITY - len(preserved)]\n self.tags = list(set(accepted + preserved))"
] | [
"0.67532045",
"0.560491",
"0.53911746",
"0.5234013",
"0.5228578",
"0.5195925",
"0.5186795",
"0.51826936",
"0.51673084",
"0.51250494",
"0.5079544",
"0.5074929",
"0.50660247",
"0.49464282",
"0.4945864",
"0.49275744",
"0.49202943",
"0.4895651",
"0.48858866",
"0.48719358",
"0.48688692",
"0.48672783",
"0.4817918",
"0.48019066",
"0.47846183",
"0.47486675",
"0.47373724",
"0.4726454",
"0.47220638",
"0.47191027"
] | 0.61335063 | 1 |
Custom save method for making the trait flavor name. Automatically sets the value for the harmonized trait's trait_flavor_name. | def save(self, *args, **kwargs):
self.trait_flavor_name = self.set_trait_flavor_name()
# Call the "real" save method.
super(HarmonizedTrait, self).save(*args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_trait_flavor_name(self):\n return '{}_{}'.format(self.i_trait_name, self.harmonized_trait_set_version.harmonized_trait_set.i_flavor)",
"def save(self, *args, **kwargs):\n self.name = unique_slugify(\n self.name,\n instance=self,\n queryset=AccountTeam.objects.filter(account=self.account),\n )\n return super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.entity_type = \"Charity\"\n super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.slug = \"/\".join([\n slugify(__class__.__name__.lower()),\n settings.PK_PLACEHOLDER,\n slugify(self.name)\n ])\n super(__class__, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n self.slug_name = slugify(self.name)\n super(Product, self).save(*args, **kwargs)",
"def _change_name(self, suff, info_extra):\n if 'cable-ring' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n f = i1 / i2\n suff = suff.replace('.png',\n f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png')\n elif 'cloth-flat' in self.path:\n i1 = info_extra['cloth_coverage']\n suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png')\n elif 'bag-alone' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png')\n else:\n pass\n return suff",
"def flavor(self, name=None):\n raise NotImplementedError",
"def flavor(self, flavor):\n self._flavor = flavor",
"def store(self, ftype):\n self.get_attr().SetValue(dumps(ftype))",
"def cleanup_sportstype(self, workout):\n if self.name.lower() in ['indoor_cycling', 'virtual_ride']:\n self.name = 'Indoor Cycling'\n elif self.name.lower() in ['cycling', 'road_biking']:\n self.name = 'Road Cycling'\n elif self.name.lower() in ['mountain_biking']:\n self.name = 'Mountain Biking'\n elif self.name.lower() in ['running']:\n self.name = 'Running'\n elif self.name.lower() in ['treadmill_running']:\n self.name = 'Treadmill Running'\n elif self.name.lower() in ['trail_running']:\n self.name = 'Trail Running'\n elif self.name.lower() in ['lap_swimming', 'swimming']:\n self.name = 'Pool Swimming'\n elif self.name.lower() in ['open_water_swimming']:\n self.name = 'Open Water Swimming'\n elif self.name.lower() in ['cardio', 'indoor_cardio']:\n self.name = 'Cardio'\n elif self.name.lower() in ['strength_training']:\n self.name = 'Strength'\n elif self.name.lower() in ['hiking']:\n self.name = 'Hiking'\n elif self.name.lower() in ['yoga']:\n self.name = 'Yoga'\n elif self.name.lower() in ['inline_skating', 'inline hockey']:\n self.name = 'Inline Skating'\n elif self.name.lower() in ['multi_sport']:\n self.name = 'Triathlon'\n elif self.name.lower() in ['wakeboarding']:\n self.name = 'Wakeboarding'\n elif self.name.lower() in ['surfing']:\n self.name = 'Surfing'\n elif self.name.lower() in ['other']:\n if workout.name:\n if workout.name == 'Yoga':\n self.name = 'Yoga'\n if workout.name == 'Inline Hockey':\n self.name = 'Inline Skating'\n if workout.name == 'Radfahren':\n self.name = 'Road Cycling'\n else:\n self.name = 'Other'",
"def save(self, update: bool = True, *args: Any, **kwargs: Any) -> None:\n super().save(*args, **kwargs)\n if update:\n self.update_name(force_update=True)",
"def pre_save(self, model_instance, add):\n value = super().pre_save(model_instance, add)\n if self.auto and not value:\n # Assign a new value for this attribute if required.\n value = shortuuid.uuid(name=self.namespace)\n if self.prefix:\n value = self.prefix + ':' + value\n setattr(model_instance, self.attname, value)\n return value",
"def create_flavor(cls, values):\n return cls.dbdriver.create_flavor(values)",
"def setName(self, *args):\n return _libsbml.SpeciesFeatureType_setName(self, *args)",
"def set_variations(self, instance=None, **kwargs):\n if getattr(instance, self.name):\n filename = self.generate_filename(instance,\n os.path.basename(getattr(instance, self.name).path))\n for variation in self.variations:\n if variation['name'] != 'size':\n variation_filename = self._get_variation_filename(variation, filename)\n variation_field = VariationField(variation_filename)\n setattr(getattr(instance, self.name), variation['name'], variation_field)",
"def on_cls_setting_myname(value):\n raise NotImplementedError()",
"def save(self, *args, **kwargs):\n self.name = unique_slugify(self.name, instance=self)\n\n if self.is_personal and self.user.username != self.name:\n self.user.username = self.name\n self.user.save()\n\n if self.is_customer:\n self.update_customer()\n\n if not self.image:\n self.set_image_from_name(should_save=False)\n\n return super().save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n\n if not self.id:\n slug = slugify(self.name)\n i = 2\n while Ingredient.objects.filter(slug=slug):\n slug = '{slug}-{i}'\n i += 1\n self.slug = slug\n self.name = capwords(self.name)\n return super(Ingredient, self).save(*args, **kwargs)",
"def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name)",
"def save(self, *args, **kwargs):\n empty_std_name = False\n if not self.standard_name or self.standard_name.isspace():\n empty_std_name = True\n\n empty_sys_name = False\n if not self.systematic_name or self.systematic_name.isspace():\n empty_sys_name = True\n\n if empty_std_name and empty_sys_name:\n raise ValueError(\n \"Both standard_name and systematic_name are empty\")\n\n super(Gene, self).save(*args, **kwargs) # Call the \"real\" save().",
"def get_trait_names(self):\n return self.harmonizedtrait_set.values_list('trait_flavor_name', flat=True)",
"def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super(Category, self).save(*args, **kwargs)",
"def save(self):\n try:\n self.connection.register_activity_type(\n self.domain.name,\n self.name,\n self.version,\n task_list=str(self.task_list),\n default_task_heartbeat_timeout=str(self.task_heartbeat_timeout),\n default_task_schedule_to_close_timeout=str(self.task_schedule_to_close_timeout),\n default_task_schedule_to_start_timeout=str(self.task_schedule_to_start_timeout),\n default_task_start_to_close_timeout=str(self.task_start_to_close_timeout),\n description=self.description,\n )\n except SWFTypeAlreadyExistsError:\n raise AlreadyExistsError(f\"{self} already exists\")\n except SWFResponseError as err:\n if err.error_code in [\"UnknownResourceFault\", \"TypeDeprecatedFault\"]:\n raise DoesNotExistError(err.body[\"message\"])\n raise",
"async def savename(self, ctx, *, iracing_name):\n if is_support_guild(ctx.guild.id):\n await ctx.send('Sorry, this discord does not allow update, saveid, savename, '\n 'leaderboard, and series commands so as not to overload me. '\n 'Try `!careerstats` or `!yearlystats` with your customer ID to test '\n 'or go to #invite-link to bring the bot to your discord for all functionality')\n return\n await self.save_name.call(ctx, iracing_name)",
"def _generate_name(self):\n nonexistent_flavor = str(int(time.time()))\n flavors = instance_types.get_all_types()\n while nonexistent_flavor in flavors:\n nonexistent_flavor += \"z\"\n else:\n return nonexistent_flavor",
"def name_title(self, val: str) -> None:\n\n # Make sure they don't pass underscores; title versions are just\n # words and spaces.\n if '_' in val:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' contains\"\n ' underscores; it must contain only spaces.'\n )\n\n # Make sure the value they're providing still matches their base\n # name. It could be easy to let this fall out of sync\n # accidentally.\n if val.lower().replace(' ', '_') != self._name:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' letters/spacing\"\n f\" does not match base name '{self._name}'.\"\n )\n\n # Ok val; we will accept you.\n self._name_title = val",
"def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super(Library, self).save(*args, **kwargs)",
"def __str__(self):\n return \"{}\".format(self.eTrait_)",
"def _save(self, name, content):\n cloud_obj = self.container.create_object(name)\n mimetype, _ = mimetypes.guess_type(name)\n cloud_obj.content_type = mimetype\n cloud_obj.send(content)\n return name",
"def save(self, *args, **kwargs):\n self.entity_type = \"Person\"\n super().save(*args, **kwargs)"
] | [
"0.7468916",
"0.55278254",
"0.5415232",
"0.51928246",
"0.5119848",
"0.511083",
"0.5096135",
"0.5090444",
"0.5069976",
"0.5035967",
"0.50008094",
"0.49549702",
"0.49256065",
"0.4919068",
"0.49156654",
"0.48997",
"0.48877004",
"0.48810652",
"0.48521546",
"0.4827719",
"0.48259613",
"0.47583342",
"0.47553936",
"0.47432783",
"0.47339022",
"0.47338188",
"0.472008",
"0.47182372",
"0.47169074",
"0.4712749"
] | 0.86165404 | 0 |
Automatically set trait_flavor_name from the trait's i_trait_name and the trait set's flavor name. Properly format the trait_flavor_name for this harmonized trait so that it's available for easy use later. | def set_trait_flavor_name(self):
return '{}_{}'.format(self.i_trait_name, self.harmonized_trait_set_version.harmonized_trait_set.i_flavor) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, *args, **kwargs):\n self.trait_flavor_name = self.set_trait_flavor_name()\n # Call the \"real\" save method.\n super(HarmonizedTrait, self).save(*args, **kwargs)",
"def get_trait_names(self):\n return self.harmonizedtrait_set.values_list('trait_flavor_name', flat=True)",
"def _get_flavor_name(self, flavor_id):\n for name, f_id in FLAVOR_ID.items():\n if f_id == flavor_id:\n return name",
"def FlavorName(flavor):\n if isinstance(flavor, tuple):\n return flavor[0]\n else:\n return flavor",
"def _family_name(set_id, name):\n return \"FAM\" + \"_\" + str(set_id) + \"_\" + \"_\".join(name)",
"def _str_make(self):\n return self._name if self._fact is None else f\"{self._fact} × {self._name}\"",
"def name_title(self, val: str) -> None:\n\n # Make sure they don't pass underscores; title versions are just\n # words and spaces.\n if '_' in val:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' contains\"\n ' underscores; it must contain only spaces.'\n )\n\n # Make sure the value they're providing still matches their base\n # name. It could be easy to let this fall out of sync\n # accidentally.\n if val.lower().replace(' ', '_') != self._name:\n raise CleanError(\n f\"Custom FeatureSet name_title '{val}' letters/spacing\"\n f\" does not match base name '{self._name}'.\"\n )\n\n # Ok val; we will accept you.\n self._name_title = val",
"def setName(self, *args):\n return _libsbml.SpeciesFeatureType_setName(self, *args)",
"def _change_name(self, suff, info_extra):\n if 'cable-ring' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n f = i1 / i2\n suff = suff.replace('.png',\n f'-area-{i1:0.3f}-best-{i2:0.3f}-FRAC-{f:0.3f}.png')\n elif 'cloth-flat' in self.path:\n i1 = info_extra['cloth_coverage']\n suff = suff.replace('.png', f'-coverage-{i1:0.3f}.png')\n elif 'bag-alone' in self.path:\n i1 = info_extra['convex_hull_area']\n i2 = info_extra['best_possible_area']\n suff = suff.replace('.png', f'-area-{i1:0.3f}-best-{i2:0.3f}.png')\n else:\n pass\n return suff",
"def flavor(self, name=None):\n raise NotImplementedError",
"def _generate_name(self):\n nonexistent_flavor = str(int(time.time()))\n flavors = instance_types.get_all_types()\n while nonexistent_flavor in flavors:\n nonexistent_flavor += \"z\"\n else:\n return nonexistent_flavor",
"def flavor(self, flavor):\n self._flavor = flavor",
"def setName(self, *args):\n return _libsbml.SpeciesFeature_setName(self, *args)",
"def __str__(self):\n return \"{}\".format(self.eTrait_)",
"def __str__(self):\n return '{trait_name} ({phv}): dataset {pht}'.format(trait_name=self.i_trait_name,\n phv=self.full_accession,\n pht=self.source_dataset.full_accession)",
"def uniquify_name(self):\n self.name = f'{self.get_name()}_{len(self.store.get_user_functions())}'",
"def cleanup_sportstype(self, workout):\n if self.name.lower() in ['indoor_cycling', 'virtual_ride']:\n self.name = 'Indoor Cycling'\n elif self.name.lower() in ['cycling', 'road_biking']:\n self.name = 'Road Cycling'\n elif self.name.lower() in ['mountain_biking']:\n self.name = 'Mountain Biking'\n elif self.name.lower() in ['running']:\n self.name = 'Running'\n elif self.name.lower() in ['treadmill_running']:\n self.name = 'Treadmill Running'\n elif self.name.lower() in ['trail_running']:\n self.name = 'Trail Running'\n elif self.name.lower() in ['lap_swimming', 'swimming']:\n self.name = 'Pool Swimming'\n elif self.name.lower() in ['open_water_swimming']:\n self.name = 'Open Water Swimming'\n elif self.name.lower() in ['cardio', 'indoor_cardio']:\n self.name = 'Cardio'\n elif self.name.lower() in ['strength_training']:\n self.name = 'Strength'\n elif self.name.lower() in ['hiking']:\n self.name = 'Hiking'\n elif self.name.lower() in ['yoga']:\n self.name = 'Yoga'\n elif self.name.lower() in ['inline_skating', 'inline hockey']:\n self.name = 'Inline Skating'\n elif self.name.lower() in ['multi_sport']:\n self.name = 'Triathlon'\n elif self.name.lower() in ['wakeboarding']:\n self.name = 'Wakeboarding'\n elif self.name.lower() in ['surfing']:\n self.name = 'Surfing'\n elif self.name.lower() in ['other']:\n if workout.name:\n if workout.name == 'Yoga':\n self.name = 'Yoga'\n if workout.name == 'Inline Hockey':\n self.name = 'Inline Skating'\n if workout.name == 'Radfahren':\n self.name = 'Road Cycling'\n else:\n self.name = 'Other'",
"def build_sticker_set_name(bot: Bot, sticker_set_prefix: str) -> str:\n return f\"{sticker_set_prefix}_by_{bot.username}\"",
"def _make_display_name(cls, key: str) -> str:\n return f\"{cls._temp_prefix}-{key}-{uuid.uuid4()}\"",
"def on_cls_setting_myname(value):\n raise NotImplementedError()",
"def get_name_link_html(self, max_popover_words=80):\n url_text = \"{{% url 'trait_browser:harmonized:traits:detail' pk={} %}} \".format(\n self.harmonized_trait_set_version.pk)\n if not self.i_description:\n description = '—'\n else:\n description = Truncator(self.i_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=url_text, popover=description, name=self.trait_flavor_name)",
"def autoname(self):\n raise NotImplementedError()",
"def update_column_title(col):\n col_type = self.features_bucket_mapping_.get(col).type\n return [f\"Feature '{col}'\"], [col_type]",
"def setName(self, *args):\n return _libsbml.PossibleSpeciesFeatureValue_setName(self, *args)",
"def setName(self, *args):\n return _libsbml.SpeciesType_setName(self, *args)",
"def set_name(api_key, tygron_id, hex_id,\n api_endpoint=(\"https://engine.tygron.com/api/session/event/\"\n \"EditorBuildingEventType/SET_NAME/?\")):\n r = requests.post(url=api_endpoint+api_key, json=[tygron_id, str(hex_id)])\n return",
"def test_correct_trait_found_with_phv_in_name(self):\n models.SourceTrait.objects.all().delete()\n name_trait = factories.SourceTraitFactory.create(i_trait_name='phv557')\n phv_trait = factories.SourceTraitFactory.create(i_dbgap_variable_accession=557)\n url = self.get_url()\n response = self.client.get(url, {'q': 'phv557'})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(len(returned_pks), 2)\n self.assertIn(name_trait.pk, returned_pks)\n self.assertIn(phv_trait.pk, returned_pks)",
"def setName(self, *args):\n return _libsbml.SpeciesTypeInstance_setName(self, *args)",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def suggest_preset_name(self, classname):\n i = 1\n name = classname + \"-\" + str(i)\n while self.preset_name_exists(name):\n i += 1\n name = classname + \"-\" + str(i)\n return name"
] | [
"0.6485175",
"0.572448",
"0.52455056",
"0.5239741",
"0.5102045",
"0.50971866",
"0.5092614",
"0.5031202",
"0.50118196",
"0.49256253",
"0.48779806",
"0.48407742",
"0.47497308",
"0.47167596",
"0.4700613",
"0.46749374",
"0.46679375",
"0.4615291",
"0.45903933",
"0.4571786",
"0.45556974",
"0.45448568",
"0.45020208",
"0.44829273",
"0.44724268",
"0.44702175",
"0.4460591",
"0.4427346",
"0.4424302",
"0.44213948"
] | 0.85946906 | 0 |
Get html for the trait name linked to the harmonized trait's detail page, with description as popover. | def get_name_link_html(self, max_popover_words=80):
url_text = "{{% url 'trait_browser:harmonized:traits:detail' pk={} %}} ".format(
self.harmonized_trait_set_version.pk)
if not self.i_description:
description = '—'
else:
description = Truncator(self.i_description).words(max_popover_words)
return POPOVER_URL_HTML.format(url=url_text, popover=description, name=self.trait_flavor_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_name_link_html(self, max_popover_words=80):\n if not self.i_description:\n description = '—'\n else:\n description = Truncator(self.i_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=self.get_absolute_url(), popover=description,\n name=self.i_trait_name)",
"def get_name_link_html(self):\n url_text = \"{{% url 'trait_browser:source:studies:pk:detail' pk={} %}} \".format(self.pk)\n return URL_HTML.format(url=url_text, name=self.i_study_name)",
"def get_details_title(mat_dict):\n title = \"# Detail section for {} (COF {}) v{}\".format(mat_dict['name_conventional'], mat_dict['mat_id'],\n mat_dict['workflow_version'])\n return title",
"def get_name_link_html(self, max_popover_words=80):\n if not self.i_dbgap_description:\n description = '—'\n else:\n description = Truncator(self.i_dbgap_description).words(max_popover_words)\n return POPOVER_URL_HTML.format(url=self.get_absolute_url(), popover=description,\n name=self.dataset_name)",
"def __str__(self):\n return '{trait_name} ({phv}): dataset {pht}'.format(trait_name=self.i_trait_name,\n phv=self.full_accession,\n pht=self.source_dataset.full_accession)",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health)).title()\n print(description)",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health) + ' magic is: ' + str(self.__magic)).title()\n print(description)",
"def __str__(self):\n return \"{}\".format(self.eTrait_)",
"def summary_title(tile_summary):\n return f\"Slide tile_summary.slide_name Tile Summary:\"",
"def __html__(self) -> str:\n location_string = self.location.string if self.location else None\n components = [self.name, self.owner, location_string]\n return ', '.join([component for component in components if component])",
"def __str__(self):\n return self.page.get_title()",
"def brief(self):\n return self.name",
"def get_info(self) -> str:\n return textwrap.dedent(\n \"\"\"\n <h1>Test page</h1>\n \"\"\"\n )",
"def __str__(self):\n return self.piece_behavior.summary",
"def get_component_html(self):\n study_list = '\\n'.join([study.get_name_link_html() for study in self.get_source_studies()])\n age_list = '\\n'.join([trait.get_name_link_html() for trait in self.component_age_traits.all()])\n component_html = '\\n'.join([\n trait.get_component_html(harmonization_unit=self) for trait in self.harmonizedtrait_set.all()])\n panel_body = []\n if len(study_list) > 0:\n study_html = INLINE_LIST_HTML.format(list_title='Included studies', list_elements=study_list)\n panel_body.append(study_html)\n if len(age_list) > 0:\n age_html = INLINE_LIST_HTML.format(list_title='Component age variables', list_elements=age_list)\n panel_body.append(age_html)\n panel_body.append(component_html)\n panel_body = '\\n'.join(panel_body)\n unit_panel = PANEL_HTML.format(panel_title='Harmonization unit: {}'.format(self.i_tag), panel_body=panel_body)\n return unit_panel",
"def __html__(self) -> str:\n components = [\n self.attributee_html,\n self.linked_title if self.title else 'untitled document',\n self.date.string if self.date else '',\n self.descriptive_phrase,\n f'archived in {self.collection}' if self.collection else '',\n ]\n return self.components_to_html(components)",
"def get_absolute_url(self):\n return reverse('trait_browser:harmonized:traits:detail', kwargs={'pk': self.pk})",
"def detail_template(self):\n return '{}/{}.html'.format(self.object_name, self.detail_endpoint)",
"def test_can_find_apostrophes_in_description_field(self):\n trait = factories.HarmonizedTraitFactory.create(i_description=\"don't miss me\")\n response = self.client.get(self.get_url(), {'description': \"don't\"})\n context = response.context\n self.assertIn(trait, context['results_table'].data)",
"def get_description(self):",
"def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)",
"def DescriptiveName(self):\r\n\t\treturn self._get_attribute('descriptiveName')",
"def get_component_html(self, harmonization_unit):\n source = [tr.get_name_link_html() for tr in (\n self.component_source_traits.all() & harmonization_unit.component_source_traits.all())]\n harmonized_trait_set_versions = [trait_set_version for trait_set_version in (\n self.component_harmonized_trait_set_versions.all() &\n harmonization_unit.component_harmonized_trait_set_versions.all())]\n harmonized = [tr.get_name_link_html() for trait_set in harmonized_trait_set_versions\n for tr in trait_set.harmonizedtrait_set.all()\n if not tr.i_is_unique_key]\n component_html = ''\n if len(source) > 0:\n trait_list = '\\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in source])\n component_html += INLINE_LIST_HTML.format(\n list_title='Component study variables for {}'.format(self.trait_flavor_name),\n list_elements=trait_list)\n if len(harmonized) > 0:\n trait_list = '\\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in harmonized])\n component_html += '\\n' + INLINE_LIST_HTML.format(\n list_title='Component harmonized variables for {}'.format(self.trait_flavor_name),\n list_elements=trait_list)\n return component_html",
"def test_can_find_underscores_in_description_field(self):\n trait = factories.HarmonizedTraitFactory.create(i_description='description with_char')\n response = self.client.get(self.get_url(), {'description': 'with_char'})\n context = response.context\n self.assertIn(trait, context['results_table'].data)",
"def get_component_html(self):\n return '\\n'.join([hunit.get_component_html() for hunit in self.harmonizationunit_set.all()])",
"def get_description(obj):\n if not isinstance(obj.data, dict):\n return \"No description found.\"\n abstract = \"\"\n authors = []\n categories = []\n final_identifiers = []\n\n # Get identifiers\n dois = get_value(obj.data, \"dois.value\", [])\n if dois:\n final_identifiers.extend(dois)\n\n system_no = get_value(obj.data, \"external_system_numbers.value\", [])\n if system_no:\n final_identifiers.extend(system_no)\n\n # Get subject categories, adding main one first. Order matters here.\n record_categories = get_value(obj.data, \"arxiv_eprints.categories\", []) + \\\n get_value(obj.data, \"subject_terms.term\", [])\n for category_list in record_categories:\n if isinstance(category_list, list):\n categories.extend(category_list)\n else:\n categories.append(category_list)\n categories = list(OrderedDict.fromkeys(categories)) # Unique only\n abstract = get_value(obj.data, \"abstracts.value\", [\"\"])[0]\n authors = obj.data.get(\"authors\", [])\n return render_template('inspire_workflows/styles/harvesting_record.html',\n object=obj,\n authors=authors,\n categories=categories,\n abstract=abstract,\n identifiers=final_identifiers)",
"def get_description(self):\n print(\"This Iron door.\")",
"def get_html(self):\r\n if self.debug == 'True':\r\n # Reset the user vote, for debugging only!\r\n self.user_voted = False\r\n if self.hints == {}:\r\n # Force self.hints to be written into the database. (When an xmodule is initialized,\r\n # fields are not added to the db until explicitly changed at least once.)\r\n self.hints = {}\r\n\r\n try:\r\n child = self.get_display_items()[0]\r\n out = child.render('student_view').content\r\n # The event listener uses the ajax url to find the child.\r\n child_id = child.id\r\n except IndexError:\r\n out = u\"Error in loading crowdsourced hinter - can't find child problem.\"\r\n child_id = ''\r\n\r\n # Wrap the module in a <section>. This lets us pass data attributes to the javascript.\r\n out += u'<section class=\"crowdsource-wrapper\" data-url=\"{ajax_url}\" data-child-id=\"{child_id}\"> </section>'.format(\r\n ajax_url=self.runtime.ajax_url,\r\n child_id=child_id\r\n )\r\n\r\n return out",
"def _repr_html_(self):\n return \"<td><b>{0}</b></td><td>{1}</td>\".format(self.id, self.title)",
"def get_title(self, obj):\n title = obj.habit.title\n return title"
] | [
"0.65362686",
"0.5995364",
"0.5972799",
"0.5874273",
"0.58588713",
"0.582794",
"0.58238804",
"0.56961626",
"0.5597435",
"0.55740833",
"0.5513988",
"0.5506095",
"0.5495636",
"0.549422",
"0.5491055",
"0.54814446",
"0.5465395",
"0.54473466",
"0.5423525",
"0.54104114",
"0.5383758",
"0.53695333",
"0.5351447",
"0.5319279",
"0.5311848",
"0.5279703",
"0.5276194",
"0.52728206",
"0.52726704",
"0.526267"
] | 0.73566204 | 0 |
Get html for inline lists of source and harmonized component phenotypes for the harmonized trait. | def get_component_html(self, harmonization_unit):
source = [tr.get_name_link_html() for tr in (
self.component_source_traits.all() & harmonization_unit.component_source_traits.all())]
harmonized_trait_set_versions = [trait_set_version for trait_set_version in (
self.component_harmonized_trait_set_versions.all() &
harmonization_unit.component_harmonized_trait_set_versions.all())]
harmonized = [tr.get_name_link_html() for trait_set in harmonized_trait_set_versions
for tr in trait_set.harmonizedtrait_set.all()
if not tr.i_is_unique_key]
component_html = ''
if len(source) > 0:
trait_list = '\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in source])
component_html += INLINE_LIST_HTML.format(
list_title='Component study variables for {}'.format(self.trait_flavor_name),
list_elements=trait_list)
if len(harmonized) > 0:
trait_list = '\n'.join([LIST_ELEMENT_HTML.format(element=trait) for trait in harmonized])
component_html += '\n' + INLINE_LIST_HTML.format(
list_title='Component harmonized variables for {}'.format(self.trait_flavor_name),
list_elements=trait_list)
return component_html | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_component_html(self):\n return '\\n'.join([hunit.get_component_html() for hunit in self.harmonizationunit_set.all()])",
"def get_component_html(self):\n study_list = '\\n'.join([study.get_name_link_html() for study in self.get_source_studies()])\n age_list = '\\n'.join([trait.get_name_link_html() for trait in self.component_age_traits.all()])\n component_html = '\\n'.join([\n trait.get_component_html(harmonization_unit=self) for trait in self.harmonizedtrait_set.all()])\n panel_body = []\n if len(study_list) > 0:\n study_html = INLINE_LIST_HTML.format(list_title='Included studies', list_elements=study_list)\n panel_body.append(study_html)\n if len(age_list) > 0:\n age_html = INLINE_LIST_HTML.format(list_title='Component age variables', list_elements=age_list)\n panel_body.append(age_html)\n panel_body.append(component_html)\n panel_body = '\\n'.join(panel_body)\n unit_panel = PANEL_HTML.format(panel_title='Harmonization unit: {}'.format(self.i_tag), panel_body=panel_body)\n return unit_panel",
"def __html__(self) -> str:\n location_string = self.location.string if self.location else None\n components = [self.name, self.owner, location_string]\n return ', '.join([component for component in components if component])",
"def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'element_id': self.element_id,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self._render_content()\r\n }\r\n\r\n return self.system.render_template('annotatable.html', context)",
"def get_html(self) -> List[ComponentMeta]:\n return [Div(id=\"additions\")]",
"def _build_experiment_chiapet_embedded_list():\n antibody_embeds = DependencyEmbedder.embed_defaults_for_type(\n base_path='antibody',\n t='antibody')\n return (\n Experiment.embedded_list + antibody_embeds\n )",
"def get_html(self):\r\n pass",
"def __html__(self, tags:defaultdict) -> str:\n html = \"\"\n\n # Lens detail\n if tags['EXIF LensModel']:\n html += f\"<p class='lens'>{tags['EXIF LensModel']}</p>\\n\"\n \n # Focal length\n if tags['EXIF FocalLengthIn35mmFilm']:\n if tags['EXIF FocalLengthIn35mmFilm'] != tags['EXIF FocalLength']:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLengthIn35mmFilm']}mm (full frame equivalent)</p>\\n\"\n else:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLengthIn35mmFilm']}mm</p>\\n\"\n else:\n if tags['EXIF FocalLength']:\n html += f\"<p class='focal-length'>{tags['EXIF FocalLength']}mm</p>\\n\"\n\n # ISO, Shutter speed, Apperture\n if tags['EXIF ISOSpeedRatings']:\n html += f\"<p class='iso'>ISO {tags['EXIF ISOSpeedRatings']}</p>\\n\"\n if tags['EXIF ExposureTime']:\n html += f\"<p class='shutter-speed'>{tags['EXIF ExposureTime']} Second(s)</p>\\n\"\n if tags['EXIF FNumber']:\n from fractions import Fraction\n tags['EXIF FNumber'] = str(float(Fraction(str(tags['EXIF FNumber'])))) # Convert aperture to str i.e. 6.3\n html += f\"<p class='aperture'>f{tags['EXIF FNumber']}</p>\\n\"\n\n # Camera body details\n if tags['Image Make'] and tags['Image Model']:\n html += f\"<p class='camera-type'>{tags['Image Make']} {tags['Image Model']}</p>\\n\"\n elif tags['Image Make']:\n html += f\"<p class='camera-type'>{tags['Image Make']}</p>\\n\"\n elif tags[\"Image Model\"]:\n html += f\"<p class='camera-type'>{tags['Image Model']}</p>\\n\"\n else:\n ...\n return html",
"def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)",
"def epbunchlist2html(epbunchlist):\n def epbunch2html(epbunch):\n lines = epbunch.obj[:2]\n return '->'.join(lines)\n lines = [epbunch2html(epbunch) for epbunch in epbunchlist]\n return \", \".join(lines)",
"def gen_html(\n conversations,\n height,\n width,\n title,\n other_speaker,\n human_speaker,\n user_icon,\n alt_icon,\n):\n html_str = f\"\"\"<html>\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\n <title> {title} </title>\n <style type=\"text/css\">\n @media print{{\n @page{{ margin: 0; size: {str(width)}in {str(height)}in; }}\n }}\n ul{{\n list-style: none;\n }}\n .{other_speaker}_img_div{{\n display: inline-block;\n float: left;\n margin: 18px 5px 0px -25px;\n }}\n .{human_speaker}_img_div{{\n display: inline-block;\n float: right;\n margin: 18px 15px 5px 5px;\n }}\n .{other_speaker}_img{{\n content:url({alt_icon});\n }}\n .{human_speaker}_img{{\n content:url({user_icon});\n }}\n .{other_speaker}_p_div{{\n float: left;\n }}\n .{human_speaker}_p_div{{\n float:right;\n }}\n p{{\n display:inline-block;\n overflow-wrap: break-word;\n border-radius: 30px;\n padding: 10px 10px 10px 10px;\n font-family: Helvetica, Arial, sans-serif;\n }}\n .clear{{\n float: none;\n clear: both;\n }}\n .{other_speaker}{{\n background: #eee;\n float: left;\n }}\n .{human_speaker}{{\n background: #0084ff;\n color: #fff;\n float: right;\n }}\n .breaker{{\n color: #bec3c9;\n display: block;\n height: 20px;\n margin: 20px 20px 20px 20px;\n text-align: center;\n text-transform: uppercase;\n }}\n img{{\n border-radius: 50px;\n width: 50px;\n height: 50px;\n }}\n </style>\n</head>\n<body>\n{gen_convo_ul(conversations)}\n</body>\n</html>\n \"\"\"\n return html_str",
"def get_html(self):\r\n if self.debug == 'True':\r\n # Reset the user vote, for debugging only!\r\n self.user_voted = False\r\n if self.hints == {}:\r\n # Force self.hints to be written into the database. (When an xmodule is initialized,\r\n # fields are not added to the db until explicitly changed at least once.)\r\n self.hints = {}\r\n\r\n try:\r\n child = self.get_display_items()[0]\r\n out = child.render('student_view').content\r\n # The event listener uses the ajax url to find the child.\r\n child_id = child.id\r\n except IndexError:\r\n out = u\"Error in loading crowdsourced hinter - can't find child problem.\"\r\n child_id = ''\r\n\r\n # Wrap the module in a <section>. This lets us pass data attributes to the javascript.\r\n out += u'<section class=\"crowdsource-wrapper\" data-url=\"{ajax_url}\" data-child-id=\"{child_id}\"> </section>'.format(\r\n ajax_url=self.runtime.ajax_url,\r\n child_id=child_id\r\n )\r\n\r\n return out",
"def generate_html(self):\n html_text_1 = \"\"\"\n <div class=\"concept\">\n\n \t\t<div class=\"concept-title\">\n\n \t\t\t\t\"\"\" + self.title\n\n html_text_2 = \"\"\"\n \t\t</div>\n\n \t\t<div class=\"concept-description\">\n\n\t\t <p>\n\t\t\t\n \t\t \t\t\"\"\" + self.description + \"\"\" \n \n </p>\"\"\"\n\n html_text_3 = '''\n\n \t\t</div>\n\n </div>'''\n\n return html_text_1 + html_text_2 + html_text_3",
"def content_to_html(self):\n if self.title != \"\":\n string_title = html_tag(\n plain_to_html(self.title), self.title, self.proc\n )\n string_title = html_heading(string_title, self.level)\n else:\n string_title = html_heading(html_line(\"1\"), self.level)\n\n if self.level == 1: # it's not a sub-analysis\n string_title = html_line_before(string_title, \"5\")\n\n # We render all our content before all our subsections to stop any of\n # our content looking like it belongs to the subsection.\n string_content = \"\".join(self.content)\n for section in self.subsections:\n string_content += section.content_to_html()\n\n return string_title + string_content",
"def __html__(self) -> str:\n components = [\n f'{self.name}' if self.name else '',\n f'{self.repository}',\n ]\n return ', '.join([component for component in components if component])",
"def __html__(self):\n return self.html",
"def workbench_scenarios():\n return [\n (\"HL rubric text XBlock\",\n \"\"\"<hl_rubric_text/>\n \"\"\"),\n\n ]",
"def workbench_scenarios():\n return [\n (\"Oppia Embedding\",\n \"\"\"<vertical_demo>\n <oppia oppiaid=\"0\" src=\"https://www.oppia.org\" width=\"700\" />\n </vertical_demo>\n \"\"\"),\n ]",
"def __html__(self) -> str:\n components = [\n self.attributee_html,\n self.linked_title if self.title else 'untitled document',\n self.date.string if self.date else '',\n self.descriptive_phrase,\n f'archived in {self.collection}' if self.collection else '',\n ]\n return self.components_to_html(components)",
"def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content",
"def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content",
"def get_inner_html(self):\n\n pass",
"def formula_list_html(header, model, tables_html):\n page = 'ms1_test_files'\n #epa template header\n html = render_to_string('01epa_drupal_header.html', {\n 'SITE_SKIN': os.environ['SITE_SKIN'],\n 'TITLE': u\"\\u00FCbertool\"\n })\n html += render_to_string('02epa_drupal_header_bluestripe_onesidebar.html', {})\n html += render_to_string('epa_drupal_section_title_nta.html', {})\n\n #main body\n html += render_to_string('06ubertext_start_index_drupal.html', {\n 'TITLE': header + ' References',\n 'TEXT_PARAGRAPH': tables_html\n })\n html += render_to_string('07ubertext_end_drupal.html', {})\n html += links_left.ordered_list(model, page)\n\n #css and scripts\n html += render_to_string('09epa_drupal_pram_css.html', {})\n html += render_to_string('09epa_drupal_pram_scripts.html', {})\n #html += render_to_string('09epa_drupal_pram_scripts.html', {})\n\n #epa template footer\n html += render_to_string('10epa_drupal_footer.html', {})\n return html",
"def generateSpecified(self):\n\n dsource = DataSource.DataSource('summer533moon')\n replacements = {}\n\n # Split into two cases where the lane parameter is non-trivial\n # and where the category parameter is non-trivial.\n if self.filter['lane'] != '':\n data = dsource.getLaneList(self.filter['lane'])\n result = '<p>'\n line_break_counter = 0\n for champ in data:\n if line_break_counter % 10 == 0 and line_break_counter != 0:\n result += '</p>'\n result += '<a href=\"index.py?champ_name=%s\"><img src=\"http://\\\nddragon.leagueoflegends.com/cdn/5.7.2/img/champion/%s.png\"></a>' %\\\n (champ, champ)\n line_break_counter += 1\n result += '</p>'\n elif self.filter['category'] != '':\n data = dsource.getCategoryList(self.filter['category'])\n result = '<p>'\n line_break_counter = 0\n for champ in data:\n if line_break_counter % 10 == 0 and line_break_counter != 0:\n result += '</p>'\n result += '<a href=\"index.py?champ_name=%s\"><img src=\"http://\\\nddragon.leagueoflegends.com/cdn/5.7.2/img/champion/%s.png\"></a>' %\\\n (champ, champ)\n line_break_counter += 1\n result += '</p>'\n # If somehow parameters do not match the cases above, generate homepage\n # with complete list of champions instead.\n else:\n self.generateAll()\n return\n\n replacements['champ_display'] = result\n self.content = self.content.format(**replacements)",
"def get_html(self):\r\n context = {\r\n 'course_key': self.runtime.course_id,\r\n 'display_name': self.display_name_with_default,\r\n 'tag': self.instructor_tags,\r\n 'source': self.source,\r\n 'instructions_html': self.instructions,\r\n 'content_html': self.content,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user_email, self.annotation_token_secret),\r\n }\r\n return self.system.render_template('textannotation.html', context)",
"def get_html(self):\r\n extension = self._get_extension(self.sourceurl)\r\n\r\n context = {\r\n 'course_key': self.runtime.course_id,\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'sourceUrl': self.sourceurl,\r\n 'typeSource': extension,\r\n 'poster': self.poster_url,\r\n 'content_html': self.content,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user_email, self.annotation_token_secret),\r\n }\r\n\r\n return self.system.render_template('videoannotation.html', context)",
"def _repr_html_(self) -> str:\n protostr = base64.b64encode(self._proto.SerializeToString()).decode('utf-8')\n html_template = '''\n <script src=\"{webcomponents_js}\"></script>\n <link rel=\"import\" href=\"{facets_html}\">\n <facets-overview id=\"overview_elem\"></facets-overview>\n <script>\n document.querySelector(\"#overview_elem\").protoInput = \"{protostr}\";\n </script>'''\n html = html_template.format(\n facets_html=FACETS_DEPENDENCIES['facets_html'],\n webcomponents_js=FACETS_DEPENDENCIES['webcomponents_js'],\n protostr=protostr,\n )\n return html",
"def _repr_html_(self):\n\n return self._repr__base(rich_output=True)",
"def _repr_html_(self):\n\n return self._repr__base(rich_output=True)",
"def _build_experiment_mic_embedded_list():\n imaging_path_embeds = DependencyEmbedder.embed_for_type(\n base_path='imaging_paths.path',\n t='imaging_path',\n additional_embeds=['imaging_rounds', 'experiment_type.title'])\n return (Experiment.embedded_list + imaging_path_embeds + [\n # Files linkTo\n 'files.accession', # detect display_title diff\n\n # MicroscopeSettings linkTo\n 'files.microscope_settings.ch00_light_source_center_wl',\n 'files.microscope_settings.ch01_light_source_center_wl',\n 'files.microscope_settings.ch02_light_source_center_wl',\n 'files.microscope_settings.ch03_light_source_center_wl',\n 'files.microscope_settings.ch04_light_source_center_wl',\n 'files.microscope_settings.ch00_lasers_diodes',\n 'files.microscope_settings.ch01_lasers_diodes',\n 'files.microscope_settings.ch02_lasers_diodes',\n 'files.microscope_settings.ch03_lasers_diodes',\n 'files.microscope_settings.ch04_lasers_diodes',\n\n # MicroscopeConfiguration linkTo\n 'microscope_configuration_master.title',\n 'microscope_configuration_master.microscope.Name',\n 'files.microscope_configuration.title',\n 'files.microscope_configuration.microscope.Name',\n\n # Image linkTo\n 'sample_image.title',\n 'sample_image.caption',\n 'sample_image.microscopy_file.accession',\n 'sample_image.microscopy_file.omerolink',\n 'sample_image.attachment.href',\n 'sample_image.attachment.type',\n 'sample_image.attachment.md5sum',\n 'sample_image.attachment.download',\n 'sample_image.attachment.width',\n 'sample_image.attachment.height',\n ]\n )"
] | [
"0.70497555",
"0.69843435",
"0.5943376",
"0.59310067",
"0.5700649",
"0.5586195",
"0.5549065",
"0.5441129",
"0.5421613",
"0.54212123",
"0.54004914",
"0.5393318",
"0.53882074",
"0.53824496",
"0.5381582",
"0.5377059",
"0.5372054",
"0.53646225",
"0.53409606",
"0.53304595",
"0.5311827",
"0.5306151",
"0.529839",
"0.5298358",
"0.52781636",
"0.5272792",
"0.52566695",
"0.5229512",
"0.5229512",
"0.52142763"
] | 0.707962 | 0 |
Pretty printing of HarmonizedTraitEncodedValue objects. | def __str__(self):
return 'encoded value {} for {}\nvalue = {}'.format(self.i_category, self.harmonized_trait, self.i_value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i",
"def pprint(self):\n\t\tPrettyPrintUnicode().pprint(self.data)",
"def PrettyPrint(self):\r\n print(self.data)\r\n return",
"def __str__(self):\n return '\\n'+'\\n'.join([\"%-15s: %s\" % (qq(w), str(v)) for w, v in sorted(self.value.items())]) + '\\0'",
"def __str__(self):\n\n string = \"values:\\n\\t\"\n string += \" x \".join(map(str, self.shape))\n\n string += \" {} ({})\\n\".format(type(self.values).__name__, self.values.dtype)\n\n if self.print_values is True:\n string += str(self.values) + \"\\n\"\n\n string += \"dims:\\n\\t\"\n\n string += \"{}\\n\".format(self.dims)\n\n string += \"coords:\\n\\t\"\n string += \"\\n\\t\".join(map(repr, self.coords))\n\n string += \"\\n\"\n\n string += \"attrs:\\n\"\n\n for ix, key in enumerate(self.attrs.keys()):\n if ix == self.max_print_attrs:\n string += \"\\t+%i attrs\" % (len(self.attrs) - self.max_print_attrs)\n break\n string += \"\\t{!r}: {!r}\\n\".format(key, self.attrs[key])\n\n return string",
"def pprint(self):\n print(self.pprint_str())",
"def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass",
"def pprint(self):\n return pformat(repr(self))",
"def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st",
"def print_traits ( self, show_help = False, **metadata ):\n\n if len( metadata ) > 0:\n names = self.trait_names( **metadata )\n else:\n names = self.trait_names( type = _is_not_event )\n if len( names ) == 0:\n print ''\n return\n\n result = []\n pad = max( [ len( x ) for x in names ] ) + 1\n maxval = 78 - pad\n names.sort()\n\n for name in names:\n try:\n value = repr( getattr( self, name ) ).replace( '\\n', '\\\\n' )\n if len( value ) > maxval:\n value = '%s...%s' % ( value[: (maxval - 2) / 2 ],\n value[ -((maxval - 3) / 2): ] )\n except:\n value = '<undefined>'\n lname = (name + ':').ljust( pad )\n if show_help:\n result.append( '%s %s\\n The value must be %s.' % (\n lname, value, self.base_trait( name ).setter.info() ) )\n else:\n result.append( '%s %s' % ( lname, value ) )\n\n print '\\n'.join( result )",
"def __str__(self):\n return_string = self.name + \"\\n\" + str(self.traits)\n\n return return_string",
"def __repr__(self):\n return self.pretty_print(self.__dict__)",
"def __repr__(self):\r\n return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])",
"def printpretty(self):\n print(self.string_rep())",
"def display(self):\n # type: ()->None\n print('============')\n for key, value in self._ifAttributes.items():\n if isinstance(value, list):\n print(key + ': ')\n for item in value:\n print('\\t' + item)\n elif isinstance(value, dict):\n print(key + ': ')\n for item in value.keys():\n print('\\t' + item + ': ' + value[item])\n else:\n print(key + ': ' + str(value))\n print('============')",
"def __str__(self) -> str:\n st = \"\\tmat = \" + self.mat\n st += \"\\n\\trotation = \" + str(self.ham_rot) + '\\n'\n pl_str = ['(' + p.join(' ') + ')' for p in self.planes]\n st += '\\tplane: ' + \", \".join(pl_str) + '\\n'\n return st",
"def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)",
"def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return dict.__repr__(self)",
"def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )",
"def __repr__(self):\n indent = len(self.type) + 2\n jstr = ',\\n' + ' ' * indent\n\n props = self._display_properties()\n\n params = jstr.join('{:}={:}'.format(p, summary(self[p],\n indent=indent))\n for (p, dp) in props)\n return '<{}({:})>'.format(self.type, params)",
"def __repr__(self):\n return str.format(\"Cards: {0} Rank: '{1}' Values: {2}\",\n self.__cards,\n Hand.RANKS[self.rank()],\n self.values())",
"def __str__(self):\n print_string = 'key: {} | value: {}'.format(\n str(self.key), str(self.value)\n )\n return print_string",
"def pretty(self):\n return self._pretty",
"def __repr__(self):\n return repr(dict([(k, v) for k, v in self.iteritems()]))",
"def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return defaultdict.__repr__(self)",
"def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return",
"def __str__(self):\n txt = ''\n if self.PrintHeader:\n txt = \" |\" + \"|\".join(sorted(self.rows[0].keys())).expandtabs() + \"|\"\n txt += \"\\n\"\n txt += \"|-\"\n for r in self.rows:\n txt += \"\\n|\"\n txt += \"|\".join([str(uround(r[key] , 2) if isinstance(r[key], (int, long, float, complex , Variable,AffineScalarFunc )) else r[key]) for key in sorted(self.rows[0].keys())]) + \"|\"\n txt += \"\\n|-\"\n if self.PrintSum:\n txt += \"\\n\"\n sumRow = self.GetSumRow()\n txt += \"| |\" + \"|\".join( [str(uround(sumRow[key] , 2) if isinstance(sumRow[key], (int, long, float, complex , Variable ,AffineScalarFunc )) else sumRow[key]) for key in sorted(self.rows[0].keys())[1:]] ) + \"|\"\n\n return txt",
"def __repr__(self, indent=2):\n return pprint.pformat(self.to_dict(), indent=indent)",
"def pprint(self):\n import json\n return json.dumps(OrderedDict(self.items()), indent=4)",
"def __str__(self) -> str:\n return '\\n'.join([f'{hp}: {self.hyperparams[hp]}'\n for hp in self.hyperparams])"
] | [
"0.64984983",
"0.62469554",
"0.6195777",
"0.61902934",
"0.6169539",
"0.6151282",
"0.6125573",
"0.6070319",
"0.6042068",
"0.6039289",
"0.6027863",
"0.597896",
"0.5963519",
"0.59448206",
"0.59373057",
"0.59328187",
"0.5917587",
"0.5878207",
"0.58527887",
"0.585157",
"0.58463466",
"0.5812896",
"0.5810168",
"0.5807437",
"0.57882214",
"0.5783269",
"0.5760367",
"0.5744838",
"0.57398653",
"0.57358676"
] | 0.70111173 | 0 |
generate images using the latest saved check points and the images will be saved in 'save_path/images/' | def generate_image(noise_list, save_path):
check_points_path = os.path.join(save_path, 'check_points')
output_image_path = os.path.join(save_path, 'images')
components.create_folder(output_image_path, False)
latest_checkpoint = tf.train.latest_checkpoint(check_points_path)
assert latest_checkpoint is not None, "no check points found"
saver = tf.train.import_meta_graph(latest_checkpoint + '.meta')
with tf.Session() as sess:
saver.restore(sess, latest_checkpoint)
iterations = sess.run('saved_iterations:0')
for i in range(len(noise_list)):
generated_images = sess.run('generator/output_layer/tanh/during_inference:0',
feed_dict={"noise_for_inference:0": noise_list[i]})
Gan.__save_images(output_image_path, generated_images, int(np.sqrt(generated_images.shape[0])), iterations, i) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_img(self):\r\n self.extract_info_from_file()\r\n path_0 = os.path.join(self.output_path, self.field_id, self.patient_id + self.ext)\r\n path_1 = os.path.join(self.output_path, self.field_id + '_' + self.instance, self.patient_id + self.ext)\r\n if self.shot == '0': # first shot\r\n if os.path.exists(path_0) or os.path.exists(path_1):\r\n print(self.patient_id, 'already done')\r\n pass\r\n else:\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)\r\n else: # newer shot\r\n if not self.img_computed:\r\n self.compute_img()\r\n if self.instance == '0':\r\n self.img.save(path_0)\r\n else:\r\n self.img.save(path_1)",
"def save_images(self):\n for q in range(self.N_itr):\n plt.clf()\n self.plot_EM_estimate(q)\n plt.savefig('img%d.png' % (100 + q))",
"def save_step_1(imgs, output_path='./output/step1'):\n # ... your code here ...\n i=0\n for each in imgs:\n i+=1\n cv2.imwrite(output_path+\"/output\"+str(i)+\".jpg\", each)",
"def _dump_image(self):\n if not self._current_id == len(self._img_ids):\n warnings.warn(\n 'Recorded {} out of {} validation images, incomplete results'.format(\n self._current_id, len(self._img_ids)))\n try:\n for im_name, im in self._panoptic_images.items():\n cv2.imwrite(osp.join(self._save_imgpath, im_name), im)\n except IOError as e:\n raise RuntimeError(\"Unable to dump images, ignored. What(): {}\".format(str(e)))",
"def save(self, x, y, names, path=\"\", zoom=False):\n for i in range(len(x)):\n image = self.generate(x[i], label=np.argmax(y[i]), zoom=zoom)\n image = Image.fromarray((image*255).astype(\"uint8\"))\n image.save(path + names[i] + \".png\", \"PNG\")",
"def img_save(self):\n file_name, extension = return_folder_file_extension(self.img_name)[1:]\n image_name_save = \"%s_D=%s_Rs=%s_size=%s_offset=%i%s\" % (file_name, self.D, self.Rs, self.axe_X, self.offset_X+self.offset_X2, extension)\n\n if self.img2 is not None:\n self.img2.save(image_name_save)\n print(\"Saved \"+image_name_save)\n else:\n print(\"No image to save\")",
"def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n if not os.path.exists(os.path.join(self._images_dir, 'imgs')):\n os.makedirs(os.path.join(self._images_dir, 'imgs'))\n \n names = ['inputB_', 'fakeB_depth_' , 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n x1_t, name1 = self.dataset.next_batch()\n count = 0\n fake_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.cycle_images_b], \n feed_dict={self.input_b: x1_t})\n \n fakedepth = fake_A_temp[:,:,:,-1]\n tensors = [x1_t, fakedepth, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n #print(name)\n # if name == 'inputB_' or name == 'fakeB_depth_':\n # image_name = name1[count] + '_' + name + str(epoch) + \"_\" + str(i) + \".jpg\"\n # imsave(os.path.join(self._images_dir, 'imgs', image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n # else:\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name), ((tensor[0] + 1) * 127.5).astype(np.uint8))\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")\n count += 1",
"def save_step_4(imgs, output_path=\"./output/step4\"):\n # ... your code here ...\n cv2.imwrite(output_path+\"/output.jpg\", imgs)",
"def generate_and_save_images(model, epoch, test_input):\n #Training is set to false\n #so all layers run in inference mode (batchnorm)(?)\n predictions = model(test_input, training=False)\n fig = plt.figure(figsize=(4,4))\n for i in range(predictions.shape[0]):\n plt.subplot(4,4, i+1)\n img = tf.constant(predictions[i]) #Turn prediction into tf.constant\n #so it can easily be transformed int a uint8 array\n img = tf.image.convert_image_dtype(img, tf.uint8)\n plt.imshow(img)#Show the images in color\n plt.axis(\"off\")\n #for()\n\n #update epoch_total or create a new tracker\n if os.path.exists(os.path.join(\"outputColor\",\"epoch_total.txt\")):\n f = open(os.path.join(\"outputColor\",\"epoch_total.txt\"),\"r\")\n epoch = int(f.readline()) + 1\n print(\"Total Epochs:{}\".format(epoch))\n f = open(os.path.join(\"outputColor\",\"epoch_total.txt\"),\"w\")\n f.write(str(epoch))\n #if()\n else:\n f = open(os.path.join(\"outputColor\",\"epoch_total.txt\"),\"w\")\n f.write(str(epoch))\n #else()\n f.close()\n\n plt.savefig(\"outputPhotosColor/image_at_epoch_{:04d}.png\".format(epoch)) #save image\n #plt.show() # Turn on to show each new image after it's made\n plt.close()",
"def save_imgs(self, epoch):\n row, column = 5, 5\n\n # Generates r*c images from the model, saves them individually and as a gallery\n images_generated = self.generate_images(row * column)\n\n # ???\n images_generated = 0.5 * images_generated + 0.5\n\n for index, np_array_image in enumerate(images_generated):\n path = f\"{self.output_directory}/generated_{self.img_size[0]}x{self.img_size[1]}\"\n if not os.path.exists(path):\n os.makedirs(path)\n imsave(path + f\"/{unique_name()}_{epoch}_{index}.png\", np_array_image)\n\n # 4D array:\n nindex, height, width, intensity = images_generated.shape\n\n nrows = nindex // column\n\n assert nindex == nrows * column\n\n # Form the gallery by combining the data at pixel levels (may not be the best approach)\n # want result.shape = (height*n-rows, width*n-cols, intensity)\n gallery = (\n images_generated.reshape(nrows, column, height, width, intensity)\n .swapaxes(1, 2)\n .reshape(height * nrows, width * column, intensity)\n )\n\n path = f\"{self.output_directory}/gallery_generated_{self.img_size[0]}x{self.img_size[1]}\"\n if not os.path.exists(path):\n os.makedirs(path)\n imsave(path + f\"/{unique_name()}_{epoch}.png\", gallery)",
"def save_step_2(imgs, match_list, output_path=\"./output/step2\"):\n # ... your code here ...\n for i in range(len(imgs)):\n name1,tail1 = str.split(filenames[match_list[i][0]],\".\")\n name2,tail2 = str.split(filenames[match_list[i][2]],\".\")\n cv2.imwrite(output_path+\"/\"+name1+\"_\"+str(match_list[i][1])+\"_\"+name2+\"_\"+str(match_list[i][3])+\"_\"+str(match_list[i][4])+\".jpg\", imgs[i])",
"def save_images(self, sess, epoch):\n if not os.path.exists(self._images_dir):\n os.makedirs(self._images_dir)\n\n names = ['inputA_', 'inputB_', 'fakeA_',\n 'fakeB_', 'cycA_', 'cycB_']\n\n with open(os.path.join(\n self._output_dir, 'epoch_' + str(epoch) + '.html'\n ), 'w') as v_html:\n for i in range(0, self._num_imgs_to_save):\n print(\"Saving image {}/{}\".format(i, self._num_imgs_to_save))\n inputs = sess.run(self.inputs)\n fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = sess.run([\n self.fake_images_a,\n self.fake_images_b,\n self.cycle_images_a,\n self.cycle_images_b\n ], feed_dict={\n self.input_a: inputs['images_i'],\n self.input_b: inputs['images_j']\n })\n\n tensors = [inputs['images_i'], inputs['images_j'],\n fake_B_temp, fake_A_temp, cyc_A_temp, cyc_B_temp]\n\n for name, tensor in zip(names, tensors):\n image_name = name + str(epoch) + \"_\" + str(i) + \".jpg\"\n imsave(os.path.join(self._images_dir, image_name),\n ((tensor[0] + 1) * 127.5).astype(np.uint8)\n )\n v_html.write(\n \"<img src=\\\"\" +\n os.path.join('imgs', image_name) + \"\\\">\"\n )\n v_html.write(\"<br>\")",
"def save_images(self, samples, label=None, dir=\"\"):\n if label is None:\n label = self.global_step_\n fig = plt.figure()\n self.net_.eval()\n self.dist.visualize(fig, samples, self.energy)\n plot_fn = os.path.join(dir, f\"samples_{label}.png\")\n fig.savefig(plot_fn)\n plt.close(fig)",
"def test_save_images(self):\n save_file(self.quart.save_images, to_single_file=False)",
"def save_image(start, stop, imgcount, label):\n text = \"\"\n imgfile = select_file(label)\n for p in range(imgcount):\n pxcnt = randint(start, stop)\n imgcurrent = create_image(imgfile, pxcnt)\n filename = \"img_train_\" + str(label) + \"_\" + str(p) + \"_\" + str(pxcnt) + \".png\"\n text += \"ctq/dataset/train/\" + filename + \" \" + str(label) + \"\\n\"\n imgcurrent.save(filename)\n text_file = open(imgfile + \"_train_label.txt\", \"w\")\n text_file.write(text)\n text_file.close()",
"def generate_images(self, model, test_input, step, dst_dir):\n prediction = model(test_input)\n\n plt.figure(figsize=(12, 12))\n display_list = [test_input[0], prediction[0]]\n title = ['Input Image', 'Predicted Image']\n\n for i in range(2):\n plt.subplot(1, 2, i+1)\n plt.title(title[i])\n # getting the pixel values between [0, 1] to plot it.\n plt.imshow(display_list[i] * 0.5 + 0.5)\n plt.axis('off')\n filename = os.path.join(dst_dir, 'generated_imgs_at_step_{:06d}.png'.format(step))\n plt.savefig(filename)",
"def saveImage(self, observation):\n image_path = \"{}/{}/frame{:06d}\".format(self.data_folder, self.episode_folder, self.episode_step)\n relative_path = \"{}/{}/frame{:06d}\".format(self.name, self.episode_folder, self.episode_step)\n self.images_path.append(relative_path)\n # in the case of dual/multi-camera\n if observation.shape[2] > 3:\n observation1 = cv2.cvtColor(observation[:, :, :3], cv2.COLOR_BGR2RGB)\n observation2 = cv2.cvtColor(observation[:, :, 3:], cv2.COLOR_BGR2RGB)\n\n cv2.imwrite(\"{}_1.jpg\".format(image_path), observation1)\n cv2.imwrite(\"{}_2.jpg\".format(image_path), observation2)\n else:\n observation = cv2.cvtColor(observation, cv2.COLOR_BGR2RGB)\n cv2.imwrite(\"{}.jpg\".format(image_path), observation)",
"def save_images(self, step, images):\n\n # Save\n with self.summary_writer.as_default():\n for name, batch in images.items():\n image = batch[0]\n image = tf.expand_dims(image, axis=0)\n tf.summary.image(name, image, step)",
"def save_imgs(self):\n print(\"Saving the images with required categories ...\")\n os.makedirs(self.imgs_dir, exist_ok=True)\n # Save the images into a local folder\n for im in tqdm(self.images):\n img_data = requests.get(im['coco_url']).content\n with open(os.path.join(self.imgs_dir, im['file_name']), 'wb') as handler:\n handler.write(img_data)",
"def save_groudtruth(im, coords, filename):\n print 'Saving ground truth ......{0}'.format(filename)\n img_draw = Image.fromarray(cv2.cvtColor(im, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img_draw)\n for coord in coords:\n draw.polygon([(float(coord[0]), float(coord[1])), (float(coord[2]), float(coord[3])),\n (float(coord[4]), float(coord[5])), (float(coord[6]), float(coord[7]))],\n outline=\"red\", fill=\"blue\")\n img_draw = np.array(img_draw)\n img_draw = cv2.cvtColor(img_draw, cv2.COLOR_RGB2BGR)\n bname_excludepoint = filename.split('/')[-1].split('.')[0]\n image_path = '/home/yuquanjie/Documents/deep-direct-regression/result/' + bname_excludepoint + '_gt.jpg'\n cv2.imwrite(image_path, img_draw[0: img_draw.shape[0], 0: img_draw.shape[1]])",
"def save_result(save_path, npyfile):\n for i, item in enumerate(npyfile):\n img = item[:, :, 0]\n io.imsave(os.path.join(save_path, '%d_pred.png' % i), img)",
"def appendpics(pathofimg, w_sub, h_sub, step):\n num = 0\n dirlist = []\n images = [] # images in each folder\n for root, dirs, fileswer in os.walk(pathofimg):\n if len(dirs)!= 0:\n for dir in dirs:\n dirlist.append(dir)\n for rooert, dirwerwes, files in os.walk(pathofimg+'/'+dir):\n for file in files:\n if(file.endswith('.png')):\n images.append(Image.open(pathofimg+'/'+dir+'/'+file))\n if(len(images)==81):\n break\n target = montage(images, w_sub, h_sub, step)\n target.save(pathofimg +'/'+ dir + '.png', quality=100)\n else:\n dir = 'Generated'\n for file in fileswer:\n if (file.endswith('.png')):\n images.append(Image.open(pathofimg +'/'+ file))\n target1 = montage(images, w_sub, h_sub, step)\n savepath = pathofimg +'/'+ 'generated'\n os.makedirs(savepath)\n target1.save(savepath +'/'+ dir + '.png', quality=100)",
"def save_images(PATH, show_img, datasets, from_dataset):\n dataset = datasets[from_dataset]\n imgModels = dataset['models']\n for modelname, model in imgModels.items():\n print('save', modelname)\n plt.imshow(model[70])\n plt.set_cmap(\"gray\")\n plt.axis('off')\n plt.savefig(PATH + '/' + from_dataset + '_' + modelname + '.png', dpi=400)\n\n if show_img == True:\n plt.show()",
"def genImages(self, gen_ts):\n t1 = time.time()\n ngen = 0\n\n # determine how much logging is desired\n log_success = to_bool(search_up(self.image_dict, 'log_success', True))\n\n # Loop over each time span class (day, week, month, etc.):\n for timespan in self.image_dict.sections:\n\n # Now, loop over all plot names in this time span class:\n for plotname in self.image_dict[timespan].sections:\n\n # Accumulate all options from parent nodes:\n plot_options = accumulateLeaves(self.image_dict[timespan][plotname])\n\n plotgen_ts = gen_ts\n if not plotgen_ts:\n binding = plot_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n plotgen_ts = db_manager.lastGoodStamp()\n if not plotgen_ts:\n plotgen_ts = time.time()\n\n image_root = os.path.join(self.config_dict['WEEWX_ROOT'],\n plot_options['HTML_ROOT'])\n # Get the path that the image is going to be saved to:\n img_file = os.path.join(image_root, '%s.png' % plotname)\n\n # Convert from string to an integer:\n ai = weeutil.weeutil.nominal_spans(plot_options.get('aggregate_interval'))\n # Check whether this plot needs to be done at all:\n if skipThisPlot(plotgen_ts, ai, img_file):\n continue\n\n # skip image files that are fresh, but only if staleness is defined\n stale = to_int(plot_options.get('stale_age'))\n if stale:\n t_now = time.time()\n try:\n last_mod = os.path.getmtime(img_file)\n if t_now - last_mod < stale:\n log.debug(\"Skip '%s': last_mod=%s age=%s stale=%s\",\n img_file, last_mod, t_now - last_mod, stale)\n continue\n except os.error:\n pass\n\n # Create the subdirectory that the image is to be put in. Wrap in a try block in\n # case it already exists.\n try:\n os.makedirs(os.path.dirname(img_file))\n except OSError:\n pass\n\n # Create a new instance of a time plot and start adding to it\n plot = weeplot.genplot.TimePlot(plot_options)\n\n # Calculate a suitable min, max time for the requested time.\n minstamp, maxstamp, timeinc = weeplot.utilities.scaletime(\n plotgen_ts - int(plot_options.get('time_length', 86400)), plotgen_ts)\n # Override the x interval if the user has given an explicit interval:\n timeinc_user = to_int(plot_options.get('x_interval'))\n if timeinc_user is not None:\n timeinc = timeinc_user\n plot.setXScaling((minstamp, maxstamp, timeinc))\n\n # Set the y-scaling, using any user-supplied hints:\n yscale = plot_options.get('yscale', ['None', 'None', 'None'])\n plot.setYScaling(weeutil.weeutil.convertToFloat(yscale))\n\n # Get a suitable bottom label:\n bottom_label_format = plot_options.get('bottom_label_format', '%m/%d/%y %H:%M')\n bottom_label = time.strftime(bottom_label_format, time.localtime(plotgen_ts))\n plot.setBottomLabel(bottom_label)\n\n # Set day/night display\n plot.setLocation(self.stn_info.latitude_f, self.stn_info.longitude_f)\n plot.setDayNight(to_bool(plot_options.get('show_daynight', False)),\n weeplot.utilities.tobgr(plot_options.get('daynight_day_color',\n '0xffffff')),\n weeplot.utilities.tobgr(plot_options.get('daynight_night_color',\n '0xf0f0f0')),\n weeplot.utilities.tobgr(plot_options.get('daynight_edge_color',\n '0xefefef')))\n\n # Loop over each line to be added to the plot.\n for line_name in self.image_dict[timespan][plotname].sections:\n\n # Accumulate options from parent nodes.\n line_options = accumulateLeaves(self.image_dict[timespan][plotname][line_name])\n\n # See what observation type to use for this line. 
By default, use the section\n # name.\n var_type = line_options.get('data_type', line_name)\n\n # Look for aggregation type:\n aggregate_type = line_options.get('aggregate_type')\n if aggregate_type in (None, '', 'None', 'none'):\n # No aggregation specified.\n aggregate_type = aggregate_interval = None\n else:\n try:\n # Aggregation specified. Get the interval.\n aggregate_interval = weeutil.weeutil.nominal_spans(\n line_options['aggregate_interval'])\n except KeyError:\n log.error(\"Aggregate interval required for aggregate type %s\",\n aggregate_type)\n log.error(\"Line type %s skipped\", var_type)\n continue\n\n # Now its time to find and hit the database:\n binding = line_options['data_binding']\n db_manager = self.db_binder.get_manager(binding)\n # we need to pass the line options and plotgen_ts to our xtype\n # first get a copy of line_options\n option_dict = dict(line_options)\n # but we need to pop off aggregate_type and\n # aggregate_interval as they are used as explicit arguments\n # in our xtypes call\n option_dict.pop('aggregate_type', None)\n option_dict.pop('aggregate_interval', None)\n # then add plotgen_ts\n option_dict['plotgen_ts'] = plotgen_ts\n start_vec_t, stop_vec_t ,data_vec_t = weewx.xtypes.get_series(\n var_type,\n TimeSpan(minstamp, maxstamp),\n db_manager,\n aggregate_type=aggregate_type,\n aggregate_interval=aggregate_interval,\n **option_dict)\n\n # Get the type of plot (\"bar', 'line', or 'vector')\n plot_type = line_options.get('plot_type', 'line').lower()\n\n if aggregate_type and plot_type != 'bar':\n # If aggregating, put the point in the middle of the interval\n start_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in start_vec_t[0]], # Value\n start_vec_t[1], # Unit\n start_vec_t[2]) # Unit group\n stop_vec_t = ValueTuple(\n [x - aggregate_interval / 2.0 for x in stop_vec_t[0]], # Velue\n stop_vec_t[1], # Unit\n stop_vec_t[2]) # Unit group\n\n # Convert the data to the requested units\n new_data_vec_t = self.converter.convert(data_vec_t)\n\n # Add a unit label. NB: all will get overwritten except the last. Get the label\n # from the configuration dictionary.\n unit_label = line_options.get(\n 'y_label', self.formatter.get_label_string(new_data_vec_t[1]))\n # Strip off any leading and trailing whitespace so it's easy to center\n plot.setUnitLabel(unit_label.strip())\n\n # See if a line label has been explicitly requested:\n label = line_options.get('label')\n if label:\n # Yes. Get the text translation\n label = self.text_dict[label]\n else:\n # No explicit label. 
Look up a generic one.\n # NB: generic_dict is a KeyDict which will substitute the key\n # if the value is not in the dictionary.\n label = self.generic_dict[var_type]\n\n # See if a color has been explicitly requested.\n color = line_options.get('color')\n if color is not None: color = weeplot.utilities.tobgr(color)\n fill_color = line_options.get('fill_color')\n if fill_color is not None: fill_color = weeplot.utilities.tobgr(fill_color)\n\n # Get the line width, if explicitly requested.\n width = to_int(line_options.get('width'))\n\n interval_vec = None\n gap_fraction = None\n vector_rotate = None\n\n # Some plot types require special treatments:\n if plot_type == 'vector':\n vector_rotate_str = line_options.get('vector_rotate')\n vector_rotate = -float(vector_rotate_str) \\\n if vector_rotate_str is not None else None\n elif plot_type == 'bar':\n interval_vec = [x[1] - x[0] for x in\n zip(start_vec_t.value, stop_vec_t.value)]\n elif plot_type == 'line':\n gap_fraction = to_float(line_options.get('line_gap_fraction'))\n if gap_fraction is not None and not 0 < gap_fraction < 1:\n log.error(\"Gap fraction %5.3f outside range 0 to 1. Ignored.\",\n gap_fraction)\n gap_fraction = None\n else:\n log.error(\"Unknown plot type '%s'. Ignored\", plot_type)\n continue\n\n # Get the type of line (only 'solid' or 'none' for now)\n line_type = line_options.get('line_type', 'solid')\n if line_type.strip().lower() in ['', 'none']:\n line_type = None\n\n marker_type = line_options.get('marker_type')\n marker_size = to_int(line_options.get('marker_size', 8))\n \n # Add the line to the emerging plot:\n plot.addLine(weeplot.genplot.PlotLine(\n stop_vec_t[0], new_data_vec_t[0],\n label = label,\n color = color,\n fill_color = fill_color,\n width = width,\n plot_type = plot_type,\n line_type = line_type,\n marker_type = marker_type,\n marker_size = marker_size,\n bar_width = interval_vec,\n vector_rotate = vector_rotate,\n gap_fraction = gap_fraction))\n\n # OK, the plot is ready. Render it onto an image\n image = plot.render()\n\n try:\n # Now save the image\n image.save(img_file)\n ngen += 1\n except IOError as e:\n log.error(\"Unable to save to file '%s' %s:\", img_file, e)\n t2 = time.time()\n\n if log_success:\n log.info(\"Generated %d images for report %s in %.2f seconds\",\n ngen,\n self.skin_dict['REPORT_NAME'], t2 - t1)",
"def generate_image(self):\n pass",
"def view_image(train_dataloader):\n for (x, target) in train_dataloader:\n np.save(\"img.npy\", x)\n print(x.shape)\n exit(0)",
"def generate_imgs(self, count, threshold, modifier):\n self.build_gan()\n\n\n imgs = []\n for i in range(count):\n score = [0]\n while not(threshold[0] < score[0] < threshold[1]):\n img = self.gene_imgs(1)\n score = self.discriminator.predict(img)\n print(\"Image found: \", score[0])\n imgs.append(img)\n\n imgs = np.asarray(imgs).squeeze()\n imgs = 0.5 * imgs + 0.5\n\n print(imgs.shape)\n for i, img_array in enumerate(imgs):\n path = f\"{self.output_directory}/generated_{threshold[0]}_{threshold[1]}\"\n if not os.path.exists(path):\n os.makedirs(path)\n imsave(path + f\"/{modifier}_{i}.png\", self.pix_array_convert(img_array))",
"def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels",
"def save_unique_image():\r\n global folder_name\r\n filelist = [file for file in os.listdir('temp') if file.endswith('.png')]\r\n\r\n if filelist:\r\n for image_path in filelist:\r\n found = 0\r\n img_to_del = Image.open(\"temp/\" + image_path)\r\n if not get_immediate_subdirectories():\r\n found = 1\r\n os.makedirs('detected_faces/1/')\r\n img_to_del.save('detected_faces/1/'+ image_path)\r\n os.remove(os.path.join(temp_path, image_path))\r\n folder_name = 1\r\n else:\r\n for folder in get_immediate_subdirectories():\r\n folder_filelist = [file for file in os.listdir(\"detected_faces/\" + folder) if\r\n file.endswith('.png')]\r\n count = len(folder_filelist)\r\n file = folder_filelist[0]\r\n img_to_compare = Image.open(\"detected_faces/\" + folder + \"/\" + file)\r\n if img_to_del.size > img_to_compare.size:\r\n temp_image_resized = img_to_del.resize(img_to_compare.size, Image.ANTIALIAS)\r\n index = get_ssim(temp_image_resized, img_to_compare)\r\n elif img_to_del.size < img_to_compare.size:\r\n img_to_compare = img_to_compare.resize(img_to_del.size, Image.ANTIALIAS)\r\n index = get_ssim(img_to_del, img_to_compare)\r\n else:\r\n index = get_ssim(img_to_del, img_to_compare)\r\n if index > min_ssim_index_val:\r\n found = 1\r\n if count < 5:\r\n img_to_del.save(pathname + \"/\" + folder + \"/\" + image_path)\r\n print image_path\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))\r\n if found == 0:\r\n folder_name += 1\r\n os.makedirs('detected_faces/' + str(folder_name))\r\n img_to_del.save(pathname + \"/\" + str(folder_name) + \"/\" + image_path)\r\n if os.path.isfile(os.path.join(temp_path, image_path)):\r\n os.remove(os.path.join(temp_path, image_path))",
"def save_detection(self, image):\n\t\timg = self.visualize_detection(image)\n\t\timg = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)\n\t\tcv2.imwrite(f'{SAVE_PATH}{self.clip}{self.num_save}.jpg', img)"
] | [
"0.711237",
"0.67032856",
"0.6631163",
"0.6493553",
"0.6490636",
"0.64827114",
"0.64728135",
"0.64322054",
"0.64124805",
"0.64074713",
"0.63913244",
"0.63624316",
"0.63335747",
"0.6307022",
"0.630442",
"0.62582576",
"0.62520474",
"0.6236078",
"0.6206135",
"0.6205042",
"0.61506426",
"0.6134105",
"0.6098211",
"0.6095082",
"0.6091225",
"0.6085356",
"0.60550785",
"0.60435635",
"0.6042806",
"0.604002"
] | 0.71468616 | 0 |
Build personalization instance from a dict | def create_personalization(self, **kwargs):
personalization = Personalization()
_diff = set(emailconf.PERSONALIZATION_KEYS).intersection(set(kwargs.keys()))
if _diff:
for key in _diff:
item = kwargs.get(key)
if item:
if key in emailconf.EMAIL_KEYS and not isinstance(item, list):
item = item.split(',')
if isinstance(item, list):
for _addr in item:
if not isinstance(_addr, Email):
_addr = Email(_addr)
func = getattr(personalization, "add_{0}".format(key))
if func:
func(_addr)
else:
func = getattr(personalization, "{0}".format(key))
if func:
func(item)
return personalization | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create(cls, dictionary):\n return cls(**dictionary)",
"def create(cls, dictionary):\n return cls(**dictionary)",
"def build_personalization(personalization):\n mock_personalization = Personalization()\n for to_addr in personalization['to_list']:\n personalization.add_to(to_addr)\n\n for cc_addr in personalization['cc_list']:\n personalization.add_to(cc_addr)\n\n for bcc_addr in personalization['bcc_list']:\n personalization.add_bc(bcc_addr)\n\n for header in personalization['headers']:\n personalization.add_header(header)\n\n for substitution in personalization['substitutions']:\n personalization.add_substitution(substitution)\n\n for arg in personalization['custom_args']:\n personalization.add_custom_arg(arg)\n\n personalization.subject = personalization['subject']\n personalization.send_at = personalization['send_at']\n return mock_personalization",
"def from_dict(d: Dict[str, Any]) -> \"Provider\":\n return Provider(\n name=d[\"name\"],\n description=d.get(\"description\"),\n roles=d.get(\n \"roles\",\n ),\n url=d.get(\"url\"),\n extra_fields={\n k: v\n for k, v in d.items()\n if k not in {\"name\", \"description\", \"roles\", \"url\"}\n },\n )",
"def build_user(data: Dict[Any, Any]) -> User:\n return User(**data)",
"def from_dict(cls, dictionary: Dict[str, Any]):\n return cls(**dictionary)",
"def make_from_clean_dict(dict):\n household = Household()\n for k, v in dict.items():\n if k == \"head\":\n household.__setattr__(k, Member.make_from_clean_dict(v))\n elif k == \"spouse\":\n household.__setattr__(k, Member.make_from_clean_dict(v))\n elif k == \"others\":\n newvals = [Member.make_from_clean_dict(d) for d in v]\n household.__setattr__(k, newvals)\n elif k == \"address\":\n household.__setattr__(k, Address.make_from_clean_dict(v))\n elif k == \"clean_json_string\":\n pass\n else:\n household.__setattr__(k, v)\n return household",
"def from_dict(cls, copula_dict):\n raise NotImplementedError",
"def from_dict(cls, _dict: Dict) -> 'UserSettings':\n args = {}\n if 'language' in _dict:\n args['language'] = _dict.get('language')\n if 'notification_language' in _dict:\n args['notification_language'] = _dict.get('notification_language')\n if 'allowed_ip_addresses' in _dict:\n args['allowed_ip_addresses'] = _dict.get('allowed_ip_addresses')\n if 'self_manage' in _dict:\n args['self_manage'] = _dict.get('self_manage')\n return cls(**args)",
"def from_dictionary(cls,\n dictionary):\n if dictionary is None:\n return None\n\n # Extract variables from the dictionary\n protected_count = dictionary.get('protectedCount')\n protected_size = dictionary.get('protectedSize')\n unprotected_count = dictionary.get('unprotectedCount')\n unprotected_size = dictionary.get('unprotectedSize')\n\n # Return an object of this model\n return cls(\n protected_count,\n protected_size,\n unprotected_count,\n unprotected_size\n)",
"def from_dict(cls, data: Dict[str, any]):\n return cls(**data)",
"def __init__(self, persona: dict)->None:\n self.gender = persona['gender']\n self.age = persona['age']\n self.hypertension = 1 if 'hypertension' in persona else 0\n self.heart_disease = 1 if 'heart_disease' in persona else 0\n self.ever_married =\"Yes\" if 'ever_married' in persona else \"No\"\n self.work_type = persona['work_type']\n self.Residence_type = persona['Residence_type']\n self.avg_glucose_level = persona['avg_glucose_level']\n self.bmi = persona['bmi']\n self.smoking_status = persona['smoking_status']",
"def from_dict(cls, word_dict):\n\n return super().from_dict(word_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)",
"def _from_dict(cls, _dict):\n return cls.from_dict(_dict)"
] | [
"0.63244694",
"0.63244694",
"0.6232753",
"0.6030198",
"0.5900351",
"0.58583313",
"0.5845706",
"0.58420676",
"0.58329755",
"0.5826959",
"0.58106136",
"0.579965",
"0.5711809",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213",
"0.57004213"
] | 0.6677979 | 0 |
Returns a list of all Server Emojis | async def emojis(self, ctx):
server = ctx.message.server
await self.bot.say('This may take some time, generating list...')
data = discord.Embed(description="Emojilist")
for ej in server.emojis:
data.add_field(
name=ej.name, value=str(ej) + " " + ej.id, inline=False)
await self.bot.say(embed=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_slack_emoji():\n all_slack_emoji = []\n\n # load stock emoji from file\n with app.open_resource('../static/emoji-names.json') as f:\n stock_emojis = json.load(f)\n all_slack_emoji += stock_emojis\n\n # concat custom emoji by slack API call\n all_slack_emoji += sc.api_call('emoji.list')['emoji'].keys()\n return all_slack_emoji",
"def get_emojis(self):\n return self.tweets.str.findall(r':{1}[\\d\\w\\-]+:{1}')",
"async def fetch_emojis(self):\n data = await self.http.get_emojis()\n emojis = []\n for emoji_data in data['customReactions']:\n team = self.get_team(emoji_data['teamId'])\n emoji = Emoji(team=team, data=emoji_data, state=self.http)\n emojis.append(emoji)\n\n return emojis",
"async def _serveremoji(self, ctx):\n non_animated_list= [f'<:{i.name}:{i.id}>' for i in ctx.guild.emojis if not i.animated]\n animated_list= [f'<a:{i.name}:{i.id}>' for i in ctx.guild.emojis if i.animated]\n\n if len(non_animated_list)==0 and len(animated_list)==0:\n await ctx.send(f\"\"\":exclamation: {ctx.author.mention}\n```{random.choice(self.bot.SERVER_CONFIG['text_colors'])}\nNo custom emojis has been added in this Server.\n```\"\"\")\n else:\n #NON ANIMATED EMOJIS\n if len(non_animated_list)>0:\n await ctx.send(f'**{len(non_animated_list)} Server Emojis**')\n k=0\n non_animated=[]\n temp=''\n for i in range(ceil(len(non_animated_list)/5)):\n temp += ' '.join(non_animated_list[k:k+5])+'\\n'\n k+=5\n if k%25==0:\n non_animated.append(temp)\n temp=''\n non_animated.append(temp) if temp !='' else ''\n \n for i in non_animated:\n await ctx.send(i)\n\n\n #ANIMATED EMOJIS\n if len(animated_list)>0:\n await ctx.send(f'**{len(animated_list)} Server Animated Emojis**')\n k=0\n animated=[]\n temp=''\n for i in range(ceil(len(animated_list)/5)):\n temp += ' '.join(animated_list[k:k+5])+'\\n'\n k+=5\n if k%25==0:\n animated.append(temp)\n temp=''\n animated.append(temp) if temp !='' else ''\n \n for i in animated:\n await ctx.send(i)",
"def get_emoji_list():\n return list(map(lambda x: x.get('emoji'), emoji_list))",
"async def emojis(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"emojis\")",
"async def getemoji(self, ctx):\n pass",
"def get_emoticons_value(self, line):\n emoticons = list()\n # Finds any substring which represents an emote\n # Expression found at https://stackoverflow.com/questions/28783420/cannot-compile-8-digit-unicode-regex-ranges-in-python-2-7-re\n emoticons.extend(re.findall(u'[\\U00010000-\\U0010ffff]', line, flags=re.UNICODE))\n return emoticons",
"async def get_emojis(self, guild_id: int) -> List[Emoji]:\n if not guild_id:\n raise ValueError(\"Argument cannot be None: guild_id\")\n\n emojis = await self._request(Route(\"GET\", f'/guilds/{guild_id}/emojis'))\n\n return [Emoji(**emojis) for emoji in emojis]",
"def fetch_emojis(route):\n url = _config['emojicons_baseurl'] + route\n logging.debug(\"Requesting URL '{0}'\".format(url))\n page = requests.get(url)\n tree = html.fromstring(page.text)\n emojis = []\n for id, t, e in zip([re.search(\"^emoticon-(\\d+)$\", x).group(1) for x in tree.xpath(_config['xpath']['ids'])],\n tree.xpath(_config['xpath']['titles']),\n tree.xpath(_config['xpath']['emojis'])):\n emojis.append({'id': id, 'title': t, 'emoji': e})\n return emojis",
"def list_offline(args):\n json_file = args.file[0]\n emojis = load_file(json_file)\n print_table(emojis)",
"def init_emoji(self, client):\n for emoji in client.get_all_emojis():\n if emoji.name == self.emoji:\n self.emoji = str(emoji)\n return\n\n self.emoji = \":\" + self.emoji + \":\"",
"def get_random_emoji():\n return (random.choice(get_emoji_list())).encode('utf-8').decode('utf-8')",
"def read_all_status_characters(self):\n return self.STATUS_CHARACTERS",
"async def _e_list(self, ctx):\n event_list = self.database.get_guild_events(ctx.guild.id)\n if len(event_list) == 0:\n await ctx.send(\"This server has no custom events\")\n return\n out = \"```\\nServer Events:\\n\"\n for event in event_list:\n out += f\"{event.name} - {event.period}: {event.text}\\n\"\n out += \"```\"\n await ctx.send(out)",
"def find_emojis(text):\n emojis = []\n for emoji in emot.emoji(text):\n emojis.append(emoji['value'])\n text = text.replace(emoji['value'], '')\n\n return text, emojis",
"def get_color_emojis(self):\n\n emojis = {}\n color_assignable = self.assignable_roles[1]\n\n # start with getting all emojis that are used in those roles as a dict\n for emoji in self.bot.emojis:\n if emoji.name in color_assignable:\n emojis[emoji.name] = emoji\n\n return emojis",
"async def emojireact(self, ctx):\n if ctx.invoked_subcommand is None:\n guild = ctx.message.guild\n guild_emoji = await self.config.guild(guild).guild()\n unicode_emoji = await self.config.guild(guild).unicode()\n if ctx.channel.permissions_for(ctx.me).embed_links:\n em = discord.Embed(colour=discord.Colour.blue())\n em.title = _(\"Emojireact settings for \") + guild.name\n if guild_emoji:\n em.add_field(name=_(\"Server Emojis \"), value=str(guild_emoji))\n if unicode_emoji:\n em.add_field(name=_(\"Unicode Emojis \"), value=str(unicode_emoji))\n if len(em.fields) > 0:\n await ctx.send(embed=em)\n else:\n msg = _(\"Emojireact settings for \") + guild.name + \"\\n\"\n if guild_emoji:\n msg += _(\"Server Emojis \") + str(guild_emoji) + \"\\n\"\n if unicode_emoji:\n msg += _(\"Unicode Emojis \") + str(unicode_emoji) + \"\\n\"\n await ctx.send(msg)",
"def emoji(self):\n return self._manager.get_emoji(self.name)",
"def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]",
"def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())",
"def join(self) -> str:\n\n return _ZWJ.join(e.emoji for e in self.emojis)",
"def get_special_emojis(self):\n\n return self.assignable_roles[2]",
"def weather_emoji(description: str):\n\n emoji_map = {\n \"cloud\": \"☁️\",\n \"rain\": \"🌧\",\n \"sun\": \"☀️\",\n \"snow\": \"❄️\",\n }\n\n emojis = \"\"\n for key in emoji_map:\n if key in description:\n emojis += emoji_map[key]\n return emojis",
"def showEmoticonList(self):\n print \"Guess what? No emoticons. But I'll put in a random one for you\"\n self.appendImageAtCursor(\"throbber.gif\")",
"async def cringo_card(list_of_emojis: List[List[str]]) -> List[List[str]]:\n\n top_row = ['🇦', '🇧', '🇨', '🇩', '🇪', '🇫']\n side_column = ['<:lemonface:623315737796149257>', '1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣']\n\n list_of_emojis.insert(0, top_row[0:len(list_of_emojis)])\n\n emojis_to_send = []\n\n for row in range(0, len(list_of_emojis)):\n list_of_emojis[row].insert(0, side_column[row])\n emoji_string = '\\u200A'.join(list_of_emojis[row])\n emojis_to_send.append(emoji_string)\n\n return list_of_emojis",
"def fetch_all_characters(cls) -> Dict[str, Any]:\n res = cls._send_request(\"character\")\n return res",
"async def initial_request_emmojis(client):\n try:\n for automation_configuration in [\n automation_configuration for automation_configuration in AUTOMATION_CONFIGURATIONS.values()\n if automation_configuration.log_emoji_channel_id\n ]:\n await client.emoji_guild_get_all(automation_configuration.guild_id)\n except ConnectionError:\n # No internet connection\n return\n \n client.events.remove(initial_request_emmojis, name = 'ready')",
"def print_non_ascii_strings(self):\n for msg in MESSAGES:\n print('*INFO*' + msg)",
"async def listreact(self, ctx):\n emojis = await self.conf.guild(ctx.guild).reactions()\n msg = f\"Smart Reactions for {ctx.guild.name}:\\n\"\n for emoji in emojis:\n for command in emojis[emoji]:\n msg += f\"{emoji}: {command}\\n\"\n for page in pagify(msg, delims=[\"\\n\"]):\n await ctx.send(page)"
] | [
"0.6829809",
"0.67620057",
"0.6723042",
"0.66201127",
"0.66025",
"0.65196496",
"0.6501168",
"0.6340955",
"0.62791866",
"0.6245136",
"0.60249436",
"0.5856114",
"0.57686776",
"0.57601565",
"0.56315124",
"0.56029534",
"0.55517876",
"0.55233794",
"0.55045164",
"0.5494092",
"0.54903036",
"0.5455451",
"0.5445513",
"0.54231817",
"0.54208285",
"0.5407516",
"0.5389817",
"0.53652865",
"0.5362749",
"0.5360441"
] | 0.73602945 | 0 |
Coinflip, defaults to Kopf/Zahl if no players are given | async def coinflip(self, ctx, player1=None, *, player2=None):
rng = randint(1, 10)
if player1 is None and player2 is None:
if rng < 5:
return await self.bot.say("Kopf gewinnt!")
else:
return await self.bot.say("Zahl gewinnt!")
else:
if rng < 5:
return await self.bot.say("{} hat gewonnen!".format(player1))
else:
return await self.bot.say("{} hat gewonnen!".format(player2)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def flipcoin(self, ctx):\n flip = random.choice([True, False])\n if flip == True:\n msg = 'It\\'s heads!'\n await ctx.send(msg)\n elif flip == False:\n msg = 'It\\'s tails!'\n await ctx.send(msg)",
"def flip_player(cls):\n cls.current_player = 'X' if cls.current_player == 'O' else 'O'\n\n cls.display_board()\n cls.prompt_player()",
"async def coinflip(self, ctx):\n\n options = [\"Tails\", \"Heads\"]\n await ctx.send(random.choice(options))",
"def flip_player():\n global current_player\n # If current player is 'X', then set current player to 'O'.\n if current_player == 'X':\n current_player = 'O'\n # If current player is 'O', then set current player to 'X'.\n elif current_player == 'O':\n current_player = 'X'",
"async def coin(self, ctx):\n flip = random.randint(1, 2)\n if flip == 1:\n await ctx.send(\"You flipped heads!\")\n else:\n await ctx.send(\"You flipped tails!\")",
"def flip_coin_op() -> str:\n import random\n result = random.choice(['heads', 'tails'])\n print(result)\n return result",
"async def coinflip(self, ctx, choice: str):\n choices = [\"heads\", \"tails\"]\n number = random.randint(1, 2)\n if choice.lower() in choices:\n if choice.lower() == choices[number - 1].lower():\n await ctx.send(\"Yep that's right, you got {}\".format(choices[number - 1].title()))\n else:\n await ctx.send(\"Nope.\")\n else:\n await ctx.send(\"Are you trying to break me? Bastard :triumph:\")",
"def coinflip(num):\n heads = 0\n tails = 0\n for n in xrange(0, num):\n result = random.randint(0, 1)\n if result is 0:\n heads += 1\n else:\n tails += 1\n pctheads = float(heads)/num * 100\n pcttails = float(tails)/num * 100\n print '''\n coinflips: %i\n \n heads: %i %f%%\n \n tails: %i %f%% \n ''' % (num,\n heads, pctheads,\n tails, pcttails)",
"async def flip(self, ctx, user : discord.Member=None):\r\n if user != None:\r\n msg = \"\"\r\n if user.id == self.bot.user.id:\r\n user = ctx.author\r\n msg = \"Nice try. You think this is funny? How about *this* instead:\\n\\n\"\r\n char = \"abcdefghijklmnopqrstuvwxyz\"\r\n tran = \"ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz\"\r\n table = str.maketrans(char, tran)\r\n name = user.display_name.translate(table)\r\n char = char.upper()\r\n tran = \"∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z\"\r\n table = str.maketrans(char, tran)\r\n name = name.translate(table)\r\n await ctx.send(msg + \"(╯°□°)╯︵ \" + name[::-1])\r\n else:\r\n await ctx.send(\"*flips a coin and... \" + choice([\"HEADS!*\", \"TAILS!*\"]))",
"async def flip(self, ctx, user : discord.Member=None):\r\n if user != None:\r\n msg = \"\"\r\n if user.id == self.bot.user.id:\r\n user = ctx.message.author\r\n msg = \"Nice try. You think this is funny? How about *this* instead:\\n\\n\"\r\n char = \"abcdefghijklmnopqrstuvwxyz\"\r\n tran = \"ɐqɔpǝɟƃɥᴉɾʞlɯuodbɹsʇnʌʍxʎz\"\r\n table = str.maketrans(char, tran)\r\n name = user.display_name.translate(table)\r\n char = char.upper()\r\n tran = \"∀qƆpƎℲפHIſʞ˥WNOԀQᴚS┴∩ΛMX⅄Z\"\r\n table = str.maketrans(char, tran)\r\n name = name.translate(table)\r\n await self.bot.say(msg + \"(╯°□°)╯︵ \" + name[::-1])\r\n else:\r\n await self.bot.say(\"*flips a coin and... \" + choice([\"HEADS!*\", \"TAILS!*\"]))",
"def flip(numFlips):\n heads = 0\n for i in range(numFlips):\n if random.choice(('H', 'T')) == 'H':\n heads += 1\n return heads/numFlips",
"async def flip(message):\n choice = random.randint(0, 1)\n desc = \"heads\" if choice else \"tails\"\n return \"flipped a coin and picked: \" + desc",
"def flip_coins(num_coins, num_flips):\n return np.random.randint(2, size=(num_coins, num_flips))",
"def flip(): # No arguments here (important)\n\t\n\tif random.random () <.5:\n\t\treturn \"heads\"\n\telse:\n\t\treturn \"tails\"",
"def showdown(self):\r\n\r\n poker_hands = []\r\n message = \"\"\r\n for player in self.players:\r\n poker_hands.append(player.hand.best_poker_hand(self.community_cards.cards))\r\n\r\n # Reveal all cards when the round is over\r\n player.reveal_cards()\r\n\r\n if poker_hands[0].type > poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].type), str(poker_hands[1].type))\r\n self.players[0].credits += self.pot\r\n\r\n if poker_hands[0].type < poker_hands[1].type:\r\n message = \"Player {} won! \\nPoker hand >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].type), str(poker_hands[0].type))\r\n self.players[1].credits += self.pot\r\n\r\n if poker_hands[0].type == poker_hands[1].type:\r\n if poker_hands[0].highest_values > poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[0].name, str(poker_hands[0].highest_values), str(poker_hands[1].highest_values))\r\n self.players[0].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values < poker_hands[1].highest_values:\r\n message = \"Player {} won! \\nHighest value >{}< won against >{}<\".format(\r\n self.players[1].name, str(poker_hands[1].highest_values), str(poker_hands[0].highest_values))\r\n self.players[1].credits += self.pot\r\n\r\n elif poker_hands[0].highest_values == poker_hands[1].highest_values:\r\n message = \"It is a draw! Both players had >{}< and highest value >{}<\".format(\r\n poker_hands[0].type.name, str(poker_hands[0].highest_values))\r\n\r\n for player in self.players:\r\n player.credits += (self.pot // len(self.players))\r\n else:\r\n self.game_message_warning.emit(\"Incorrect comparison of poker hands\")\r\n\r\n self.new_output.emit(message)\r\n self.game_message.emit(message)\r\n self.new_credits.emit()\r\n self.new_pot.emit()",
"def flip_coin(p):\n\n assert 0 <= p <= 1, 'A probability should be between 0 and 1'\n return random.random() < p",
"async def flip(self, ctx, amount=1):\n if amount > 5:\n amount = 5\n possible_responses = [\"heads\", \"tails\"]\n for i in range(amount):\n await ctx.send(f\"> {ctx.author.mention} flipped `{random.choice(possible_responses)}`\")",
"async def foggle_flip(self, ctx: Context, base: Bases = 10):\n ...",
"def flip():\n #Roller turns to curl page\n pwm.ChangeDutyCycle(11)\n time.sleep(0.22)\n pwm.ChangeDutyCycle(0)\n time.sleep(3)\n\n #flipper flips\n pwm2.ChangeDutyCycle(7.8)\n time.sleep(4)\n pwm2.ChangeDutyCycle(0)\n time.sleep(3)\n\n \"\"\"#Flipper turns to flip page and flips back\n pwm2.ChangeDutyCycle(4)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(8)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(12)\n time.sleep(0.2)\n pwm2.ChangeDutyCycle(13)\n time.sleep(3)\n pwm2.ChangeDutyCycle(2.4)\n time.sleep(5)\"\"\"\n\n \"\"\"#Rollers turning back\n pwm.ChangeDutyCycle(1)\n time.sleep(0.2)\n pwm.ChangeDutyCycle(0)\n time.sleep(3)\"\"\"",
"def flipper(deck, message): #flips card in player hand\r\n\tflipcheck, flipcheck1 = 1, 0\r\n\ttempHand = []\r\n\r\n\tprint message,\r\n\ttime.sleep(0.33);print \".\",\r\n\ttime.sleep(0.33);print \".\",\r\n\ttime.sleep(0.34);print \".\"\r\n\r\n\ttry:\r\n\t\twhile flipcheck == 1:\r\n\t\t\ttry:\r\n\t\t\t\ttempHand = random.choice(deck) #grab card from player/cpu hand\r\n\t\t\t\tflipcheck = 0\r\n\r\n\t\t\texcept(TypeError):\r\n\t\t\t\tflipcheck1 += 1\r\n\r\n\t\t\t\tif flipcheck1 == 5:\r\n\t\t\t\t\tsys.exit(TypeError)\r\n\r\n\t\tif tempHand in deck:\r\n\t\t\tdeck.remove(tempHand) #removes tempHand from player/cpu hand\r\n\r\n\texcept(IndexError):\r\n\t\tpass\r\n\r\n\tif type(tempHand) == list:\r\n\t\tprint \"The card was a \" + str(tempHand[1]) + \" of \" + str(tempHand[0]) + \"!\\n\"\r\n\r\n\telse:\r\n\t\tprint \"The card was the \" + tempHand + \" wild card!\"\r\n\r\n\t\tif tempHand == 'MasterSpark': #MasterSpark Wild Card\r\n\t\t\tif deck == playerDeck:\r\n\t\t\t\tplayerScore -= 10\r\n\t\t\t\tprint 'MasterSpark!'\r\n\t\t\t\tplayerDisplayed.remove('MasterSpark')\r\n\t\t\telif deck == cpuDeck:\r\n\t\t\t\tplayerScore -= 10\r\n\t\t\t\tprint 'MasterSpark!'\r\n\t\t\t\tcpuDisplayed.remove('MasterSpark')\r\n\r\n\treturn [tempHand, deck] #returns two values. use arrays to get correct values with tempGrab[]\r",
"def coin(coins, heads):\n\treturn Fraction(int(fac(c) / fac(c-n) / fac(n)), 2**c)",
"def NFkB_cFlip_interaction():\n Parameter('Flip_degradase_0', 0)\n alias_model_components()\n \n Initial(Flip_degradase(bf=None), Flip_degradase_0)\n \n Rule('NFkB_cFlipL', NFkB() >> NFkB() + flip_L(bDED=None), Parameter('NFkB_FlipL', 1e-2))\n Rule('NFkB_cFlipS', NFkB() >> NFkB() + flip_S(bDED=None), Parameter('NFkB_FlipS', 1e-2))\n \n Rule('NFkB_degradase', NFkB() >> NFkB() + Flip_degradase(bf=None), Parameter('Deg_flip', 1e-6))\n Rule('Deg_cFlipL', Flip_degradase(bf=None) + flip_L(bDED=None) >> Flip_degradase(bf=None), Parameter('deg_FlipL', 5e-6))\n Rule('Deg_cFlipS', Flip_degradase(bf=None) + flip_S(bDED=None) >> Flip_degradase(bf=None), Parameter('deg_FlipS', 5e-6))",
"def flipper(s, rate, p, whitetile):\r\n if rate > p and whitetile:\r\n #flip spin\r\n return -s\r\n else:\r\n #don't flip spin\r\n return s",
"def main():\n\tprint(\"Let's flip a coin!\")\n\tnum_run = int(input('Number of runs: '))\n\trepeat = 0\n\n\t# 1st roll\n\troll = r.randint(1, 2)\n\tans = str(roll)\n\n\twhile True:\n\t\troll = r.randint(1, 2)\n\t\tans += str(roll)\n\n\t\t# 2nd roll: Same as the latter.\n\t\tif len(ans) == 2:\n\t\t\tif ans[0] == ans[1]:\n\t\t\t\trepeat += 1\n\t\t# Continuous roll: Same as the latter, different from the former.\n\t\telse:\n\t\t\tif ans[len(ans)-2] is ans[len(ans)-1] and ans[len(ans)-2] is not ans[len(ans)-3]:\n\t\t\t\trepeat += 1\n\t\tif repeat == num_run:\n\t\t\tbreak\n\n\t# print result\n\tresult = ''\n\tfor point in ans:\n\t\tif point is '1':\n\t\t\tresult += 'H'\n\t\telif point is '2':\n\t\t\tresult += 'T'\n\tprint(result)",
"def set_flip(self, val):\n self.flip = val",
"def set_flip(self, flipconv):\n if flipconv is None:\n flipconv = 'astro' # default\n if flipconv == 'astro': self._flip = -1\n elif flipconv == 'geo': self._flip = 1\n else: raise ValueError(\"flipconv must be 'astro', 'geo' or None for default.\")",
"def play_hanabi(num_players, strategy=None):\n game = Game(num_players)\n print game\n players = [Player(game, i, strategy) for i in range(num_players)]\n for player1 in players:\n for player2 in players:\n if player1.index == player2.index:\n continue\n player1.add_player(player2)\n turn = 0\n while game.draw:\n player = players[turn]\n player.play()\n turn = (turn + 1) % num_players\n # one last turn after the last card in drawn\n for i in range(num_players):\n player = players[turn]\n player.play()\n turn = (turn + 1) % num_players\n\n print game.firework\n print game.hands\n print game.score()",
"def other(player):\n return 1 - player",
"def other(player):\n return 1 - player",
"def other(player):\n return 1 - player"
] | [
"0.67842084",
"0.66972786",
"0.6554809",
"0.65462387",
"0.6510231",
"0.6505744",
"0.6217337",
"0.61918855",
"0.6128844",
"0.6107027",
"0.60780925",
"0.6042313",
"0.6022117",
"0.60063225",
"0.5861973",
"0.57754767",
"0.57477665",
"0.5738534",
"0.5674234",
"0.5642496",
"0.5630323",
"0.5612955",
"0.55905783",
"0.5583329",
"0.55711895",
"0.55634147",
"0.55033225",
"0.54656506",
"0.54656506",
"0.54656506"
] | 0.71466845 | 0 |
Moves all clients randomly in other channels for duration seconds. After the whirpool event, all clients will be in the same channel as before. Between the whirlpool cycles, the programm will sleep for relax_time seconds. | def whirlpool(ts3conn, duration=10, relax_time=0.5):
# Countdown till whirlpool
for i in range(5, 0, -1):
ts3conn.sendtextmessage(
targetmode=ts3.definitions.TextMessageTargetMode.SERVER,
target=0, msg="Whirpool in {}s".format(i))
time.sleep(1)
# Fetch the clientlist and the channellist.
clientlist = ts3conn.clientlist()
channellist = ts3conn.channellist()
# Ignore query clients
clientlist = [client for client in clientlist \
if client["client_type"] != "1"]
# Whirpool with one channel or no users is boring.
if len(channellist) == 1 or not clientlist:
return None
# We need this try-final construct to make sure, that all
# clients will be in the same channel at the end of the
# whirlpool as to the beginning.
try:
end_time = time.time() + duration
while end_time > time.time():
for client in clientlist:
clid = client["clid"]
cid = random.choice(channellist)["cid"]
try:
ts3conn.clientmove(clid=clid, cid=cid)
except ts3.query.TS3QueryError as err:
# Only ignore 'already member of channel error'
if err.resp.error["id"] != "770":
raise
time.sleep(relax_time)
finally:
# Move all clients back
for client in clientlist:
try:
ts3conn.clientmove(clid=client["clid"], cid=client["cid"])
except ts3.query.TS3QueryError as err:
if err.resp.error["id"] != "770":
raise
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def client_send(state):\n while state.running:\n disconnected_users = []\n time.sleep(0.05)\n for nick in users:\n nick, queue = nick, users[nick].queue\n while len(queue) > 0:\n sender, msg = queue.pop(0)\n message = '{}> {}'.format(sender, msg)\n print(message)\n try:\n for _usr in channels['SYSAR']:\n # if _usr != sender:\n print('should send')\n send_buf(users[_usr].socket, message)\n except:\n if nick not in disconnected_users:\n disconnected_users.append(nick)\n for nick in disconnected_users:\n print('ALERT::{} disconnected'.format(nick))\n del users[nick]",
"def RandomDelay():\r\n sleep(random())",
"def run(self):\n self.running = True\n for channel in self:\n sleep(self.hop_interval)\n if self.running is False:\n return\n self.hop_channel(channel)",
"def wait_to_active(max_players):\n while (len(active_sockets_dict) < max_players) and (len(queue) > 0):\n current_sock, current_client = queue.popitem(last=False)\n deep_clone_client = Client(current_client.socket, current_client.heaps, ACTIVE_GREETING)\n # current_client.TYPE = -5 # ACTIVE_GREETING\n # active_sockets_dict[current_sock] = current_client\n active_sockets_dict[deep_clone_client.socket] = deep_clone_client",
"def greedy_cow_transport(cows,limit=10):\n # TODO: Your code here\n pass",
"def send_state():\n while True:\n if I_AM_CRUSHED is False:\n sleep_time = random.randint(send_state_sec[0], send_state_sec[1])\n sock_check = socket.socket(type=socket.SOCK_DGRAM)\n sock_check.sendto(\"I'am healthy\", ('dispatcher', port_for_check))\n sock_check.close()\n time.sleep(sleep_time)",
"def take_readings(modules: list, mqtt_client: mqtt.Client, topic: str,\n sleep_duration: float = 0.5):\n while True:\n try:\n [mqtt_client.publish(f'{topic}{sonar.sonar_name}', f'{sonar.reading}') for sonar in modules]\n time.sleep(sleep_duration)\n except (KeyboardInterrupt, RuntimeError):\n # shutdown all board connections prior to quitting programme\n [sonar.close() for sonar in modules]\n break",
"def _burn_cpu():\n while True:\n random()*random()",
"async def discorole(self, ctx, times : int, *, role: discord.Role):\n time = 0\n while time < times:\n colour = ''.join([choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n await self.bot.edit_role(ctx.message.server, role, colour=discord.Colour(value=colour))\n time = time + 1\n await asyncio.sleep(5)",
"def setup(env, clerks, cachier):\n global workers_arrived\n while True:\n timeout = env.timeout(random.randint(IAT_MIN, IAT_MAX))\n yield timeout\n env.process(worker(env, workers_arrived, clerks, cachier))\n workers_arrived += 1",
"def cruise(self):\n while self.dist() > self.SAFE_STOP_DIST:\n time.sleep(.2)\n self.fwd()\n self.stop()",
"def topology_random_reconnect(self, probability):\n\t\tfor i in range(len(self.sites)):\n\t\t\tfor j in range(len(self.sites)):\n\t\t\t\tif (i != j) and (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability / 2.0:\n\t\t\t\t\t\tchoice_list = [s for s in self.sites if not (s in self.sites[i].neighbors)]\n\t\t\t\t\t\tif len(choice_list) > 0:\n\t\t\t\t\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\t\t\t\t\tself.sites[i].neighbors.remove(self.sites[j])\n\t\t\t\t\t\t\tself.sites[j].neighbors.remove(self.sites[i])\n\t\t\t\t\t\t\tself.sites[i].neighbors.append(choosed)\n\t\t\t\t\t\t\tchoosed.neighbors.append(self.sites[i])",
"def run(self):\n while True:\n if not self.clientQueue.empty():\n clientObj= self.clientQueue.get() \n self.clientDict[clientObj.role] = clientObj \n time.sleep(self.interval)",
"def brute_force_cow_transport(cows,limit=10):\n # TODO: Your code here\n pass",
"def side_step(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(-10, -150)\n time.sleep(2)\n r.go(-20)\n time.sleep(1)\n r.go(-10, 150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(-10, 150)\n time.sleep(2)\n r.go(-20)\n time.sleep(1)\n r.go(-10, -150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(10, 150)\n time.sleep(2)\n r.go(20)\n time.sleep(1)\n r.go(10, -150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(10, -150)\n time.sleep(2)\n r.go(20)\n time.sleep(1)\n r.go(10, 150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)",
"def shuffle(self): \n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop()",
"def sleeper(self):\n for waittime in (.01, .02, .05, .1, .2, .5):\n yield waittime\n while True:\n waittime = min(waittime + .2, 5)\n yield waittime",
"def twist(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(0, 50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)\n r.go(0, -50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)",
"def handle_timer(self):\n\n for dest in self.hosts_to_unused_ports:\n self.hosts_to_unused_ports[dest] = [host for host in self.hosts_to_unused_ports[dest] if api.current_time() != host.time_to_live] \n self.hosts_to_ports[dest] = self.find_minium_latency_unused_ports(self.hosts_to_unused_ports[dest])\n\n #Send the reachable routes (must be less than infinity)\n for dest in self.hosts_to_ports:\n if self.hosts_to_ports[dest].latency < INFINITY: \n distance_vector = self.hosts_to_ports[dest] \n host_latency = distance_vector.latency\n\n distance_vector = self.hosts_to_ports[dest]\n\n # Send normal route packet\n packet = basics.RoutePacket(dest, host_latency)\n self.send(packet, distance_vector.port)\n\n # Send poison packet if POISON_MODE is true\n if self.POISON_MODE == True:\n poison_packet = basics.RoutePacket(dest, INFINITY)\n self.send(poison_packet, distance_vector.port)",
"def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)",
"def mainLoop(options, modules, sender, tags):\n\n nextHeartbeat = int(time.time() + 600)\n while True:\n populateCollectors(options.cdir)\n reloadChangedConfigModules(modules, options, sender, tags)\n reapChildren()\n spawnChildren()\n time.sleep(MAIN_LOOP_INTERVAL)\n now = int(time.time())\n if now >= nextHeartbeat:\n LOG.info('Heartbeat (%d collectors running)'\n % sum(1 for col in allLivingCollectors()))\n next_heartbeat = now + 600",
"def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)",
"def sleep_sim_time(world, seconds, state_break=[False]):\n start = world.last_time if world.last_time else Time()\n remain = seconds\n\n while remain > 0 and not state_break[0]:\n yield From(trollius.sleep(0.1))\n now = world.last_time if world.last_time else Time()\n remain = seconds - float(now - start)",
"def work(self):\n time.sleep(random.randint(0, 200) / 100)\n pass",
"def wake_all_threads(self):\n self.advance_time(increment_by=0.0)",
"def sweep_loop(self, channels, reps, interval):\n\n PINS = self.select_CHPINS(channels)\n\n for i in range(reps):\n time.sleep(interval)\n gpio.output(PINS, 0)\n time.sleep(interval)\n gpio.output(PINS, 1)\n i += 1\n if i ==1:\n print ('1st cycle')\n elif i ==2:\n print ('2nd cycle')\n elif i ==3:\n print ('3rd cycle')\n else:\n print ('%rth cycle' %i)\n\n gpio.output(PINS, 0)",
"def cycle(self, message):\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n players = self.player_queue.pop_all()\n players_str = ' '.join(players)\n channel = SOCKET_ARGS['channel']\n if len(msg_list) > 1:\n credential_str = ' '.join(msg_list[1:])\n whisper_str = 'You may now join {} to play. The credentials you need are: {}'.format(\n channel, credential_str)\n self.player_queue_credentials = credential_str\n else:\n whisper_str = 'You may now join {} to play.'.format(channel)\n self.player_queue_credentials = None\n for player in players:\n self._add_to_whisper_queue(player, whisper_str)\n # self.command_queue.appendleft(('_delete_last_row', {}))\n self._add_to_chat_queue(\"Invites sent to: {} and there are {} people left in the queue\".format(\n players_str, len(self.player_queue.queue)))",
"def loop(self):\n while True:\n self.maybe_disconnect()\n\n # Grab any new events\n item_ids = []\n events = []\n come_back_soon = False\n try:\n while True:\n item = self.queuedir.pop()\n if not item:\n break\n if len(events) > 50:\n come_back_soon = True\n break\n\n try:\n item_id, fp = item\n item_ids.append(item_id)\n log.debug(\"Loading %s\", item)\n events.extend(json.load(fp))\n except:\n log.exception(\"Error loading %s\", item_id)\n raise\n finally:\n fp.close()\n log.info(\"Loaded %i events\", len(events))\n self.send(events)\n for item_id in item_ids:\n log.info(\"Removing %s\", item_id)\n try:\n self.queuedir.remove(item_id)\n except OSError:\n # Somebody (re-)moved it already, that's ok!\n pass\n except:\n log.exception(\"Error processing messages\")\n # Don't try again soon, something has gone horribly wrong!\n come_back_soon = False\n for item_id in item_ids:\n self.queuedir.requeue(item_id, self.retry_time, self.max_retries)\n\n if come_back_soon:\n # Let's do more right now!\n log.info(\"Doing more!\")\n continue\n\n # Wait for more\n # don't wait more than our max_idle/max_connect_time\n now = time.time()\n to_wait = None\n if self._disconnect_timer:\n to_wait = self._disconnect_timer - now\n if to_wait < 0:\n to_wait = None\n log.info(\"Waiting for %s\", to_wait)\n self.queuedir.wait(to_wait)",
"async def game(self):\n self.time_remaining = randint(\n int(pow(14 * len(self.participants), 0.8)),\n int(pow(30 * len(self.participants), 0.8))\n )\n\n member = choice(self.participants)\n Timer(1, self.timer).start()\n reply = True\n pass_to = []\n notify = randint(2, int(self.time_remaining / 2))\n\n while self.time_remaining > 0:\n if not pass_to:\n pass_from = list(self.participants)\n pass_from.pop(pass_from.index(member))\n pass_to = [choice(pass_from)]\n pass_from.pop(pass_from.index(pass_to[0]))\n pass_to.append(choice(pass_from))\n\n if reply is not None:\n await client.send_message(self.channel, \"{} :bomb: got the bomb! Pass it to either {} or {}!\".format(\n member.mention, pass_to[0].mention, pass_to[1].mention))\n\n def check(m):\n if len(m.mentions) > 0:\n if m.mentions[0] in pass_to:\n return True\n\n return False\n\n wait = (self.time_remaining - notify) if (self.time_remaining >= notify) else self.time_remaining\n reply = await client.wait_for_message(timeout=wait, channel=self.channel, author=member,\n check=check)\n\n if reply:\n member = reply.mentions[0]\n pass_to = []\n if self.member.permissions_in(self.channel).manage_messages:\n asyncio.ensure_future(client.delete_message(reply))\n elif self.time_remaining == notify:\n asyncio.ensure_future(client.send_message(self.channel, \":bomb: :fire: **IT'S GONNA BLOW!**\"))\n self.time_remaining -= 1\n\n await client.send_message(self.channel, \"{0.mention} :fire: :boom: :boom: :fire:\".format(member))\n await client.send_message(self.channel, \"**GAME OVER**\")",
"def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat"
] | [
"0.5521252",
"0.5468559",
"0.5398658",
"0.532204",
"0.5318605",
"0.5243049",
"0.521927",
"0.52137786",
"0.5190527",
"0.51749384",
"0.5163204",
"0.51520616",
"0.5127868",
"0.50814927",
"0.5065982",
"0.5059794",
"0.5046238",
"0.5038621",
"0.501738",
"0.5005819",
"0.4993451",
"0.49807587",
"0.49762204",
"0.4973308",
"0.49482927",
"0.49459425",
"0.4914924",
"0.49105948",
"0.49105692",
"0.49005085"
] | 0.7218653 | 0 |
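For context, a rough usage sketch of the whirlpool function from the record above. The connection and login calls are assumptions about the py-ts3 1.x query API (host, credentials and virtual-server id are placeholders), not part of the original record:

import ts3

# Placeholder host/credentials; adjust for your TeamSpeak 3 server.
with ts3.query.TS3Connection("localhost") as ts3conn:
    ts3conn.login(client_login_name="serveradmin",
                  client_login_password="secret")
    ts3conn.use(sid=1)  # select the virtual server
    # Shuffle all clients around for 10 seconds, pausing 0.5s between moves.
    whirlpool(ts3conn, duration=10, relax_time=0.5)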
Converts all seat strings into seat IDs and returns the highest seat ID found. | def highest_seat_id(raw_seat_string):
seat_list = raw_seat_string.split('\n')
return max(list(map(find_seat, seat_list))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_highest_seat_id(seat_ids):\n\n return max(seat_ids)",
"def get_max_seat_id(boarding_passes: list) -> int:\n return max(get_seat_id(boarding_pass) for boarding_pass in boarding_passes)",
"def part2(data: str):\n seat_ids = sorted(\n [bp.seat_id for bp in map(lambda row: BoardingPass(row), data.split(\"\\n\"))]\n )\n\n for i, seat_id in enumerate(seat_ids):\n if seat_ids[i + 1] != seat_id + 1:\n return seat_id + 1\n\n raise Exception(\"Couldn't find a missing seat id\")",
"def check_maximum_seat_number(data: list) -> int:\n max_id: int = 0\n for code in data:\n if count_seat_id(code) > max_id:\n max_id = count_seat_id(code)\n return max_id",
"def test_find_max_seat_id():\n data = [\n {\"seat_id\": 100},\n {\"seat_id\": 101},\n {\"seat_id\": 99},\n ]\n assert find_max_seat_id(data) == 101",
"def find_seat(seat_string):\n\n row = int(''.join([{'F':'0', 'B':'1'}[r] for r in seat_string[:7]]),2)\n col = int(''.join([{'L':'0', 'R':'1'}[c] for c in seat_string[7:]]),2)\n\n return row * 8 + col",
"def find_my_seat_ID(list_of_seat_IDs):\n # sort list in ascending order\n list_of_seat_IDs.sort(reverse=False)\n\n prev_seat_ID = list_of_seat_IDs[0]\n\n for seat_ID in list_of_seat_IDs[1:]:\n # check if current seat-ID is +1 above the previous one\n if seat_ID - prev_seat_ID != 1:\n # if this is not the case, stop loop -> found my seat-ID\n break\n\n prev_seat_ID = seat_ID\n\n return prev_seat_ID + 1",
"def main():\n boarding_passes = get_boarding_passes(\"./data_5.dat\")\n seat_ids = get_seat_ids(boarding_passes)\n print(get_highest_seat_id(seat_ids))\n print(get_missing_seat_id(seat_ids))",
"def compute_solution_of_puzzle():\n list_of_boarding_passes = get_list_of_boarding_passes()\n list_of_seat_IDs = get_all_seat_IDs(list_of_boarding_passes)\n\n print(\"[+] Solution of day5/puzzle1: {} is the highest seat ID\".format(max(list_of_seat_IDs)))\n\n my_seat_ID = find_my_seat_ID(list_of_seat_IDs)\n print(\"[+] Solution of day5/puzzle2: {} is my seat ID\".format(my_seat_ID))",
"def get_seat_id(boarding_pass):\n return int(re.sub(\"[FL]\", \"0\", re.sub(\"[BR]\", \"1\", boarding_pass)), 2)",
"def get_missing_seat_id(seat_ids):\n minimum, maximum = min(seat_ids), max(seat_ids)\n\n missing = [s for s in range(minimum, maximum) if s not in seat_ids]\n return missing[0]",
"def seat_id(row, col):\n return row * 8 + col",
"def get_seat_id(boarding_pass: str) -> int:\n row, column = parse_boarding_pass(boarding_pass)\n return row * 8 + column",
"def test_end_to_end_max_seat_id():\n assert process_max_seat_id(\"test/test_input.txt\") == 820",
"def find_best_candidate(s_array):\n best_string = ''\n max_val = 0\n for s in s_array:\n score = compare(s)\n if score > max_val:\n max_val = score\n best_string = s\n return best_string",
"def maxid() -> int:\n pass",
"def __map_player_id(self, seat): \n internal_player_id = None\n if seat:\n if seat == self.player_id:\n internal_player_id = self.COM_PLAYER_ID\n else:\n internal_player_id = self.OPPONENT_PLAYER_ID\n return internal_player_id",
"def brute_force_hashed(seats):\n seats = set(seats)\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None",
"def maximum_id(tweets):\n try:\n tree = etree.parse(StringIO(tweets), etree.XMLParser())\n statuses = tree.xpath('//statuses')\n id_str = statuses[0].xpath('./status/id/text()')\n ids = []\n for id in id_str:\n ids.append(int(id))\n return str(max(ids))\n\n except IndexError, e:\n raise e\n except ValueError, e:\n raise e",
"def _sorted_seat_ids(seats: list):\n seat_ids = [_seat_id(**seat) for seat in seats]\n return sorted(seat_ids)",
"def brute_force(seats):\n for seat in seats:\n if seat + 1 not in seats and seat + 2 in seats:\n return seat + 1\n\n return None",
"def count_seat_id(code: str) -> int:\n return (\n count_position(change_code_str_to_binary(code[:7], one=\"B\", zero=\"F\")) * 8 +\n count_position(change_code_str_to_binary(code[7:], one=\"R\", zero=\"L\"))\n )",
"def get_max_character(strings):\n m=0\n for string in strings:\n for char in string:\n if char>m:\n m=char\n return m",
"def longest_id(ids, seqs):\r\n lengths = map(len, [seqs.get(id_, '') for id_ in ids])\r\n return ids[argmax(lengths)]",
"def find_max_tidy_num(s_number):\n\n len_input = len(s_number) - 1\n\n if len_input == 0:\n return s_number\n\n for i in range(0, len_input):\n if int(s_number[i]) > int(s_number[i+1]):\n\n final_str = '9' * (len_input - i)\n s_number = s_number[:(i+1)]\n\n return ''.join([find_max_tidy_num(str(int(s_number)-1)), final_str])\n\n return s_number",
"def _row_seat_index(cls, res):\n seat = res[-1:]\n assert isinstance(seat, str)\n assert len(seat) == 1\n row_seat_index = cls._ROW_SEAT_INDEX.get(seat, None)\n assert row_seat_index is not None\n return row_seat_index",
"def id_for_station(station_name: str) -> Optional[int]:\n for s in STATIONS:\n if s[\"name\"] == station_name:\n return s[\"id\"]\n return None",
"def _compute_station_ids(prod, cli_station_name, is_multi):\n # Can't always use the AFOS as the station ID :(\n if is_multi:\n station = None\n for st in prod.nwsli_provider:\n if prod.nwsli_provider[st][\"name\"].upper() == cli_station_name:\n station = st\n break\n if station is None:\n raise CLIException(\n f\"Unknown CLI Station Text: |{cli_station_name}|\"\n )\n else:\n station = prod.source[0] + prod.afos[3:]\n # We have computed a four character station ID, is it known?\n if station not in prod.nwsli_provider:\n prod.warnings.append(\n f\"Station not known to NWSCLI Network |{station}|\"\n )\n return station, None, None\n\n access_station = None\n access_network = None\n # See if our network table provides an attribute that maps us to an ASOS\n val = prod.nwsli_provider[station].get(\"attributes\", {}).get(\"MAPS_TO\")\n if val is not None:\n tokens = val.split(\"|\")\n if len(tokens) == 2:\n access_station, access_network = tokens\n if access_station is None:\n # Our default mapping\n access_station = station[1:] if station.startswith(\"K\") else station\n access_network = f\"{prod.nwsli_provider[station].get('state')}_ASOS\"\n\n return station, access_station, access_network",
"def get_max_id(self):\r\n max_id = None\r\n for pid in self.players:\r\n if max_id is None or pid > max_id:\r\n max_id = pid\r\n return max_id",
"def find_latest_id(d, s):\n\n selected_tweets = [t['id'] for t in d if t['search_id'] == s]\n\n if selected_tweets:\n m = max(selected_tweets)\n else:\n m = None\n return m"
] | [
"0.74634355",
"0.668236",
"0.6261357",
"0.62445736",
"0.6111793",
"0.6099611",
"0.60279304",
"0.587468",
"0.5664313",
"0.5655479",
"0.55966735",
"0.5484212",
"0.53893024",
"0.5317917",
"0.5312065",
"0.52949077",
"0.5271475",
"0.52611125",
"0.5253164",
"0.52044994",
"0.5197342",
"0.5180566",
"0.5168977",
"0.5111602",
"0.5097587",
"0.5088534",
"0.5088001",
"0.50712",
"0.50581396",
"0.4990459"
] | 0.8179302 | 0 |
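A minimal worked example of the decoding that highest_seat_id relies on; find_seat is not defined in the document itself, so the version below is an assumed implementation mirroring the variant that appears in the negatives (F/B select the row as binary digits, L/R the column, seat ID = row * 8 + column):

def find_seat(seat_string):
    # First 7 characters encode the row (F=0, B=1), last 3 the column (L=0, R=1).
    row = int(seat_string[:7].replace('F', '0').replace('B', '1'), 2)
    col = int(seat_string[7:].replace('L', '0').replace('R', '1'), 2)
    return row * 8 + col

print(find_seat("FBFBBFFRLR"))                    # 357 (row 44, column 5)
print(highest_seat_id("FBFBBFFRLR\nBFFFBBFRRR"))  # 567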
All seats start out empty | def __init__(self):
self.empty_seats = [row * 8 + col for row in self.rows for col in self.cols] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def empty_seats(seats, seat_numbers):\n\n for seat in seat_numbers:\n seats[seat] = None\n\n return seats",
"def fill_empty_seats(seats_names_dict, seats):\n for seat in seats:\n if seat not in seats_names_dict:\n seats_names_dict[seat] = 'empty'\n\n return seats_names_dict",
"def empty(self):",
"def empty():\n return CAT([], 0, 0, active=False)",
"def _reserve_seats(cls, N, S):\n unreserved_seats = cls._generate_plane_seats(N)\n reserved_seats = unreserved_seats[:]\n if len(S) > 0:\n for res in cls._parse_reservations_generator(N, S):\n row_seat_offset = cls._get_row_seat_offset(res)\n assert row_seat_offset < len(reserved_seats)\n reserved_seats[row_seat_offset] = 1\n\n return reserved_seats",
"def _empty_clusters(clusters):\n for clst in clusters:\n clst.points = []",
"def fill_first_stool(self, number_of_cheeses):\n for i in range(number_of_cheeses, 0, -1):\n self._stools[0].append(Cheese(i))",
"def seat_passenger(self, seat_id):\n self.empty_seats.remove(seat_id)",
"def empty(self):\n self.drop()\n self.create()",
"def create_empty_schedule():\n\n\t# create empty dictionary with all room-timelock combinations (roomlocks) as keys\n\troomlocks = list(range(0, 140))\n\tschedule = dict.fromkeys(roomlocks)\n\n\treturn schedule",
"def find_all_available_seats(seats):\n\n available = []\n for seat_num, value in seats.items():\n if value is None:\n available.append(seat_num)\n return available",
"def accommodate_waiting_guests(seats, guests):\n\n curr_empty_seats = current_empty_seat_capacity(seats)\n empty_seat_list = find_all_available_seats(seats)\n\n if len(guests) <= curr_empty_seats:\n for index, _ in enumerate(guests):\n seats[empty_seat_list[index]] = guests[index]\n\n return seats",
"def set_empty(self):\n pattern = [[0,0,0,0],\n [0,0,0,0],\n [0,0,0,0],\n [0,0,0,0]]\n self.set_pattern(pattern)",
"def updateEmptiesSet(self):\n self.emptiesSet = []\n for i in self.Range:\n if self.get_cell(i) == 0:\n self.emptiesSet.append(i)",
"def receive_round_start_message(self, round_count: int, hole_card: List[str],\n seats: List[Dict[str, Union[str, int]]]) -> None:\n self.__hole_cards = hole_card",
"def clear():",
"def empty(cls):\n pass",
"def remove_empty_suites(self):\n self.visit(EmptySuiteRemover())",
"def i_am_empty():\n pass",
"def __get_free_seats(self, game_state):\n free_seats = []\n for i in range(len(game_state)):\n for j in range(len(game_state[i])):\n if not game_state[i][j]:\n free_seats.append((i, j))\n return tuple(free_seats)",
"def get_seats():\n seats = []\n boarding_passes = _load_passes()\n\n for boarding_code in boarding_passes:\n col_code = boarding_code[7:]\n row_code = boarding_code[:7]\n seat = {\n 'col': _decode(col_code, SEAT_COLS),\n 'row': _decode(row_code, SEAT_ROWS)\n }\n seats.append(seat)\n\n return seats",
"def reset(self) :\n for i in range(len(self.playerCellList)) :\n for j in range(len(self.playerCellList[i])) :\n self.playerCellList[i][j].hasPawn = False",
"def empty_spots(self):\n\t\tret = []\n\t\tfor i in range(0, self.size):\n\t\t\tfor j in range(0, self.size):\n\t\t\t\tif(self.grid[i][j] == self.terminal):\n\t\t\t\t\tret.append((i,j))\n\t\treturn ret",
"def empty(stuff):\n\tfor i in range(len(stuff)):\n\t\tstuff.pop()",
"def empty(self):\n self.items = []\n self.totalWeight = 0",
"def missing_seat_ids(taken_seats):\n all_seats = set(range(min(taken_seats), max(taken_seats) + 1))\n return(set(taken_seats).symmetric_difference(all_seats))",
"def empty_board():\n return [['','',''],\n ['','',''],\n ['','','']]",
"def current_empty_seat_capacity(seats):\n\n count = 0\n for value in seats.values():\n if value is None:\n count += 1\n return count",
"def _compute_seats(self):\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)",
"def empty_bag(self):\n if self.peds is not None:\n for _, model in self.peds.items():\n model.reset()\n self.drone.reset()\n self.subject.reset()"
] | [
"0.6661953",
"0.603927",
"0.603626",
"0.5855473",
"0.5745834",
"0.55990493",
"0.55397326",
"0.55388844",
"0.55323654",
"0.5485067",
"0.5471141",
"0.5468245",
"0.5461583",
"0.54550487",
"0.54510456",
"0.5441734",
"0.5439312",
"0.5425242",
"0.5423252",
"0.54135686",
"0.53805137",
"0.53749514",
"0.5370407",
"0.53647834",
"0.5364135",
"0.5362838",
"0.535829",
"0.53495973",
"0.53082085",
"0.53073645"
] | 0.64161956 | 1 |
Each time a passenger is seated, the seat_id is removed from the empty seats list | def seat_passenger(self, seat_id):
self.empty_seats.remove(seat_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_player(self, seat_id):\n player_id = seat_id\n try:\n idx = self._seats.index(self._player_dict[player_id])\n self._seats[idx] = Player(0, stack=0, emptyplayer=True)\n del self._player_dict[player_id]\n self.emptyseats += 1\n except ValueError:\n pass",
"def empty_seats(seats, seat_numbers):\n\n for seat in seat_numbers:\n seats[seat] = None\n\n return seats",
"def missing_seat_ids(taken_seats):\n all_seats = set(range(min(taken_seats), max(taken_seats) + 1))\n return(set(taken_seats).symmetric_difference(all_seats))",
"def _passenger_seats(self):\n row_numbers, seat_letters = self._aircraft.seating_plan()\n for row in row_numbers:\n for letter in seat_letters:\n passenger = self._seating[row][letter]\n if passenger is not None:\n yield (passenger, f\"{row}{letter}\")",
"def get_missing_seat_ids(boarding_passes: list) -> set:\n seat_ids = {get_seat_id(boarding_pass) for boarding_pass in boarding_passes}\n all_seat_ids = set(range(min(seat_ids), max(seat_ids) + 1))\n return all_seat_ids - seat_ids",
"def decrement_seats(train_id, segments):\n for segment in segments:\n #Probably will need date as well to update FreeSeats\n cursor.execute(\"\"\"update seats_free set freeseat = freeseat - 1 \n where train_id = %s and segment_id = %s\"\"\",[train_id,segment])\n db.commit()",
"def removePlayer(self, index):\n serial = self.seats[index]\n self.seats[index]=0\n if serial in self.players:\n del self.players[serial]",
"def get_seats():\n seats = []\n boarding_passes = _load_passes()\n\n for boarding_code in boarding_passes:\n col_code = boarding_code[7:]\n row_code = boarding_code[:7]\n seat = {\n 'col': _decode(col_code, SEAT_COLS),\n 'row': _decode(row_code, SEAT_ROWS)\n }\n seats.append(seat)\n\n return seats",
"def final_seat_assignment():\n parties = get_sorted_parties()\n allocated_seats = get_sorted_allocated_seats() \n #list(zip(parties, allocated_seats))\n #pandas.concat([parties, allocated_seats], axis=1)\n distributed_seats = []\n for i in range(0, len(parties)):\n list_votes2 = get_sorted_votes2([\"state\"], parties[i]) \n list_min_seats = get_sorted_min_seats([\"state\"], parties[i]) \n list_ueberhang = get_sorted_ueberhang([\"state\"], parties[i])\n seats2dist = allocated_seats[i] - sum(list_ueberhang)\n print(parties[i])\n distributed_seats.append((parties[i]\n , max(distributeSeats(seats2dist, list_votes2, False, 100) , list_min_seats)\n )) # adding tuples\n \n return distributed_seats",
"def current_sats(seats_list, sold_seats, reserved_seats):\r\n for i in range(0, len(seats_list)):\r\n if seats_list[i] in sold_seats:\r\n seats_list[i] = \" X \"\r\n elif seats_list[i] in reserved_seats:\r\n seats_list[i] = \" O \"\r\n return seats_list",
"def update_booking(name,seat_assign):\r\n seats_taken.append([seat_assign[0],convert_numtoletter(seat_assign[1])])\r\n seats_avai.remove(seat_assign) \r\n c.execute(\"\"\"UPDATE seating SET name =? WHERE row=? and seat=?\"\"\",(name,seat_assign[0],convert_numtoletter(seat_assign[1])))",
"def fill_empty_seats(seats_names_dict, seats):\n for seat in seats:\n if seat not in seats_names_dict:\n seats_names_dict[seat] = 'empty'\n\n return seats_names_dict",
"def find_missing_seat():\n seats = get_seats()\n sorted_seats = _sorted_seat_ids(seats)\n\n missing = []\n for i in range(len(sorted_seats)):\n seat = sorted_seats[i]\n if (i + 1) == len(sorted_seats):\n continue\n if not sorted_seats[i + 1] == seat + 1:\n missing.append(seat + 1)\n\n return missing",
"def _sorted_seat_ids(seats: list):\n seat_ids = [_seat_id(**seat) for seat in seats]\n return sorted(seat_ids)",
"def remove_training_reserves():\n reserves = TrainingReserve.objects.all()\n now = timezone.now()\n for reserve in reserves:\n if reserve.date < now:\n reserve.delete()",
"def __get_free_seats(self, game_state):\n free_seats = []\n for i in range(len(game_state)):\n for j in range(len(game_state[i])):\n if not game_state[i][j]:\n free_seats.append((i, j))\n return tuple(free_seats)",
"def find_seats(seat_ids: Set[SeatID]) -> Set[DbSeat]:\n if not seat_ids:\n return set()\n\n seats = DbSeat.query \\\n .filter(DbSeat.id.in_(frozenset(seat_ids))) \\\n .all()\n\n return set(seats)",
"def allocate_seat(self, seat, passenger):\n\n row, letter = self._parse_seat(seat)\n\n if self._seating[row][letter] is not None:\n raise ValueError(f\"Seat {seat} already occupied!\")\n\n self._seating[row][letter] = passenger",
"def _reserve_seats(cls, N, S):\n unreserved_seats = cls._generate_plane_seats(N)\n reserved_seats = unreserved_seats[:]\n if len(S) > 0:\n for res in cls._parse_reservations_generator(N, S):\n row_seat_offset = cls._get_row_seat_offset(res)\n assert row_seat_offset < len(reserved_seats)\n reserved_seats[row_seat_offset] = 1\n\n return reserved_seats",
"def remove_everyone_from_station(self, line: int, station_num: int):\n removed_users = []\n for user in self.__users.values():\n for station in user.stations:\n if station.line_number == line and station.station_number == station_num:\n removed_users.append(user)\n for user in removed_users:\n self.__users.pop(user.id)\n # map( )\n return removed_users",
"def _compute_seats(self):\n # initialize fields to 0 + compute seats availability\n for ticket in self:\n ticket.seats_availability = 'unlimited' if ticket.seats_max == 0 else 'limited'\n ticket.seats_unconfirmed = ticket.seats_reserved = ticket.seats_used = ticket.seats_available = 0\n # aggregate registrations by ticket and by state\n if self.ids:\n state_field = {\n 'draft': 'seats_unconfirmed',\n 'open': 'seats_reserved',\n 'done': 'seats_used',\n }\n query = \"\"\" SELECT event_ticket_id, state, count(event_id)\n FROM event_registration\n WHERE event_ticket_id IN %s AND state IN ('draft', 'open', 'done')\n GROUP BY event_ticket_id, state\n \"\"\"\n self._cr.execute(query, (tuple(self.ids),))\n for event_ticket_id, state, num in self._cr.fetchall():\n ticket = self.browse(event_ticket_id)\n ticket[state_field[state]] += num\n # compute seats_available\n for ticket in self:\n if ticket.seats_max > 0:\n ticket.seats_available = ticket.seats_max - (ticket.seats_reserved + ticket.seats_used)",
"def allocate_seats(constit, party_seats):\n constituencies = dict(constit)\n constituency_seats = {}\n for constituency, _ in constituencies.items():\n constituency_seats[constituency] = ''\n sorted_seats = sort_parties_by_seats(party_seats)\n for party, seats in sorted_seats:\n allocated = 0\n sorted_constituencies = sort_constituencies_by_party_popularity(\n constituencies, party)\n for constituency in sorted_constituencies:\n if allocated == seats:\n break\n constituency_seats[constituency] = party\n constituencies.pop(constituency)\n allocated += 1\n return constituency_seats",
"def create_seating_graph(seats_arr, excludes=('nan',0), inc_self=True):\n # which chairs are neighbours\n ni, nj = seats_arr.shape\n seats_graph = {}\n # loop through each seat\n for indi in range(ni):\n for indj in range(nj):\n\n # get current seat\n seat = seats_arr[indi, indj]\n\n if seat not in excludes:\n # now get the neighbours of the seat\n # here we consider the diagonals to be a neighbour\n ii = np.array([-1, 0, 1]) + indi\n jj = np.array([-1, 0, 1]) + indj\n # keep the indices within the bounds\n ii = ii[(ii >= 0) & (ii < ni)]\n jj = jj[(jj >= 0) & (jj < nj)]\n\n # loop through the indices\n inds = [(i, j) for i in ii for j in jj]\n neighbours = [seats_arr[ind] for ind in inds if seats_arr[ind] not in [seat]+list(excludes)]\n\n if inc_self:\n neighbours.append(seat)\n\n seats_graph[seat] = neighbours\n\n return seats_graph",
"def allocate_seat(self,seat,passenger_name):\r\n\r\n # Extract the row number for referancing the lst index\r\n row = seat[:2]\r\n\r\n # Extract the seat number for referacing the dictionary key.\r\n seat_letter = seat[-1]\r\n\r\n # Get the plan for comparison checks, if the input seat is valid\r\n rows,seat_letters = self._aircraft.seating_plan()\r\n\r\n # Entered input not in list of seat_letters\r\n if seat_letter not in seat_letters:\r\n raise ValueError(f\"Invalid seat letter in {seat}\")\r\n\r\n # Convert the row number to integer for indexing, if no possible raise ValueError\r\n try:\r\n row = int(row)\r\n except ValueError:\r\n raise ValueError(f\"Invalid row in {seat}\")\r\n\r\n # Check if the obtained row is in given list of rows = range(self._num_rows+1). Eg: Range supportcontainer protocol\r\n # >>> l = 2\r\n # >>> l in range(1,100)\r\n # True\r\n if row not in rows:\r\n raise ValueError(f\"Invalid row. Row {row} is not present\")\r\n\r\n # Check if the seat is already occupied\r\n if self._seating[row][seat_letter] is not None:\r\n raise ValueError(f\"{seat} is already taken\")\r\n\r\n #Allocate the seat\r\n self._seating[row][seat_letter] = passenger_name",
"def kill_candidate(self, confid):\n for dct in self.c.select(gaid=confid):\n self.c.update(dct.id, extinct=1)",
"def delete_gkeeper(alist):\n\n res = [player for player in alist if player[2] != ['Por']]\n\n return res",
"def booking_single(i):\r\n assign = sorted(seats_avai,key=lambda x: x[0])[0]\r\n update_booking(name[i],assign)",
"def reserve_seat(self):\n try:\n # start a new transaction\n self.cnx.start_transaction()\n cur = self.cnx.cursor()\n\n # iterate through the rows of the result until\n # we find a seat that is open\n cur.execute(\"select seat, status from Flights\")\n found = None\n for row in cur.fetchall():\n if row[1] == 0:\n found = row[0]\n break\n\n # if we found an available seat\n if found is not None:\n # wait for user to confirm they want the seat\n print \"seat \", found, \" is open. <Enter> to continue.\"\n sys.stdin.readline()\n\n # update that the seat is taken\n cur.execute(\"update Flights set status = 1 where seat = %s\", (found,))\n self.cnx.commit()\n return found\n else:\n # if failed to reserve that seat then rollback and return None to indicate failure\n self.cnx.rollback()\n return None\n except mysql.connector.InternalError as e:\n print \"failed to reserve: \", e\n try:\n self.cnx.rollback()\n except mysql.connector.InternalError as e:\n # silence\n pass\n return None",
"def seats_count(self) -> int:\n return self.__seats_count",
"def get_seat_ids(boarding_passes):\n return list(map(get_seat_id, boarding_passes))"
] | [
"0.6723246",
"0.67226124",
"0.62767714",
"0.587385",
"0.58325046",
"0.57876456",
"0.5778089",
"0.5708203",
"0.5675765",
"0.5618063",
"0.5579411",
"0.5568858",
"0.5519024",
"0.5517418",
"0.54689866",
"0.54673475",
"0.5445841",
"0.54018897",
"0.5352401",
"0.5350176",
"0.5329276",
"0.530581",
"0.5277771",
"0.5266012",
"0.52350545",
"0.52178204",
"0.521467",
"0.52063435",
"0.516858",
"0.5167067"
] | 0.830451 | 0 |
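Taken together, the __init__ and seat_passenger fragments in the last two records suggest a small seat-tracking class. Below is a self-contained sketch under the assumption of the usual 128-row, 8-column cabin layout; the rows/cols class attributes are not shown in the original fragments and are guesses:

class Plane:
    rows = range(128)  # assumed: 128 rows ...
    cols = range(8)    # ... of 8 seats each

    def __init__(self):
        # All seats start out empty.
        self.empty_seats = [row * 8 + col for row in self.rows for col in self.cols]

    def seat_passenger(self, seat_id):
        # Seating a passenger removes that seat ID from the empty list.
        self.empty_seats.remove(seat_id)

plane = Plane()
plane.seat_passenger(357)
print(357 in plane.empty_seats)  # False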
Puts the skips in the sequential cut | def put_skips_in_seq_cut(self):
        # first, put skips after any part of the cut that contains an end activity
in_end_act = set(self.initial_end_activities)
i = 0
while i < len(self.children) - 1:
activities_set = set(self.children[i].activities)
intersection = activities_set.intersection(in_end_act)
if len(intersection) > 0:
j = i + 1
while j < len(self.children):
self.children[j].must_insert_skip = True
j = j + 1
i = i + 1
        # second, put skips for parts of the cut that are not guaranteed to be passed through
i = 0
while i < len(self.children) - 1:
act_i = self.children[i].activities
act_i_output_appearences = {}
max_value = i
for act in act_i:
if act in self.outgoing:
for out_act in self.outgoing[act]:
act_i_output_appearences[out_act] = len(self.children) - 1
j = i + 1
while j < len(self.children):
act_children = self.children[j].activities
for act in act_children:
if act in act_i_output_appearences and act_i_output_appearences[act] == len(self.children) - 1:
act_i_output_appearences[act] = j
if j > max_value:
max_value = j
j = j + 1
j = i + 1
while j < max_value:
self.children[j].must_insert_skip = True
j = j + 1
i = i + 1
        # third, put skips on parts that an externally-entering trace may bypass (entry points from outside can lie in a later part)
out_start_activities = infer_start_activities_from_prev_connections_and_current_dfg(self.initial_dfg, self.dfg,
self.activities,
include_self=False)
out_start_activities_diff = out_start_activities - set(self.activities)
for act in out_start_activities_diff:
out_act_here = set()
for el in self.initial_dfg:
if el[0][0] == act and el[0][1] in self.activities:
out_act_here.add(el[0][1])
i = 0
while i < len(self.children):
child_act = set(self.children[i].activities)
inte = child_act.intersection(out_act_here)
if inte:
for el in inte:
out_act_here.remove(el)
if len(out_act_here) > 0:
self.children[i].must_insert_skip = True
i = i + 1
# fourth, put skips until all start activities are reached
remaining_act = self.start_activities
i = 0
while i < len(self.children):
child_act = set(self.children[i].activities)
inte = child_act.intersection(remaining_act)
if inte:
for el in inte:
remaining_act.remove(el)
if len(remaining_act) > 0:
self.children[i].must_insert_skip = True
i = i + 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def skipp(self):\n for x in range(4):\n self.fwd(right=100, left=100)\n time.sleep(.5)\n self.servo(1000)\n time.sleep(.1)\n self.servo(2000)\n time.sleep(.1)\n self.fwd(right=-100, left=-100)\n time.sleep(.1)\n self.servo(-1000)\n self.stop()",
"def _calc_skips(self, heatmap, num_lines):\n if num_lines < self.MIN_SKIP_SIZE:\n return []\n skips, prev_line = [], 0\n for line in sorted(heatmap):\n curr_skip = line - prev_line - 1\n if curr_skip > self.SKIP_LINES:\n skips.append((prev_line, curr_skip))\n prev_line = line\n if num_lines - prev_line > self.SKIP_LINES:\n skips.append((prev_line, num_lines - prev_line))\n return skips",
"def clip_scaffold_loops(self):\r\n start = 0\r\n index = 0\r\n ie = len(self.walk)\r\n while index < ie:\r\n segment = None\r\n try:\r\n segment = self.walk[index+1]\r\n except IndexError:\r\n self.remove_biggest_loop_in_range(start, index)\r\n return\r\n if segment is None or segment.value == 'RUNG':\r\n # Segment is essential.\r\n if start != index:\r\n ie -= self.remove_biggest_loop_in_range(start, index)\r\n start = index + 2\r\n index += 2",
"def skip(t, n):\n pu(t)\n fd(t, n)\n pd(t)",
"def skip(t, n):\n pu(t)\n fd(t, n)\n pd(t)",
"def cut( self, i_start, i_stop ):\n # create two series of indices, combine them and remove them from the data cube\n beginning = np.arange( i_start, dtype=int )\n end = np.arange( i_stop, self.n_steps, dtype=int )\n self._remove_steps( np.concatenate([beginning,end]).tolist() )",
"def cut_train_skip_predict(self, hits, *args):\n n_days = self.predict_window + self.train_window\n # How much free space we have to choose starting day\n free_space = self.inp.data_days - n_days - self.back_offset - self.start_offset\n if self.verbose:\n lower_train_start = pd.to_datetime(self.inp.data_start) + pd.Timedelta(self.start_offset, 'D')\n lower_test_end = lower_train_start + pd.Timedelta(n_days, 'D')\n lower_test_start = lower_test_end - pd.Timedelta(self.predict_window, 'D')\n upper_train_start = pd.to_datetime(self.inp.data_start) + pd.Timedelta(free_space - 1, 'D')\n upper_test_end = upper_train_start + pd.Timedelta(n_days, 'D')\n upper_test_start = upper_test_end - pd.Timedelta(self.predict_window, 'D')\n print(f\"Free space for training: {free_space} days.\")\n print(f\" Lower train {lower_train_start.date().strftime('%Y-%m-%d')}, prediction {lower_test_start.date().strftime('%Y-%m-%d')}..{lower_test_end.date().strftime('%Y-%m-%d')}\")\n print(f\" Upper train {upper_train_start.date().strftime('%Y-%m-%d')}, prediction {upper_test_start.date().strftime('%Y-%m-%d')}..{upper_test_end.date().strftime('%Y-%m-%d')}\")\n # Random starting point\n offset = tf.random_uniform((), self.start_offset,self.start_offset + free_space+1, dtype=tf.int32, seed=self.rand_seed)\n end = offset + n_days\n # Cut all the things\n return self.cut(hits, offset, end) + args",
"def onCut(self):\n pass",
"def KartDiscreteSkip(KartMultiDiscretizer):\n\n def __init__(self, env, max_skip):\n super(KartDiscreteSkip, self).__init__(env)\n\n self.max_skip = max_skip\n\n def reset(self, **kwargs):\n observation = super(KartDiscreteSkip, self).reset(**kwargs)\n observation, _, _, _ = self.env.step(self._actions[0].copy())\n return observation",
"def cut_lines(lines, pseudoread_length):\n step = int(pseudoread_length / 2)\n\n line_iterate = [x for x in range(0, len(lines), 2)]\n\n result = []\n\n for index in line_iterate:\n\n if (index % 100000) == 0:\n print(index)\n\n id = lines[index].strip()\n\n sequence = lines[index + 1].strip()\n\n # if sequence is shorter than single window, we return just window\n end_of_range = len(sequence) - step if (len(sequence) - step > 0) else len(sequence)\n range_iterate = [x for x in\n range(0, end_of_range, step)]\n\n for i in range_iterate:\n new_id = id + '|{}'.format(i)\n kmer = sequence[i:i + pseudoread_length]\n result.append(new_id)\n result.append(kmer)\n\n return result",
"def hollow(t, n):\n lt(t)\n skip(t, n)\n rt(t)",
"def skip_lines(nb):\n if nb == -1:\n os.system('cls' if os.name=='nt' else 'clear')\n else:\n print(\"\\n\" * (nb-1))",
"def compute_skiprows(start, end) -> List[int]:\n return list(range(start - 1)) + list(range(end, end + 20))",
"def cut_line(self):\r\n self.parachute.pop(0)",
"def partition(seq):\n\n return 0",
"def run_skip(self):\n pass",
"def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il",
"def stair(self, steps):\n s_list = range(steps, 0, -1)\n return _BosonicPartitions(s_list)",
"def consecutive_sections(): # noqa: D416",
"def fix_half_inning(self, half_inning):\n outs = 0\n active_runners = []\n for atbat in half_inning:\n self.hold_runners(active_runners, atbat)\n\n active_runners = [r for r in atbat.runners\n if not r.out and r.end != 4]\n outs = atbat.outs",
"def pyramid_slice(x1,y1,x2,y2,z,delta,deltaz,taper_x,taper_y,taper_straight,layers):\r\n\tcutlist = []\r\n\ty_max = abs(y1-y2)\r\n\tfor a in range(layers):\r\n\t\ti = 0\r\n\t\tnew_x1, new_y1, new_x2, new_y2 = x1 - a*taper_x, y1-a*taper_straight, x2+a*taper_x, y2+a*taper_y\r\n\t\twhile abs(new_y1 - (y1 - a*taper_straight)) < y_max and x1 > 0:\r\n\t\t\tif i % 2 == 0:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\telse:\r\n\t\t\t\tcutlist.append([\"jump\", f\"{new_x2:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\t\tcutlist.append([\"mark\", f\"{new_x1:.6f}\", f\"{new_y1:.6f}\"])\r\n\t\t\tnew_y1 = new_y1-delta\r\n\t\t\ti = i + 1\r\n\t\tif a < layers - 1:\r\n\t\t\tcutlist.append([\"z_step\", str(-deltaz)])\r\n\t\ty_max = y_max - taper_straight - taper_y\r\n\r\n\treturn cutlist",
"def skip ( nEvents ) :\n st = SUCCESS \n with DisabledAlgos() :\n st = run ( nEvents )\n \n return st",
"def test_skip_list_run_skip(self):\n mock_sqr = SequenceRun()\n mock_sqr.instrument_run_id = TestConstant.instrument_run_id.value\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\n\n when(fastq_update_step).perform(...).thenReturn(\"FASTQ_UPDATE_STEP\")\n when(google_lims_update_step).perform(...).thenReturn('GOOGLE_LIMS_UPDATE_STEP')\n when(dragen_wgs_qc_step).perform(...).thenReturn('DRAGEN_WGS_QC_STEP')\n when(dragen_tso_ctdna_step).perform(...).thenReturn('DRAGEN_TSO_CTDNA_STEP')\n when(dragen_wts_step).perform(...).thenReturn('DRAGEN_WTS_STEP')\n\n run_id = TestConstant.instrument_run_id.value\n skiplist = {\n 'global': [],\n 'by_run': {\n run_id: [\n \"DRAGEN_WGS_QC_STEP\"\n ]\n }\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertFalse('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)\n\n skiplist = {\n 'global': [\"DRAGEN_WGS_QC_STEP\"],\n 'by_run': {\n run_id: [\n \"DRAGEN_TSO_CTDNA_STEP\",\n \"DRAGEN_WTS_STEP\"\n ]\n }\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertFalse('DRAGEN_WGS_QC_STEP' in results)\n self.assertFalse('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertFalse('DRAGEN_WTS_STEP' in results)",
"def dumbSnake_burst(self, xStart, xEnd, yDelta, nRoundTrips, sweepTime,windowList,startgrid):#for burst mode\n self.sam_x.umv(xStart)\n self.sam_y.umv(windowList[startgrid])\n daq.connect()\n daq.begin()\n sleep(2)\n print('Reached horizontal start position')\n # looping through n round trips\n \n for j in range(len(windowList)-startgrid):\n self.sam_y.umv(windowList[startgrid+j])\n self.sam_y.wait()\n print('Windos position %f'%(self.sam_y.wm()))\n\n for i in range(nRoundTrips):\n try:\n print('starting round trip %d' % (i+1))\n self.sam_x.mv(xEnd)\n sleep(0.1)\n seq.start()#start sequence Need to be set \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition',self.sam_y.wm())\n sleep(1.2)#wait for turning around \n self.sam_x.mv(xStart)\n sleep(0.1)\n #pp.open()\n seq.start()#start sequence \n #sleep(sweepTime)\n #pp.close()\n self.sam_x.wait()\n self.sam_y.mvr(yDelta)\n print('yposition',self.sam_y.wm())\n sleep(1.2)\n except:\n print('round trip %d didn not end happily' % i)\n \n daq.end_run()\n daq.disconnect()\n\n\n #daq.end()",
"def test_skip_list_no_skip(self):\n mock_sqr = SequenceRun()\n mock_sqr.instrument_run_id = TestConstant.instrument_run_id.value\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\n\n when(fastq_update_step).perform(...).thenReturn(\"FASTQ_UPDATE_STEP\")\n when(google_lims_update_step).perform(...).thenReturn('GOOGLE_LIMS_UPDATE_STEP')\n when(dragen_wgs_qc_step).perform(...).thenReturn('DRAGEN_WGS_QC_STEP')\n when(dragen_tso_ctdna_step).perform(...).thenReturn('DRAGEN_TSO_CTDNA_STEP')\n when(dragen_wts_step).perform(...).thenReturn('DRAGEN_WTS_STEP')\n\n skiplist = {\n 'global': [],\n 'by_run': {}\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n self.assertTrue('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)",
"def remove_4s_every_other_in_between(seq):\n seq_copy = seq [4:-4:2]\n return seq_copy",
"def side_step(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(-10, -150)\n time.sleep(2)\n r.go(-20)\n time.sleep(1)\n r.go(-10, 150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(-10, 150)\n time.sleep(2)\n r.go(-20)\n time.sleep(1)\n r.go(-10, -150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(10, 150)\n time.sleep(2)\n r.go(20)\n time.sleep(1)\n r.go(10, -150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)\n for i in range(num_repeats):\n r.go(10, -150)\n time.sleep(2)\n r.go(20)\n time.sleep(1)\n r.go(10, 150)\n time.sleep(2)\n r.stop()\n time.sleep(.15)",
"def test_skip_list_wrong_run_skip(self):\n mock_sqr = SequenceRun()\n mock_sqr.instrument_run_id = TestConstant.instrument_run_id.value\n\n mock_workflow = Workflow()\n mock_workflow.wfr_id = f\"wfr.{_rand(32)}\"\n mock_workflow.type_name = WorkflowType.BCL_CONVERT.value\n mock_workflow.end_status = WorkflowStatus.SUCCEEDED.value\n mock_workflow.sequence_run = mock_sqr\n mock_workflow.output = \"\"\n\n when(fastq_update_step).perform(...).thenReturn(\"FASTQ_UPDATE_STEP\")\n when(google_lims_update_step).perform(...).thenReturn('GOOGLE_LIMS_UPDATE_STEP')\n when(dragen_wgs_qc_step).perform(...).thenReturn('DRAGEN_WGS_QC_STEP')\n when(dragen_tso_ctdna_step).perform(...).thenReturn('DRAGEN_TSO_CTDNA_STEP')\n when(dragen_wts_step).perform(...).thenReturn('DRAGEN_WTS_STEP')\n\n run_id = str(TestConstant.instrument_run_id.value).replace(\"2\", \"1\")\n skiplist = {\n 'global': [],\n 'by_run': {\n run_id: [\n \"DRAGEN_WGS_QC_STEP\"\n ]\n }\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n # by_run skip list should not apply, since run id mismatch, so all workflows should be listed\n self.assertTrue('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)\n\n skiplist = {\n 'global': [\"DRAGEN_WGS_QC_STEP\"],\n 'by_run': {\n run_id: [\n \"DRAGEN_TSO_CTDNA_STEP\",\n \"DRAGEN_WTS_STEP\"\n ]\n }\n }\n\n results = orchestrator.next_step(mock_workflow, skiplist, None)\n logger.info(results)\n\n # only global skip list should apply, due to run ID mismatch\n self.assertFalse('DRAGEN_WGS_QC_STEP' in results)\n self.assertTrue('DRAGEN_TSO_CTDNA_STEP' in results)\n self.assertTrue('DRAGEN_WTS_STEP' in results)",
"def skip_loop(self, loops=1):\n self.decrease_count(loops)\n self.decrease_total_count(loops)\n return self",
"def skip_next_segment(self):\n self.test_script_source.next_segment()"
] | [
"0.6081469",
"0.59631395",
"0.590605",
"0.59015036",
"0.59015036",
"0.563135",
"0.5596978",
"0.5508532",
"0.550615",
"0.5485905",
"0.54270315",
"0.54068",
"0.5339759",
"0.53249735",
"0.52994674",
"0.5296712",
"0.5285578",
"0.5242751",
"0.52426267",
"0.5176084",
"0.5173648",
"0.516255",
"0.5151362",
"0.5130012",
"0.5122363",
"0.51050264",
"0.50944644",
"0.50829214",
"0.5082607",
"0.5067749"
] | 0.7369221 | 0 |
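The put_skips_in_seq_cut code above iterates self.initial_dfg expecting elements shaped like ((source_activity, target_activity), ...), i.e. el[0][0] is the source and el[0][1] the target of a directly-follows pair. A tiny illustration of that assumed structure (the counts are hypothetical and unused by this method):

initial_dfg = [
    (("register request", "examine casually"), 3),
    (("examine casually", "check ticket"), 5),
]
for el in initial_dfg:
    print(el[0][0], "->", el[0][1])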
Start the monitoring loop for the downloads. | def start(self):
self._logger.info("Starting download monitor (interval: %d seconds)" % self.interval)
self.monitor_lc = ensure_future(looping_call(0, self.interval, self.monitor_downloads)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n self.monitor.start()",
"def start_download(self) -> NoReturn:\n if self.threaded:\n self.threaded_download()\n else:\n self.regular_download()",
"def monitor_downloads(self):\n return self.request_manager.get_downloads().addCallback(self.on_downloads)",
"def start_downloads():\n todownload = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='retrying' \" \\\n \"ORDER BY created_at ASC\")\n todownload += jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='new' \" \\\n \"ORDER BY created_at ASC\")\n\n for file in todownload:\n if can_download():\n dlm_cout.outs(\"Initiating download of %s\" % \\\n os.path.split(file['filename'])[-1])\n\n # Update file status and insert entry into download_attempts\n queries = []\n queries.append(\"UPDATE files \" \\\n \"SET status='downloading', \" \\\n \"details='Initiated download', \" \\\n \"updated_at='%s' \" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n jobtracker.query(queries)\n queries = []\n queries.append(\"INSERT INTO download_attempts (\" \\\n \"status, \" \\\n \"details, \" \\\n \"updated_at, \" \\\n \"created_at, \" \\\n \"file_id) \" \\\n \"VALUES ('%s', '%s', '%s', '%s', %d)\" % \\\n ('downloading', 'Initiated download', jobtracker.nowstr(), \\\n jobtracker.nowstr(), file['id']))\n insert_id = jobtracker.query(queries, fetchone=True)\n attempt = jobtracker.query(\"SELECT * FROM download_attempts \" \\\n \"WHERE id=%d\" % insert_id, fetchone=True)\n \n # download(attempt)\n DownloadThread(attempt).start()\n else:\n break",
"def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)",
"def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )",
"def monitor(self):\n if self.startup():\n time.sleep(0.250)\n self.run()",
"def start(self):\n self.watcher.start()\n self._asyncio_loop.run_forever()",
"def start_watcher():\n while True:\n request_date = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n pull_request_from_remote(remote_files=\"*%s*\" % request_date)\n new_requests = check_for_new_request(request_date=request_date)\n if not new_requests:\n time.sleep(5)\n continue\n\n # noinspection PyTypeChecker\n for r in new_requests:\n print(\"Processing %s\" % r)\n try:\n ret = process_new_request(r, request_date=request_date,\n add2db=True)\n print(ret)\n except:\n os.system('cp -r %s /home/sedm/growth_marshal/archived/failed/'\n % r)\n os.system('cp -r %s /home/sedm/growth_marshal/archived/%s/' %\n (r, request_date))\n\n print(\"Waiting %ss before checking for new request\" % 5)\n time.sleep(5)",
"def start(self):\n try:\n self.getEverything()\n self._watchFolder()\n except Unauthorized, e:\n self.authorize()\n self.start()\n \n #TODO: make this work\n #self._setPeriodicSync()\n \n print 'stopped'",
"def run(self):\n self._start_servers()\n monitor = KodiMonitor(self.nx_common, self.nx_common.log)\n while not monitor.abortRequested():\n monitor.update_playback_progress()\n try:\n if self.library_update_scheduled() and self._is_idle():\n self.update_library()\n except RuntimeError as exc:\n self.nx_common.log(\n 'RuntimeError: {}'.format(exc), xbmc.LOGERROR)\n if monitor.waitForAbort(5):\n break\n self._shutdown()",
"def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()",
"def start(self):\n logger.debug(\"Starting {0} downloaders\".format(self.num_downloaders))\n for p in self._downloaders:\n # p.daemon = True\n p.start()\n logger.debug(\"Starting {0} checkers\".format(self.num_checkers))\n for p in self._checkers:\n # p.daemon = True\n p.start()",
"def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)",
"def start_monitoring(self):\n pass",
"def start(self):\n self.monitor_lc.start(self.interval)",
"def run(self):\n download(self.attempt)",
"def watch(self):\n wm = pyinotify.WatchManager()\n self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)\n wm.add_watch(self.directory, pyinotify.ALL_EVENTS)\n try:\n self.notifier.loop()\n except (KeyboardInterrupt, AttributeError):\n print_notification(\"Stopping\")\n finally:\n self.notifier.stop()\n self.terminate_processes()",
"def start(self):\n if self.driver:\n eventlet.spawn_n(self.driver.monitor_events)",
"def Listen(self):\n while True:\n time.sleep(1)",
"def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()",
"def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()",
"def start(self):\n\n ydl_opts = {}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n while True:\n videos = self.get_videos() # getting list of all videos from file\n print('{} videos to go'.format(len(videos))) # print no. of video remaining\n video = get_first_item(videos) # get next video for downloading\n if video is None: # check if video is there or not\n break\n\n ydl.download([video]) # downloading video\n videos.remove(video) # remove video from list\n self.save_file(videos) # save updated list to file\n\n print('All downloaded')",
"def run(self):\n\n self._daemon_thread.start()\n\n while True:\n time.sleep(5)",
"def run():\n check_active_requests()\n start_downloads()\n check_download_attempts()\n numsuccess = verify_files()\n recover_failed_downloads()\n check_downloading_requests()\n acknowledge_downloaded_files()\n if can_request_more():\n make_request()\n return numsuccess",
"def run(self):\n\t\tfor source in self.sources:\n\t\t\tstringutil.print_color(Fore.GREEN, 'Downloading from Source: %s' % source.get_alias())\n\t\t\tfor r in source.get_elements():\n\t\t\t\tr.set_source(source)\n\t\t\t\tself._queue.put(r)\n\n\t\t\t\t# Extra tracking stuff below:\n\t\t\t\twith self._c_lock:\n\t\t\t\t\tself._total_count+= 1\n\t\t\t\tif self._testing_cache is not None:\n\t\t\t\t\tself._testing_cache.append(r)\n\t\t#print(\"Element loading complete.\\n\")\n\t\tself._running = False",
"def run(self):\n self.empty_pid_file()\n self.queue = Queue()\n self.monitor_process = Process(\n target=ResourceMonitor.monitor_function,\n args=(self.launcher, self.pid_file, self.frequency, self.queue)\n )\n self.monitor_process.start()",
"def start(self):\n self._watchdog_thread.start()",
"def run(self):\n while self.running:\n self.handle_request()",
"def startDownloadQueue(self):\n\n self.runEventCallbacks('downloadQueueStarted') \n while len(self.downloadQueue):\n if self.downloadQueue[0]['dst'] != None:\n self.getFile(self.downloadQueue[0]['src'], \n self.downloadQueue[0]['dst'])\n self.runEventCallbacks('downloadQueueFinished') \n self.clearDownloadQueue()"
] | [
"0.7030247",
"0.6881864",
"0.681746",
"0.6760977",
"0.6704339",
"0.66790855",
"0.6652548",
"0.66350824",
"0.6631739",
"0.66223013",
"0.65687346",
"0.65481055",
"0.65054774",
"0.6494747",
"0.64725673",
"0.64176327",
"0.6264444",
"0.6254806",
"0.62482595",
"0.62001854",
"0.6194618",
"0.6184479",
"0.6183706",
"0.61817014",
"0.6178645",
"0.61733997",
"0.6166602",
"0.6160866",
"0.6158829",
"0.61492175"
] | 0.8476433 | 0 |
Returns true if the cooldown is ready. | def ready(self):
return self.time >= self.cooldown | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def in_cooldown(self) -> bool:\n return self.cooldown_counter > 0",
"def is_ready(self) -> bool:\n return self._ready.is_set()",
"def cooldown_checker(self):\n self.cooldown_tick += 1\n if self.cooldown_tick == self.pattern_cooldown:\n self.wait = False\n self.cooldown_tick = 0",
"async def check_cooldown(self, ctx: commands.context):\n if not self.enable_cooldown:\n return True\n\n now = datetime.utcnow()\n\n # Exclude bot owner from all cooldowns\n if await checks.is_owner(ctx):\n return True\n\n # Return if author never been in cooldown before\n last_time: datetime = self.invocation_times.get(ctx.author.id, None)\n if not last_time:\n self.invocation_times[ctx.author.id] = now\n return True\n\n cooldown_end = last_time + timedelta(seconds=self.cooldown)\n\n # Return if time has passed since cooldown end\n if cooldown_end < now:\n self.invocation_times[ctx.author.id] = now\n return True\n\n retry_after = (cooldown_end - now).total_seconds()\n print((cooldown_end - now))\n\n raise commands.CommandOnCooldown(None, retry_after)",
"def check_ready(self):\r\n print \"Checking ready\"\r\n\t\tif self.game.trough.is_full():\r\n print \"Ready\"\r\n\t\t\tself.ready()\r\n\t\t\treturn True\r\n\t\tprint \"Not Ready\"\r\n\t\treturn False",
"def is_ready(self):\n return self._is_ready()",
"def is_ready(self):\n return self.__is_ready",
"def is_ready(self) -> bool:\n pass",
"def is_ready(self):\n return self._is_ready",
"def is_ready() -> bool:\n return True",
"def canAct(self) -> bool:\n return self.cooldown < 1",
"def _is_ready(self):\n current_wait_time = 0\n start_time = time.time()\n while current_wait_time < self.max_wait_time_ready:\n try:\n response = requests.get(os.path.join(self.url, \"ready\"), timeout=1)\n if response.status_code == 200:\n break\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n current_wait_time = time.time() - start_time\n if current_wait_time >= self.max_wait_time_ready:\n raise TimeoutError(\"Interrupting execution\\n'/ready' endpoint is not ready \" +\n \"for maximum allowed {:d} seconds!\".format(self.max_wait_time_ready))",
"async def _check_cooldown(self, request_id: int) -> bool:\n raise NotImplementedError()",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def can_act(self) -> bool:\n return self.cooldown < 1",
"def IsReady(self):\r\n\t\treturn self._get_attribute('isReady')",
"def _is_ready(self):\n current_wait_time = 0\n start_time = time.time()\n while current_wait_time < self.max_wait_time_ready:\n try:\n response = requests.get(os.path.join(self.url, \"ready\"))\n if response.status_code == 200:\n break\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n time.sleep(1)\n current_wait_time = time.time() - start_time\n if current_wait_time >= self.max_wait_time_ready:\n raise TimeoutError(\"Interrupting execution\\n'/ready' endpoint is not ready \" +\n \"for maximum allowed {:d} seconds!\".format(self.max_wait_time_ready))",
"def ready(self):\n return self.counter > 0",
"def isReady(self):\n return self._lowLevelIsReady()",
"def is_on(self):\n return not self.ready",
"def is_ready(self):\n if self.game.has_started():\n return True\n return self.status == self.PLAYER_READY",
"def ready(self):\n # NOTE(priteau): Temporary compatibility with old and new lease status\n if self.lease.get('action') is not None:\n return self.status == ('START', 'COMPLETE')\n else:\n return self.status == 'ACTIVE'",
"def is_ready(self):\n if self.id is None:\n return False\n\n return True",
"def if_ready(self, **kwargs):\n return True",
"def is_ready(cls):\n\n return False",
"def is_ready_to_run(self, at_time):\n return (self.next_time - at_time) <= 0",
"def is_ready(self):\n return self.prep_job.is_done()",
"def wait_ready(self, timeout: Optional[float] = None) -> bool:\n return self._ready.wait(timeout=timeout)",
"def isReady(self):\n return self._state in self._ReadyStates",
"def available(self):\n return self._power is not None"
] | [
"0.80361766",
"0.73155785",
"0.7213164",
"0.7167499",
"0.7148745",
"0.7121509",
"0.71099555",
"0.7094376",
"0.70895165",
"0.7063325",
"0.70395815",
"0.7003874",
"0.69981563",
"0.69766104",
"0.69766104",
"0.6975192",
"0.69604623",
"0.69585705",
"0.69414705",
"0.6909078",
"0.68804795",
"0.6859239",
"0.683399",
"0.68064785",
"0.6793257",
"0.67828196",
"0.67774594",
"0.6766286",
"0.6756293",
"0.67408186"
] | 0.87662184 | 0 |
Get api_key in metadata, raise error if it does not exist | def get_api_key(context) -> str:
provided_api_key = ""
for key, value in context.invocation_metadata():
if key == "api_key":
provided_api_key = str(value)
return provided_api_key
return provided_api_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_api_key(api_key):\n api.get(api_key)",
"def resolve_apikey(self):\n # check the instance variable\n apikey = self.apikey\n if apikey is not None:\n return apikey\n\n # check the class variable and environment\n apikey = resolve_apikey()\n if apikey is not None:\n return apikey\n\n # if we got this far, the API key wasn't found\n raise MonitisError('The Monitis API key is required')",
"def _get_api_key():\n api_key_directory = os.getenv(\"KOKORO_GFILE_DIR\")\n api_key_file = os.path.join(api_key_directory, \"resultstore_api_key\")\n assert os.path.isfile(api_key_file), (\n \"Must add --api_key arg if not on \"\n \"Kokoro or Kokoro environment is not set up properly.\"\n )\n with open(api_key_file, \"r\") as f:\n return f.read().replace(\"\\n\", \"\")",
"def _apikey():\n return __opts__.get(\"bamboohr\", {}).get(\"apikey\", None)",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.quick,\n [\"0.0.0.0\"],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def _resolve_apikey(url: str, apikey: Optional[str]) -> Tuple[str, str]:\n # Even though the async api doesn't support apikey query parameter,\n # for ease of use support providing it as query parameter in the url.\n # authorization is always done via Authorization header\n url, params = UrlManipulation.separate_query_params(url, (\"apikey\",))\n try:\n apikey = params[\"apikey\"][0]\n except KeyError:\n pass\n\n if apikey is None:\n raise ValueError(\"apikey not defined\")\n\n return url, apikey",
"def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"api_key\")",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.query,\n [\"<query>\"],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def test_get_cloud_organization_api_key(self):\n pass",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.stats, [\"query\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def get_api_key():\n try:\n return os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]\n except Exception:\n raise EnvError()",
"def _get_api_key():\n cfg = read_config()\n cfg = cfg['notifier']['telegram_bot']\n return cfg.get('api_key')",
"def test_api_key_error(api):\n\twith pytest.raises(top_stories.APIKeyError):\n\t\tmissingAPI = top_stories.TopStoriesAPI()",
"def get_apikey(cls) -> str:\r\n dotenv_path = Path(__file__).absolute().parents[2] / '.env'\r\n if dotenv_path.exists():\r\n load_dotenv(dotenv_path)\r\n try:\r\n apikey: str = os.environ[\"API_KEY\"]\r\n except KeyError:\r\n print(\"API_KEY doesn't exist\")\r\n raise KeyError\r\n\r\n return apikey",
"def get_api_key() -> dict:\r\n with open('config.json', 'r') as config_file:\r\n api_keys = json.load(config_file)\r\n return api_keys['newsapi']['api']",
"def test_api_key_not_found(self):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.decorator.load_config\") as load_config:\n load_config.return_value = {\"api_key\": \"\"}\n result = runner.invoke(\n subcommand.ip, [\"0.0.0.0\"], parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Error: API key not found\" in result.output",
"def get_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def _get_api_key(self):\n self.api.apikey = self.api.action.user_show(id=self.username)['apikey']",
"def _get_api_key():\n if not os.getenv(\"SPOON_API_KEY\"):\n raise RuntimeError(\"SPOON_API_KEY is not set\")\n return os.getenv(\"SPOON_API_KEY\")",
"def get_api_key ():\n PROJECT_PATH = os.path.abspath(os.path.dirname(__name__))\n key_file = open(PROJECT_PATH + \"/key_api.txt\", \"r\")\n return (key_file.read()).rstrip('\\n')",
"def test_get_test_organization_api_key(self):\n pass",
"def get_api_key(instance):\n\n # TODO make this work with environment variables or else\n # by getting the api-key from ~/.config/flywheel/user.json\n # if the KEY_FILE is not present but that doesn't honor the\n # \"instance\" argument to this method\n\n with open(KEY_FILE) as json_file:\n keys = json.load(json_file)\n the_user = keys[\"default\"]\n for key, val in keys[\"ids\"][the_user].items():\n if instance.startswith(key):\n api_key = val\n if not api_key:\n print(f\"{CR}Could not find instance '{instance}'{C0}\")\n return api_key",
"def read_api_key():\n script_path = os.path.dirname(os.path.realpath(__file__)) \n config = open(script_path + '/config', 'r')\n api_key = config.readline().rstrip()\n config.close()\n return(api_key)",
"def get_key_info(self, api_key, include_key=False):\n\t\ttry:\n\t\t\tvalidation.required(api_key, 'api_key')\n\t\texcept errors.ValidationError, ex:\n\t\t\tself.log.warning(\"Validation failure: %s\" % str(ex))\n\t\t\traise errors.APIError, str(ex)\n\n\t\treturn self.app.db.query(\n\t\t\t\"\"\"\n\t\t\tselect\n\t\t\t\tapi_key,\n\t\t\t\towner,\n\t\t\t\tapp_name,\n\t\t\t\temail,\n\t\t\t\turl,\n\t\t\t\tcreated\n\t\t\tfrom\n\t\t\t\tapi_keys\n\t\t\twhere\n\t\t\t\tapi_key = %s\n\t\t\t\"\"\", (api_key, ), single_row=True)",
"def get_api_key_from_response(response: requests.models.Response) -> str:\n api_key = None\n for line in response.text.splitlines():\n if \"Your API Key is: \" in line:\n api_key = line.split(\"Your API Key is: \")[1].split(\"<\")[0]\n return api_key\n raise ValueError(\"Cannot find API key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")",
"def api_key(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"api_key\")"
] | [
"0.7678791",
"0.6980334",
"0.6893777",
"0.67977744",
"0.6746311",
"0.67275006",
"0.6724428",
"0.6724428",
"0.67214483",
"0.6710168",
"0.6701964",
"0.66950274",
"0.6663174",
"0.6648977",
"0.66346925",
"0.6588155",
"0.6586956",
"0.65624505",
"0.65600836",
"0.6558815",
"0.65576476",
"0.6534775",
"0.65253764",
"0.6519993",
"0.6453146",
"0.63950944",
"0.637815",
"0.637815",
"0.637815",
"0.637815"
] | 0.7251901 | 1 |
Update prefix with the operator that has the best price. | def _update_prefix(self, prefix: str, operator: Operator):
cached_operator: Optional[Operator] = self.lookup(prefix)
if cached_operator:
cached_price = cached_operator.price_for_prefix(prefix)
if cached_price:
if operator.has_better_price_for_prefix(prefix, cached_price):
self.add_prefix(prefix=prefix, operator=operator)
else:
self.add_prefix(prefix=prefix, operator=operator) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_with_operator(self, operator: Operator):\n if not isinstance(operator, Operator):\n raise TypeError(\n f\"operator expected to be of type `Operator` but got type \"\n f\"{type(operator)}\"\n )\n\n for prefix in operator.rates.keys():\n self._update_prefix(prefix=prefix, operator=operator)",
"def update(self, price, volume):\r\n if price > self.hig:\r\n self.hig = price\r\n if price < self.low:\r\n self.low = price\r\n self.cls = price\r\n self.vol += volume",
"def change_price(self, value): \n value = self.price",
"def update(\n self,\n objective_function,\n best_code,\n objective_cost,\n weight,\n ):\n W = str(weight) # must be a string otherwise dupicate entries\n L = len(best_code)\n data = self._get_data(L)\n if W not in data:\n data[W] = {}\n if (objective_function not in data[W]\n or data[W][objective_function]['cost'] < objective_cost):\n data[W][objective_function] = {\n 'cost': objective_cost,\n 'code': best_code,\n }",
"def update_stock(option, stock):\n lowered_opt = option.lower()\n if lowered_opt == 'f':\n stock[\"five\"]+=1\n elif lowered_opt == 'o':\n stock[\"one\"] += 1\n elif lowered_opt == 'q':\n stock[\"quarter\"] += 1\n elif lowered_opt == 'd':\n stock[\"dime\"] += 1\n else:\n stock[\"nickel\"] +=1",
"def applyOperator(self, operator, operand):\n if self.currentTotal == None:\n self.currentTotal = operand\n elif operator == \"=\":\n self.equalsOp(operand)\n elif self.previousOperand:\n self.previousOperand = None\n else:\n self.computeTotal(operator, operand)\n if operator != \"=\":\n self.previousOperator = operator",
"def desired_price(self, new_desired_price):\n self._desired_price = new_desired_price",
"def __on_update_bookticker(self, action, bookticker):\n self.best_bid_price = float(bookticker['b'])\n self.best_ask_price = float(bookticker['a'])",
"def update_cursor_and_price(self, new_quant):\n self.quant = new_quant\n self.quantity_cursor.change_count(self.quant)\n self.cost_surf = \\\n self.text_maker.get_surface(str(self.item.sell_price * self.quant))",
"def modify_price(self, price):\n if price is not None and self.is_cancellable:\n log.info(\"bo#%s: modify price (pending) order \" % self.ticket)\n not_implemented_error(\"Can't modify price for now (only for pending orders which wasn't triggered\")\n order_id = self.order_id_master\n cancel_order(order_id) # DANGEROUS! it should be atomic operation!\n #style = self.style\n #if self.is_limit:\n #elif self.is_stop:\n #elif self.is_stop_limit\n #order_id = order(self.symbol, self.volume, style=new_style))\n \n else:\n return",
"def change_operator(self, text):\n self.operator = text\n if self.current_num:\n self.prev_num = self.current_num\n self.current_num = \"\"",
"def __call__(self, rate:'kW'):\n self.rate = rate\n self.cost = self.price * rate",
"def add_prefix(self, prefix: str, operator: Operator):\n if not isinstance(operator, Operator):\n raise TypeError(\n f\"`operator` expected to be of type `str` but got type \"\n f\"`{type(operator)}`\"\n )\n\n if not isinstance(prefix, str):\n raise TypeError(\n f\"`prefix` is expected to be of type `str` but got type \"\n f\"`{type(prefix)}`\"\n )\n\n if not prefix.isdigit():\n raise ValueError(\n \"Value of `prefix` is expected to a string representation \"\n \"of a digit\"\n )\n\n self.data[prefix] = operator # noqa",
"def incr_operand(self):\n pass",
"def modify_price(pid: int, price: float) -> ExecRet:\n if price < 0.0:\n return ExecRet.err(message='invalid price %.4f' % price)\n market = get_market()\n product = market.get_product(pid)\n if not product:\n return ExecRet.err(message='pid %d not exist' % pid)\n LOGGER.info('pid %s, pre-price: %.4f, new-price: %.4f' %\n (pid, product.price, price))\n time.sleep(3)\n product.price = price\n return ExecRet.ok()",
"def update(self, price, dt):\n \n price_trailing_diff = self.get('price_trailing_diff')\n price_trailing = self.get('price_trailing')\n \n if self.bo.price_diff_d is not None:\n if self.bo.price_diff_d>price_trailing_diff:\n new_stop = price-self.bo.direction*price_trailing_diff\n if price_trailing is None:\n self.modify_stop(new_stop) # ToFix! at first update we should only modify stop when it's closer than actual stop\n else:\n if self.bo.direction*(new_stop-price_trailing)>0:\n self.modify_stop(new_stop)",
"def _onchange_price(self):\n self.price_subtotal = self.price",
"async def update_base_rate(self, pair: str):\n\n value = self.close_values[pair][-1]\n\n try:\n old_value = self.base_rates[pair]\n except KeyError:\n old_value = 0.0\n\n if not math.isclose(old_value, value):\n self.log.debug(\"Updated {} base currency rate.\", pair, verbosity=1)\n self.log.debug(\"{} new currency rate is {}\", pair, value, verbosity=2)\n\n self.base_rates[pair] = value\n\n pair_split = pair.split('-')\n inverse_pair = '{}-{}'.format(pair_split[1], pair_split[0])\n self.base_rates[inverse_pair] = 1.0 / value\n\n self.save_attr('base_rates')",
"def set_price(self, request, pk):\n return Response('20$')",
"def set_price(self, request, pk):\n return Response('20$')",
"def update(self, state, action, nextState, reward):\n \"*** YOUR CODE HERE ***\"\n self.qValues[(state, action)] = ((1 - self.alpha) * self.getQValue(state, action)) + self.alpha \\\n * (reward + self.discount * self.computeValueFromQValues(nextState))",
"def update(self, operation, operand0, operand1, operand2):\n self.operation = operation\n self.operand0 = operand0\n self.operand1 = operand1\n self.operand2 = operand2",
"def _recalculate_opinions(self, idea):\r\n \r\n global INFLUENCE_FACTOR\r\n \r\n last_idea = self.opinions[idea.category]\r\n last_idea.weight = last_idea.weight+(idea.weight*INFLUENCE_FACTOR)\r\n if last_idea.weight >1:\r\n last_idea.weight = 1\r\n elif last_idea.weight <-1:\r\n last_idea.weight = -1",
"def updateCoeff(self, **args):\n for par in args:\n self.rateCoeffMeta[par] = args[par]\n meta = self.rateCoeffMeta\n if self.rateCoeffMeta['type'] ==\"constant\":\n self.k = cp.k_const(meta['k'])\n elif self.rateCoeffMeta['type'] ==\"Arrhenius\":\n self.k = cp.k_arr(meta['A'], meta['E'], meta['T'], meta['R'])\n elif self.rateCoeffMeta['type'] ==\"modifiedArrhenius\":\n self.k = cp.k_mod_arr(meta['A'], meta['b'], meta['E'], meta['T'], meta['R'])\n else:\n # Other type of reaction rate coeff\n self.k = None # k = cp.newMethodToComputeK(...)\n return",
"def updateMeter(self, name1, name2, op):\r\n mini = 0\r\n maxi = 100\r\n pos = (self.var.get() - mini) / (maxi - mini)\r\n self.updateMeterLine(pos * 0.6 + 0.2)",
"def update_op(self, loss, learning_rate,var):\n #train_op = None\n ####### Implementation Here ######\n #pass\n train_op = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss = loss,var_list = var )\n return train_op",
"def adjust_price(self, price):\n precision = self._price_limits[3] or 8\n tick_size = self._price_limits[2] or 0.00000001\n\n # adjusted price at precision and by step of pip meaning\n return truncate(round(price / tick_size) * tick_size, precision)",
"def applyOperator(self, operand1, operand2, operator):\n\n if operator == \"*\":\n return operand1 * operand2\n elif operator == \"/\":\n return operand1 / operand2\n elif operator == \"+\":\n return operand1 + operand2\n else:\n return operand1 - operand2",
"def prepend_operators(\n self, c2_prepared: Caffe2Rep, input_names: List[str]\n ) -> Tuple[Caffe2Rep, List[str]]:\n return onnx.add_feats_numericalize_ops(c2_prepared, self.vocab_map, input_names)",
"def _setup_proximal_operator(\n self,\n weight_list,\n learning_rate,\n regularization_lambda,\n reciprocal_stable_factor=0.0001,\n weight_reshape_to_norm=lambda x: x,\n weight_reshape_from_norm=lambda x: x\n ):\n eta = learning_rate * regularization_lambda\n epsilon = eta * reciprocal_stable_factor\n weight_update_ops = []\n weight_shapes = []\n weight_reshaped_list = []\n weight_reshaped_shapes = []\n for weight_origin in weight_list:\n weight = weight_reshape_to_norm(weight_origin)\n weight_shape = list(map(\n lambda x: x.value,\n weight.shape\n ))\n weight_shapes.append(weight_shape)\n weight_reshaped = tf.reshape(\n weight,\n shape=(weight_shape[0], weight_shape[1], -1)\n )\n weight_reshaped_list.append(weight_reshaped)\n weight_reshaped_shapes.append(\n list(map(lambda x: x.value, weight_reshaped.shape))\n )\n weight_reshaped_combined = tf.concat(\n values=weight_reshaped_list,\n axis=-1\n )\n # proximal update #\n weight_new_reshaped_combined = self.proximal_operator(\n weight=weight_reshaped_combined,\n eta=eta,\n epsilon=epsilon\n )\n\n weight_new_reshaped_list = tf.split(\n value=weight_new_reshaped_combined,\n num_or_size_splits=list(map(lambda x: x[-1], weight_reshaped_shapes)),\n axis=-1\n )\n for i in range(len(weight_new_reshaped_list)):\n weight_new_reshaped = weight_new_reshaped_list[i]\n weight_shape = weight_shapes[i]\n weight_origin = weight_list[i]\n weight_new = tf.reshape(\n weight_new_reshaped,\n shape=weight_shape,\n )\n weight_origin_new = weight_reshape_from_norm(weight_new)\n weight_update_op = weight_origin.assign(weight_origin_new)\n weight_update_ops.append(weight_update_op)\n return tf.group(*weight_update_ops)"
] | [
"0.6310741",
"0.57850754",
"0.57514006",
"0.54723537",
"0.54526275",
"0.5421909",
"0.53711325",
"0.5362892",
"0.5342558",
"0.52658045",
"0.5223748",
"0.5185856",
"0.51810634",
"0.51768",
"0.5171346",
"0.5168897",
"0.5147008",
"0.50996333",
"0.50994164",
"0.50994164",
"0.5083534",
"0.5061913",
"0.50549257",
"0.504966",
"0.50485146",
"0.5016943",
"0.50152",
"0.49930543",
"0.49847525",
"0.4981489"
] | 0.7551244 | 0 |
Updates `PrefixCache` with data from given operator. | def update_with_operator(self, operator: Operator):
if not isinstance(operator, Operator):
raise TypeError(
f"operator expected to be of type `Operator` but got type "
f"{type(operator)}"
)
for prefix in operator.rates.keys():
self._update_prefix(prefix=prefix, operator=operator) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_prefix(self, prefix: str, operator: Operator):\n cached_operator: Optional[Operator] = self.lookup(prefix)\n if cached_operator:\n cached_price = cached_operator.price_for_prefix(prefix)\n if cached_price:\n if operator.has_better_price_for_prefix(prefix, cached_price):\n self.add_prefix(prefix=prefix, operator=operator)\n else:\n self.add_prefix(prefix=prefix, operator=operator)",
"def build_cache(klass: \"PrefixCache\", operators: Tuple[Operator, ...]) -> \"PrefixCache\":\n prefix_cache = klass()\n for operator in operators:\n prefix_cache.update_with_operator(operator)\n\n return prefix_cache",
"def add_prefix(self, prefix: str, operator: Operator):\n if not isinstance(operator, Operator):\n raise TypeError(\n f\"`operator` expected to be of type `str` but got type \"\n f\"`{type(operator)}`\"\n )\n\n if not isinstance(prefix, str):\n raise TypeError(\n f\"`prefix` is expected to be of type `str` but got type \"\n f\"`{type(prefix)}`\"\n )\n\n if not prefix.isdigit():\n raise ValueError(\n \"Value of `prefix` is expected to a string representation \"\n \"of a digit\"\n )\n\n self.data[prefix] = operator # noqa",
"def lookup(self, prefix: str) -> Optional[Operator]:\n return self.data.get(prefix, None) # noqa",
"def with_prefix(self, prefix):\n return SessionMemoWithPrefix(prefix, self)",
"def add_prefix(self, prefix: str) -> 'Request':\n return replace(self, url=prefix + self.url)",
"def prefix(self, prefix):\n\n self._prefix = prefix",
"def prefix(self, prefix):\n\n self._prefix = prefix",
"def set_prefix(self, prefix):\n self._prefix = prefix\n self._update_layout()",
"async def prefix(self, ctx, prefix):\n if prefix.strip() == \"\":\n raise exceptions.Warning(\"Prefix cannot be empty.\")\n\n if prefix.startswith(\" \"):\n raise exceptions.Warning(\"Prefix cannot start with a space.\")\n\n if len(prefix) > 32:\n raise exceptions.Warning(\"Prefix cannot be over 32 characters.\")\n\n prefix = prefix.lstrip()\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO guild_prefix (guild_id, prefix)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n prefix = VALUES(prefix)\n \"\"\",\n ctx.guild.id,\n prefix,\n )\n self.bot.cache.prefixes[str(ctx.guild.id)] = prefix\n await util.send_success(\n ctx,\n f\"Command prefix for this server is now `{prefix}`. \"\n f\"Example command usage: {prefix}ping\",\n )",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def prefixed(self, prefix):\n if not prefix:\n return self.clone()\n else:\n return self.using(join(prefix, self))",
"def test_ipam_prefixes_update(self):\n pass",
"async def set_command_prefix(self, guild: Guild, prefix: str) -> bool:\n\n async with self.db_pool.acquire() as conn:\n result = await conn.execute(\n f\"UPDATE {self.table_name} \"\n \"SET command_prefix = $1 \"\n \"WHERE id = $2\",\n prefix,\n guild.id,\n )\n\n if result := (int(result.split()[1]) == 1) is True:\n self._cache[guild.id].prefix = prefix\n\n return result",
"def setPrefix(self, *args):\n return _libsbml.ASTBasePlugin_setPrefix(self, *args)",
"def update(self, prefix, peer, value):\n peer_sym = self.peers.get(peer, None)\n if peer_sym is None:\n peer_sym = self.peers[peer] = peer\n node = self.radix.add(prefix)\n node.data[peer_sym] = value\n return node",
"def _update_cache(self, key, value, cache, decode_loop_step):\n # Combines cached keys and values with new keys and values.\n if decode_loop_step is not None:\n # TPU special case.\n key_seq_dim = cache[\"key\"].shape.as_list()[1]\n indices = tf.reshape(\n tf.one_hot(decode_loop_step, key_seq_dim, dtype=key.dtype),\n [1, key_seq_dim, 1, 1])\n key = cache[\"key\"] + key * indices\n value_seq_dim = cache[\"value\"].shape.as_list()[1]\n indices = tf.reshape(\n tf.one_hot(decode_loop_step, value_seq_dim, dtype=value.dtype),\n [1, value_seq_dim, 1, 1])\n value = cache[\"value\"] + value * indices\n else:\n key = tf.concat([tf.cast(cache[\"key\"], key.dtype), key], axis=1)\n value = tf.concat([tf.cast(cache[\"value\"], value.dtype), value], axis=1)\n\n # Update cache\n cache[\"key\"] = key\n cache[\"value\"] = value\n\n return key, value",
"async def setprefix(self, ctx, *, prefix=bot_prefix):\n prefix = prefix.lower()\n current_server_prefix = await self.ex.get_server_prefix(ctx.guild.id)\n if len(prefix) > 8:\n await ctx.send(\"> **Your prefix can not be more than 8 characters.**\")\n else:\n # Default prefix '%' should never be in DB.\n if current_server_prefix == \"%\":\n if prefix != \"%\":\n await self.ex.conn.execute(\"INSERT INTO general.serverprefix VALUES ($1,$2)\", ctx.guild.id, prefix)\n self.ex.cache.server_prefixes[ctx.guild.id] = prefix\n else:\n if prefix != \"%\":\n await self.ex.conn.execute(\"UPDATE general.serverprefix SET prefix = $1 WHERE serverid = $2\",\n prefix, ctx.guild.id)\n self.ex.cache.server_prefixes[ctx.guild.id] = prefix\n else:\n await self.ex.conn.execute(\"DELETE FROM general.serverprefix WHERE serverid = $1\", ctx.guild.id)\n self.ex.cache.server_prefixes.pop(ctx.guild.id, None)\n await ctx.send(f\"> **This server's prefix has been set to {prefix}.**\")",
"def test_ipam_prefixes_partial_update(self):\n pass",
"def with_prefix(\r\n self,\r\n prefix: str\r\n ):\r\n return CollectionPriorModel({\r\n key: value\r\n for key, value\r\n in self.items()\r\n if key.startswith(\r\n prefix\r\n )\r\n })",
"def prefix(self, prefix):\n self._path_prefix = prefix",
"def cache_dataset(dataset, prefix):\n if not os.path.exists(nmt._constants.CACHE_PATH):\n os.makedirs(nmt._constants.CACHE_PATH)\n src_data = np.concatenate([e[0] for e in dataset])\n tgt_data = np.concatenate([e[1] for e in dataset])\n src_cumlen = np.cumsum([0]+[len(e[0]) for e in dataset])\n tgt_cumlen = np.cumsum([0]+[len(e[1]) for e in dataset])\n np.savez(os.path.join(nmt._constants.CACHE_PATH, prefix + '.npz'),\n src_data=src_data, tgt_data=tgt_data,\n src_cumlen=src_cumlen, tgt_cumlen=tgt_cumlen)",
"def get_cache_key(prefix):\n return '%s' % (prefix)",
"async def prefix(self, ctx, prefix: str = None):\n if not prefix:\n try:\n return await ctx.send(f'My prefix here is `{self.bot.prefixes[str(ctx.guild.id)]}`. You can change that with `{ctx.prefix}prefix <prefix>`')\n except KeyError:\n return await ctx.send(f'My prefix here is `{config.prefix[0]}`. You can change that with `{ctx.prefix}prefix <prefix>`')\n db = pymysql.connect(config.db_ip, config.db_user, config.db_pass, config.db_name)\n cur = db.cursor()\n cur.execute(\n f\"\"\"INSERT INTO settings (guildid, prefix) VALUES ({ctx.guild.id}, \"{prefix}\") ON DUPLICATE KEY UPDATE prefix = \"{prefix}\";\"\"\")\n db.commit()\n db.close()\n self.bot.prefixes = get_all_prefixes()\n await ctx.send(f':ok_hand: Successfully set my prefix here to `{prefix}`')",
"def memoize(prefix, time=60):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n key = memoize_key(prefix, *args, **kwargs)\n data = cache.get(key)\n if data is not None:\n return data\n data = func(*args, **kwargs)\n cache.set(key, data, time)\n return data\n return wrapper\n return decorator",
"def add_prefix(self, prefix, iri):\n existing = self._prefixes.get(prefix)\n if existing:\n if existing != iri:\n raise mio.MIOException('The prefix \"%s\" is already asigned to \"%s\"' % (prefix, existing))\n return\n self._prefixes[prefix] = iri",
"def add_prefix(inputs, prefix):\n\n outputs = dict()\n for name, value in inputs.items():\n outputs[f\"{prefix}.{name}\"] = value\n\n return outputs",
"def set_prefix_expression(self, expression, clear_args = True):\n if expression and type(expression) is not str:\n raise TypeError('expression should be either string or None or False')\n if clear_args:\n self._prefix_kwargs = {}\n self._prefix_expression = expression",
"async def _setprefix(self, ctx, arg1):\n if len(arg1) > 6:\n await ctx.send(\"Keep the prefix under 6 chars, please.\")\n return\n\n guildconfig = database.getGuild(ctx.guild.id)\n\n if not guildconfig:\n guildconfig = database.newGuild(ctx.guild.id)\n\n database.setPrefix(guildconfig.ID, arg1)\n await ctx.send(\"Prefix set successfully!\")",
"def add_prefix(self, state_dict, prefix):\n print('add prefix \\'{}\\''.format(prefix))\n f = lambda x: x + prefix # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}"
] | [
"0.7683273",
"0.6553203",
"0.64049375",
"0.52432764",
"0.51866513",
"0.51246953",
"0.5072463",
"0.5072463",
"0.5058224",
"0.50360274",
"0.5033406",
"0.49451602",
"0.49146992",
"0.48742005",
"0.4850131",
"0.4843754",
"0.47898152",
"0.47546908",
"0.47544464",
"0.47422978",
"0.47078148",
"0.46670234",
"0.46594873",
"0.46504092",
"0.46463746",
"0.46321166",
"0.46088699",
"0.46060976",
"0.4599175",
"0.4595461"
] | 0.68408626 | 1 |
Given a prefix, returns operator with best price in `PrefixCache` | def lookup(self, prefix: str) -> Optional[Operator]:
return self.data.get(prefix, None) # noqa | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_prefix(self, prefix: str, operator: Operator):\n cached_operator: Optional[Operator] = self.lookup(prefix)\n if cached_operator:\n cached_price = cached_operator.price_for_prefix(prefix)\n if cached_price:\n if operator.has_better_price_for_prefix(prefix, cached_price):\n self.add_prefix(prefix=prefix, operator=operator)\n else:\n self.add_prefix(prefix=prefix, operator=operator)",
"def memoize_get(prefix, *args, **kwargs):\n return cache.get(memoize_key(prefix, *args, **kwargs))",
"def get_cache_key(prefix):\n return '%s' % (prefix)",
"def build_cache(klass: \"PrefixCache\", operators: Tuple[Operator, ...]) -> \"PrefixCache\":\n prefix_cache = klass()\n for operator in operators:\n prefix_cache.update_with_operator(operator)\n\n return prefix_cache",
"def get_prefix(coef, bias=0.1, omit=None):\n if omit is None:\n omit = num_prefixes\n\n values = [val for key, val in six.iteritems(prefixes) if key not in omit]\n coefs = nm.array(values, dtype=nm.float64)\n coefs.sort()\n ii = nm.searchsorted(coefs, bias*coef, side='left')\n\n if ii == len(coefs):\n ii = ii - 1\n\n cc = coefs[ii]\n prefix = inv_prefixes[cc]\n mul = coef / cc\n\n return prefix, mul",
"def find_newest_matching_prefix(path, prefix):\n entries = os.listdir(path)\n result = None\n for entry in entries:\n if prefix.match(entry):\n fq_entry = os.path.join(path, entry)\n if result is None:\n result = fq_entry\n else:\n result_mtime = os.path.getmtime(result)\n entry_mtime = os.path.getmtime(fq_entry)\n if entry_mtime > result_mtime:\n result = fq_entry\n\n return result",
"def find_newest_matching_prefix(path, prefix):\n entries = os.listdir(path)\n result = None\n for entry in entries:\n if prefix.match(entry):\n fq_entry = os.path.join(path, entry)\n if result is None:\n result = fq_entry\n else:\n result_mtime = os.path.getmtime(result)\n entry_mtime = os.path.getmtime(fq_entry)\n if entry_mtime > result_mtime:\n result = fq_entry\n\n return result",
"def get_subtrie(\n self, search_prefix: str, current_trie_node_prefix: str = \"\"\n ) -> Tuple[str, \"TrieNode\"]:\n if search_prefix == self._value[: len(search_prefix)]:\n # search_prefix is a prefix of the current node (or equal to it)\n return (current_trie_node_prefix, self)\n elif self._value == search_prefix[: len(self._value)]:\n # The current node is a prefix of the search_prefix\n remainder = search_prefix[len(self._value) :]\n children = sorted(self.children, key=lambda node: node._value)\n for child in children:\n if child._value == remainder[: len(child._value)]:\n new_prefix = current_trie_node_prefix + self._value\n return child.get_subtrie(\n remainder, current_trie_node_prefix=new_prefix\n )\n elif remainder == child._value[: len(remainder)]:\n # The remainder is a prefix of the child\n return (current_trie_node_prefix, child)\n return (\"\", EMPTY_NODE)",
"def with_prefix(\r\n self,\r\n prefix: str\r\n ):\r\n return CollectionPriorModel({\r\n key: value\r\n for key, value\r\n in self.items()\r\n if key.startswith(\r\n prefix\r\n )\r\n })",
"def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])",
"def prefixSearch(self, prefix: str, _prec=\"\"):\n if prefix == \"\":\n # prefix exhasuted, match all\n yield from self.keys(_prec)\n else:\n try:\n # prefix not exhausted, traverse further\n chld = self.children[prefix[0]]\n yield from chld.prefixSearch(prefix[1:], _prec + self.ch)\n except IndexError:\n yield None\n except KeyError:\n yield None",
"def find(self, prefix):\n node = self.root\n for char in prefix:\n if char not in node.keys():\n return None\n node = node[char]\n\n return node",
"def get_tool(prefix):\r\n candidates = [tool for tool in TOOLS if tool.startswith(prefix.lower())]\r\n if not candidates:\r\n print(\"No tools match prefix %s. Available tools are: %s\" % (prefix, TOOLS.keys()))\r\n return None\r\n if len(candidates) > 1:\r\n print(\"Ambiguous prefix %s (matches %s)\" % (prefix, candidates))\r\n return None\r\n return candidates[0]",
"def startsWith(self, prefix):\n return self.dfsSearch(self.root, prefix, 0, True)\n\n\n # Your Trie object will be instantiated and called as such:\n # obj = Trie()\n # obj.insert(word)\n # param_2 = obj.search(word)\n # param_3 = obj.startsWith(prefix)",
"def find_best_match(self, number):\n best_match = None\n best_length = -1\n\n for prefix in self.pricing:\n # If the prefix is not long enough we continue out\n # if len(prefix) < best_length:\n # continue\n #\n # # If the route is our phone number break out\n # if number == prefix:\n # best_match = prefix\n # break\n\n # If our number starts with the route prefix and our best match is not set or our current prefix is greater\n # then the last match then we set our current prefix to our best match\n if number.startswith(prefix) and (best_match is None or len(prefix) > best_length):\n best_match = prefix\n best_length = len(prefix)\n\n # If we never got a best match return nothing\n if best_match is None:\n return None\n\n # Return the price of our best match\n return self.pricing[best_match]",
"def search(self, prefix: str) -> TrieNode:\n leaf = self.root\n for level in range(len(prefix)):\n letter = prefix[level]\n\n if letter not in leaf.children:\n return self.get_node()\n leaf = leaf.children[letter]\n\n if leaf is not None:\n return leaf\n return self.get_node()",
"def search(self, prefix: str) -> Generator[str, None, None]:\n # Perform a binary search to find where in the sorted list the prefix belongs.\n # Everything to the right will be lexicographically GTE than the prefix.\n start_position = bisect_left(self.data, prefix)\n\n for suggestion in self.data[start_position:]:\n if suggestion.startswith(prefix):\n yield suggestion\n else:\n break",
"def filter_by_prefix(query, key_name_prefix):\n root_kind = query._model_class.__name__\n min_key = db.Key.from_path(root_kind, key_name_prefix)\n max_key = db.Key.from_path(root_kind, key_name_prefix + u'\\uffff')\n return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key)",
"def find_label_operator(query):\n # If you apply any changes into these regex patterns, please update the JSON schema consequently at:\n # depc/schemas/v1_config.json\n # Rule\n regex = r\"^rule.(.+|'.+')$\"\n match = re.search(regex, query)\n if match:\n rule = match.group(1)\n if rule.startswith(\"'\"):\n rule = rule[1:-1]\n return RuleOperator, {\"rule\": rule}\n\n # Operation AND, OR (no argument)\n regex = (\n r\"^operation.(AND|OR)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n )\n match = re.search(regex, query)\n if match:\n # Transform '[Foo, Bar]' into a Python list\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation ATLEAST (integer argument)\n regex = r\"^operation.(ATLEAST\\([0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Operation RATIO (float integer less than 0)\n regex = r\"^operation.(RATIO\\(0.[0-9]+\\))(\\[[A-Z]+[a-zA-Z0-9]*(, A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return OperationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # Aggregation AVERAGE, MIN, MAX\n regex = r\"^aggregation.(AVERAGE|MIN|MAX)\\(?\\)?(\\[[A-Z]+[a-zA-Z0-9]*(, [A-Z]+[a-zA-Z0-9]*)*?\\])$\"\n match = re.search(regex, query)\n if match:\n deps = match.group(2)[1:-1].split(\", \")\n return AggregationOperator, {\"type\": match.group(1), \"dependencies\": deps}\n\n # We validate the schema before save it in database,\n # it's not possible to go here.\n return None, None",
"def prefix(name):\n def rule(symbol):\n return symbol.startswith(name) or None\n return rule",
"def search_prefix(self, prefix):\n current = self.root\n for letter in prefix:\n\n current = current.get_child(letter)\n\n if not current:\n return\n\n return current",
"def first_uri_matching_prefix(xia, prefix):\n\n if xia is not None:\n for uri in xia:\n if uri.startswith(prefix):\n return uri\n return None",
"def completion_proximity_score(prefix, completion):\n if prefix == completion:\n return float(\"inf\")\n else:\n return 1.0 / float(len(completion))",
"def items_with_prefix(self, prefix):\n node = self.get_node(self.root, 0, prefix)\n # look at the middle subtree only (since only it has exact matches)\n return self.collect(node.middle, prefix)",
"def kmp_algo(inp_string: str, substr: str) -> Optional[int]:\n\n pi = _prefix_fun(substr)\n i, j = 0, 0\n while i <= len(inp_string)-len(substr):\n if inp_string[i] == substr[j]:\n first_occurrence = i\n while j < len(substr):\n if inp_string[i] != substr[j]:\n j = pi[j-1]\n break\n i += 1\n j += 1\n else:\n return first_occurrence\n else:\n i += 1\n return None",
"def filter_by_prefix(query, key_name_prefix, root_kind=None):\n root_kind = root_kind or query._model_class.__name__\n min_key = db.Key.from_path(root_kind, key_name_prefix)\n max_key = db.Key.from_path(root_kind, key_name_prefix + u'\\uffff')\n return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key)",
"def prefixed(self, prefix):\n if not prefix:\n return self.clone()\n else:\n return self.using(join(prefix, self))",
"def startsWith(self, prefix):\r\n t = self.trie\r\n for w in prefix: \r\n if w not in t: \r\n return False\r\n t = t[w]\r\n return True",
"def startsWith(self, prefix):\n ret = True\n curr = self.trie\n for i, ch in enumerate(prefix):\n curr = curr.get(ch, {})\n if curr:\n continue\n else:\n break\n \n if i==len(prefix)-1:\n ret = True\n else:\n ret = False\n return ret",
"def get_op(prefix=None):\n dict = {}\n if prefix is not None and len(prefix) > 1:\n if prefix[-1] != '/':\n prefix += '/'\n res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)\n for t in res:\n key = t.name\n key = key[len(prefix):]\n dict[str(key)] = t\n return dict"
] | [
"0.7023152",
"0.57442355",
"0.56952125",
"0.5672677",
"0.5581838",
"0.5468779",
"0.5468779",
"0.5460756",
"0.5382311",
"0.5373651",
"0.5297867",
"0.52763575",
"0.5276098",
"0.52464044",
"0.5244506",
"0.52335495",
"0.51884717",
"0.51734436",
"0.51672626",
"0.5148878",
"0.5133226",
"0.5121348",
"0.511982",
"0.50703907",
"0.50269425",
"0.5022773",
"0.50135386",
"0.49916553",
"0.49833375",
"0.49593863"
] | 0.6808338 | 1 |
Map given prefix to operator, overwriting existing cache for prefix entry. | def add_prefix(self, prefix: str, operator: Operator):
if not isinstance(operator, Operator):
raise TypeError(
f"`operator` expected to be of type `str` but got type "
f"`{type(operator)}`"
)
if not isinstance(prefix, str):
raise TypeError(
f"`prefix` is expected to be of type `str` but got type "
f"`{type(prefix)}`"
)
if not prefix.isdigit():
raise ValueError(
"Value of `prefix` is expected to a string representation "
"of a digit"
)
self.data[prefix] = operator # noqa | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_prefix(self, prefix: str, operator: Operator):\n cached_operator: Optional[Operator] = self.lookup(prefix)\n if cached_operator:\n cached_price = cached_operator.price_for_prefix(prefix)\n if cached_price:\n if operator.has_better_price_for_prefix(prefix, cached_price):\n self.add_prefix(prefix=prefix, operator=operator)\n else:\n self.add_prefix(prefix=prefix, operator=operator)",
"def build_cache(klass: \"PrefixCache\", operators: Tuple[Operator, ...]) -> \"PrefixCache\":\n prefix_cache = klass()\n for operator in operators:\n prefix_cache.update_with_operator(operator)\n\n return prefix_cache",
"def lookup(self, prefix: str) -> Optional[Operator]:\n return self.data.get(prefix, None) # noqa",
"def update_with_operator(self, operator: Operator):\n if not isinstance(operator, Operator):\n raise TypeError(\n f\"operator expected to be of type `Operator` but got type \"\n f\"{type(operator)}\"\n )\n\n for prefix in operator.rates.keys():\n self._update_prefix(prefix=prefix, operator=operator)",
"def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix",
"def platform_map(op):\n while True:\n found = platform_map_iterate(op)\n if not found:\n break\n op = found\n return op",
"def add_prefix(inputs, prefix):\n\n outputs = dict()\n for name, value in inputs.items():\n outputs[f\"{prefix}.{name}\"] = value\n\n return outputs",
"def _support_op(*args):\n def inner(func):\n for one_arg in args:\n _op_mapping_[one_arg] = func\n return func\n\n return inner",
"def apply_rule(operator, pattern, replacement):\n new_op = operator.match_first(pattern)\n if new_op is None:\n return None\n return new_op.replace_first(\"generic\", replacement)",
"def test_override_cache_aliasing(self):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.,\n mode='indexed').base.function\n a1 = symbol(name='a', dimensions=(i, j, k, l), value=3.,\n mode='indexed').base.function\n a2 = symbol(name='a', dimensions=(i, j, k, l), value=4.,\n mode='indexed').base.function\n eqn = Eq(a, a+3)\n op = Operator(eqn)\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))",
"def SetLocationPrefixRewriteMap (prefix_map):\n\n LocationPrefixRewriteMap_.clear()\n LocationPrefixRewriteMap_.update(prefix_map)",
"def add_prefix(self, state_dict, prefix):\n print('add prefix \\'{}\\''.format(prefix))\n f = lambda x: x + prefix # 去除带有prefix的名字\n return {f(key): value for key, value in state_dict.items()}",
"def prefix_replace(original, old, new):\n ...",
"def add_prefix(self, prefix: str) -> 'Request':\n return replace(self, url=prefix + self.url)",
"def add_prefix(self, prefix, iri):\n existing = self._prefixes.get(prefix)\n if existing:\n if existing != iri:\n raise mio.MIOException('The prefix \"%s\" is already asigned to \"%s\"' % (prefix, existing))\n return\n self._prefixes[prefix] = iri",
"def get_cache_key(prefix):\n return '%s' % (prefix)",
"def prefixed(self, prefix):\n if not prefix:\n return self.clone()\n else:\n return self.using(join(prefix, self))",
"def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])",
"def _extend_with_prefix(base, extensions, prefix):\n for key, value in extensions.items():\n base[prefix + key] = value",
"def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def add(variable, value):\n prefixes[variable] = value",
"def mmap(function: tp.Callable, operator: tp.Callable = operators.concat) -> MergeMap:\n return MergeMap(MetaFunction.make_meta(function), operator)",
"def transform(self, prefix):\n nfa_transformed = copy.deepcopy(self)\n nfa_transformed._add_state(prefix)\n return nfa_transformed",
"def associate(op, args):\n args = dissociate(op, args)\n if len(args) == 0:\n return _op_identity[op]\n elif len(args) == 1:\n return args[0]\n else:\n return Expr(op, *args)",
"def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix",
"def get_op(prefix=None):\n dict = {}\n if prefix is not None and len(prefix) > 1:\n if prefix[-1] != '/':\n prefix += '/'\n res = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=prefix)\n for t in res:\n key = t.name\n key = key[len(prefix):]\n dict[str(key)] = t\n return dict",
"async def prefix(self, ctx, prefix):\n if prefix.strip() == \"\":\n raise exceptions.Warning(\"Prefix cannot be empty.\")\n\n if prefix.startswith(\" \"):\n raise exceptions.Warning(\"Prefix cannot start with a space.\")\n\n if len(prefix) > 32:\n raise exceptions.Warning(\"Prefix cannot be over 32 characters.\")\n\n prefix = prefix.lstrip()\n await self.bot.db.execute(\n \"\"\"\n INSERT INTO guild_prefix (guild_id, prefix)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE\n prefix = VALUES(prefix)\n \"\"\",\n ctx.guild.id,\n prefix,\n )\n self.bot.cache.prefixes[str(ctx.guild.id)] = prefix\n await util.send_success(\n ctx,\n f\"Command prefix for this server is now `{prefix}`. \"\n f\"Example command usage: {prefix}ping\",\n )",
"def set_prefix_expression(self, expression, clear_args = True):\n if expression and type(expression) is not str:\n raise TypeError('expression should be either string or None or False')\n if clear_args:\n self._prefix_kwargs = {}\n self._prefix_expression = expression",
"def prefix(name):\n def rule(symbol):\n return symbol.startswith(name) or None\n return rule",
"def series_add_prefix(series, prefix):\n f = partial(\"{prefix}{}\".format, prefix=prefix)\n\n return series.rename(index=f)"
] | [
"0.74195033",
"0.6135819",
"0.59976494",
"0.56019163",
"0.55027753",
"0.54626215",
"0.5361422",
"0.53344727",
"0.5287824",
"0.5260219",
"0.525283",
"0.5196512",
"0.5168845",
"0.5155864",
"0.5117644",
"0.511022",
"0.508657",
"0.5071373",
"0.5030445",
"0.49908844",
"0.49615845",
"0.49546325",
"0.49546024",
"0.49122277",
"0.4906942",
"0.4899821",
"0.48913023",
"0.48839104",
"0.48781353",
"0.48706368"
] | 0.63900906 | 1 |
Return prefix for `phone_number`. Use `lookup` to fetch operator. | def find_prefix(self, phone_number: str) -> Optional[str]:
if not isinstance(phone_number, str):
raise TypeError(
f"`phone_number` expected to be of type `str` "
f"but got type `{type(phone_number)}`"
)
if not phone_number.isdigit():
raise ValueError(
"Value of `phone_number` expected to be a string "
"representation of a digit"
)
phone_number = phone_number
match = None
for i, _ in enumerate(phone_number, start=1):
prefix = self.find(phone_number[:i])
if prefix:
match = prefix
else:
prefix_not_found = prefix is None # readability
if match and prefix_not_found:
return match
else:
continue
return match | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strip_phone_prefix(self, phone_num):\n # FIXME more accurate check\n if phone_num.startswith('+86'):\n return phone_num.replace('+86', '')\n if len(phone_num) != 11:\n return None\n return phone_num",
"def phone_number_organizer(self, key):\n\t\ttry:\n\t\t\tphone_number = key[u'phone']\n\t\t\tformat_number = '(' + phone_number[0:3] + ') ' + phone_number[3:6] + '-' + phone_number[6:]\n\t\t\treturn format_number\n\t\texcept KeyError:\n\t\t\tprint [u'name'], \"requires manual phone number verification.\"\n\t\t\treturn \"Manual Input\"",
"def address_prefix(self) -> Optional[str]:\n return pulumi.get(self, \"address_prefix\")",
"def phone_number(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"phone_number\")",
"def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")",
"def prefix(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"prefix\")",
"def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")",
"def phone_number(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone_number\")",
"def prefix(self):\n return str(self.operator) + \" \" + self.leftOperand.prefix() + \" \" + self.rightOperand.prefix()",
"def result_prefix(self):\n return self.calculation.result_prefix",
"def get_prefix(self):\n return self.prefix",
"def lookup_phone_number(phone):\n \n #create Twilio client\n client = Client(ACCOUNT_SID, AUTH_TOKEN)\n\n try:\n\n #check if number is real number using Twilio lookup\n phone_number = client.lookups \\\n .phone_numbers(phone) \\\n .fetch(type=['carrier'])\n\n #returns formmatted phone number\n return phone_number.phone_number\n\n #checks Twilio exception responses if number not real\n except TwilioRestException as e:\n\n #Number not found - return False\n if e.code == 20404:\n\n return False\n\n else:\n\n raise e",
"def add_prefix(self, field_name):\n return self.prefix and ('%s-%s' % (self.prefix, field_name)) or field_name",
"def phone(self) -> str:\n return pulumi.get(self, \"phone\")",
"def number(self):\n return str(self._phone)",
"def getPrefix(self):\n raise NotImplementedError",
"def add_prefix(self, field_name):\r\n return self.prefix and ('%s.%s' % (self.prefix, field_name)) or field_name",
"def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')",
"def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')",
"def prefix(self) -> typing.Optional[str]:\n return self._values.get('prefix')",
"def prefix(num):\n # determine which range it lies in, r1/r2 means reduction 1 or reduction 2\n divisors = [1e-24 * pow(10, 3 * x) for x in range(17)]\n prefixes = list(reversed(['Yotta (Y)', 'Zetta (Z)', 'Exa (E)', 'Peta (P)', 'Tera (T)', 'Giga (G)', 'Mega (M)',\n 'Kilo (K)', '', 'Milli (m)', 'Micro ($\\mu$)', 'Nano (n)', 'Pico (p)', 'Femto (f)',\n 'Atto (a)', 'Zepto (z)', 'Yocto (y)']))\n exp = np.floor(np.log10(np.abs(num)))\n if exp < 0:\n exp -= 3\n expIndex = int(exp / 3) + 8\n expIndex = 0 if expIndex < 0 else expIndex\n expIndex = len(prefixes)-1 if expIndex >= len(prefixes) else expIndex\n r1 = prefixes[expIndex]\n num1 = num / divisors[expIndex]\n if expIndex != len(prefixes):\n r2 = prefixes[expIndex + 1]\n num2 = num / divisors[expIndex + 1]\n else:\n num2 = None\n retStr = str(num1) + ' ' + r1\n if num2 is not None:\n retStr += '\\nor\\n' + str(num2) + ' ' + r2\n return retStr",
"def base_prefix(self):\n return self.calculation.base_prefix",
"def get_prefix(self):\n return self._prefix",
"def get_prefix(self):\n return self._prefix",
"def ad_rep_lead_phone(obj):\n if obj.phone_number is None:\n phone_number = ''\n else:\n phone_number = format_phone(obj.phone_number)\n return \"%s\" % phone_number",
"def prefix(self):\n return self[\"prefix\"]",
"def prefix(self):\n return self[\"prefix\"]",
"def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")",
"def phone(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"phone\")",
"def calculate_prefix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate prefix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\tfor e in reversed(elements):\n\t\t\tif e.isdigit():\n\t\t\t\tstack.append(int(e))\n\t\t\telse:\n\t\t\t\t# this is an operator\n\t\t\t\tif (len(stack) < 2):\n\t\t\t\t\tlogger.info(\"invalid input\")\n\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\toperand2 = stack.pop()\n\t\t\t\t\tif e == \"+\":\n\t\t\t\t\t\tresult = operand1 + operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"-\":\n\t\t\t\t\t\tresult = operand1 - operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"*\":\n\t\t\t\t\t\tresult = operand1 * operand2\n\t\t\t\t\t\tstack.append(int(result))\n\n\t\t\t\t\telif e == \"/\":\n\t\t\t\t\t\tresult = operand1 / operand2\n\t\t\t\t\t\tstack.append(int(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.exception(\"Unrecognized operator\")\n\t\t\t\t\t\traise Exception(\"Not a valid operator\")\n\t\treturn float(stack[0])"
] | [
"0.6136778",
"0.596885",
"0.586732",
"0.5830663",
"0.58184695",
"0.58184695",
"0.5758761",
"0.5758761",
"0.5729075",
"0.56406903",
"0.56264126",
"0.56246763",
"0.5618058",
"0.5616657",
"0.5613042",
"0.56065995",
"0.5562532",
"0.5562178",
"0.5562178",
"0.5562178",
"0.55411625",
"0.5532837",
"0.55299014",
"0.55299014",
"0.55297995",
"0.55283016",
"0.55283016",
"0.54525405",
"0.54525405",
"0.54249704"
] | 0.6957184 | 0 |
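A minimal, runnable sketch of the longest-prefix idea behind `find_prefix` above, written against a plain dict instead of the record's `Operator`/`lookup` machinery (`PRICE_TABLE` and `longest_prefix` are illustrative names, not part of the original); it simply scans every prefix length and keeps the longest one found:

from typing import Optional

# Hypothetical prefix table; in the record above this role is played by
# whatever cache self.find() consults.
PRICE_TABLE = {
    "1": "operator-a",
    "46": "operator-b",
    "467": "operator-c",
}

def longest_prefix(phone_number: str) -> Optional[str]:
    # Same input contract as the record: a string of digits only.
    if not phone_number.isdigit():
        raise ValueError("phone_number must be a string of digits")
    match = None
    for i in range(1, len(phone_number) + 1):
        candidate = phone_number[:i]
        if candidate in PRICE_TABLE:
            match = candidate  # remember the longest prefix seen so far
    return match

if __name__ == "__main__":
    assert longest_prefix("4673212345") == "467"
    assert longest_prefix("31512345") is None

Unlike the method above, this sketch does not exit early once a longer candidate stops matching, which keeps it correct even when the prefix table is not prefix-closed.
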
Build a `PrefixCache` from a tuple of `Operators`. | def build_cache(klass: "PrefixCache", operators: Tuple[Operator, ...]) -> "PrefixCache":
prefix_cache = klass()
for operator in operators:
prefix_cache.update_with_operator(operator)
return prefix_cache | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildOperatorCache(ham: Dict[str, Any]) -> None:\n sysLevel = ham[\"circuit\"][\"sys_level\"]\n qubitNum = ham[\"circuit\"][\"qubits\"]\n\n # Generator the operator for all of the drift terms\n for key in ham[\"drift\"]:\n drifts = ham[\"drift\"][key]\n operator = generateOperator(drifts[\"on_qubits\"], drifts[\"matrices\"], sysLevel, qubitNum) * drifts[\"amp\"]\n ham[\"cache\"][\"operator\"][\"drift\"][key] = operator\n\n # Sum all the drift terms and save to the cache.\n if isinstance(sysLevel, int):\n driftTotal = numpy.zeros((sysLevel ** qubitNum, sysLevel ** qubitNum), dtype=complex)\n elif isinstance(sysLevel, list):\n dim = 1\n for i in sysLevel:\n dim = dim * i\n driftTotal = numpy.zeros((dim, dim), dtype=complex)\n for key in ham[\"cache\"][\"operator\"][\"drift\"]:\n driftTotal = driftTotal + ham[\"cache\"][\"operator\"][\"drift\"][key]\n ham[\"cache\"][\"matrix_of_drift\"] = driftTotal\n\n # Generator the pulse sequences for all of the control terms.\n for key in ham[\"control\"]:\n ctrls = ham[\"control\"][key]\n operator = generateOperator(ctrls[\"on_qubits\"], ctrls[\"matrices\"], sysLevel, qubitNum)\n ham[\"cache\"][\"operator\"][\"control\"][key] = operator",
"def declare_operators(*op_list):\n operators.update({op.__name__:op for op in op_list})\n return operators",
"def buildCache(ham: Dict[str, Any]) -> None:\n\n # Initialize the Hamiltonian\n clearCache(ham)\n\n # Build operators and sequences\n buildOperatorCache(ham)\n buildSequenceCache(ham)",
"def build_triples(x, y, op_str):\n if op_str not in EXPECTED_OPS:\n raise ValueError(f\"{op_str} should be in {EXPECTED_OPS}\")\n\n session = x.session\n shape_x = x.shape\n shape_y = y.shape\n conf = session.config\n min_val = conf.min_value\n max_val = conf.max_value\n\n # TODO: Move this to a library specific file\n a = torch.randint(min_val, max_val, shape_x).long()\n b = torch.randint(min_val, max_val, shape_y).long()\n\n cmd = getattr(operator, op_str)\n c = modulo(cmd(a, b).long(), session)\n\n from sympc.tensor import AdditiveSharingTensor\n\n session_copy = session.get_copy()\n session_copy.config.enc_precision = 0\n\n a_sh = AdditiveSharingTensor(secret=a, session=session_copy)\n b_sh = AdditiveSharingTensor(secret=b, session=session_copy)\n c_sh = AdditiveSharingTensor(secret=c, session=session_copy)\n\n return a_sh, b_sh, c_sh",
"def normalize_prefetch_lookups(lookups, prefix=None):\n ret = []\n for lookup in lookups:\n if not isinstance(lookup, Prefetch):\n lookup = Prefetch(lookup)\n if prefix:\n lookup.add_prefix(prefix)\n ret.append(lookup)\n return ret",
"def _update_prefix(self, prefix: str, operator: Operator):\n cached_operator: Optional[Operator] = self.lookup(prefix)\n if cached_operator:\n cached_price = cached_operator.price_for_prefix(prefix)\n if cached_price:\n if operator.has_better_price_for_prefix(prefix, cached_price):\n self.add_prefix(prefix=prefix, operator=operator)\n else:\n self.add_prefix(prefix=prefix, operator=operator)",
"def compose(*ops):\n if len(ops) == 0:\n return [0, 1, 2, 3, 4, 5, 6, 7]\n if len(ops) == 1:\n return ops[0]\n if len(ops) == 2:\n op1, op2 = ops\n return [op2[op1[v]] for v in range(8)]\n op1 = ops[0]\n rest = ops[1:]\n return compose(op1, compose(*rest))",
"def __init__(self, all_operators, loadouts_store):\n self._name_to_operator = {}\n self._id_to_operator = {}\n for operator_dict in all_operators:\n # separate out the parts of the dictionary that can be just passed through to the constructor\n finished_fields = {\n key: value for key, value in operator_dict.items()\n if key in (\"id\", \"name\", \"icon_url\", \"index\", \"roles\")\n }\n side = OperatorSide[operator_dict[\"side\"]]\n\n # convert the id -> actual loadout objects\n loadouts = []\n for loadout_id in operator_dict[\"loadouts\"]:\n found = loadouts_store.from_id(loadout_id)\n if found is not None:\n loadouts.append(found)\n else:\n logging.warning(\"Skipped a loadout from operator %s with id %s\", operator_dict[\"name\"], operator_dict[\"id\"])\n\n # load in the unique abilities\n op_stats = []\n for ability in operator_dict[\"unique_stats\"]:\n stat = UniqueOperatorStat(ability[\"id\"], ability[\"name\"])\n op_stats.append(stat)\n\n op = OperatorInfo(**finished_fields, side=side, loadouts=loadouts, unique_abilities=op_stats)\n self._id_to_operator[op.id] = op\n self._name_to_operator[op.name.lower()] = op",
"def operator_dict(self, index, vars, **kw):\n out = defaultdict(int)\n op0 = self.args[0].operator_dict(index, vars, **kw)\n op1 = self.args[1].operator_dict(index, vars, **kw)\n for var in set().union(op0, op1):\n if (var in op0) and (var in op1):\n out[var] = add_sparse(op0[var], op1[var])\n elif (var in op0):\n out[var] = op0[var]\n else:\n out[var] = op1[var]\n return out",
"def operator(operator_state):\n blank_position = operator_state.index(0)\n set_of_states = []\n swapping_positions = {\n 0: [1, 3],\n 1: [0, 2, 4],\n 2: [1, 5],\n 3: [0, 4, 6],\n 4: [1, 3, 5, 7],\n 5: [2, 4, 8],\n 6: [3, 7],\n 7: [4, 6, 8],\n 8: [5, 7],\n }\n for new_position in swapping_positions[blank_position]:\n new_state = list(operator_state)\n new_state[new_position] = operator_state[blank_position]\n new_state[blank_position] = operator_state[new_position]\n set_of_states.append(tuple(new_state))\n return set_of_states",
"def from_list_of_assignments(cls, assignments, new_id_prefix=None):\n from cascada.bitvector.operation import Operation\n for v_i, op_i in assignments:\n assert isinstance(v_i, core.Variable) and isinstance(op_i, Operation)\n\n my_table = MemoizationTable()\n my_table.counter = len(assignments)\n\n if new_id_prefix is None:\n first_var = assignments[0][0]\n for i, c in enumerate(first_var.name):\n if c.isdigit():\n index_first_digit = i\n break\n else:\n index_first_digit = len(first_var.name)\n my_table.id_prefix = first_var.name[:index_first_digit]\n else:\n my_table.id_prefix = new_id_prefix\n\n for v_i, op_i in assignments:\n if v_i.name.startswith(my_table.id_prefix) and \\\n v_i.name[len(my_table.id_prefix):].isdigit() and \\\n int(v_i.name[len(my_table.id_prefix):]) > my_table.counter:\n msg = \"invalid var name {} due to id_prefix {} and counter {}\\n{}\".format(\n v_i.name, my_table.id_prefix, my_table.counter, assignments)\n raise ValueError(msg)\n\n my_table.table = bidict.OrderedBidict(assignments)\n\n return my_table",
"def create_operators(params):\n assert isinstance(params, list), ('operator config should be a list')\n ops = []\n for operator in params:\n assert isinstance(operator,\n dict) and len(operator) == 1, \"yaml format error\"\n op_name = list(operator)[0]\n param = {} if operator[op_name] is None else operator[op_name]\n op = getattr(imaug, op_name)(**param)\n ops.append(op)\n\n return ops",
"def tuple_operation(a: list, b: list, op: str) -> list:\n o = []\n for i in range(0, 3):\n if op == \"xor\":\n o.append(a[i] ^ b[i])\n elif op == \"and\":\n o.append(a[i] & b[i])\n elif op == \"or\":\n o.append(a[i] | b[i])\n else:\n raise RuntimeError('Unknown operation')\n return o[0], o[1], o[2]",
"def ops2alg(ops):\n return Model(cardinality=len(ops[0]), \n operations=dict([\"h\"+str(i),list(ops[i])] for i in range(len(ops))))",
"def pre_build(cls, ops, signals, rng):\n\n logger.debug(\"===================\")\n logger.debug(\"PRE BUILD %s\", ops)\n logger.debug(\"sets %s\", [op.sets for op in ops])\n logger.debug(\"incs %s\", [op.incs for op in ops])\n logger.debug(\"reads %s\", [op.reads for op in ops])\n logger.debug(\"updates %s\", [op.updates for op in ops])\n\n if type(ops[0]) not in cls.builders:\n raise BuildError(\"No registered builder for operators of type %r\" %\n type(ops[0]))\n\n BuildClass = cls.builders[type(ops[0])]\n\n kwargs = {}\n if BuildClass.pass_rng:\n kwargs[\"rng\"] = rng\n\n cls.op_builds[ops] = BuildClass(ops, signals, **kwargs)",
"def from_operator(operation=debug):\r\n\r\n def C(*things):\r\n return Container(freezed(operation), list(things), [], [], [], [])\r\n return C",
"def _construct_conditional_from_prefix(self, prefix_tree, timestamps):\n # we don't need a deep copy because we are not using\n # the prefix tree anymore\n conditional_tree = prefix_tree\n\n for node in conditional_tree.items_ordered():\n if not self._get_recurrence(node[0], sorted(timestamps[node[0]])):\n # remove nodes with that don't satisfy the min_rec parameter\n conditional_tree.remove_nodes(node[0])\n\n return conditional_tree",
"def __init__(self, orbital_operators, orbital_labels, op_type, prefactor=1.0):\n\n self.orbital_operators = np.array(orbital_operators, dtype=str)\n self.orbital_labels = np.array(orbital_labels, dtype=int)\n self.op_type = op_type\n\n if len(self.orbital_operators) != len(self.orbital_labels):\n ValueError('The number of orbital operators and labels is inconsistent for the OperatorString: {} {}'.format(len(self.orbital_operators), len(self.orbital_labels)))\n\n self.prefactor = prefactor\n\n # Stored for use in computing commutators.\n # A dictionary of the labels to their index in the operator string.\n self._indices_orbital_labels = dict()\n for ind_orbital in range(len(self.orbital_labels)):\n self._indices_orbital_labels[self.orbital_labels[ind_orbital]] = ind_orbital\n \n # Compute the prefactor automatically if a Majorana operator.\n if self.op_type == 'Majorana':\n # Stored for use in computing commutators.\n # The labels of orbital operators that are 'A' or 'B'.\n self._labels_ab_operators = np.array([self.orbital_labels[ind] for ind in range(len(self.orbital_labels)) if self.orbital_operators[ind] in ['A', 'B']], dtype=int)\n num_ab = len(self._labels_ab_operators)\n\n # The prefactor is 1 or 1j, depending\n # on whether reversing the order of operators creates\n # a +1 or -1 sign due to anti-commutation operators.\n num_swaps_to_reorder = (num_ab*(num_ab-1))/2\n if num_swaps_to_reorder % 2 == 1:\n self.prefactor = 1j\n\n if (self.op_type == 'Pauli' and self.prefactor != 1) \\\n or (self.op_type == 'Majorana' and self.prefactor not in [1, 1j]) \\\n or (self.op_type == 'Fermion' and self.prefactor not in [1, 1j]):\n raise ValueError('Invalid prefactor {} for operator string of op_type {}'.format(self.prefactor, self.op_type))\n \n name_list = [str(self.prefactor),' ']\n for (op, la) in zip(self.orbital_operators, self.orbital_labels):\n name_list.extend([op, ' ', str(la), ' '])\n\n self.name = ''.join(name_list)",
"def _build_prefix(self):\r\n pattern = self.string2\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def __reduce__(self):\r\n # We need to remove 'joblib' from the end of cachedir\r\n cachedir = self.cachedir[:-7] if self.cachedir is not None else None\r\n return (self.__class__, (cachedir,\r\n self.mmap_mode, self.compress, self._verbose))",
"def group_into_tensor_product_basis_sets(operator, seed=None):\n if not isinstance(operator, QubitOperator):\n raise TypeError('Can only split QubitOperator into tensor product'\n ' basis sets. {} is not supported.'.format(\n type(operator).__name__))\n\n sub_operators = {}\n r = RandomState(seed)\n for term, coefficient in operator.terms.items():\n bases = list(sub_operators.keys())\n r.shuffle(bases)\n basis = _find_compatible_basis(term, bases)\n if basis is None:\n sub_operators[term] = QubitOperator(term, coefficient)\n else:\n sub_operator = sub_operators.pop(basis)\n sub_operator += QubitOperator(term, coefficient)\n additions = tuple(op for op in term if op not in basis)\n basis = tuple(\n sorted(basis + additions, key=lambda factor: factor[0]))\n sub_operators[basis] = sub_operator\n\n return sub_operators",
"def make_library_cache(prefix):\n # avoid cache prefix reuse\n assert prefix not in _lib_cache_prefixes\n _lib_cache_prefixes.add(prefix)\n\n class CustomCodeLibraryCacheImpl(CodeLibraryCacheImpl):\n _filename_prefix = prefix\n\n class LibraryCache(Cache):\n \"\"\"\n Implements Cache that saves and loads CodeLibrary objects for additional\n feature for the specified python function.\n \"\"\"\n _impl_class = CustomCodeLibraryCacheImpl\n\n return LibraryCache",
"def build_actions(list_of_tuples):\n node_dict = build_verticies(list_of_tuples)\n ACTIONS = lambda path: node_dict[path.end]\n return ACTIONS",
"def prefix(prefix_list):\n def add_attribute(func):\n if not hasattr(func, \"prefix\"):\n func.prefix = []\n func.prefix.append(prefix_list)\n return func\n return add_attribute",
"def _prefix_symbolic(maybe_iter, prefix,\n constants,\n updated_names):\n if not prefix: return maybe_iter\n\n if not isinstance(maybe_iter, str) and isinstance(maybe_iter, abc.Iterable):\n return tuple([\n _prefix_symbolic(a, prefix, constants, updated_names)\n for a in maybe_iter\n ])\n v = maybe_iter\n if not (isinstance(v, str) and (v.startswith(\"K:\") or v.startswith(\"S:\"))):\n return v\n\n k = v.split(\"*\")[0].split(\"%\")[0]\n if k.startswith(\"K:\"):\n # before \"K:T\" => constants[T]\n # after \"K:{prefix}T\" => constants[{prefix}T]\n if k[2:] in constants:\n v = f\"K:{prefix}{v[2:]}\"\n else:\n # before \"S:T:#\" => intermediate_value[T].shape[#]\n # after \"S:{prefix}T:#\" => intermediate_value[{prefix}T].shape[#]\n assert v.startswith(\"S:\")\n\n v = v[2:]\n if \":\" in v:\n arr = v.split(\":\")\n key = \":\".join(arr[:-1])\n try:\n key = int(key)\n except ValueError:\n key = updated_names.get(key, f\"{prefix}{key}\")\n v = f\"{key}:{arr[-1]}\"\n v = f\"S:{v}\"\n return v",
"def from_tuples(\n tuples: List[Tuple],\n sortorder: Optional[int] = None,\n names: Optional[List[Name]] = None,\n ) -> \"MultiIndex\":\n return cast(\n MultiIndex,\n ps.from_pandas(\n pd.MultiIndex.from_tuples(tuples=tuples, sortorder=sortorder, names=names)\n ),\n )",
"def build_prefix(self):\r\n pattern = self.pattern\r\n m = len(pattern)\r\n p = [None]*m\r\n p[0] = 0\r\n k = 0\r\n for i in range(1,m):\r\n while k > 0 and pattern[i] != pattern[k]:\r\n k = p[k-1]\r\n if pattern[k] == pattern[i]:\r\n k = k+1\r\n p[i] = k\r\n self._prefix = p",
"def _compile_param_map(prefix=None, delimiter='_', **kwargs):\n\n if prefix is not None:\n prefix += delimiter\n else:\n prefix = ''\n\n param_map = {k: k.split(prefix)[1]\n for k in kwargs\n if k.startswith(prefix)}\n\n return param_map",
"def from_tuples(\n cls,\n tuples: Iterable[tuple[Hashable, ...]],\n sortorder: int | None = None,\n names: Sequence[Hashable] | Hashable | None = None,\n ) -> MultiIndex:\n if not is_list_like(tuples):\n raise TypeError(\"Input must be a list / sequence of tuple-likes.\")\n if is_iterator(tuples):\n tuples = list(tuples)\n tuples = cast(Collection[tuple[Hashable, ...]], tuples)\n\n # handling the empty tuple cases\n if len(tuples) and all(isinstance(e, tuple) and not e for e in tuples):\n codes = [np.zeros(len(tuples))]\n levels = [Index(com.asarray_tuplesafe(tuples, dtype=np.dtype(\"object\")))]\n return cls(\n levels=levels,\n codes=codes,\n sortorder=sortorder,\n names=names,\n verify_integrity=False,\n )\n\n arrays: list[Sequence[Hashable]]\n if len(tuples) == 0:\n if names is None:\n raise TypeError(\"Cannot infer number of levels from empty list\")\n # error: Argument 1 to \"len\" has incompatible type \"Hashable\";\n # expected \"Sized\"\n arrays = [[]] * len(names) # type: ignore[arg-type]\n elif isinstance(tuples, (np.ndarray, Index)):\n if isinstance(tuples, Index):\n tuples = np.asarray(tuples._values)\n\n arrays = list(lib.tuples_to_object_array(tuples).T)\n elif isinstance(tuples, list):\n arrays = list(lib.to_object_array_tuples(tuples).T)\n else:\n arrs = zip(*tuples)\n arrays = cast(list[Sequence[Hashable]], arrs)\n\n return cls.from_arrays(arrays, sortorder=sortorder, names=names)",
"def __init__(self, *urls):\n\n self.urlpatterns = []\n\n try:\n if isinstance(urls[0], str):\n prefix = urls[0]\n urls = urls[1:]\n else:\n prefix = None\n except IndexError:\n prefix = None\n\n for t in urls:\n if isinstance(t, (list, tuple)):\n t = url(*t)\n\n if prefix and hasattr(t, 'add_prefix'):\n t.add_prefix(prefix)\n\n self.urlpatterns.append(t)"
] | [
"0.55122936",
"0.5408634",
"0.50833046",
"0.50592685",
"0.49883533",
"0.493835",
"0.4874338",
"0.48739326",
"0.48162144",
"0.47007602",
"0.46888414",
"0.4669612",
"0.46306923",
"0.46295217",
"0.46244913",
"0.46051258",
"0.46014902",
"0.4598839",
"0.45586753",
"0.45210305",
"0.45178232",
"0.45139113",
"0.45135745",
"0.4493744",
"0.44921762",
"0.44897035",
"0.44869068",
"0.4475693",
"0.4470233",
"0.44646534"
] | 0.8065964 | 0 |
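For orientation, a self-contained toy version of the classmethod pattern in `build_cache` above: start from an empty cache and fold each operator in with `update_with_operator`. Everything below is an assumption for illustration — the real `Operator` and `PrefixCache` types, and the actual price-comparison rule, are not shown in this record:

from dataclasses import dataclass, field
from typing import Dict, Tuple

@dataclass
class Operator:
    # Hypothetical stand-in: maps dialling prefix -> price.
    name: str
    prices: Dict[str, float]

@dataclass
class PrefixCache:
    # prefix -> operator currently cached for it
    _cache: Dict[str, Operator] = field(default_factory=dict)

    def update_with_operator(self, operator: Operator) -> None:
        # Toy policy: keep the cheaper operator for each prefix.
        for prefix, price in operator.prices.items():
            current = self._cache.get(prefix)
            if current is None or price < current.prices[prefix]:
                self._cache[prefix] = operator

    @classmethod
    def build_cache(cls, operators: Tuple[Operator, ...]) -> "PrefixCache":
        cache = cls()
        for operator in operators:
            cache.update_with_operator(operator)
        return cache

if __name__ == "__main__":
    a = Operator("A", {"46": 0.17, "467": 1.00})
    b = Operator("B", {"46": 0.20, "4673": 0.50})
    cache = PrefixCache.build_cache((a, b))
    assert cache._cache["46"].name == "A"      # A is cheaper for "46"
    assert cache._cache["4673"].name == "B"    # only B prices "4673"
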
Plots the latest COVID-19 status of the given country; if no name is given, it plots the top 10. | def plot(self, context=None):
response = requests.get(self.url).content
table = pd.read_html(response, attrs={"id": "main_table_countries_today"})
df = table[0].fillna(0)
# df.drop(df.index[0], inplace=True) # World
df.drop(["ActiveCases", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)
df.drop(df.columns[6], axis=1, inplace=True)
        context = context or ""  # treat the default None as "no country given" -> world top-10 path
        if len(context) > 3:
context = context.lower().capitalize()
df = df.loc[df["Country,Other"] == context]
if 4 > len(context) > 1:
context = context.upper()
df = df.loc[df["Country,Other"] == context]
if len(context) <= 1:
df = df[1:]
C_Names = df["Country,Other"].head(n=10).values.tolist()
T_Cases = df["TotalCases"].head(n=10).values.tolist()
# N_Cases = df["NewCases"].head(n=10).values.tolist() # not plotted
T_Deaths = df["TotalDeaths"].head(n=10).values.tolist()
# N_Deaths = df["NewDeaths"].head(n=10).values.tolist() # not plotted
T_Recovered = df["TotalRecovered"].head(n=10).values.tolist()
T_Tests = df["TotalTests"].head(n=10).values.tolist()
x = np.arange(len(C_Names))
width = 0.20
fig, ax = plt.subplots()
ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color="Blue")
ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color="Red")
ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color="Green")
ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color="Orange")
if len(context) > 1:
ax.set_title("{}'s Situation".format(context))
else:
ax.set_title("World's Top10 Situation")
ax.set_xticks(x)
ax.set_xticklabels(C_Names)
ax.legend()
plt.ticklabel_format(style='plain', axis="y")
fig.set_size_inches(18.5, 10.5)
fig.tight_layout()
plt.grid()
if len(context) > 1:
font1 = {'family': 'serif',
'color': 'blue',
'weight': 'bold',
'size': 20}
font2 = {'family': 'serif',
'color': 'red',
'weight': 'normal',
'size': 20}
font3 = {'family': 'serif',
'color': 'green',
'weight': 'normal',
'size': 20}
font4 = {'family': 'serif',
'color': 'orange',
'weight': 'normal',
'size': 20}
# bbox=dict(facecolor='black', alpha=0.5)
plt.text(0.863, 0.67, "Total Cases:\n{:,}".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)
plt.text(0.863, 0.57, "Total Deaths:\n{:,}".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)
plt.text(0.863, 0.47, "Total Tests:\n{:,}".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)
plt.text(0.863, 0.37, "Total Recovered:\n{:,}".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)
# plt.savefig('corona.png') # Uncomment it to save the figure
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_country(name, case):\n click.echo(click.style(\n \"Generating Plot....\", fg='cyan', bold='true'))\n plot_time_series.TimeSeriesPloTs.plot_country(case, name)\n click.echo(click.style(\n \"Done....\", fg='green', bold='true'))",
"def plot_country_representation():\n\n # Get all player data, drops duplicates\n all_players = players.copy().drop_duplicates(subset=\"name\", keep=\"first\")\n # Groupy origin, count unique names (unique since there are no duplicates)\n all_players = all_players.groupby(\"origin\")[\"name\"].count()\n # Push name and origin into columns\n all_players = pd.DataFrame(all_players.reset_index())\n\n # Get all top30 player data, drop duplicates\n top30_players = current_lineups.drop_duplicates(\n subset=\"name\", keep=\"first\")\n # Groupy origin, count unique names (unique since there are no duplicates)\n top30_players = top30_players.groupby(\"origin\")[\"name\"].count()\n # Push name and origin into columns\n top30_players = pd.DataFrame(top30_players.reset_index())\n\n # Get all player data\n majors = players.copy()\n # Filter so only players that have attended Major Tournaments are present\n majors = majors[majors[\"tournament\"].isin(large_tourneys)]\n # Drop duplicates\n majors = majors.drop_duplicates(subset=\"name\", keep=\"first\")\n # Groupby origin, count names\n majors = majors.groupby(\"origin\")[\"name\"].count()\n # Add name and origin back to columns\n majors = pd.DataFrame(majors.reset_index())\n\n # Sort values by count of player\n all_players = all_players.sort_values(by=\"name\", ascending=False)\n top30_players = top30_players.sort_values(by=\"name\", ascending=False)\n majors = majors.sort_values(by=\"name\", ascending=False)\n\n # Renaming columns to better describe data\n top30_players = top30_players.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n all_players = all_players.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n majors = majors.rename(\n columns={\"name\": \"Number of Players\", \"origin\": \"Country\"})\n\n return top30_players",
"def barplot_topn_countries(df: pd.core.frame.DataFrame, feature: str,\n topn: int, kind: str, year: str, figsize=(12,6)) -> None:\n if kind != 'Import' and kind != 'Export':\n raise ValueError('Trade flow is not set to Import or Export')\n\n plt.figure(figsize=figsize)\n g = sns.barplot(x='Reporter', y=(feature,'sum'), data=df[0:topn],\n palette='muted')\n\n if topn > 5 and topn <= 10:\n rot = 0\n elif topn > 10:\n rot = 75\n else:\n rot = 0\n\n g.set_xticklabels(g.get_xticklabels(), rotation=rot)\n plt.ticklabel_format(style='plain', axis='y')\n if year == 'all':\n plt.title(f'Top-{topn} {kind}ers of vaccines around the globe', fontweight='bold')\n else:\n plt.title(f'Top-{topn} {kind}ers of vaccines around the globe in {year}', fontweight='bold')\n plt.xlabel(f'{kind}er Country')\n if feature == 'Trade Value (US$)':\n plt.ylabel(f'Total amount of {kind}s in US$')\n else:\n plt.ylabel(f'Total amount of {kind}s in Netweight (kg)')\n plt.grid(True, alpha = 0.3)\n plt.show()",
"def plot_top(df,fig=None,ax=None,threshold=500):\n if fig==None or ax == None: fig, ax = plt.subplots()\n for c in df['CountryExp'].unique():\n aux = df[df['CountryExp']==c].sort_values(by='DateRep')\n Y = np.cumsum(aux['NewConfCases'])\n if np.max(Y) > threshold and not c.startswith('Cases'):\n ax.plot(aux['DateRep'], np.cumsum(aux['NewConfCases']), lw=3,label=c)\n ax.legend()\n ax.set_ylim(ymin=-1)",
"def plot_stat(data_source, fig_location = None, show_figure = False):\r\n \r\n accidents_counts = parse_counts(data_source[1])\r\n years = list(accidents_counts['years'])\r\n\r\n fig, axes = plt.subplots(ncols=1, nrows=len(years), sharey=True, constrained_layout=True, figsize=(7, 13))\r\n\r\n fig.suptitle(\"Počet nehôd v jednotlivých krajoch v Českej republike za určité obdobie\\n\")\r\n\r\n for ax in axes.flatten():\r\n year = years.pop(0)\r\n a = np.argsort(-accidents_counts[year])\r\n order = list(np.arange(accidents_counts['regions'].size))\r\n for num, index in enumerate(a):\r\n order[index] = num + 1\r\n ax.grid(axis=\"y\", color=\"black\", alpha=.3, linewidth=.5, zorder=1)\r\n rects = ax.bar(accidents_counts['regions'], accidents_counts[year], width=0.9, bottom=0,align='center', color='C3', zorder=3)\r\n ax.spines['top'].set_visible(False)\r\n ax.spines['right'].set_visible(False)\r\n ax.spines['bottom'].set_position('zero')\r\n ax.margins(0.05)\r\n ax.title.set_text(year)\r\n ax.set_ylabel('Počet nehôd')\r\n ax.set_xlabel('Skratka kraja')\r\n for rect in rects:\r\n height = rect.get_height()\r\n ax.annotate('{}.'.format(order.pop(0)),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, 3),\r\n textcoords=\"offset points\",\r\n ha='center', va='bottom')\r\n ax.annotate('{}'.format(height),\r\n xy=(rect.get_x() + rect.get_width() / 2, height),\r\n xytext=(0, -1),\r\n textcoords=\"offset points\",\r\n ha='center', va='top', fontsize=8, color='white')\r\n \r\n if fig_location is not None:\r\n directory = os.path.dirname(fig_location)\r\n if not os.path.isdir(directory if directory != '' else '.'):\r\n os.mkdir(directory)\r\n plt.savefig(fig_location, facecolor='white', edgecolor='white', transparent=False)\r\n \r\n if show_figure:\r\n plt.show()",
"def get_top_10(df):\n\n grouped_df = df.groupby(\"country\").max()\n\n # Confirmed cases\n print(grouped_df.sort_values(\"confirmed\",\n ascending=False)[\"confirmed\"][:10])\n\n # Deaths\n print(grouped_df.sort_values(\"deaths\", ascending=False)[\"deaths\"][:10])\n\n # Recoveries\n print(grouped_df.sort_values(\"recovered\",\n ascending=False)[\"recovered\"][:10])\n\n a = grouped_df.sort_values(\"recovered\", ascending=False)[\"recovered\"][:10]\n print(a.to_markdown())",
"def topCountries(top=10):\r\n #top 10 deadly countries\r\n countries = agg('country')[:top].index\r\n #grab aggregated data for these countries\r\n dataOfTop10 = agg(['year','country']).query(\"country in @countries\")### interesting...\r\n #unstack data\r\n dataOfTop10 = dataOfTop10.unstack(1)\r\n #remove multiindexes\r\n dataOfTop10 = dataOfTop10.transpose().reset_index(level=0, drop=True).transpose()\r\n #sort by year\r\n dataOfTop10.sort_index(inplace=True)\r\n return dataOfTop10",
"def plot_overview():\n try:\n from cartopy.io.img_tiles import Stamen\n from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\n import cartopy.crs as ccrs\n\n tiler = Stamen(\"terrain\")\n mercator = tiler.crs\n\n f = plt.figure(figsize = (10, 8))\n gs = GridSpec(1, 2, width_ratios=[1.0, 0.03])\n\n ax = plt.subplot(gs[0], projection=mercator)\n ax.set_extent([-130, -60, 20, 50])\n ax.add_image(tiler, 5)\n #ax.coastlines('10m', lw=0.5)\n\n ax.set_xlabel(\"Longitude\")\n ax.set_ylabel(\"Latitude\")\n xticks = np.arange(-135, -64, 10)\n yticks = np.arange(25, 46, 10)\n ax.set_xticks(xticks, crs=ccrs.PlateCarree())\n ax.set_yticks(yticks, crs=ccrs.PlateCarree())\n lon_formatter = LongitudeFormatter(zero_direction_label=True)\n lat_formatter = LatitudeFormatter()\n ax.xaxis.set_major_formatter(lon_formatter)\n ax.yaxis.set_major_formatter(lat_formatter)\n\n img=ax.scatter(gauge_information[\"longitude\"],\n gauge_information[\"latitude\"],\n transform=ccrs.PlateCarree(),\n c=gauge_information[\"gauge id\"],\n s=2,\n cmap=\"plasma\")\n ax.set_title(\"Geographic locations of gauges in CAMELS dataset\")\n asp = ax.get_aspect()\n\n ax = plt.subplot(gs[1])\n cb = plt.colorbar(img, label=\"Gauge ID\", cax=ax)\n cb.formatter.set_useOffset(False)\n cb.formatter.set_scientific(False)\n cb.formatter.set_powerlimits((-10, 20))\n cb.update_ticks()\n plt.tight_layout()\n\n except:\n url = \"http://spfrnd.de/datasets/camels/overview.png\"\n img = plt.imread(url)\n plt.imshow(img)\n plt.gca().set_axis_off()",
"def plot_countries(self):\n import matplotlib.pyplot as plt\n import seaborn as sns\n from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n ###\n #check if internships are available in the given file\n ###\n to_read = str(self.exchanged_offers_filepath.get())\n if not os.path.isfile(to_read):\n tkMessageBox.showwarning(title=\"File doesn't exist\",message=\"The filename or the location you entered does not exist!\")\n return None\n else:\n self.exchanged = pd.read_csv(to_read,sep=',',usecols=['domestic offer code','foreign offer code','country','field','min duration','max duration'])\n \n if self.exchanged.empty:\n tkMessageBox.showwarning(title=\"No available data\",\n message=\"No exchanged offers are available in the given file! Add some offers first and try again later\")\n return None\n else:\n ###\n #use pandas functionalities for the plots\n ### \n frequency = pd.DataFrame() \n for country in self.exchanged['country'].unique():\n frequency[country] = [len(self.check_country(self.exchanged,country))]\n frequency = frequency.transpose()\n frequency.columns=['values']\n \n ###\n #making figure\n ###\n fig, ax = plt.subplots(figsize=(4,14))\n frequency.sort_values(by='values').plot(ax=ax,kind='barh',figsize=(4,10))\n ax.tick_params(axis='both', labelsize=16)\n fig.tight_layout()\n\n ###\n #show figure in new tkinter window\n ###\n figure_window_2 = tk.Toplevel()\n figure_window_2.title('Figure')\n \n ###\n #create label to put figure in\n ###\n figure_canvas = FigureCanvasTkAgg(fig,master=figure_window_2)\n figure_canvas.get_tk_widget().grid(column=0,row=0)",
"def do_plot(self):\n years = sorted(set(self.prediction_df_without_covid19['Year']))\n predict_without_covid_country = self.prediction_df_without_covid19[\n self.prediction_df_without_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n predict_with_covid_country = self.prediction_df_with_covid19[\n self.prediction_df_with_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n # ------------------------------------------------------------------------------------------------------\n pa = \\\n predict_without_covid_country.loc[predict_without_covid_country['Year'] == 1990][\n 'Total_CO2_Emissions'].values[\n 0]\n x = []\n for i in range(len(years)):\n x.append(pa * 0.6)\n # ------------------------------------------------------------------------------------------------------\n fig = Figure()\n ax = fig.subplots()\n ax.grid(True, alpha=0.3)\n # plot_title = 'Total CO2 Emissions predicted from 2019-2030 for ' + self.country\n plot_title = 'Total ' + '$CO_2$' + ' Emissions predicted from 2019-2030 for ' + self.country\n label_country_without_covid = 'Total CO2 emissions without covid'\n label_country_with_covid = 'Total CO2 emissions with Covid-19'\n # ------------------------------------------------------------------------------------------------------\n params = {'mathtext.default': 'regular'}\n rcParams.update(params)\n rcParams['font.size'] = 7\n rcParams['lines.markersize'] = 4\n rcParams['figure.figsize'] = [7, 4]\n rcParams['figure.dpi'] = 150\n rcParams['font.family'] = 'Verdana'\n rcParams[\"font.weight\"] = \"normal\"\n font = {'family': 'Verdana',\n 'color': 'xkcd:darkgreen',\n 'weight': 'normal',\n 'size': 9,\n }\n colors = rcParams['axes.prop_cycle'].by_key()['color']\n l1, = ax.plot(years, predict_without_covid_country['Total_CO2_Emissions'], color='xkcd:dark blue green',\n marker='o',\n label=label_country_without_covid)\n l2, = ax.plot(years, predict_with_covid_country['Total_CO2_Emissions'], color='xkcd:neon pink', marker='.',\n label=label_country_with_covid)\n l3, = ax.plot(years, x, color='xkcd:orchid', marker='1')\n print('without covid: ', predict_without_covid_country['Total_CO2_Emissions'].values)\n print('with covid: ', predict_with_covid_country['Total_CO2_Emissions'].values)\n ax.set_xlabel('Years', fontdict=font)\n ax.set_ylabel('Emissions (Gg)', fontdict=font)\n ax.set_title(plot_title, fontsize=12, fontweight='normal')\n ax.patch.set_facecolor('xkcd:green')\n ax.set_facecolor('xkcd:pale green')\n fig.legend((l1, l2, l3), ('Prediction without Covid19', 'Prediction with Covid19', 'Paris Agreement'),\n bbox_to_anchor=(0.907, 0.89))\n fig.savefig(OUTPUT_GRAPH_PATH)",
"def test_top_country(self):\n tabular_format_countries_list = [['Canada', 66, '20'], ['United States', 33, '10']]\n\n result = InstallationStatistics.get_statistics_top_country(tabular_format_countries_list)\n\n self.assertEqual('Canada', result)",
"def return_figures():\n\n # first chart plots arable land from 1990 to 2015 in top 10 economies \n # as a line chart\n graph_one = [] \n df_melt = clean_data('data/b055f1ad-17cc-43fd-bc5e-8a9572a0e573_Data.csv')\n df_melt.columns = ['country', 'year', 'population']\n df_melt.sort_values('population', ascending=False, inplace=True)\n top10 = df_melt.country.unique().tolist()\n \n for country in top10:\n x_val = df_melt[df_melt['country']==country].year.tolist()\n y_val = df_melt[df_melt['country']==country].population.tolist() \n \n \n graph_one.append(\n go.Scatter(\n x = x_val,\n y = y_val,\n mode = 'lines',\n name = country\n )\n )\n\n layout_one = dict(title = 'Most Populous countries growth(2000-2015)',\n xaxis = dict(title = 'Year'),\n yaxis = dict(title = 'Population'),\n )\n \n# second chart plots ararble land for 2015 as a bar chart \n \n graph_two = []\n \n df_2 = clean_data(\"data/co2emissions.csv\")\n df_2.columns = ['country', 'years','CO2']\n df_2.sort_values('CO2', ascending=False, inplace=True)\n for country in top10:\n x_val = df_2[df_2['country']==country].years.tolist()\n y_val = df_2[df_2['country']==country].CO2.tolist() \n graph_two.append(\n go.Scatter(\n x = x_val,\n y = y_val,\n mode = 'lines+markers',\n name = country\n )\n )\n\n layout_two = dict(title = 'CO2 emissions in most populous countries',\n xaxis = dict(title = 'Year'),\n yaxis = dict(title = 'CO2 emissions'),\n )\n\n\n# third chart plots percent of population that is rural from 1990 to 2015\n graph_three = []\n df_3 = clean_data('data/GDP.csv')\n df_3.columns = ['country','year','GDP']\n df_3.sort_values('GDP', ascending=False, inplace=True)\n df_3=df_3[df_3['year'] ==2014]\n graph_three.append(\n go.Bar(\n x = df_3.country.tolist(),\n y = df_3.GDP.tolist(),\n )\n )\n\n layout_three = dict(title = 'GDP in USD',\n xaxis = dict(title = 'Country'),\n yaxis = dict(title = 'GDP(USD)')\n )\n \n# fourth chart shows rural population vs arable land\n graph_four = []\n df_4 = clean_data('data/TotalArea.csv')\n df_4.columns = ['country','year', 'area']\n df_4.sort_values('area', ascending=False, inplace=True)\n df_4=df_4[df_4['year']==2014]\n graph_four.append(\n go.Bar(\n x = df_4.country.tolist(),\n y = df_4.area.tolist(),\n )\n )\n\n layout_four = dict(title = 'Total Area (Sq. Km)',\n xaxis = dict(title = 'Country'),\n yaxis = dict(title = 'Total Area'),\n )\n \n # append all charts to the figures list\n figures = []\n figures.append(dict(data=graph_one, layout=layout_one))\n figures.append(dict(data=graph_two, layout=layout_two))\n figures.append(dict(data=graph_three, layout=layout_three))\n figures.append(dict(data=graph_four, layout=layout_four))\n\n return figures",
"def plot_new_cases(data, case_type, country, province):\n # Copy the identifying columns on geography\n identifier = data[case_type][[\n 'province/state', 'country/region', 'lat', 'long']]\n\n # Insert first column\n col = data[case_type].iloc[:, 4]\n daily_new = col.to_frame()\n\n for i in range(5, len(data[case_type].columns)):\n col = pd.Series(data[case_type].iloc[:, i] -\n data[case_type].iloc[:, i-1])\n daily_new[data[case_type].columns[i]] = col\n\n # Append with geography identifier\n daily_new = pd.concat([identifier, daily_new], axis=1)\n\n # Append active cases into master data\n data['daily_new'] = daily_new\n\n # Plot active cases by country\n\n fig, ax = plt.subplots(1, 1)\n # case_type = 'daily_new'\n\n # Choose color scheme\n color_daily = get_rgb((44, 160, 44))\n\n dates = get_dates(data, 'daily_new')\n num_cases = get_num_cases(data, 'daily_new', country, province)\n ax.bar(dates, num_cases, color=color_daily)\n\n # x axis\n ax.set_xlabel('End of month')\n ax.set_xticks(get_end_months(dates))\n ax.set_xticklabels([format_datetime(end_month)\n for end_month in get_end_months(dates)])\n ax.xaxis.set_tick_params(direction='in')\n\n # y axis\n ax.set_ylabel('Number of new ' + case_type + ' cases')\n ax.set_yticklabels(['{:,}'.format(int(x)) for x in ax.get_yticks().tolist()])\n ax.yaxis.set_tick_params(direction='in')\n \n # Set graph title\n ax.set_title(get_title(country, province))\n\n sns.despine(ax=ax)\n\n fig.tight_layout()\n path = 'plots/daily_' + case_type + '_case_by_country.pdf'\n fig.savefig(path, bbox_inches='tight')\n print('Saved to {}'.format(path))",
"def plot_country(country, data, log='lin', end=None, start=None, limit=0):\n \n import matplotlib.pyplot as plt\n import matplotlib.ticker as ticker\n import seaborn as sns\n\n if country=='all' or country=='World' or len(country)==0:\n temp = data.sort_values('dates')\n temp['cases'] = temp.groupby(['dates'])['cases'].transform('sum')\n temp['deaths'] = temp.groupby(['dates'])['deaths'].transform('sum')\n temp.drop_duplicates(subset=['dates'], inplace=True)\n else:\n temp = data[data.countries == country].sort_values('dates')\n\n temp['cumcases']=temp.cases.cumsum().values\n temp['cumdeaths']=temp.deaths.cumsum().values\n first_date2 = next((ti['dates'] for ind, ti in temp.iterrows() if ti['cumcases'] > limit), None)\n \n if first_date2 == None:\n #print('no cumulative cases over the limit %f for country '%limit, country)\n return\n \n if not(start is None):\n first_date2 = max(start, first_date2)\n \n temp = temp[temp.dates>= first_date2]\n if end is None:\n end = temp.dates.max()\n #print('date range with non-zero data: \\n', first_date2, '-', end)\n else:\n #print('date range with non-zero data: \\n', first_date2, '-', end)\n temp = temp[temp.date <= end]\n\n fig, ax = plt.subplots(figsize=[12, 8])\n plt.plot_date(temp.dates, temp.cumcases.values, '', linewidth=3.5, label='cases', color='#005082', alpha=.5)\n plt.plot_date(temp.dates, temp.cumdeaths.values, '', linewidth=3, label='deaths', color='#FF1053', alpha=.5)\n\n\n if log == \"log\":\n ax.set_yscale('log')\n\n fig.autofmt_xdate()\n ax.xaxis.set_major_locator(ticker.AutoLocator())\n ax.xaxis.set_minor_locator(ticker.AutoMinorLocator())\n ax.xaxis.set_tick_params(labelsize=10)\n plt.title(country + ': Cases and Deaths', fontsize=20)\n plt.xlabel('', fontsize=12, labelpad=8)\n plt.ylabel('total', fontsize=12, labelpad=8)\n plt.legend()\n\n ax.tick_params(axis='both', which='major', pad=8)\n\n sns.despine()\n\n return first_date2",
"async def c19_command(self, ctx, *, country: Optional[str]):\n with ctx.channel.typing():\n country = country or \"nepal\"\n logoUrl = \"http://covidcp.org/images/logo-icononly.png\"\n url = f\"https://coronavirus-19-api.herokuapp.com/countries/{country}\"\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as resp:\n data = await resp.json()\n cntry = data[\"country\"]\n cases = data[\"cases\"]\n todayCases = data[\"todayCases\"]\n deaths = data[\"deaths\"]\n recovered = data[\"recovered\"]\n active = data[\"active\"]\n output = f\"Total Cases - **{cases}** \\n Cases Today - **{todayCases}** \\nTotal Deaths - **{deaths}** \\nActive Cases - **{active}** \\nTotal Recovered - **{recovered}**\"\n embed = Embed(\n color=Color.blurple(), timestamp=datetime.utcnow(), description=output\n )\n embed.set_author(name=f\"COVID-19 Stats for {cntry}\")\n embed.set_thumbnail(url=logoUrl)\n await ctx.send(embed=embed)",
"def overview(self, minState=5):\n n = 600\n \n ### first plot: the RTOFFSETs and STATES\n plt.figure(10)\n plt.clf()\n plt.subplots_adjust(hspace=0.05, top=0.95, left=0.05,\n right=0.99, wspace=0.00, bottom=0.1)\n ax1 = plt.subplot(n+11)\n try:\n print self.insmode+' | pri:'+\\\n self.getKeyword('OCS PS ID')+' | sec:'+\\\n self.getKeyword('OCS SS ID')\n \n plt.title(self.filename+' | '+self.insmode+' | pri:'+\n self.getKeyword('OCS PS ID')+' | sec:'+\n self.getKeyword('OCS SS ID'))\n except:\n pass\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('FUOFFSET')*1e3,\n color=(1.0, 0.5, 0.0), label=self.DLtrack+' (FUOFFSET)',\n linewidth=3, alpha=0.5)\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+12, sharex=ax1) # == DDL movements\n \n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field(self.DDLtrack),\n color=(0.0, 0.5, 1.0), linewidth=3, alpha=0.5,\n label=self.DDLtrack)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n 1e3*self.raw['DOPDC'].data.field('PSP'),\n color=(0.0, 0.5, 1.0), linewidth=1, alpha=0.9,\n label='PSP', linestyle='dashed')\n plt.legend(prop={'size':9})\n plt.ylabel('(mm)')\n plt.xlim(0)\n \n plt.subplot(n+13, sharex=ax1) # == states\n plt.plot(self.raw['OPDC'].data.field('TIME'),\n self.raw['OPDC'].data.field('STATE'),\n color=(1.0, 0.5, 0.0), label='OPDC')\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n self.raw['DOPDC'].data.field('STATE'),\n color=(0.0, 0.5, 1.0), label='DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('STATES')\n yl=plt.ylim()\n plt.ylim(yl[0]-1, yl[1]+1)\n plt.xlim(0)\n ### fluxes\n plt.subplot(n+14, sharex=ax1)\n try:\n fsua_dark = self.fsu_calib[('FSUA', 'DARK')][0,0]\n fsub_dark = self.fsu_calib[('FSUB', 'DARK')][0,0]\n fsua_alldark = self.fsu_calib[('FSUA', 'DARK')].sum(axis=1)[0]\n fsub_alldark = self.fsu_calib[('FSUB', 'DARK')].sum(axis=1)[0]\n except:\n print 'WARNING: there are no FSUs calibrations in the header'\n fsua_dark = 0.0\n fsub_dark = 0.0\n fsua_alldark = 0.0\n fsub_alldark = 0.0\n\n M0 = 17.5\n fluxa = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n fsua_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU1 DIT'))\n print 'FLUX FSUA (avg, rms):', round(fluxa.mean(), 0), 'ADU/s',\\\n round(100*fluxa.std()/fluxa.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxa.mean()),2)\n fluxb = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]+\n self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n fsub_alldark)/\\\n (4*self.getKeyword('ISS PRI FSU2 DIT'))\n print 'FLUX FSUB (avg, rms):', round(fluxb.mean(), 0), 'ADU/s',\\\n round(100*fluxb.std()/fluxb.mean(), 0), '%'\n print ' -> pseudo mag = '+str(M0)+' - 2.5*log10(flux) =',\\\n round(M0-2.5*np.log10(fluxb.mean()),2)\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\\\n fluxa/1000, color='b', alpha=0.5, label='FSUA')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\\\n fluxb/1000, color='r', alpha=0.5, label='FSUB')\n\n plt.ylim(1)\n plt.legend(prop={'size':9})\n plt.ylabel('flux - DARK (kADU)')\n plt.xlim(0)\n plt.subplot(n+15, sharex=ax1)\n try:\n # -- old data version\n 
plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field('OPDSNR'),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field('OPDSNR'),\n color='r', alpha=0.5, label='FSUB SNR')\n except:\n plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUA'].data.field(self.OPDSNR),\n color='b', alpha=0.5, label='FSUA SNR')\n plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n self.raw['IMAGING_DATA_FSUB'].data.field(self.OPDSNR),\n color='r', alpha=0.5, label='FSUB SNR')\n plt.legend(prop={'size':9})\n \n A = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUA'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUA', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUA', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUA', 'DARK')][0,3])\n snrABCD_a = ((A-C)**2+(B-D)**2)\n snrABCD_a /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUA'].data.field('TIME'),\n # snrABCD_a, color='b', alpha=0.5, linestyle='dashed')\n \n A = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA1')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,0])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,0]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,0])\n B = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA2')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,1])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,1]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,1])\n C = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA3')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,2])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,2]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,2])\n D = (self.raw['IMAGING_DATA_FSUB'].data.field('DATA4')[:,0]-\n self.fsu_calib[('FSUB', 'DARK')][0,3])/\\\n (self.fsu_calib[('FSUB', 'FLAT')][0,3]-\n 2*self.fsu_calib[('FSUB', 'DARK')][0,3])\n \n snrABCD_b = ((A-C)**2+(B-D)**2)\n snrABCD_b /= ((A-C).std()**2+ (B-D).std()**2)\n #plt.plot(self.raw['IMAGING_DATA_FSUB'].data.field('TIME'),\n # snrABCD_b, color='r', alpha=0.5, linestyle='dashed') \n \n # -- SNR levels:\n #plt.hlines([self.getKeyword('INS OPDC OPEN'),\n # self.getKeyword('INS OPDC CLOSE'),\n # self.getKeyword('INS OPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(1.0, 0.5, 0.0))\n #plt.hlines([self.getKeyword('INS DOPDC OPEN'),\n # self.getKeyword('INS DOPDC CLOSE'),\n # self.getKeyword('INS DOPDC DETECTION')],\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').min(),\n # self.raw['IMAGING_DATA_FSUB'].data.field('TIME').max(),\n # color=(0.0, 0.5, 1.0))\n # -- plot thresholds\n plt.ylabel('SNR')\n plt.xlim(0)\n \n if self.getKeyword('OCS DET IMGNAME')=='PACMAN_OBJ_ASTRO_':\n # == dual FTK\n plt.subplot(n+16, sharex=ax1)\n plt.ylabel('PRIMET ($\\mu$m)')\n #met = interp1d(np.float_(self.raw['METROLOGY_DATA'].\\\n # data.field('TIME')),\\\n # self.raw['METROLOGY_DATA'].data.field('DELTAL'),\\\n # kind 
= 'linear', bounds_error=False, fill_value=0.0)\n met = lambda x: np.interp(x,\n np.float_(self.raw['METROLOGY_DATA'].data.field('TIME')),\n self.raw['METROLOGY_DATA'].data.field('DELTAL'))\n metro = met(self.raw['DOPDC'].data.field('TIME'))*1e6\n n_ = min(len(self.raw['DOPDC'].data.field('TIME')),\n len(self.raw['OPDC'].data.field('TIME')))\n\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n metro, color=(0.5,0.5,0.), label='A-B')\n\n w1 = np.where((self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'OPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'OPDC FTK stat: 0%'\n\n w1 = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DOPDC FTK stat:', round(100*len(w1[0])/float(n_), 1), '%'\n except:\n print 'DOPDC FTK stat: 0%'\n\n w = np.where((self.raw['DOPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['DOPDC'].data.field('STATE')[:n_]<=7)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]>=minState)*\\\n (self.raw['OPDC'].data.field('STATE')[:n_]<=7))\n try:\n print 'DUAL FTK stat:', round(100*len(w[0])/float(n_),1), '%'\n except:\n print 'DUAL FTK stat: 0%'\n\n plt.xlim(0)\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], '.g', linewidth=2,\n alpha=0.5, label='dual FTK')\n #plt.legend()\n if len(w[0])>10 and False:\n coef = np.polyfit(self.raw['DOPDC'].data.field('TIME')[w],\n metro[w], 2)\n plt.plot(self.raw['DOPDC'].data.field('TIME'),\n np.polyval(coef, self.raw['DOPDC'].\n data.field('TIME')),\n color='g')\n plt.ylabel('metrology')\n\n print 'PRIMET drift (polyfit) :', 1e6*coef[1], 'um/s'\n slope, rms, synth = NoisySlope(self.raw['DOPDC'].\n data.field('TIME')[w],\n metro[w], 3e6)\n plt.figure(10)\n yl = plt.ylim()\n plt.plot(self.raw['DOPDC'].data.field('TIME')[w],\n synth, color='r')\n plt.ylim(yl)\n print 'PRIMET drift (NoisySlope):',\\\n slope*1e6,'+/-', rms*1e6, 'um/s'\n else:\n # == scanning\n plt.subplot(n+16, sharex=ax1)\n fringesOPDC = \\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('DATA3')[:,0]\n \n fringesDOPDC =\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA1')[:,0]-\\\n self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('DATA3')[:,0]\n \n plt.plot(self.raw['IMAGING_DATA_'+self.primary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesOPDC/fringesOPDC.std()),\n color=(1.0, 0.5, 0.0), alpha=0.6,\n label=self.primary_fsu+'/OPDC')\n plt.plot(self.raw['IMAGING_DATA_'+self.secondary_fsu].data.field('TIME'),\n scipy.signal.wiener(fringesDOPDC/fringesDOPDC.std()),\n color=(0.0, 0.5, 1.0), alpha=0.6,\n label=self.secondary_fsu+'/DOPDC')\n plt.legend(prop={'size':9})\n plt.ylabel('A-C')\n plt.xlabel('time stamp ($\\mu$s)')\n return",
"def main():\n \n \"\"\" Download and load data\"\"\"\n dfs = get_data()\n \n \"\"\" Preprocess data, combine rows for country provinces\"\"\"\n combine_list = [\"Australia\", \"US\", \"Canada\", \"Mainland China\", \"China\"]\n for key in dfs.keys():\n dfs[key] = preprocess(df=dfs[key], combine_list=combine_list)\n \n \"\"\" Compute additional variables\"\"\"\n dfs = compute_deaths_over_closed(dfs)\n dfs = compute_active_cases(dfs)\n dfs = compute_death_rate(dfs)\n dfs = compute_df_reindexed(dfs, \"active_cases\")\n dfs = compute_df_reindexed(dfs, \"death_rate\")\n \n \"\"\"Remove 0 and 1 from rate variables\"\"\"\n for keys in [\"death_rate\", \"death_rate_reindexed\", \"deaths_over_closed\"]:\n dfs[keys] = remove_corner_values(dfs[keys])\n \n \"\"\" Set parameters for plotting\"\"\"\n titles = {\"active_cases\": \"COVID-19 Active Cases\", \"active_cases_reindexed\": \"COVID-19 Active Cases (Days from the Start of the Outbreak)\", \"deaths_over_closed\": \"COVID-19 Deaths over (Deaths + Recovered)\", \"death_rate\": \"COVID-19 Death Rate\", \"death_rate_reindexed\": \"COVID-19 Death Rate (Days from the Start of the Outbreak)\"}\n filenames = {\"active_cases\": \"covid19_active.png\", \"active_cases_reindexed\": \"covid19_active_ri.png\", \"deaths_over_closed\": \"covid19_death_over_closed.png\", \"death_rate\": \"covid19_death_rate.png\", \"death_rate_reindexed\": \"covid19_death_rate_ri.png\"}\n row_inclusion_index_threasholds = {\"active_cases\": 770, \"active_cases_reindexed\": 500, \"deaths_over_closed\": 770, \"death_rate\": 770, \"death_rate_reindexed\": 500}\n row_inclusion_indices = {}\n #row_inclusion_indices.get(x) is None:\n # row_inclusion_indices = dfs[\"cases\"].iloc[:,-1] > x\n\n \"\"\" Plot\"\"\"\n for key in row_inclusion_index_threasholds.keys():\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-1] > row_inclusion_index_threasholds[key]\n if key in [\"active_cases_reindexed\", \"death_rate_reindexed\"]:\n row_inclusion_indices[key] = dfs[\"cases\"].iloc[:,-5] > row_inclusion_index_threasholds[key]\n plot(dfs[key], row_inclusion_indices.get(key), titles[key], filenames[key])",
"def plot_active_cases(data, country, province):\n # Create a data frame with number of active cases\n active = (data['confirmed'].iloc[:, 4:] -\n data['recovered'].iloc[:, 4:] -\n data['death'].iloc[:, 4:])\n\n # Copy the identifying columns on geography\n identifier = data['confirmed'][['province/state',\n 'country/region', 'lat', 'long']]\n\n # Append two dataframes\n active = pd.concat([identifier, active], axis=1)\n\n # Append active cases into master data\n data['active'] = active\n\n # Plot active cases by country\n fig, ax = plt.subplots(1, 1)\n case_type = 'active'\n\n # Choose color scheme\n color_active = get_rgb((188, 189, 34))\n\n dates = get_dates(data, case_type)\n num_cases = get_num_cases(data, case_type, country, province)\n ax.plot(dates, num_cases, color=color_active)\n\n ax.text(dates[-1], num_cases[-1], '{:,.0f}'.format(num_cases[-1]),\n color=color_active, ha='left', va='center')\n\n # x axis\n ax.set_xlabel('End of month')\n ax.set_xticks(get_end_months(dates))\n ax.set_xticklabels([format_datetime(end_month)\n for end_month in get_end_months(dates)])\n ax.xaxis.set_tick_params(direction='in')\n\n # y axis\n ax.set_ylabel('Number of active cases')\n ax.yaxis.set_tick_params(direction='in')\n ax.set_yscale('log')\n\n # Set graph title\n ax.set_title(get_title(country, province))\n\n sns.despine(ax=ax)\n\n fig.tight_layout()\n path = 'plots/active_case_by_country.pdf'\n fig.savefig(path, bbox_inches='tight')\n print('Saved to {}'.format(path))",
"def graph_year_state_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 10)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"2001 and 2007 State Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"state\", \"count\", data=df, palette=\"bone\", hue='year')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_top_state_count.png\")",
"def do_stateplot(df: pd.DataFrame, thru: date):\n\tst_dict = dict({\"1\":\"AL\",\"2\":\"AK\",\"4\":\"AZ\",\"5\":\"AR\",\"6\":\"CA\",\"8\":\"CO\",\"9\":\"CT\",\"10\":\"DE\",\"11\":\"DC\",\"12\":\"FL\",\"13\":\"GA\",\"15\":\"HI\",\n\t\"16\":\"ID\",\"17\":\"IL\",\"18\":\"IN\",\"19\":\"IA\",\"20\":\"KS\",\"21\":\"KY\",\"22\":\"LA\",\"23\":\"ME\",\"24\":\"MD\",\"25\":\"MA\",\"26\":\"MI\",\"27\":\"MN\",\"28\":\"MS\",\n\t\"29\":\"MO\",\"29\":\"MO\",\"30\":\"MT\",\"31\":\"NE\",\"32\":\"NV\",\"33\":\"NH\",\"34\":\"NJ\",\"35\":\"NM\",\"36\":\"NY\",\"37\":\"NC\",\"38\":\"ND\",\"39\":\"OH\",\"40\":\"OK\",\n\t\"41\":\"OR\",\"42\":\"PA\",\"44\":\"RI\",\"45\":\"SC\",\"46\":\"SD\",\"47\":\"TN\",\"48\":\"TX\",\"49\":\"UT\",\"50\":\"VT\",\"51\":\"VA\",\"53\":\"WA\",\"54\":\"WV\",\"55\":\"WI\",\n\t\"56\":\"WY\"})\n\tlocs = []\n\tfor x in iter(df.fips):\n\t\tlocs.append(st_dict[x])\n\tdf['text'] = \"Total Deaths: \"+ str(df['Deaths'].astype('int'))\n\n\tfig = go.Figure(data=go.Choropleth(locations=locs,\n\t\tlocationmode='USA-states', z=df.fatalityrate.round(2),\n\t\tcolorscale='Viridis', hovertext=df['text'],\n\t\tcolorbar_title=\"Deaths per 100 residents\"\n\t\t))\n\n\tfig.update_layout(hovermode=\"x unified\"\n\t\t)\n\tfig.update_layout(title_text='covid mortality by State thru ' +\n\t\tthru.strftime('%m-%d-%Y')+ \" -custom data analysis by Brian Herbert\", geo_scope='usa'\n\t\t)\n\treturn fig",
"def plot_countryperskill(data_df, **args):\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n colors14 = args.get('colors14', ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', \\\n '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \\\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', \\\n '#dd1c77', '#8dd3c7'])\n plt.figure(facecolor='w', figsize=(wdth, hght))\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n # empty plots for legend handlers:\n for i in np.arange(0, len(countries_sel)): # country\n plt.scatter([], [], marker='o', s=markersize, edgecolor='black', linewidth='.4',\\\n c=colors14[i], label=countries[countries_sel[i]])\n plt.legend()\n\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white') # legendspace\n\n # actual plotting:\n for i in np.arange(0, len(countries_sel)): # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([j], data_df[countries[countries_sel[i]]][idx[j]], marker='o', \\\n s=markersize, edgecolor='black', linewidth='.4',\\\n alpha=1., c=colors14[i], zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, j], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(np.arange(0, len(idx)), meth_labels, color='black', rotation=30)\n plt.grid(axis='y')\n # plt.xlabel('Method')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_perScore_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()",
"def worldplot_2(data, cc, pc):\n # define the columns of input\n # cc = data.columns[checkcol]\n #pc = data.columns[plotcol]\n \n plt.rcParams['font.size'] = 18\n # generate standart geopandas dataframe\n world_df = geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));\n #check indicies of the input dataframe and modify standart geopandas df\n world_df = world_df[world_df[\"iso_a3\"].isin(data[cc])];\n\n #world_2df.[\"OFa_all_con\"] = np.nan;\n #world_2df.sort_values(by=\"iso_a3\").head()\n for i in world_df.index:\n for j in data.index:\n if world_df.loc[i,\"iso_a3\"] == data.loc[j, cc]:\n try:\n world_df.loc[i,pc] = data.loc[j, pc];\n except: \n print(\"\\nError! Invalid Input. Example for input: OFa_all_con\")\n return\n \n\n fig, ax = plt.subplots(1,1, figsize=(22,12))\n ax.axis('off')\n \n \n if pc == \"OFa_all_con\":\n fig.suptitle('Chinese Development Finance (financial amount)', fontsize=25)\n world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={\"label\":\"\\n Chinese Development Finance in $10 bln (2000-2014)\",\n \"orientation\": \"horizontal\"}, \n missing_kwds={\"color\": \"lightgrey\",\n \"edgecolor\": \"red\",\n \"hatch\": \"///\",\n \"label\": \"Missing values\"});\n else:\n fig.suptitle('Chinese Development Finance (probability)', fontsize=25)\n world_df.plot(column=pc, ax = ax, legend=True, cmap='jet', legend_kwds={\"label\":\"\\n Probability of receiving Chinese Development Finance (2000-2014)\",###ADDDDJUST!!!!!\n \"orientation\": \"horizontal\"}, \n missing_kwds={\"color\": \"lightgrey\",\n \"edgecolor\": \"red\",\n \"hatch\": \"///\",\n \"label\": \"Missing values\"});",
"def plot_compare_first(data, n, case_type, countries, path=None):\n\n fig, ax = plt.subplots(1, 1)\n\n palette10 = [(31, 119, 180), (255, 127, 14),\n (44, 160, 44), (214, 39, 40),\n (148, 103, 189), (140, 86, 75),\n (227, 119, 194), (127, 127, 127),\n (188, 189, 34), (23, 190, 207)]\n\n for country in countries:\n table = get_first(data, n, case_type, country, None)\n if table is None:\n pass\n else:\n # Get color\n color = get_rgb(palette10[countries.index(country)])\n ax.plot(table['num_days'].values,\n table['cases'].values,\n color=color)\n\n # No legend\n ax.text(table['num_days'].values[-1],\n table['cases'].values[-1],\n ' ' + country,\n ha='left', va='center', color=color)\n\n # x axis\n ax.set_xlabel('Days since ' + str(n) + 'th ' + case_type + ' case')\n ax.xaxis.set_tick_params(direction='in')\n\n # y axis\n ax.set_ylabel('Number of ' + case_type + ' cases')\n ax.yaxis.set_tick_params(direction='in')\n ax.set_yticklabels(['{:,}'.format(int(x)) \n for x in ax.get_yticks().tolist()])\n ax.set_yscale('log')\n\n # title\n ax.set_title('As of ' + datetime.today().strftime('%Y-%m-%d'))\n\n sns.despine(ax=ax)\n\n fig.tight_layout()\n\n if path is None:\n path = 'plots/compare_first_{}.pdf'.format(case_type)\n\n fig.savefig(path, bbox_inches='tight')\n print('Saved to {}'.format(path))",
"def plot_all_deaths_report_country(\n model_df,\n country,\n *,\n country_label=None,\n **kwargs\n):\n # Select the active columns\n active_columns = [\n \"deaths_c\",\n ]\n plot_quantity = \"Reported total deaths\"\n date_label = max(model_df[\"time\"])\n country_field=\"region\"\n if country_label is None:\n country_label = default_label_generator(country, \"Reported deaths to \", date_label)\n \n return plot_core.plot_timeseries_country(\n model_df,\n active_columns, \n plot_quantity, # y label of the plot\n country,\n country_label=country_label,\n country_field=country_field,\n timeseries_type=\"reports\",\n **kwargs\n )",
"def plot_co2_by_country(values, country, start, end):\r\n\r\n filtered = values.loc[(values['Country'] == country) &\r\n (values['Year'] >= start) &\r\n (values['Year'] <= end)]\r\n\r\n # x axis values\r\n x1 = filtered['Year']\r\n # corresponding y axis values\r\n y1 = filtered['CO2']\r\n\r\n # plotting the points\r\n plt.plot(x1, y1, label = \"line 1\")\r\n\r\n # naming the x axis\r\n plt.xlabel('x - axis - year')\r\n # naming the y axis\r\n plt.ylabel('y - axis - co2')\r\n\r\n # giving a title to my graph\r\n plt.title('CO2 from ' + start + ' to ' + end + ' for ' + country)\r\n\r\n # function to show the plot\r\n plt.show()",
"def get_ax(self, data):\n timezone = list([x for x in data if 'UTC' in x])\n\n timezone_start = tuple((x/255 for x in (0, 255, 0, 100)))\n country_start = tuple((x/255 for x in (0, 100, 0)))\n # We ignore some countries, as they are too big and need a higher\n # resolution for precise timezone assignment.\n ignored_countries = ['United States', 'Australia', 'Brazil', 'Canada']\n\n ax = plt.axes(projection=ccrs.PlateCarree())\n\n # Print countries and state borders\n ax.add_feature(cartopy.feature.LAND)\n ax.add_feature(cartopy.feature.OCEAN)\n ax.add_feature(cartopy.feature.COASTLINE)\n ax.add_feature(cartopy.feature.BORDERS)\n for state in self.states:\n ax.add_geometries(\n state.geometry,\n ccrs.PlateCarree(),\n facecolor=np.array((240, 240, 220)) / 256,\n edgecolor='black',\n label=state.attributes['name'],\n )\n\n collected_countries = []\n collected_timezones = []\n collected_states = []\n\n timezones_to_draw = []\n countries_to_draw = []\n states_to_draw = []\n for name in data:\n # Color the timezone if we find one\n name = map_timezone_to_utc(name)\n if name in self.timezones_by_name:\n timezone = self.timezones_by_name[name]\n\n # Prevent timezone from being applied multiple times.\n utc_name = timezone.attributes['utc_format']\n if utc_name not in collected_timezones:\n collected_timezones.append(utc_name)\n timezones_to_draw.append(timezone)\n\n # Check if we find a country for this timezone and draw it\n if name in timezone_country:\n # Check if we have a country code for this timezone\n country_code = timezone_country[name]\n\n # We have no country for this code.\n # Unfortunately the natural earth database is a little inconsistent.\n # Try to get the full name of the country by using pycountry\n # and resolve the country by this name.\n if country_code not in self.countries_by_iso_a2:\n try:\n name = pycountries.get(alpha_2=country_code).name\n except KeyError:\n continue\n\n # We found a full name for this code.\n # Check if we have a country for this name.\n if name not in self.countries_by_name:\n continue\n\n # We found a country for this name. 
Proceed\n country = self.countries_by_name[name]\n\n else:\n country = self.countries_by_iso_a2[country_code]\n\n # This country is too big and has many timezones it it.\n # Try to get the state name and to color only the interesting states.\n if country.attributes['NAME_LONG'] in ignored_countries:\n state = map_timezone_to_state(name)\n\n # We couldn't find a state for this timezone\n if state is None:\n continue\n\n # We don't have this state name in our world data\n if state not in self.states_by_name:\n continue\n\n # We already have this state\n if state in collected_states:\n continue\n\n # Found a state\n collected_states.append(state)\n state = self.states_by_name[state]\n states_to_draw.append(state)\n\n continue\n\n # Avoid to draw the same country multiple times\n country_name = country.attributes['NAME_LONG']\n if country_name in collected_countries:\n continue\n\n collected_countries.append(country_name)\n countries_to_draw.append(country)\n\n # Draw everything at the end.\n # Otherwise timezones might draw over countries and fuck up the image.\n for timezone in timezones_to_draw:\n ax.add_geometries(\n timezone.geometry,\n ccrs.PlateCarree(),\n facecolor=timezone_start,\n label=name,\n )\n\n for country in countries_to_draw:\n ax.add_geometries(\n country.geometry,\n ccrs.PlateCarree(),\n facecolor=country_start,\n edgecolor='black',\n label=country_name,\n )\n\n for state in states_to_draw:\n ax.add_geometries(\n state.geometry,\n ccrs.PlateCarree(),\n facecolor=country_start,\n edgecolor='black',\n label=state.attributes['name'],\n )\n\n return ax",
"def worldplot(data):\n \n plt.rcParams['font.size'] = 18\n world_df= geopandas.read_file(geopandas.datasets.get_path('naturalearth_lowres'));\n\n world_df = world_df[world_df[\"iso_a3\"].isin(data[\"recipient_iso3\"])];\n\n #world_2df.[\"OFa_all_con\"] = np.nan;\n #world_2df.sort_values(by=\"iso_a3\").head()\n for i in world_df.index:\n for j in data.index:\n if world_df.loc[i,\"iso_a3\"] == data.loc[j,\"recipient_iso3\"]:\n world_df.loc[i,\"OFa_all_con\"] = data.loc[j, \"OFa_all_con\"];\n\n\n fig, ax = plt.subplots(1,1, figsize=(22,14))\n ax.axis('off')\n fig.suptitle('Chinese Development Finance', fontsize=25)\n \n world_df.plot(column='OFa_all_con', ax = ax, legend=True, legend_kwds={\"label\":\"\\n Chinese Development Finance in $10 bln.\",\n \"orientation\": \"horizontal\"}, \n missing_kwds={\"color\": \"lightgrey\",\n \"edgecolor\": \"red\",\n \"hatch\": \"///\",\n \"label\": \"Missing values\"});",
"def draw_observation(data, date_obj, map_region):\n\n # set mapbox token\n px.set_mapbox_access_token(CONFIG.CONFIG['MAPBOX']['token'])\n\n # create figures\n map_center = {'lat':(map_region[2] + map_region[3]) * 0.5,\n 'lon':(map_region[0] + map_region[1]) * 0.5}\n figs = collections.OrderedDict()\n\n # draw precipitation\n bins = [0.1, 10, 25, 50, 100, 250, 1200]\n keys = ['0.1~10', '10~25', '25~50', '50~100', '100~250', '>=250']\n cols = ['lightgreen', 'yellow', 'lightskyblue', 'blue', 'magenta','maroon']\n cols_map = dict(zip(keys, cols))\n data['rain'] = pd.cut(data['PRE_Time_0808'], bins=bins, labels=keys)\n data['Rainfall'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['PRE_Time_0808'].astype(str)\n data['rain_size'] = data['PRE_Time_0808'] + data['PRE_Time_0808'].mean()\n df = data[data['rain'].notna()]\n if df.shape[0] >= 2:\n figs['Rainfall'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"rain\", category_orders={'rain': keys}, color_discrete_map = cols_map,\n hover_data={'Rainfall':True, 'Lon':False, 'Lat':False, 'rain':False, 'rain_size':False},\n mapbox_style='satellite-streets', size=\"rain_size\", center=map_center, size_max=10, zoom=4,\n title = 'Accumulated precipitation ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw maximum temperature\n bins = [35, 37, 40, 60]\n keys = ['35~37', '37~40', '>=40']\n cols = ['rgb(255,191,187)', 'rgb(250,89,0)', 'rgb(230,0,8)']\n cols_map = dict(zip(keys, cols))\n data['max_temp_warning'] = pd.cut(data['TEM_Max'], bins=bins, labels=keys)\n data['max_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['TEM_Max'].astype(str)\n df = data[data['max_temp_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Max_temperature'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"max_temp_warning\", category_orders={'max_temp_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'max_temp':True, 'Lon':False, 'Lat':False, 'max_temp_warning':False, 'TEM_Max':False},\n mapbox_style='satellite-streets', size=\"TEM_Max\", center=map_center, size_max=10, zoom=4,\n title = 'Maximum temperature ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw minimum temperature\n bins = [-120, -40, -30, -20, -10, 0]\n keys = ['<=-40','-40~-30', '-30~-20', '-20~-10', '-10~0']\n cols = ['rgb(178,1,223)', 'rgb(8,7,249)', 'rgb(5,71,162)', 'rgb(5,109,250)', 'rgb(111,176,248)']\n cols_map = dict(zip(keys, cols))\n data['min_temp_warning'] = pd.cut(data['TEM_Min'], bins=bins, labels=keys)\n data['min_temp'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['TEM_Min'].astype(str)\n df = data[data['min_temp_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Min_temprature'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"min_temp_warning\", category_orders={'min_temp_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'min_temp':True, 'Lon':False, 'Lat':False, 'min_temp_warning':False, 'TEM_Min':False},\n mapbox_style='satellite-streets', size=-1.0*df[\"TEM_Min\"], center=map_center, size_max=10, zoom=4,\n title = 'Minimum temperature ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw low visibility\n data['VIS_Min'] /= 1000.0\n bins = [0, 0.05, 0.2, 0.5, 1]\n keys = ['<=0.05','0.05~0.2', '0.2~0.5', '0.5~1']\n cols = ['rgb(0,82,77)', 'rgb(0,153,160)', 
'rgb(0,210,204)', 'rgb(95,255,252)']\n cols_map = dict(zip(keys, cols))\n data['min_vis_warning'] = pd.cut(data['VIS_Min'], bins=bins, labels=keys)\n data['VIS_Min_size'] = 2.0-data[\"VIS_Min\"]\n data['min_vis'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['VIS_Min'].astype(str)\n df = data[data['min_vis_warning'].notna()]\n if df.shape[0] >= 2:\n figs['Low_visibility'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"min_vis_warning\", category_orders={'min_vis_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'min_vis':True, 'Lon':False, 'Lat':False, 'min_vis_warning':False, 'VIS_Min_size':False},\n mapbox_style='satellite-streets', size=\"VIS_Min_size\", center=map_center, size_max=10, zoom=4,\n title = 'Low visibility ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=900, height=700)\n\n # draw high wind\n bins = [10.8, 13.9, 17.2, 20.8, 24.5, 28.5, 32.7, 37.0, 120]\n keys = ['10.8~13.8','13.9~17.1', '17.2~20.7', '20.8~24.4', '24.5~28.4', '28.5~32.6', '32.7~36.9', '>=37.0']\n cols = ['rgb(0,210,244)', 'rgb(0,125,255)', 'rgb(253,255,0)', 'rgb(247,213,0)',\n 'rgb(255,141,0)', 'rgb(251,89,91)', 'rgb(255,3,0)', 'rgb(178,1,223)']\n cols_map = dict(zip(keys, cols))\n data['max_win_warning'] = pd.cut(data['WIN_S_Max'], bins=bins, labels=keys)\n data['max_win'] = '['+data['Lon'].round(2).astype(str) + ',' + data['Lat'].round(2).astype(str) + ']: ' + \\\n data['WIN_S_Max'].astype(str)\n df = data[data['max_win_warning'].notna()]\n if df.shape[0] >= 2:\n figs['High_wind'] = px.scatter_mapbox(\n df, lat=\"Lat\", lon=\"Lon\", color=\"max_win_warning\", category_orders={'max_win_warning': keys}, \n color_discrete_map = cols_map,\n hover_data={'max_win':True, 'Lon':False, 'Lat':False, 'max_win_warning':False, 'WIN_S_Max':False},\n mapbox_style='satellite-streets', size=\"WIN_S_Max\", center=map_center, size_max=10, zoom=4,\n title = 'Maximum wind speed ({})'.format(date_obj.strftime(\"%Y%m%d 08-08\")),\n width=1000, height=800)\n\n return figs",
"def plot_Rt_forecast_country(\n model_df,\n country,\n *,\n country_label=None,\n **kwargs\n):\n # Select the active columns\n active_columns = [\n \"rt\", \"rt_min\", \"rt_max\",\n ]\n plot_quantity = \"Forecast $R_t$\"\n date_label = min(model_df[\"time\"])\n country_field=\"country\"\n if country_label is None:\n country_label = default_label_generator(country, \"forecast to \", date_label)\n \n return plot_core.plot_timeseries_confidence_interval_country(\n model_df,\n active_columns, \n plot_quantity, # y label of the plot\n country,\n country_label=country_label,\n country_field=country_field,\n timeseries_type=\"Forecast R(t)\",\n **kwargs\n )",
"def plot_forecast_country(\n forecast_df,\n country,\n *,\n country_label=None,\n **kwargs\n):\n # Select the active columns\n active_columns = [\n 'estimated_deaths_forecast',\n 'estimated_deaths_forecast_min',\n 'estimated_deaths_forecast_max',\n ]\n plot_quantity = \"Forecasted daily deaths\"\n date_label = min(forecast_df[\"time\"])\n \n if country_label is None:\n country_label = default_label_generator(country, \"forecast from \", date_label)\n country_field = \"country\"\n\n return plot_core.plot_timeseries_confidence_interval_country(\n forecast_df,\n active_columns,\n plot_quantity, # y label of the plot\n country,\n country_label=country_label,\n country_field=country_field,\n timeseries_type=\"forecast\",\n **kwargs\n )"
] | [
"0.5984105",
"0.5937387",
"0.5841578",
"0.56083816",
"0.5559453",
"0.54571724",
"0.54451543",
"0.5368691",
"0.5362344",
"0.5346326",
"0.53422797",
"0.52614784",
"0.52426875",
"0.52363175",
"0.5218532",
"0.5201958",
"0.5179261",
"0.51773536",
"0.51521355",
"0.5141482",
"0.513415",
"0.51306945",
"0.5123098",
"0.5106267",
"0.51015776",
"0.5082028",
"0.5079858",
"0.50797254",
"0.5078472",
"0.50678873"
] | 0.6362874 | 0 |
Create the LUIS API query URL from a CSV. Requires the CSV to contain the endpoint URL, app ID and primary key | def get_luis_url(folder: WindowsPath = None) -> str:
if folder is None:
folder = Path.cwd().joinpath('CONFIG')
path = folder.joinpath('luis_keys.csv')
df = pd.read_csv(path, index_col='key')
endpoint = df.loc['endpoint', 'value']
app_id = df.loc['app_id', 'value']
primary_key = df.loc['subscription_key', 'value']
result = (
f'{endpoint}luis/v2.0/apps/{app_id}?verbose=true&timezoneOffset'
f'=0&subscription-key={primary_key}&q='
)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_urls(csvfile):\n result = []\n with open(csvfile, 'rU') as infile: \n reader = csv.DictReader(infile, dialect=csv.excel,\n fieldnames=['ID','URL','Latitude','Longitude'])\n for row in reader:\n idnum = row['ID']\n url = row['URL']\n lat = row['Latitude']\n lon = row['Longitude']\n result.append((url, idnum, lat, lon))\n return result",
"def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)",
"def create_query_url(self):\n self.__log('Starting to create the query URL.')\n query_url = self.config['API_URI']\n for key, value in self.options.items():\n if value:\n if query_url == self.config['API_URI']:\n query_url = query_url + str(key) + \"=\" + str(value)\n else:\n query_url = query_url + \"&\" + str(key) + \"=\" + str(value)\n query_url = query_url.replace(' ', '%20')\n self.__log(f'Done creating query url. URL to query: \"{query_url}\"')\n return query_url",
"def make_urls(row):\n mapping = {\n 'base': self.course.moodle.base_url,\n 'cmid': row['cmid'],\n 'subid': row['subid'],\n }\n url = self._submission_url.format(**mapping)\n return url",
"def csv_url(self, csv_url):\n\n self._csv_url = csv_url",
"def csv_url(self, csv_url):\n\n self._csv_url = csv_url",
"def compute_url_link(row):\n return f'https://twitter.com/-/status/{row[\"id\"]}'",
"def initial_csv_wrangling(csv_file):\n df = pd.read_csv(csv_file)\n df = df.fillna('')\n columns = list(df.columns)\n\n # check that \"url\" column exists (required)\n if 'url' not in columns:\n raise Exception('Input csv file requires a \"url\" column, which does not seem to exist. Exiting.')\n\n # check if \"pos_concepts\" column exists and parse accordingly (not required)\n if 'pos_concepts' in columns:\n print('Found \"pos_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['pos_concepts'] = df['pos_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"neg_concepts\" column exists and parse accordingly (not required)\n if \"neg_concepts\" in columns:\n print('Found \"neg_concepts\" column. Values will be split by pipe/vertical bar \"|\" into a python list.')\n df['neg_concepts'] = df['neg_concepts'].map(lambda x: list(set(x.split('|'))))\n\n # check if \"metadata\" column exists and load accordingly (not required)\n if \"metadata\" in columns:\n print('Found \"metadata\" column. Attempting to ingest.')\n try:\n df['metadata'] = df['metadata'].replace('','{}').map(json.loads)\n except:\n raise Exception('Value in \"metadata\" column does not seem to be a properly JSON formatted str.')\n\n return df",
"def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)",
"def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)",
"def run_create_hyper_file_from_csv():\n if args.preprocessed:\n print('running on 4 columns')\n else:\n print('running on 16 columns')\n\n load_time = -1\n query_time = -1\n tstart = time.time()\n path_to_database = Path(\"lineitem.hyper\")\n\n # Optional process parameters.\n # They are documented in the Tableau Hyper documentation, chapter \"Process Settings\"\n # (https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/processsettings.html).\n process_parameters = {\n # Limits the number of Hyper event log files to two.\n #\"log_file_max_count\": \"2\",\n # Limits the size of Hyper event log files to 100 megabytes.\n #\"log_file_size_limit\": \"100M\"\n \"soft_concurrent_query_thread_limit\" : \"16\",\n \"hard_concurrent_query_thread_limit\" : \"16\",\n \"memory_limit\" : \"100g\"\n }\n\n # single threaded?\n if args.single_threaded:\n process_parameters[\"soft_concurrent_query_thread_limit\"] = \"1\"\n process_parameters[\"hard_concurrent_query_thread_limit\"] = \"1\"\n\n result = None\n\n # Starts the Hyper Process with telemetry enabled to send data to Tableau.\n # To opt out, simply set telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU.\n with HyperProcess(telemetry=Telemetry.DO_NOT_SEND_USAGE_DATA_TO_TABLEAU,\n parameters=process_parameters) as hyper:\n\n # Optional connection parameters.\n # They are documented in the Tableau Hyper documentation, chapter \"Connection Settings\"\n # (https://help.tableau.com/current/api/hyper_api/en-us/reference/sql/connectionsettings.html).\n connection_parameters = {\"lc_time\": \"en_US\"}\n\n # Creates new Hyper file \"customer.hyper\".\n # Replaces file with CreateMode.CREATE_AND_REPLACE if it already exists.\n with Connection(endpoint=hyper.endpoint,\n database=path_to_database,\n create_mode=CreateMode.CREATE_AND_REPLACE,\n parameters=connection_parameters) as connection:\n\n table_name = ''\n if args.preprocessed:\n connection.catalog.create_table(table_definition=lineitem_table_preprocessed)\n table_name = lineitem_table_preprocessed.table_name\n else:\n connection.catalog.create_table(table_definition=lineitem_table)\n table_name = lineitem_table.table_name\n\n # Using path to current file, create a path that locates CSV file packaged with these examples.\n path_to_csv = args.data_path\n\n # Load all rows into \"Lineitem\" table from the CSV file.\n # `execute_command` executes a SQL statement and returns the impacted row count.\n count_in_lineitem_table = connection.execute_command(\n command=f\"COPY {table_name} from {escape_string_literal(path_to_csv)} with \"\n f\"(format csv, NULL 'NULL', delimiter '|')\")\n\n print(f\"The number of rows in table {lineitem_table.table_name} is {count_in_lineitem_table}.\")\n load_time = time.time() - tstart\n print('Loading CSV to Hyper took {}s'.format(load_time))\n tstart = time.time()\n # issue query\n # here, TPC-H Q6\n # SELECT\n # sum(l_extendedprice * l_discount) as revenue\n # FROM\n # lineitem\n # WHERE\n # l_shipdate >= date '1994-01-01'\n # AND l_shipdate < date '1994-01-01' + interval '1' year\n # AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n # AND l_quantity < 24;\n\n q = ''\n if args.preprocessed:\n q = f\"\"\"SELECT\n sum(l_extendedprice * l_discount) as revenue\nFROM\n {table_name}\nWHERE\n l_shipdate >= 19940101\n AND l_shipdate < 19950101\n AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n AND l_quantity < 24\"\"\"\n else:\n q = f\"\"\"SELECT\n sum(l_extendedprice * l_discount) as revenue\nFROM\n {table_name}\nWHERE\n l_shipdate >= date '1994-01-01'\n AND 
l_shipdate < date '1994-01-01' + interval '1' year\n AND l_discount between 0.06 - 0.01 AND 0.06 + 0.01\n AND l_quantity < 24\"\"\"\n\n result = connection.execute_list_query(query=q)\n query_time = time.time() - tstart\n print('Query took {}s'.format(query_time))\n print('Result::')\n print(result)\n \n print(\"The connection to the Hyper file has been closed.\")\n print(\"The Hyper process has been shut down.\")\n print('framework,version,load,query,result\\n{},{},{},{},{}'.format('hyper',hyperversion,load_time, query_time, str(result)))",
"def extract_base_path(input_row):\n try:\n link = input_row['url'].strip()\n except KeyError:\n raise KeyError('Input CSV should contain a column header \"url\"')\n\n return link, link.replace('https://www.gov.uk', '')",
"def get_url_data(csv_file_path: str):\n with open(csv_file_path, \"r\", encoding=\"latin-1\") as url_records:\n for url_records in csv.reader(url_records):\n yield url_records",
"def csv_reader(file_obj):\n reader = csv.reader(file_obj)\n for row in reader:\n data = Body(posLinkToken=row[5]).__dict__\n print(\" \".join(row))\n client = APIClient(login=login, password=password, data=data, count=row[4])\n status = client.retail_point_update()\n print(status.status_code, status.content)",
"def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")",
"def url(self, path=None, type_of=\"csv\"):\n\n if \"https://\" in str(path) or \"http://\" in str(path) or \"file://\" in str(path):\n return self.data_loader(str(path), type_of)\n else:\n print(\"Unknown sample data identifier. Please choose an id from the list below\")",
"def ingest_file(input, fields, advanced_operators, output, delimiter=',', quotechar='\"'):\n with open(input, 'rb') as csv_file:\n reader = csv.DictReader(csv_file)\n\n with open(output, 'a') as write_csvfile:\n fieldnames = ['acronym', 'title', 'projectUrl',\n 'foundProjectUrl1', 'foundProjectUrl2',\n 'foundProjectUrl3', 'foundProjectUrl4',\n 'foundProjectUrl5', 'foundProjectUrl6',\n 'foundProjectUrl7', 'foundProjectUrl8',\n 'foundProjectUrl9', 'foundProjectUrl10']\n\n writer = csv.DictWriter(write_csvfile, fieldnames=fieldnames)\n writer.writeheader() # this method only available at python 2.7\n\n search_engine = SearchWeb()\n\n # iterate reader\n for row in reader:\n query_string = str(concatenate(row, fields))\n\n response = search_engine.search(query_string, advanced_operators)\n\n projectsUrl = []\n results_size = len(response)\n\n # TODO print with logger\n print \"INFO: RESULT SIZE - %s\" % results_size\n\n for i in range(10):\n if i < results_size:\n projectsUrl.append(response[i]['Url'])\n else:\n projectsUrl.append('')\n\n # TODO print with logger\n print \"INFO: FIRST RESULT - %s\" % projectsUrl[0]\n writer.writerow(dict(acronym=row['acronym'], title=row['title'], projectUrl=row['projectUrl'],\n foundProjectUrl1=projectsUrl[0], foundProjectUrl2=projectsUrl[1],\n foundProjectUrl3=projectsUrl[2], foundProjectUrl4=projectsUrl[3],\n foundProjectUrl5=projectsUrl[4], foundProjectUrl6=projectsUrl[5],\n foundProjectUrl7=projectsUrl[6], foundProjectUrl8=projectsUrl[7],\n foundProjectUrl9=projectsUrl[8], foundProjectUrl10=projectsUrl[9]))",
"def get_csv(csv_url, csv_key):\n y = requests.get(csv_url, verify=False)\n f = io.BytesIO(y.content)\n\n table_set = mt.CSVTableSet(f)\n row_set = table_set.tables[0]\n offset, headers = mt.headers_guess(row_set.sample)\n row_set.register_processor(mt.headers_processor(headers))\n row_set.register_processor(mt.offset_processor(offset + 1))\n types = mt.type_guess(row_set.sample, strict=True)\n row_set.register_processor(mt.types_processor(types))\n\n # Get dataset title from filename\n dataset_title = re.split('\\.|\\/', csv_url)[-2]\n\n dataset = etree.Element(\"Dataset\")\n etree.SubElement(dataset, \"DatasetURI\").text = 'N_A'\n etree.SubElement(dataset, \"Organization\").text = 'N_A'\n etree.SubElement(dataset, \"Title\").text = dataset_title[:25]\n etree.SubElement(dataset, \"Abstract\").text = 'N_A'\n etree.SubElement(dataset, \"ReferenceDate\").text = 'N_A'\n etree.SubElement(dataset, \"Version\").text = '0'\n etree.SubElement(dataset, \"Documentation\").text = 'N_A'\n columnset = etree.SubElement(dataset, \"Columnset\")\n fkey = etree.SubElement(\n columnset,\n \"FrameworkKey\",\n complete=\"true\",\n relationship=\"one\")\n attrib = etree.SubElement(columnset, \"Attributes\")\n\n for header in (row_set.sample.__next__()):\n header_type = type(header.type).__name__.lower()[:-4]\n if header.column == csv_key:\n col = etree.SubElement(\n fkey,\n \"Column\",\n name=header.column,\n type=\"http://www.w3.org/TR/xmlschema-2/#\" + header_type,\n length=\"255\")\n else:\n col = etree.SubElement(\n attrib,\n \"Column\",\n name=header.column,\n type=\"http://www.w3.org/TR/xmlschema-2/#\" + header_type,\n length=\"255\")\n etree.SubElement(col, \"Title\").text = \"N_A\"\n etree.SubElement(col, \"Abstract\").text = \"N_A\"\n rowset = etree.SubElement(dataset, \"Rowset\")\n\n # For some reason the offset doesn't work, so we skip the headers with\n # a workaround\n iter_rows = iter(row_set)\n next(iter_rows)\n for row in iter_rows:\n rw = etree.SubElement(rowset, \"Row\")\n for cell in row:\n if cell.column == csv_key:\n k = etree.Element(\"K\")\n k.text = str(cell.value)\n rw.insert(0, k)\n else:\n k = etree.SubElement(rw, \"V\")\n k.text = str(cell.value)\n\n return dataset",
"def get_api_url(self, query_, api):\n api_url = \"%s%s%s\" % (api, query_, self.api_key)\n\n return api_url",
"def build_url(self, config, query):\n if(not os.environ['FLICKR_API_KEY']):\n raise ValueError('Environement variable \"FLICKR_API_KEY\" is empty')\n \n current_provider = [provider for provider in config['providers'] if provider['name'] == self.provider_name][0]\n current_provider['query']['text'] = str(query)\n current_provider['query']['api_key'] = os.environ['FLICKR_API_KEY']\n\n query_strings = helper.build_query_strings(current_provider['query'])\n\n return current_provider['base_url'] + query_strings",
"def import_from_url(jamsite, url, fieldnames=None):\n\t# import csv, from the webz.\n\tcsvfile = fetch_csv_from_url(url)\n\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def load_csv(filename: str, solr_url: typing.Optional[str]):\n\n solr_client = Solr(solr_url, always_commit=True) if solr_url else Solr(\"\")\n\n csv_data = { row[\"Item ARK\"]: row for row in csv.DictReader(open(filename)) }\n\n config = {\n \"collection_names\": {\n row[\"Item ARK\"]: row[\"Title\"] for row in csv_data.values() if row[\"Object Type\"] == \"Collection\"\n },\n \"controlled_fields\": load_field_config(\"./fields\"),\n \"child_works\": collate_child_works(csv_data),\n }\n\n controlled_fields = load_field_config(\"./fields\")\n\n mapped_records = []\n for row in rich.progress.track(csv_data.values(), description=f\"Importing {filename}...\"):\n if row[\"Object Type\"] not in (\"ChildWork\", \"Page\"):\n mapped_records.append(map_record(row, solr_client, config=config))\n\n if solr_url:\n solr_client.add(mapped_records)\n else:\n print(json.dumps(mapped_records))",
"def make_url(site,node,instrument,method,stream,API_USERNAME,API_TOKEN):\n\n SENSOR_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'\n VOCAB_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12586/vocab/inv'\n meta_request_url ='/'.join((VOCAB_BASE_URL,site,node,instrument)) # Python wizard best\n data_request_url ='/'.join((SENSOR_BASE_URL,site,node,instrument,method,stream))\n\n # Retrieve vocabulary information for a given instrument\n r = requests.get(meta_request_url, auth=(API_USERNAME, API_TOKEN))\n meta_data = r.json()\n\n return (data_request_url,meta_data)",
"def build_url(ori_lon, ori_lat, des_lon, des_lat, year, month, day, hour, minute, args={}):\n options = dict()\n with open(option_file, 'r', newline='') as file:\n # Read the options file\n for line in file:\n if line[0] == '#': # if the first character of a line is '#' skip it\n continue\n splited_line = line.rstrip().split(':')\n if len(splited_line) < 2: # if it is a line with no ':'\n continue\n options[splited_line[0]] = splited_line[1]\n base_URL = 'localhost:' + port + '/otp/routers/default/plan'\n fromPlace = ori_lon + ',' + ori_lat\n toPlace = des_lon + ',' + des_lat\n date = year + '/' + month + '/' + day\n time = hour + ':' + minute + ':00'\n\n url = 'http://' + base_URL + '?fromPlace=' + fromPlace + '&toPlace=' + toPlace + '&date=' + date + '&time=' + time\n for option_name in options.keys():\n option = options[option_name]\n url += '&' + option_name + '=' + option\n if not 'mode' in url:\n url += '&mode=TRANSIT,WALK'\n for key in args.keys():\n url+= '&' + key + '=' + args[key]\n\n return url",
"def make_url(api_key, url, args=None):\n if args is None:\n args = []\n argsep = '&'\n if '?' not in url:\n argsep = '?'\n if '?apiKey=' not in url and '&apiKey=' not in url:\n args.insert(0, ('apiKey', api_key))\n return url + argsep + '&'.join(['='.join(t) for t in args])",
"def process_csv():\n csv_rows = []\n fieldnames = ['site',\n 'latitude',\n 'longitude',\n 'city',\n 'region_code',\n 'country_code',\n 'continent_code',\n 'min_ip_hex',\n 'max_ip_hex',\n 'transit_provider',\n 'min_ip',\n 'max_ip',\n 'ip_prefix',\n 'min_ipv6_hex',\n 'max_ipv6_hex',\n 'min_ipv6',\n 'max_ipv6',\n 'ipv6_prefix']\n\n location_map = build_location_map()\n\n # Read in the CSV file and augment the columns\n with open(INPUT_FILE, 'rb') as csvfile:\n reader = csv.DictReader(csvfile)\n\n for row in reader:\n csv_rows.append(process_row(row, location_map))\n\n # Write the new CSV file with new columns\n with open(OUTPUT_FILE, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n for row in csv_rows:\n writer.writerow(row)\n\n print(\"MLab Sites CSV generated at {0}\".format(OUTPUT_FILE))",
"def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;",
"def main(args):\n \n args_are_valid, input_filepath, output_filepath, base_url, message = handle_arguments(args)\n if not args_are_valid:\n return print(message)\n \n with open(input_filepath, newline=\"\") as input_csv:\n csvreader = csv.reader(input_csv, delimiter=\",\",)\n\n needed_input_columns = [\"Account ID\",\"First Name\", \"Created On\"]\n needed_output_columns = [\"Account ID\",\"First Name\", \"Created On\", \"Status\", \"Status Set On\"]\n headers = next(csvreader) #grab first row as headers\n if not set(needed_input_columns).issubset(headers):\n print('ERROR - input csv must contain columns [\"Account ID\",\"First Name\", \"Created On\"] as headers')\n\n with open(output_filepath, mode = \"w\", newline = \"\") as output_csv:\n csvwriter = csv.DictWriter(output_csv, fieldnames = needed_output_columns)\n csvwriter.writeheader()\n\n index_of = {}\n for index,header in enumerate(headers):\n index_of[header] = index\n write_dict = {}\n\n #Loop through inputfile\n for row in csvreader:\n still_valid = True\n if len(row) != len(headers):\n message = \"ERROR - csv row has incomplete data\"\n still_valid = False\n if still_valid:\n # extract data from row, columns can be in any order\n for column in needed_input_columns:\n write_dict[column] = row[index_of[column]]\n still_valid, write_dict, message = verify_and_clean_input(write_dict)\n if still_valid:\n write_dict, message = extend(write_dict, query(write_dict[\"Account ID\"], base_url))\n #only write to csv if all input data valid, query data nulled out if invalid\n csvwriter.writerow(write_dict) \n print(message)\n\n output_csv.close() \n input_csv.close()",
"def fetch(url, header_path, id, ip, dbase, targets_table):\n # url = 'http://esimbad/testGSAV7/reslabo?FENID=resLaboPatDitep&NIP={}' \\\n # '&STARTDATE={}&ENDDATE={}'\n\n # header_path = '~/workspace/data/biology/header.csv'\n # constant names specific to our database\n KEY1 = 'id'\n KEY2 = 'NIP'\n C1J1 = 'C1J1'\n\n header = pd.read_csv(header_path, sep=';', encoding='latin1').columns\n\n\n engine = get_engine(id, ip, dbase)\n\n df_ids = sql2df(engine, targets_table)[[KEY1, 'nip', C1J1]]\n df_ids.rename({'nip': KEY2}, inplace=True, axis=1)\n df_ids['patient_id'] = df_ids[KEY1]\n\n cols = [KEY2, 'Analyse', 'Resultat', 'Date prelvt']\n df_res = pd.DataFrame(data=None, columns=cols)\n\n for index, row in df_ids.iterrows():\n nip = row[KEY2].replace(' ', '')\n patient_id = row['patient_id']\n c1j1_date = row[C1J1].date()\n start_date = c1j1_date - timedelta(weeks=8)\n\n c1j1 = str(c1j1_date).replace('-', '')\n start = str(start_date).replace('-', '')\n\n req = requests.get(url.format(nip, start, c1j1))\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n new_df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n new_df = new_df.loc[:, cols + ['LC']] # remove LC\n\n # normalize nip\n new_df[KEY2] = row[KEY2]\n # new_df[KEY2] = new_df[KEY2].map(str)\n # new_df[KEY2] = [nip[:4] + '-' + nip[4:] for nip in new_df[KEY2]]\n\n new_df.drop('LC', axis=1, inplace=True)\n\n df_res = pd.concat([df_res, new_df], axis=0,\n sort=False, ignore_index=True)\n\n return df_res",
"def csv_serving_input_fn():\n csv_row = tf.placeholder(\n shape=[None],\n dtype=tf.string\n )\n features = parse_csv(csv_row)\n features.pop(LABEL_COLUMN)\n return tf.contrib.learn.InputFnOps(features, None, {'csv_row': csv_row})"
] | [
"0.62318754",
"0.5654017",
"0.5514315",
"0.5352997",
"0.5324021",
"0.5324021",
"0.51919746",
"0.51832896",
"0.5166666",
"0.5166666",
"0.5151871",
"0.5135357",
"0.5133279",
"0.512845",
"0.50671405",
"0.50671405",
"0.5013714",
"0.5009945",
"0.49635574",
"0.4927147",
"0.49232295",
"0.49197155",
"0.4916538",
"0.49122825",
"0.48952997",
"0.48928204",
"0.48921433",
"0.48770964",
"0.4871772",
"0.48698154"
] | 0.6092655 | 1 |
Sends student comments to the LUIS.ai API in batches and saves the intermediate results into the OUTPUT folder | def request_api(
student_comments: pd.Series,
url: str,
chunk_size: int = 50
) -> pd.Series:
for i, chunk in enumerate(chunks(student_comments, chunk_size)):
print(f'Processing batch {i} of size {len(chunk)}')
response = chunk.apply(lambda x: requests.get(f'{url}&q={x}') if x is not None else None)
response.to_pickle(Path.cwd().joinpath('OUTPUT').joinpath(f'luis_result_{str(i).zfill(4)}')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\r\n \r\n data_dir = Path.cwd().joinpath('OUTPUT')\r\n config_dir = Path.cwd().joinpath('CONFIG')\r\n \r\n # Load deduplicated comments\r\n data = utils.load(data_dir, 'student_comment_deduplicated')\r\n \r\n # Get the luis API url\r\n with open(config_dir.joinpath('luis_url.txt'), 'r') as f:\r\n luis_url = f.readline()\r\n \r\n request_api(\r\n data,\r\n luis_url,\r\n 1000,\r\n )",
"def _send_batch_feedback(\n batch_idx: int,\n batch_instance_id: int,\n input_raw: str,\n data_type: str,\n sc: SeldonClient,\n retries: int,\n batch_id: str,\n) -> str:\n\n feedback_kwargs = {}\n meta = {\n \"tags\": {\n \"batch_id\": batch_id,\n \"batch_instance_id\": batch_instance_id,\n \"batch_index\": batch_idx,\n }\n }\n # Feedback protos do not support meta - defined to include in file output only.\n try:\n data = json.loads(input_raw)\n feedback_kwargs[\"raw_request\"] = data\n\n str_output = None\n for i in range(retries):\n try:\n seldon_payload = sc.feedback(**feedback_kwargs)\n assert seldon_payload.success\n\n # Update Tags so we can track feedback instances in output file\n tags = seldon_payload.response.get(\"meta\", {}).get(\"tags\", {})\n tags.update(meta[\"tags\"])\n if \"meta\" not in seldon_payload.response:\n seldon_payload.response[\"meta\"] = {}\n seldon_payload.response[\"meta\"][\"tags\"] = tags\n str_output = json.dumps(seldon_payload.response)\n break\n except (requests.exceptions.RequestException, AssertionError) as e:\n logger.error(f\"Exception: {e}, retries {retries}\")\n if i == (retries - 1):\n raise\n\n except Exception as e:\n error_resp = {\n \"status\": {\"info\": \"FAILURE\", \"reason\": str(e), \"status\": 1},\n \"meta\": meta,\n }\n logger.error(\"Exception: %s\" % e)\n str_output = json.dumps(error_resp)\n\n return str_output",
"def process_comments(session, comments):\n for c in tqdm(comments, desc=\"Injecting comments into DB\"):\n db_comment = session.query(Comment).get(c['id'])\n if db_comment:\n db_comment.update(session, **c)\n else:\n Comment.create(session, **c)",
"def get_whole_and_per_sentence_flair_sentiments(list_of_comments):\n\n for comment in list_of_comments:\n result_sum = get_whole_flair_sentiment(comment)\n print(comment)\n print('Whole comment sentiment:', result_sum)\n print()\n sentence_score_list = get_sentence_sentiments(comment)\n print(comment)\n print('per sentence sentiment:', sentence_score_list)\n print()",
"def test_sent_comments(sent_loader):\n respx.put(re.compile(r\"https://management\\.azure\\.com/.*\")).respond(\n 200, json={\"name\": \"97446b1b-26cf-4034-832b-895da135c535\"}\n )\n respx.get(re.compile(r\"https://management\\.azure\\.com/.*\")).respond(\n 200, json=_INCIDENT\n )\n sent_loader.update_incident(\n incident_id=\"13ffba29-971c-4d70-9cb4-ddd0ec1bbb84\",\n update_items={\"severity\": \"High\"},\n )",
"def save_batch(self):\n self._batch_counter += 1\n write_to_disk(\n self._batch_cases,\n os.path.join(\n self.crop.location,\n \"batches\",\n BTCH_NM.format(self._batch_counter),\n ),\n )\n self._batch_cases = []\n self._counter = 0",
"def test_sent_updates(sent_loader):\n respx.put(re.compile(r\"https://management\\.azure\\.com/.*\")).respond(\n 201, json={\"name\": \"97446b1b-26cf-4034-832b-895da135c535\"}\n )\n respx.get(re.compile(r\"https://management\\.azure\\.com/.*\")).respond(\n 200, json=_INCIDENT\n )\n sent_loader.post_comment(\n incident_id=\"13ffba29-971c-4d70-9cb4-ddd0ec1bbb84\", comment=\"test\"\n )",
"def sendToDailies(path, comments, bkp_script, firstFrame, lastFrame, thumb, show=os.getenv('JOB'), shot=os.getenv('SHOT'), asset=os.getenv('ASSET'), seq=os.getenv('SEQ'), task=os.getenv('TASK'), status=\"WORK IN PROGRESS\"):\n\n db = get_connection()\n dailiesCollections = db['submissions']\n new_ptuid = ptuid.ptuid(show, shot, task)\n # creation of the dailies submission entry\n Submission = dict()\n Submission['Date'] = now\n Submission['type'] = \"dailies\"\n Submission['status'] = status\n Submission['timestamp'] = datetime.datetime.utcnow()\n Submission['Show'] = show\n Submission['seq'] = seq\n Submission['Task'] = task\n Submission['Username'] = main_user\n Submission['ptuid'] = new_ptuid\n\n entity = utils.check_entity(task)\n if entity is not None:\n if entity == 'shot':\n Submission['Shot'] = shot\n Submission['first_frame'] = firstFrame\n Submission['last_frame'] = lastFrame\n elif entity == 'asset':\n Submission[entity] = asset\n else:\n Submission['entity'] = None\n\n Submission['bkp_script'] = bkp_script\n Submission['Path'] = path\n Submission['comment'] = comments\n Submission['thumbnail'] = thumb\n Submission['thumbnail_s3'] = \"https://s3.amazonaws.com/cyclopsvfx/\" + os.path.basename(thumb)\n dailiesCollections.save(Submission)\n send_to_S3(thumb)\n users_list = dbq.get_users_from_shot(shot)\n notifications.push_notifications({\"name\": main_user, \"email\": os.getenv('USER_EMAIL')}, users_list, \"dailies\", shot, now)",
"def create_training_set_with_comments(posts_dict, comments_dict, output_filename=direc+\"/training_with_comments.txt\"):\r\n print(\"Creating training set with comments...\")\r\n with open(output_filename, 'w') as f:\r\n total = len(posts_dict)\r\n print(\"# of questions: \" + str(total))\r\n current = 0\r\n for question in posts_dict:\r\n accepted = posts_dict[question]['accepted']\r\n others = posts_dict[question]['other']\r\n line = question\r\n if question in comments_dict:\r\n line += \" \" + \" \".join(comments_dict[question])\r\n \r\n line += \"\\t\" + accepted\r\n if accepted in comments_dict:\r\n line += \" \" + \" \".join(comments_dict[accepted])\r\n \r\n for other in others:\r\n line += \"\\t\" + other\r\n if other in comments_dict:\r\n line += \" \" + \" \".join(comments_dict[other])\r\n line += \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, total)\r\n print(\"\\nFinished creating training set with comments.\\n\")",
"def update_comments(comments, account_name, post_url):\n inc_number = 0\n for index, comment in comments.iterrows():\n # increment + 1\n inc_number = inc_number + 1\n # get preprocessed comment\n comment_spaces, comment_no_stopwords = preprocess_comment(comment['comment'])\n # get sentiment score from comment\n sentiment_score = get_sentiment(comment_no_stopwords)\n # update collection with comments\n collection.update_one(\n {\n 'Codename': account_name,\n 'Posts.URL': post_url\n },\n {\n '$push': {\n 'Posts.$.All Comments': {'comment_id': inc_number,\n 'user': comment['user'],\n 'comment': comment['comment'],\n 'comment_no_stopwords': comment_no_stopwords,\n 'comment_spaces': comment_spaces,\n 'like': comment['like'],\n 'sentiment_score': sentiment_score\n }\n }\n }\n )",
"def create_batch(self, batch_name, priority = 0, comments = '', notifications = []):\n\n url = self._base_url + urlConfig.URLS['Project'] + '/' + self._project_id + '/batch'\n batch = {\n \"batch_name\": batch_name,\n \"priority\": priority,\n \"comments\": comments,\n \"notifications\": [\n ]\n }\n data = json.dumps(batch)\n response = apiCall.post(self._get_token(), url,self._proxy,data, 30)\n logging.debug(response['id'])\n return response['id']",
"def test_get_whole_flair_sentiment():\n\n comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n for x in comments:\n result_sum = get_whole_flair_sentiment(x)\n print(x)\n print('Whole comment sentiment:', result_sum)\n print()",
"def submit_sample_submissions(datadir: str) -> None:\n for competition in KAGGLE_COMPETITIONS:\n print(competition[NAME])\n if os.path.exists(os.path.join(datadir, competition[NAME])):\n log.info(\"Skipping %s already present\", competition[NAME])\n continue\n fd.fetch_kaggle_files(competition[NAME], datadir)\n fetch_processor = competition_meta.get(FETCH_PROCESSOR)\n if fetch_processor:\n files = fetch_processor(files)\n api = kaggle_api()\n for competition in KAGGLE_COMPETITIONS:\n sample_submission = os.path.join(datadir, competition[NAME], 'sample_submission.csv')\n res = submit_kaggle_competition(competition[NAME], sample_submission)\n print(res)",
"def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()",
"def build_newscomment_large(self):\n logging.info('Building news commentary only dataset')\n logging.info(self.configs[NEWS_COMMENTARY])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[NEWS_COMMENTARY],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec(start=9000, percent=False,\n max_size=9000+self.newscommentary_size)\n logging.info('Training on TFDS dataset %s with split %s',\n WMT_BASE_DATASET_NAME, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=False)\n return train_data, None",
"def submission():\n logging.info('Loading and compiling models...')\n model_systole = get_model()\n model_diastole = get_model()\n\n logging.info('Loading models weights...')\n model_systole.load_weights('../models/weights/weights_systole_best.hdf5')\n model_diastole.load_weights('../models/weights/weights_diastole_best.hdf5')\n\n logging.info('Loading validation data...')\n X, ids = load_validation_data()\n\n logging.info('Pre-processing images...')\n X = preprocess(X)\n\n batch_size = 32\n logging.info('Predicting on validation data...')\n pred_systole = model_systole.predict(X, batch_size=batch_size, verbose=1)\n pred_diastole = model_diastole.predict(X, batch_size=batch_size, verbose=1)\n\n # real predictions to CDF\n cdf_pred_systole = correct_cdf(pred_systole)\n cdf_pred_diastole = correct_cdf(pred_diastole)\n\n logging.info('Accumulating results...')\n sub_systole = accumulate_study_results(ids, cdf_pred_systole)\n sub_diastole = accumulate_study_results(ids, cdf_pred_diastole)\n\n # write to submission file\n logging.info('Writing submission to file...')\n fi = csv.reader(open('../input/sample_submission_validate.csv'))\n f = open('../submissions/submission_13.csv', 'w')\n fo = csv.writer(f, lineterminator='\\n')\n fo.writerow(next(fi))\n for line in fi:\n idx = line[0]\n key, target = idx.split('_')\n key = int(key)\n out = [idx]\n if key in sub_systole:\n if target == 'Diastole':\n out.extend(list(sub_diastole[key][0]))\n else:\n out.extend(list(sub_systole[key][0]))\n else:\n logging.info('Miss {0}'.format(idx))\n fo.writerow(out)\n f.close()\n\n logging.info('Done.')",
"def supports_commenting_batch(self):\n return False",
"def test_get_sentence_sentiments():\n long_comment = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n\n sentence_score_list = get_sentence_sentiments(long_comment[0])\n print(long_comment[0])\n print('per sentence sentiment:', sentence_score_list)\n print()",
"def load_one_batch(adapter, nipt_results_path:str):\n \n batch_data = parse_batch_file(nipt_results_path)\n for sample in batch_data:\n mongo_sample = build_sample(sample)\n adapter.add_or_update_document(mongo_sample, adapter.sample_collection)\n mongo_batch = build_batch(batch_data[0])\n adapter.add_or_update_document(mongo_batch, adapter.batch_collection)",
"def test_batch(self):\n pass",
"def save_and_upload_batch_sample_sets(batch_samples, batch_tumors, batch_normals, tsca_id, namespace, workspace):\n # Save to file\n os.system('mkdir -p %s'%tsca_id)\n batch_samples_filename = './%s/fc_upload_sample_set_tsca_%s.txt' % (tsca_id, tsca_id)\n batch_tumors_filename = './%s/fc_upload_sample_set_tsca_%s_tumors.txt' % (tsca_id, tsca_id)\n batch_normals_filename = './%s/fc_upload_sample_set_tsca_%s_normals.txt' % (tsca_id, tsca_id)\n \n batch_samples.to_csv(batch_samples_filename , sep=\"\\t\", index=False )\n batch_tumors.to_csv(batch_tumors_filename , sep=\"\\t\", index=False )\n batch_normals.to_csv(batch_normals_filename , sep=\"\\t\", index=False )\n\n r1 = upload_entities_from_tsv(namespace, workspace, batch_samples_filename)\n r2 = upload_entities_from_tsv(namespace, workspace, batch_tumors_filename)\n r3 = upload_entities_from_tsv(namespace, workspace, batch_normals_filename)\n return (r1, r2, r3)",
"def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH",
"def run(self, batch):\n response = self.post(batch)\n log.info(\"< Discarding batch response\")\n response.close()",
"def build_newscomment_ft(self):\n logging.info('Building news commentary only dataset')\n logging.info(self.configs[NEWS_COMMENTARY_FT])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[NEWS_COMMENTARY_FT],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec(max_size=6000, percent=False)\n logging.info('Training on TFDS dataset %s with split %s',\n WMT_BASE_DATASET_NAME, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=False)\n valid_shard_spec = self.build_shard_spec(max_size=9000, percent=False,\n start=6000)\n eval_data = builder.as_dataset(split='train' + valid_shard_spec,\n shuffle_files=False)\n return train_data, eval_data",
"def _build_student_data(self, data, csvwriter):\n url_base = data['base_url']\n course_id = data['course']\n is_resumen = data['format']\n course_key = CourseKey.from_string(course_id)\n if is_resumen:\n header = ['Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Titulo', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'block id', 'Has saved answers']\n else:\n header = ['Username', 'Email', 'Run', 'Seccion', 'SubSeccion', 'Unidad', 'Titulo', 'Pregunta', 'Respuesta Estudiante', 'Resp. Correcta', 'Intentos', 'Pts Ganados', 'Pts Posibles', 'Pts Total Componente', 'block id', 'Has saved answers', 'State']\n csvwriter.writerow(_get_utf8_encoded_rows(header))\n store = modulestore()\n list_blocks = self.get_block_keys(course_key)\n with store.bulk_operations(course_key):\n for block_key in list_blocks:\n try:\n block_item = store.get_item(block_key)\n except Exception as e:\n continue\n # assume all block_key are directly children of unit\n block_ancestors = self.get_block_ancestors(block_item, store)\n display_name = block_item.display_name.replace(\"\\n\", \"\")\n #jump_to_url = url_base + reverse('jump_to',kwargs={\n # 'course_id': course_id,\n # 'location': str(block_key)})\n # only problem block\n if is_resumen:\n student_states = self.get_user_states(course_key, block_key)\n for response in student_states:\n user_state = json.loads(response['state'])\n report = {}\n report['username'] = response['student__username']\n report['email'] = response['student__email']\n report['user_rut'] = response['student__edxloginuser__run']\n report['attempts'] = user_state['attempts']\n report['gained'] = user_state['score']['raw_earned']\n report['total'] = user_state['score']['raw_possible']\n row = [\n response['student__username'],\n response['student__email'],\n response['student__edxloginuser__run'],\n block_ancestors[2]['display_name'],\n block_ancestors[1]['display_name'],\n block_ancestors[0]['display_name'],\n display_name,\n user_state['attempts'],\n user_state['score']['raw_earned'],\n user_state['score']['raw_possible'],\n str(block_key)\n ]\n if 'has_saved_answers' in user_state and user_state['has_saved_answers']:\n row.append('has_saved_answers')\n csvwriter.writerow(row)\n else:\n for response in self.generate_report_data(block_item):\n if response is None:\n continue\n row = [ \n response['username'],\n response['email'],\n response['user_rut'],\n block_ancestors[2]['display_name'],\n block_ancestors[1]['display_name'],\n block_ancestors[0]['display_name'],\n display_name,\n response['question'].replace(\"\\n\", \"\"),\n response['answer'].replace(\"\\n\", \"\"),\n response['correct_answer'].replace(\"\\n\", \"\"),\n response['attempts'],\n response['gained'],\n response['possible'],\n response['total'],\n str(block_key),\n 'has_saved_answers' if response['has_saved_answers'] else ''\n ]\n if response['state']:\n row.append(response['state'])\n csvwriter.writerow(row)\n return csvwriter",
"def _train_batch(self, review_fwd, review_bwd, summary):\n # feed in the data for forward model\n feed_dict_fwd = {self.enc_inp_fwd[t]: review_fwd[t] for t in range(self.seq_length)}\n feed_dict_fwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # feed in the data for the backward model\n feed_dict_bwd = {self.enc_inp_bwd[t]: review_bwd[t] for t in range(self.seq_length)}\n feed_dict_bwd.update({self.labels[t]: summary[t] for t in range(self.seq_length)})\n\n # train forward model\n print 'Forward Batch Training.......'\n _, loss_t_forward = self.sess.run([self.train_op_fwd, self.loss_fwd], feed_dict_fwd)\n\n # train backward model\n print 'Backward Batch Training.......'\n _, loss_t_backward = self.sess.run([self.train_op_bwd, self.loss_bwd], feed_dict_bwd)\n\n return loss_t_forward, loss_t_backward",
"def after_batch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'loss.txt'), 'a+') as fout:\n fout.write(str(self.trainer._epoch) + '\\t' +\n str(self.trainer._loss.detach().cpu().item()) + '\\n')\n\n if self.trainer._mode == 'test' and (self.f is not None):\n for index in range(len(self.trainer._ids)):\n one_input = self.get_one(self.trainer._input, index)\n one_output = self.get_one(self.trainer._output, index)\n\n res = self.f(one_input, one_output)\n id = self.trainer._ids[index]\n\n self.show(res, id)",
"def write(self, batch):\n time.sleep(self.WRITE_DELAY)",
"def preprocess(self, requests):\r\n input_batch = None\r\n for idx, data in enumerate(requests):\r\n text = data.get(\"data\")\r\n if text is None:\r\n text = data.get(\"body\")\r\n input_text = text.decode('utf-8')\r\n\r\n ################input处理\r\n question = input_text\r\n entity = self.NER(question)\r\n print('your question:{}\\nentity:{}'.format(question,entity))\r\n ################处理完毕\r\n return [entity]",
"def build_newscomment_limited(self):\n logging.info('Building news commentary only dataset')\n logging.info(self.configs[NEWS_COMMENTARY])\n builder = tfds.builder(WMT_BASE_DATASET_NAME,\n config=self.configs[NEWS_COMMENTARY],\n data_dir=self.data_dir)\n self.default_builder_obj = builder\n shard_spec = self.build_shard_spec(start=84000, percent=False,\n max_size=85000) # 284246 full\n logging.info('Training on TFDS dataset %s with split %s',\n WMT_BASE_DATASET_NAME, 'train' + shard_spec)\n train_data = builder.as_dataset(split='train' + shard_spec,\n shuffle_files=False)\n return train_data, None"
] | [
"0.62581986",
"0.56374973",
"0.55773365",
"0.5480655",
"0.54645246",
"0.5297529",
"0.52373004",
"0.52115196",
"0.5190923",
"0.5185241",
"0.51840454",
"0.5158123",
"0.51344043",
"0.5082114",
"0.50543535",
"0.50502497",
"0.50455976",
"0.5043005",
"0.5015503",
"0.49875677",
"0.49788997",
"0.4970721",
"0.49422464",
"0.49344593",
"0.49286214",
"0.49249244",
"0.49171203",
"0.4893047",
"0.4881596",
"0.4872072"
] | 0.6514273 | 0 |
Merging the results from the luis response with the lemmatised comments. | def merge_comments(df1: pd.DataFrame, df2: pd.DataFrame, out_dir: str) -> pd.DataFrame:
nlp = spacy.load('en_core_web_lg')
df1 = pd.DataFrame(load_pickles(out_dir))
df1.columns = ['response']
df1['student_comment_apostrophe'] = df1.response.apply(lambda x: x['query'] if x is not None else None)
df1.dropna(subset=['student_comment_apostrophe'], inplace=True)
df2 = pd.read_csv("D:/OneDrive - UTS/36102 iLab 1 - Spring 2019/CODE/OUTPUT/student_comment_lemmatised_nostopwords_nopunct.csv")
df2.rename(columns={'Unnamed: 0': 'index'}, inplace=True)
# Create a merging column by putting the comment through the nlp
# preprocessing steps
df1['merge_col'] = df1.student_comment_apostrophe.apply(lambda x: ' '.join([token.lemma_ for token in nlp(x) if not token.is_punct and not token.is_stop]))
merged_df = df2.merge(df1,
how='left',
left_on='student_comment_lemmatised_nostopwords_nopunct',
right_on='merge_col')
merged_df.dropna(inplace=True)
merged_df.set_index('index', inplace=True)
return merged_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _proc(dat):\n def lemma(text):\n lemmatizer = WordNetLemmatizer()\n w_tokenizer = WhitespaceTokenizer()\n return [lemmatizer.lemmatize(w) for w in w_tokenizer.tokenize(text)]\n\n dat['text_lemmatized'] = dat['clean_comments'].apply(lemma)\n dat['text_lemmatized'] = dat['text_lemmatized'].apply(' '.join)",
"def get_whole_and_per_sentence_flair_sentiments(list_of_comments):\n\n for comment in list_of_comments:\n result_sum = get_whole_flair_sentiment(comment)\n print(comment)\n print('Whole comment sentiment:', result_sum)\n print()\n sentence_score_list = get_sentence_sentiments(comment)\n print(comment)\n print('per sentence sentiment:', sentence_score_list)\n print()",
"def combine_comments(short_comments_df, long_comments_df):\n long_comments_df.set_index('perfume_id', inplace=True)\n long_comments_df['long_comments'] = long_comments_df['comments'].apply(','.join)\n all_comments = pd.merge(short_comments_df, long_comments_df, how='left', left_index=True, right_index=True)\n all_comments = all_comments.fillna('.')\n all_comments['all_comments'] = all_comments['short_comments'] + all_comments['long_comments']\n all_comments.drop(['comments', 'short_comments', 'long_comments', 'url'], axis=1, inplace=True)\n all_comments = all_comments.reset_index().rename(columns={'index':'perfume_id'})\n return all_comments",
"def lemma(comment):\n words = comment.split()\n tags = []\n lemma_words = []\n result=[]\n for i in range(len(words)):\n tags.append(words[i][words[i].rindex(\"/\"):])\n words[i] = words[i][:words[i].rindex(\"/\")]\n doc = spacy.tokens.Doc(nlp.vocab, words)\n doc = nlp.tagger(doc)\n for i in doc:\n if i.lemma_[0] ==\"-\" and i.string[0]!=\"-\":\n lemma_words.append(i.text)\n else:\n lemma_words.append(i.lemma_)\n \n for i in range(len(lemma_words)):\n \n result.append(lemma_words[i]+tags[i])\n \n return \" \".join(result)",
"def extract_comments(self, response):\n\n # use the comment_parser package to extract HTML and JS comments\n try:\n html_comments = comment_parser.extract_comments_from_str(response.text, mime=\"text/html\")\n except (UnterminatedCommentError, CP_ParseError):\n html_comments = []\n try:\n js_comments = comment_parser.extract_comments_from_str(response.text, mime=\"application/javascript\")\n except (UnterminatedCommentError, CP_ParseError):\n js_comments = []\n\n # put the discovered comments together\n comments = list()\n for comment in html_comments:\n comments.append({\"line\": comment.line_number(), \"comment\": \"<!--\" + comment.text() + \"-->\"})\n for comment in js_comments:\n if comment.is_multiline():\n comments.append({\"line\": comment.line_number(), \"comment\": \"/*\" + comment.text() + \"*/\"})\n else:\n comments.append({\"line\": comment.line_number(), \"comment\": \"//\" + comment.text()})\n\n # store the discovered comments w.r.t. the response's path & query\n if comments:\n parsed_url = urllib.parse.urlparse(response.url)\n if self.config[\"crawl_parameter_links\"].lower() == \"true\":\n self.comments[parsed_url.path + parsed_url.query] = comments\n else:\n self.comments[parsed_url.path] = comments",
"def make_parsed_comments(self):\n if not hasattr(self, 'separated_comments'):\n self.separated_comments = self.separate_comments()\n \n # build comments list of dictionaries, one dictionary for each article\n self.comments = []\n for self.separated_comment in self.separated_comments:\n try:\n comment_data = self.get_comment_data(self.separated_comment)\n self.comments.append(comment_data)\n except Exception as e:\n pass\n return self.comments",
"def core(self):\n \n \n comments = self.bot.subreddit(\n \"all\").stream.comments(\n skip_existing = True)\n \n \n for comment in comments:\n \n text = comment.body.lower().replace(\".\", \"\")\n \n for card in self.catalog:\n \n if (\n card[1].lower() in text\n and card[0].lower() in text\n and not comment.submission.id in self.responded\n and not comment.subreddit.user_is_banned):\n\n self.get_info(card)\n\n if not self.details:\n \n break\n\n audio = [\n \"audiobook\", \n \"audio book\"]\n \n author_format = [\n name.lower() for name in card[1].split(\" \") \n if len(name) >= 3]\n\n if (\n self.details[\"duration\"] > 10800\n and card[0].lower() in self.details[\n \"title\"].lower()\n and any(\n item in self.details[\n \"title\"].lower() for item in audio)\n and all(\n item in self.details[\n \"title\"].lower() for item in author_format)):\n \n \n saw_the_sign = (\n \"\"\"[^(Source Code)](https://capybasilisk.com/posts/\"\"\"\n \"\"\"2020/04/speculative-fiction-bot/) \"\"\"\n \"\"\"^| [^(Feedback)](https://www.reddit.com/message/\"\"\"\n \"\"\"compose?to=Capybasilisk&subject=Robot) \"\"\"\n \"\"\"^| [^(Programmer)](https://www.reddit.com/u/\"\"\"\n \"\"\"capybasilisk) \"\"\"\n \"\"\"^| ^(Downvote To Remove) \"\"\" \n \"\"\"^| ^(Version 1.4.0) \"\"\"\n \"\"\"^| ^(Support Robot Rights!)\"\"\")\n \n\n comment.reply(\n f\"\"\"Hi. You just mentioned *{card[0]}* by \"\"\" \n f\"\"\"{card[1]}.\\n\\nI've found an audiobook of \"\"\" \n \"\"\"that novel on YouTube. You can listen to it here\"\"\"\n f\"\"\":\\n\\n[YouTube | {self.details['title']}]\"\"\"\n f\"\"\"({self.details['webpage_url']})\\n\\n*I\\'m a bot that \"\"\" \n \"\"\"searches YouTube for science fiction and fantasy\"\"\" \n f\"\"\" audiobooks.*\\n***\\n{saw_the_sign}\"\"\")\n\n \n self.responded.append(\n comment.submission.id)\n \n with open(\n \"activity.csv\", \n \"a\", \n encoding = \"UTF-8\") as actlog:\n\n activity = clevercsv.writer(\n actlog)\n\n if actlog.tell() == 0:\n\n activity.writerow(\n [\"Book\",\n \"Comment\", \n \"Author\", \n \"Thread\", \n \"Subreddit\", \n \"Time\"])\n\n activity.writerow(\n [f\"{card[0]} by {card[1]}\",\n f\"{comment.body}\",\n f\"{comment.author}\",\n f\"{comment.submission.title}\",\n f\"{comment.subreddit}\",\n f\"{pendulum.now().to_datetime_string()}\"])\n \n self.details = None\n \n break\n \n break \n \n if pendulum.now().to_time_string().endswith(\n \"0:00\"):\n \n self.tidy()",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def extract_relevant(self):\n item_extraction = self.data\n my_dict = {'tweeted_time': item_extraction['created_at'],\n 'tweet_id': item_extraction['id'],\n # If the time comes when the below becomes more significant, it will be no trouble at all to make an\n # additional column for it, but delimiting it with a ` creates less clutter in the Database\n 'in_reply_to':\n \"NAME/\" + str(item_extraction['in_reply_to_screen_name']) + \"`\" +\n \"STATUSID/\" + str(item_extraction['in_reply_to_status_id_str']) + \"`\" +\n \"USERID/\" + str(item_extraction['in_reply_to_user_id_str']),\n 'lang': item_extraction['lang'],\n 'place': item_extraction['place'], 'source': item_extraction['source']}\n if item_extraction['place'] is not None:\n my_dict['place'] = item_extraction['place']['full_name']\n if 'retweeted_status' in item_extraction.keys():\n my_dict['original_author_id'] = item_extraction['retweeted_status']['user']['id']\n my_dict['original_author_handle'] = item_extraction['retweeted_status']['user']['screen_name']\n tester = item_extraction['retweeted_status']['text']\n cleaned = ' '.join(re.sub(\"(RT : )|(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", tester).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n # This final text will make it a lot easier to run NLP\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n else:\n my_dict['original_author_id'] = item_extraction['user']['id']\n my_dict['original_author_handle'] = item_extraction['user']['screen_name']\n cleaned = ' '.join(re.sub(\"(@[\\S]+)|(&\\S+)|(http\\S+)\", \" \", item_extraction['text']).split())\n removed_others = \" \".join(re.sub(\"(#\\S+)\", ' ', cleaned).split())\n final_text = ''.join(list(filter(lambda x: x.isalpha() or x is ' ', removed_others)))\n final_text = final_text.strip().replace(' ', ' ').replace(' ', ' ')\n my_dict['plain_text'] = final_text.lower()\n my_dict['tweet'] = cleaned\n return my_dict",
"def build_reply(results, is_list):\n \n \n\n #results and requests should be of the same size\n #additionally, a failed result should be represented by an empty entry\n reply_string = \"\"\n #sort(results)\n for entry in range(len(results)):\n entry_string = \"\"\n entry_dicts = sort_results(results[entry])\n tournament_set = set()\n if is_list:\n entry_string += make_section(entry_dicts)\n for row in entry_dicts:\n if row[\"tournament\"] not in tournament_set:\n #make a tournament heading\n entry_string += row[\"tournament\"] + \":\" + ENDL\n tournament_set.add(row[\"tournament\"])\n entry_string += video_format.format(bracket=row[\"bracket\"],\n video_id=row[\"video\"]\n )\n reply_string += entry_string + LINE\n else:\n pass\n #additionally add a footer to the message that gives info on the bot\n return reply_string",
"def test_get_sentence_sentiments():\n long_comment = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n\n sentence_score_list = get_sentence_sentiments(long_comment[0])\n print(long_comment[0])\n print('per sentence sentiment:', sentence_score_list)\n print()",
"def _get_comments(**kwargs):\r\n\r\n # Log in to get cookies.\r\n cookies = _login(**kwargs)\r\n\r\n if 'r' not in kwargs:\r\n # This is the first comments request.\r\n # Make the comments request and set an empty list.\r\n kwargs['r'] = requests.get('https://news.ycombinator.com/threads?id=%s' % kwargs['args'].username,\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n kwargs['comments'] = []\r\n\r\n # Grab the comments.\r\n J = pq(kwargs['r'].content)\r\n comments = J('table table td.default')\r\n\r\n for c in comments:\r\n\r\n comment = _sanitize_comment(J, c)\r\n\r\n if kwargs['args'].no_owner and comment['user'] == kwargs['args'].username:\r\n continue\r\n\r\n # Add the comment to the saved list.\r\n kwargs['comments'].append({\r\n 'user': comment['user'],\r\n 'comment': comment['comment'],\r\n 'reply': comment['reply'],\r\n 'points': comment['points'],\r\n 'link': comment['link'],\r\n 'parent': comment['parent'],\r\n 'story': comment['story'],\r\n 'date': comment['date'],\r\n })\r\n\r\n # If we're getting all comments.\r\n if kwargs['args'].all:\r\n\r\n # Find the 'More' link and load it.\r\n last = J('a', J('table table tr td.title:last'))\r\n if last.text() == 'More':\r\n kwargs['r'] = requests.get('https://news.ycombinator.com%s' % last.attr('href'),\r\n cookies=cookies)\r\n\r\n # Check to make sure we have a good response.\r\n if not _good_response(**kwargs):\r\n kwargs.pop('r')\r\n return _get_comments(**kwargs)\r\n\r\n # Call this function again, this time with the new list.\r\n return _get_comments(**kwargs)\r\n\r\n return kwargs['comments']",
"def get_comments(self,comments):\n all_comments = []\n for comment in comments:\n try :\n all_comments.append({\n 'comment':comment['data']['body'],\n 'score':comment['data']['score']\n })\n except: pass\n return all_comments",
"def _format_response(self, response):\n texts = []\n for result in response.results: \n texts.append(result.alternatives[0].transcript)\n return texts",
"def build_course_dictionary(title_result_set, desc_result_set) -> Dict[str, List[str]]:\n\n course_dictionary = {} # placeholder dictionary\n\n for (tagged_title, tagged_description) in zip(title_result_set, desc_result_set): # iterate through multiple result sets\n full_title_desc_list = {}\n full_title_desc_list = [str(tagged_title.text)] + str(tagged_description.text).strip().splitlines() # remove trailing whitespace, then get list of lines\n course_dictionary[str(tagged_title.text)[:8]] = full_title_desc_list\n\n return course_dictionary",
"def parse_response(user_response):\n stop_words = stopwords.words(\"english\") + ['Green', \"Bay\", 'Packers']\n user_words = word_tokenize(user_response)\n updated = [word for word in user_words if word not in stop_words and word not in punctuation]\n wnl = WordNetLemmatizer()\n words = []\n\n for word in updated:\n word = wnl.lemmatize(word)\n if word in ['draft', 'scout', 'college', 'prospect', 'pick']:\n words.append(word)\n elif word in ['free', 'agency', 'agent', 'signing']:\n words.append(word)\n elif word in ['contract', 'negotiation']:\n words.append(word)\n\n return words",
"def parse_comments_html(advertise: Dict[str, Any]) -> Optional[List[str]]:\n if \"comments_html\" in advertise.keys():\n\n filtred_comments: str = advertise[\"comments_html\"][200::]\n\n tmp: List[str] = re.split(\"[ \\n\\t]{2,}\", filtred_comments)\n if '' in tmp:\n tmp.remove('')\n\n # Breaking comments\n master: List[List[str]] = []\n tmp_vec: List[str] = []\n for line in tmp:\n\n if re.search(\"de \\d{4,}\", line): # matches 'de 2018' that signals the end of comment\n master.append(tmp_vec)\n tmp_vec = []\n else:\n tmp_vec.append(line)\n\n # Cleaning comments\n for comment in master:\n if \"...\" in comment:\n comment.remove(\"...\")\n if \"O usuário contratou o serviço em\" in comment:\n comment.remove(\"O usuário contratou o serviço em\")\n\n return [\" \".join(m) for m in master]",
"def calc_comments(self):\n for comment in self.pull_request.get_comments():\n self._users.add(comment.user.login)\n lowercase_body = comment.body.lower()\n if \"protm\" in lowercase_body:\n self.num_protm += 1\n self.num_comments += 1\n if comment.body is not None:\n self.len_comments += len(comment.body)\n for reaction in comment.get_reactions():\n self._users.add(reaction.user.login)\n self.comment_reactions += 1",
"def test_get_whole_and_per_sentence_flair_sentiments():\n long_comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\",\n\n \"it's intended to make the polling places dangerous by contaminating the air inside with virus \"\n \"that can linger for hours\",\n\n \"simple, just create an unmasked line in a separate part of the location let them infect each \"\n \"other\"]\n get_whole_and_per_sentence_flair_sentiments(long_comments)",
"def entity_snippet(response):\n for result in response.results:\n e_set = extract_entities(result.summary)\n result.summary = ' '.join(e_set)\n return response",
"def get_cmt(l_data):\n\n starter = l_data[\"slcmt\"]\n return starter",
"def go_fetch(query):\n responses = []\n explained = \"\"\n global query_type\n global verbose\n global GLOSS\n global MAXIMUM\n\n if query_type == \"PRO\": explained = f\"You asked for the pronunciation of '{query['sp']}'.\"\n elif query_type == \"DEF\": explained = f\"You asked for the definition of '{query['sp']}'.\"\n else:\n # loop through the dictionary, one key at a time, and explain what each entry is for\n query_glossed = []\n for param in query:\n # if this one is max or md (metadata), skip it\n if param == \"max\" or param == \"md\": continue\n # it's not an accident that the keys in query are the same as the keys in GLOSS\n query_glossed.append(f\"{GLOSS[param]} {query[param]}\")\n # eg GLOSS has {\"sp\": \"are spelled like\"} and query has {\"sp\": \"dear\"}, then\n # explained[0] == f\"{GLOSS['sp']} {query['sp']} == \"are spelled like dear\"\n explained = \"You asked for words which \" + \" and \".join(query_glossed)\n\n # Let's set a default\n if \"max\" not in query: query[\"max\"] = MAXIMUM\n\n # there's a TON of stuff going on in this line\n datamuse = requests.get('https://api.datamuse.com/words',params=query)\n # first, the requests library's get() function \"urlencodes\" the url and parameters\n # e.g. if query == {\"ml\": \"ringing in the ears\"}, it becomes \"?ml=ringing+in+the+ears\"\n # next, it opens an http connection to datamuse.com, something like:\n # * Trying 54.225.209.164...\n # * Connected to api.datamuse.com (54.225.209.164) port 443 (#0)\n # then, it sends an http request which consists of a \"header\" and (optionally) a \"body\"\n # which looks something like this:\n #\n # GET https://api.datamuse.com/words?ml=ringing+in+the+ears\n # Connection: 'keep-alive'\n # Accept: */*\n # User-Agent: python-requests/2.18.4\n # Accept-Encoding: gzip, deflate\n # \n # and the datamuse API sends back a response which looks something like:\n # HTTP/1.1 200 OK\n # Cache-Control: no-transform, max-age=86400\n # Content-Type: application/json\n # Date: Fri, 02 Feb 2018 02:53:45 GMT\n # Vary: Accept-Encoding\n # Content-Length: 4634\n # Connection: keep-alive\n #\n # [{\"word\":\"tinnitus\",\"score\":51691,\"tags\":[\"syn\",\"n\"]},{\"word\":\"ring\",\". . . \n #\n # then the response is parsed into a python object\n # (sticks the headers in one variable, the body into another, etc)\n # and the object is returned from get() and we store it in \"datamuse\"\n # finally, we stick the response object into a list, like so:\n responses.append(datamuse)\n\n # If a definition is asked for, we'll use two APIs\n if query_type == \"DEF\":\n owlbot = requests.get(f\"https://owlbot.info/api/v2/dictionary/{query['sp']}\")\n responses.append(owlbot)\n\n # print out helpful info if the user asked for it\n if verbose: print(explained) # Plain english description of our query\n\n return responses",
"def get_mentions_from_body_and_issue_comments(pull_request_body, pull_request_comments):\n unique_mentions = set()\n unique_mentions.update(github_mention_regex.findall(pull_request_body))\n for comment in pull_request_comments:\n unique_mentions.update(github_mention_regex.findall(comment.body))\n\n return unique_mentions",
"def previous_comment_features(posts, previous_comment_file=os.path.join(os.environ['NOBULL_PATH'], 'user2comment.json'), max_per_user=10):\n with open(previous_comment_file, \"r\", encoding=\"utf8\") as f:\n d = f.readlines()[0]\n user2previouscomment_map = json.loads(d)\n \n X_previous_comments = []\n posts_dict = {}\n comments_added = []\n for post in posts:\n #n_comment = get_hostile_indices(post)[0] + 1\n n_comment = post['n_comments_observed']\n create_at = post[\"create_at\"]\n userlist = post[\"users\"][:n_comment]\n current_X_text_list = []\n for user in userlist:\n if user in user2previouscomment_map:\n previouscomment = user2previouscomment_map[user]\n int_key = {float(k):v for k, v in previouscomment.items()}\n n_per_user = 0\n for ts, tx in sorted(int_key.items(), reverse = True):\n if ts < int(create_at):\n current_X_text_list.append(tx)\n n_per_user += 1\n if n_per_user > max_per_user:\n break\n \n if len(current_X_text_list) == 0:\n current_X_text_list.append('blank_comment')\n comments_added.append(len(current_X_text_list))\n current_X_text = \" \".join(current_X_text_list) \n cleaned_text = cleanText(current_X_text)\n X_previous_comments.append(cleaned_text) \n X_hatebase, header_hatebase = hatebase(X_previous_comments)\n X_profane, header_profane = profaneLexicon(X_previous_comments)\n X_aggregation, header_w2v = w2v_aggregation_letters(X_previous_comments)\n \n vec = CountVectorizer(min_df=2)\n X = vec.fit_transform(X_previous_comments)\n \n X_overall = sparse.hstack([X, X_aggregation, X_hatebase, X_profane]).tocsr()\n \n header_vec = vec.get_feature_names()\n header = header_vec + header_w2v + header_hatebase + header_profane\n header_ = ['pre_c_'+ h for h in header]\n return X_overall, header_",
"def purify_comments(csv_file, keep_stops=False, POS=False, lemmatize=False, popular=0):\r\n\r\n df = pd.read_csv(csv_file)\r\n df = df.loc[df[\"author\"] != \"[deleted]\"] # trim out comments whose authors have deleted their accounts\r\n df = df.loc[df[\"score\"] != \"score\"] # this is an error in the code when building new csv_files from dask\r\n\r\n # extracts only the popular comments\r\n if popular > 0:\r\n df = df.loc[pd.to_numeric(df[\"score\"]) > popular]\r\n\r\n comments = df[\"body\"]\r\n del df # no need for this anymore, and it'll merely eat up memory\r\n\r\n nlp = en_core_web_sm.load()\r\n\r\n revised_comments = []\r\n for comment in comments.astype('unicode').values:\r\n comment = comment[1:] # remove the initial 'b' bytes-representation character\r\n comment = comment.encode(\"utf-8-sig\").decode(\"utf-8-sig\") # get rid of BOM character\r\n comment = comment.lower().replace(r\"\\n\", r\"\").replace(r'\"', r'')\r\n\r\n tokens = nlp(comment)\r\n\r\n # actual specification section\r\n for sent in tokens.sents:\r\n\r\n if POS: # conversion of comments to tokens/lemmas-POS tags\r\n if lemmatize:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n else:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n elif lemmatize: # just lemmatization\r\n if keep_stops:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n else: # nothing but removal of stop words (or not)\r\n if keep_stops:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n revised_comments.append(\" \".join(revised_tokens))\r\n\r\n return pd.Series(revised_comments)",
"def comments(self):\r\n return comments.ForumSuggestionComments(self)",
"def extract_info_from_video_comments(av):\n sex_list = []\n reply_list = []\n try:\n # maximum result per page is 49 (ps), sorting = 2 (热度排), pn 页数\n comment_url = f\"https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn={1}&type=1&oid={av}&ps={49}&sort={2}\"\n response = requests.get(comment_url).json()\n replies = response[\"data\"][\"replies\"]\n for reply_thread in replies:\n # each single reply thread contains child replie\n parent_msg = reply_thread[\"content\"][\"message\"]\n reply_list.append(parent_msg)\n sex_list.append(reply_thread[\"member\"][\"sex\"])\n children_replies = reply_thread.get(\"replies\")\n children_replies = children_replies if children_replies is not None else []\n for child_reply in children_replies:\n child_msg = child_reply[\"content\"][\"message\"]\n reply_list.append(child_msg)\n sex_list.append(child_reply[\"member\"][\"sex\"])\n return sex_list, reply_list\n except:\n return sex_list, reply_list",
"def test_get_whole_flair_sentiment():\n\n comments = [\"This was a really sucky movie. I will probably never go see this movie ever again. I am going to \"\n \"tell my whole family never to watch this movie. I very much enjoyed the special cameo in it \"\n \"though. I loved the plot line.\"]\n for x in comments:\n result_sum = get_whole_flair_sentiment(x)\n print(x)\n print('Whole comment sentiment:', result_sum)\n print()",
"def parseComments(data):\n global comments\n reviewBegins = '<div style=\"margin-left:0.5em;\">'\n reviewEnds = '<div style=\"padding-top: 10px; clear: both; width: 100%;\">'\n stars_line = 'margin-right:5px;'\n stars = re.compile('\\d+.\\d+ out of 5 stars')\n header_line = '<span style=\"vertical-align:middle;\"'\n helpful_line ='people found the following review helpful'\n helpful = re.compile('\\d+ of \\d+ people found the following review helpful')\n reviewText = '<span class=\"h3color tiny\">' # Actual review\n\n boundaries = commentsStartStopLineNmbr(data)\n for i in range(boundaries[0], boundaries[1] + 1):\n if reviewBegins in data[i]:\n curcomment = Comment()\n while reviewEnds not in data[i]:\n # Parse stars\n if stars_line in data[i]:\n stars_found = re.search(stars, data[i])\n if stars_found != None:\n curcomment.stars = stars_found.group()\n # Parse header\n elif header_line in data[i]:\n line = data[i]\n begin = line.find('<b>') + 3\n end = line.find('</b>')\n curcomment.header = line[begin : end]\n # Parse helpfulness\n elif helpful_line in data[i]:\n helpful_found = data[i].replace(\",\", \"\")\n helpful_found = re.search(helpful, helpful_found)\n if helpful_found != None:\n curcomment.helpful = helpful_found.group()\n # Parse body text\n elif reviewText in data[i]:\n i += 3\n if '<span class=\"small\"' in data[i]: # Yep, dirty trick :(\n i += 3\n data[i] = stripHtmlTags(data[i])\n curcomment.comment = re.sub(\"\\s+\", \" \", data[i])\n i += 1\n comments.append(curcomment.getonelinecomment())\n #comments.append(curcomment.__repr__())",
"def main():\r\n \r\n data_dir = Path.cwd().joinpath('OUTPUT')\r\n config_dir = Path.cwd().joinpath('CONFIG')\r\n \r\n # Load deduplicated comments\r\n data = utils.load(data_dir, 'student_comment_deduplicated')\r\n \r\n # Get the luis API url\r\n with open(config_dir.joinpath('luis_url.txt'), 'r') as f:\r\n luis_url = f.readline()\r\n \r\n request_api(\r\n data,\r\n luis_url,\r\n 1000,\r\n )"
] | [
"0.5736245",
"0.57142824",
"0.5411729",
"0.540935",
"0.54072136",
"0.5370507",
"0.5283631",
"0.5237631",
"0.5222202",
"0.52172023",
"0.52092206",
"0.5151312",
"0.5109762",
"0.5085416",
"0.5063539",
"0.5046727",
"0.49920905",
"0.49881884",
"0.4975726",
"0.49670464",
"0.49519247",
"0.4945787",
"0.49069357",
"0.49025923",
"0.48889568",
"0.488102",
"0.48792085",
"0.48630247",
"0.48574898",
"0.48537675"
] | 0.6131447 | 0 |
Extract the element, e.g. topScoringIntent, from the LUIS.api json response. s: the pd.Series of the unravelled json response. element: the elements, such as "sentiment" or "intent" | def get_luis_element(s: pd.Series, element: str) -> pd.Series:
result = 0
if element == 'intent':
result = s.apply(lambda x: x['topScoringIntent']['intent'])
elif element == 'entities':
result = s.apply(lambda x: x['entities'])
elif element == 'sentiment':
valence = s.apply(lambda x: x['sentimentAnalysis']['label']).map({'positive': 1,
'neutral': 0,
'negative': -1})
result = s.apply(lambda x: x['sentimentAnalysis']['score']) * valence
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_response(self, response):\n\t\tself.context = response['context']\n\t\ttext = response['output']['text']\n\t\tintents = response['intents'] #is a list, should filter\n\t\tif len(intents) > 0:\n\t\t\tintent = intents[0]['intent'] #get the intent of the message\n\t\telse:\n\t\t\tintent = \"\"\n\t\t\t\n\t\treturn str(text[0]), intent",
"def parse(self, message):\n resp = json.loads((self.send_api_request(message)).decode('utf-8'))\n\n nlu_response = NLUResponse()\n nlu_response.text = message\n intent_schema = IntentSchema()\n if resp[\"result\"][\"metadata\"]:\n intent_schema.name = resp[\"result\"][\"metadata\"][\"intentName\"]\n intent_schema.confidence = resp[\"result\"][\"score\"]\n else: # fallback if no intent is given by the nlu\n intent_schema.name = \"greet\"\n intent_schema.confidence = 0.0\n nlu_response.intent = intent_schema\n print(\"Recognized Intent by Dialogflow {}\".format(intent_schema.name ))\n\n pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(resp)\n\n try:\n nlu_response.entities = []\n entities = resp[\"result\"][\"parameters\"]\n resolved_query = resp[\"result\"][\"resolvedQuery\"]\n\n for key, value in entities.items():\n if value:\n entity_schema = EntitiesSchema()\n entity_schema.start = resolved_query.find(value)\n entity_schema.end = resolved_query.find(value) + len(value)\n entity_schema.entity = key\n entity_schema.value = value\n nlu_response.entities.append(entity_schema)\n #print(\"Key: {}, Value: {}\".format(key, value))\n except Exception as err:\n logging.warning('No Entites extracted {}'.format(err))\n\n schema = RasaNLUSchema()\n data, error = schema.dump(nlu_response)\n\n return data",
"def get_intent(self, text):\n payloads = self.get_payloads(text)\n with ThreadPoolExecutor(max_workers=2) as pool:\n responses = list(pool.map(self.post, payloads))\n\n for response in responses:\n if response.status_code == 503:\n raise RuntimeError(\n f'My natural language engine that helps me understand what you say is still loading. Please try again in {int(json.loads(response.content.decode(\"utf-8\"))[\"estimated_time\"])} seconds')\n return [json.loads(response.content.decode('utf-8'))['labels'][0] for response in responses]",
"def _unpack_sentiment_data(self):\n get_neg = lambda x: x.get('probability').get('neg')\n get_pos = lambda x: x.get('probability').get('pos')\n get_neutral = lambda x: x.get('probability').get('neutral')\n get_label = lambda x: x.get('label')\n self.dataframe['negative_sentiment'] = self.dataframe['sentiment'].map(get_neg)\n self.dataframe['positive_sentiment'] = self.dataframe['sentiment'].map(get_pos)\n self.dataframe['neutral_sentiment'] = self.dataframe['sentiment'].map(get_neutral)\n self.dataframe['sentiment_label'] = self.dataframe['sentiment'].map(get_label)",
"def extract_transcript(resp: str):\n if 'result' not in resp:\n raise ValueError({'Error non valid response from api: {}'.format(resp)})\n for line in resp.split(\"\\n\"):\n try:\n line_json = json.loads(line)\n out = line_json['result'][0]['alternative'][0]['transcript']\n return out\n except:\n continue",
"def response(row):\n return row['response']",
"def read_json(self):\n utterances, labels = [], []\n for log in self.log_json:\n for turn in log['turns']:\n utterance = turn['output']['transcript']\n label = turn['output']['dialog-acts'][0]['act']\n utterances.append(utterance)\n labels.append(label)\n\n return utterances, labels",
"def get_sentiment(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\treturn blob.sentiment[0]",
"def parse_get_responses():\n json_data = open('/Users/williamliu/GitHub/surveys/get_responses.json')\n loaded_data = json.load(json_data)\n test = json_normalize(loaded_data['data'])\n\n print type(test)\n print test.head()\n\n # Get first respondent's questions and answers back\n #print loaded_data['data'][1]['questions'][1]['question_id'] # Get respondent's question_id\n #print loaded_data['data'][1]['questions'][1]['answers'] # Get respondent's question_id",
"def get_predictions(payload):\n return sm_client.invoke_endpoint(EndpointName=EMBEDDING_MODEL_ENDPOINT_NAME, \n Body=payload,\n ContentType='application/json')",
"def _get_serieses(parsed_response: dict) -> list:\n serieses = parsed_response[\"message:GenericData\"][\"message:DataSet\"][\"generic:Series\"]\n if type(serieses) != list:\n serieses = [serieses]\n return serieses",
"def extract_impression_targeting(impression):\n targets = impression.get('matchedTargetingCriteria')\n if not targets:\n return []\n\n\n screenname = impression['advertiserInfo']['screenName']\n itime = impression['impressionTime']\n if impression.get('promotedTweetInfo'):\n tid = impression.get('promotedTweetInfo')['tweetId']\n else:\n tid = None\n\n tid = impression['promotedTweetInfo']['tweetId'] if impression.get('promotedTweetInfo') else None\n trendname = impression['promotedTrendInfo']['name'] if impression.get('promotedTrendInfo') else None\n\n\n return [{ 'advertiserScreenName': screenname,\n 'impressionTime': itime,\n 'type': t['targetingType'],\n 'value': t['targetingValue'] if t.get('targetingValue') else None,\n 'tweetId': tid,\n 'trendName': trendname,\n } for t in targets]",
"def predict(self, sentence, top=1):\n returnJson = self.classifier.predict(sentence.title(), top)\n return returnJson",
"def response_dict_to_objectives(self, response_dict):\n objectives = (response_dict[\"loglik\"],)\n return objectives",
"def extract ( self, response ):\n\t\tresponse = response.json()['response']\n\t\traworders = response['orderstatus']['order']\n\n\t\tif not isinstance(raworders, list):\n\t\t\traworders = [raworders]\n\n\t\torders = [ Order(fixml=x['fixmlmessage']) for x in raworders]\n\n\t\treturn orders",
"def compute_flatten_retweeted_status_attribute(row):\n retweeted_status_original_field_names = [\n 'created_at', 'id', 'full_text', 'in_reply_to_status_id', 'in_reply_to_user_id', 'in_reply_to_screen_name',\n 'retweet_count', 'favorite_count', 'lang', 'entities', 'user', 'coordinates', 'place']\n\n if not pd.isnull(row[\"retweeted_status\"]):\n series = pd.read_json(json.dumps(row[\"retweeted_status\"]), typ='series')\n return series[retweeted_status_original_field_names]\n row[retweeted_status_original_field_names] = np.NaN\n return row[retweeted_status_original_field_names]",
"def _parse_ts_response(self, response, prompt):\n \n if prompt != SBE37Prompt.COMMAND:\n raise InstrumentProtocolException('ts command not recognized: %s', response)\n \n sample = None\n for line in response.split(SBE37_NEWLINE):\n sample = self._extract_sample(line, True)\n if sample:\n break\n \n if not sample: \n raise SampleException('Response did not contain sample: %s' % repr(response))\n \n return sample",
"def predictionSentiment(company):\n #change the key for the API in here. This is the AlchemyDataNews\n KEY = '2190f450728492113ce4e5b880a72eefbea73308'\n alchemy_data_news = AlchemyDataNewsV1(api_key=KEY)\n timeBegin ='now-2d'\n timeEnd = 'now'\n company_query = '|text=' + company + ',type=company|'\n results = alchemy_data_news.get_news_documents(\n start=timeBegin,\n end=timeEnd,\n return_fields=['enriched.url.title',\n 'enriched.url.entities.entity.sentiment.type',\n 'enriched.url.entities.entity.sentiment.score'\n ],\n query_fields={'q.enriched.url.enrichedTitle.entities.entity': company_query})\n r = json.dumps(results, indent=2)\n f = open(\"/home/kid/Github/Oracle/watson/jsonp2.json\", 'w')\n f.write(str(r))",
"def response(sentence, model, user_id='123', context={}, show_details=False):\n # Load intents\n data_path = os.path.join(\"data/\", \"data_intents.json\")\n with open(data_path) as json_data:\n intents = json.load(json_data)\n\n # Classify sentence\n results = classify(sentence, model)\n # if we have a classification then find the matching intent tag\n if results:\n # loop as long as there are matches to process\n while results:\n for i in intents['intents']:\n # find a tag matching the first result\n if i['tag'] == results[0][0]:\n # set context for this intent if necessary\n if 'context_set' in i:\n if show_details: print('context:', i['context_set'])\n context[user_id] = i['context_set']\n\n # check if this intent is contextual and applies to this user's conversation\n if not 'context_filter' in i or \\\n (user_id in context and 'context_filter' in i and i['context_filter'] == context[user_id]):\n if show_details: print ('tag:', i['tag'])\n # a random response from the intent\n if i[\"tag\"] == \"goodbye\":\n print(random.choice(i['responses']))\n sys.exit()\n else:\n return print(random.choice(i['responses']))\n\n results.pop(0)",
"def get_sentiment(ticker_symbol, page=None):\n if page is None:\n page = scrape_page(BASE_URL + ticker_symbol)\n\n #get strings\n bullish_sentiment = get_bullish_sentiment(ticker_symbol, page)\n bearish_sentiment = get_bearish_sentiment(ticker_symbol, page)\n price = get_price(ticker_symbol, page)\n name = get_name(ticker_symbol, page)\n\n title = get_title(ticker_symbol, page)\n article = get_article(ticker_symbol, page)\n link = get_link(ticker_symbol, page)\n\n my_trader = Robinhood()\n logged_in = my_trader.login(username=username, password=password)\n description = my_trader.get_fundamentals(ticker_symbol)\n news = my_trader.get_news(ticker_symbol)\n\n #see strings for verification\n #print(bullish_sentiment);\n #print(bearish_sentiment);\n\n #find digits in string\n bull=int(''.join(list(filter(str.isdigit, bullish_sentiment))))\n bear=int(''.join(list(filter(str.isdigit, bearish_sentiment))))\n #price=int(''.join(list(filter(str.isdigit, price))))\n #print(bull)\n #print(bear)\n\n\n\n return Response({\"bullish\": bull, \"bearish\": bear, \"price\":price, \"name\":name, \"description\":description, \"news\":news})\n\n '''\n if bull>bear:\n print(\"bull!\")\n import example\n else:\n return None\n '''\n #if bullish_sentiment:\n # return bullish_sentiment, get_bearish_sentiment(ticker_symbol, page)\n\n #else:\n # return None",
"def get_skill(self, utterance, lang=\"en-us\"):\n intent = self.get_intent(utterance, lang)\n if not intent:\n return None\n # theoretically skill_id might be missing\n if intent.get(\"skill_id\"):\n return intent[\"skill_id\"]\n # retrieve skill from munged intent name\n if intent.get(\"intent_name\"): # padatious + adapt\n return intent[\"name\"].split(\":\")[0]\n if intent.get(\"intent_type\"): # adapt\n return intent[\"intent_type\"].split(\":\")[0]\n return None # raise some error here maybe? this should never happen",
"def getSentiment(s):\n headers = {\"Ocp-Apim-Subscription-Key\" : \"4c28d3a67a12442cad6666a3200c49f5\",\n \"Content-Type\" : \"application/json\", \"Accept\" : \"application/json\"}\n url = \"https://westus.api.cognitive.microsoft.com/text/analytics/v2.0/sentiment\"\n json = {\"documents\": [{\"language\": \"en\", \"id\" : \"1\"}]}\n json['documents'][0]['text'] = s\n sentiment = r.post(url, headers = headers, json = json)\n sentiment = j.loads(sentiment.text)\n return sentiment['documents'][0]['score']",
"def get_whole_flair_sentiment(comment):\n # print(comment[:int(len(comment) * .2)])\n text = flair.data.Sentence(comment)\n # print('before predict',len(text.labels), text.labels)\n flair_sentiment.predict(text)\n # print('after predict',len(text.labels), text.labels)\n if len(text.labels) == 1:\n value = text.labels[0].to_dict()['value']\n if value == 'POSITIVE':\n whole_comment_sentiment = text.to_dict()['labels'][0]['confidence']\n else:\n whole_comment_sentiment = -(text.to_dict()['labels'][0]['confidence'])\n\n whole_comment_sentiment = round(whole_comment_sentiment, 6)\n\n return whole_comment_sentiment\n else:\n return 0",
"def _handle_search_results(self, response: TextResponse) -> ScrapyYelpItem:\n\n # get yConfig\n pattern = re.compile(r\"\"\"\\n\\s+yConfig\\s+=\\s+\"\"\", re.MULTILINE | re.DOTALL)\n soup = BeautifulSoup(response.text, \"html.parser\")\n script = soup.find(\"script\", text=pattern)\n myjson = script.get_text()\n # remove start pattern (js assignment)\n s = re.sub(pattern, '', myjson)\n # remove html (parser problems)\n s = re.sub('<[^<]+?>', '', s)\n # remove last semi colon (end-of-data)\n s = s[0:s.rfind(';')]\n json_object = json.loads(s,strict=False)\n\n keys = [x for x in json_object[\"js_display\"][\"hovercard_data\"] if x.isnumeric()]\n # first part is the hovercard data - which contains most of the aggregate biz informative\n # such as total_reviews and summary_score\n df_hovercard_data = pd.DataFrame()\n for x in keys:\n tmpdf = json_normalize(json_object[\"js_display\"][\"hovercard_data\"][x])\n df_hovercard_data = df_hovercard_data.append(tmpdf,ignore_index=True)\n\n df_hovercard_data = df_hovercard_data.set_index(\"result_number\")\n df_hovercard_data.index = df_hovercard_data.index.astype(int)\n # second part is the resourceid which might be useful later on, not sure if this is used at all, but\n # it serves as a good example of how to join to other \"parts\" of the nested json structure and flatten it\n df_markers = json_normalize(json_object[\"js_display\"][\"map_state\"][\"markers\"])\n df_markers = df_markers[df_markers['resourceType'] == 'business'].loc[:, [\"url\",\"resourceId\",\"hovercardId\",\"label\",\"location.latitude\",\"location.longitude\",]]\n df_markers = df_markers.set_index('label')\n df_markers.index = df_markers.index.astype(int)\n\n # combine data into a single dataframe which will eventually be written out by our pipeline\n df = df_hovercard_data.join(df_markers)\n\n # at this point we want to also scrape the indvidual biz listing for the menu, syntax is verbose here\n\n\n ## deubg write to file\n #json_formatted = json.dumps(json_object, indent=2)\n # print(json_formatted)\n # with open(\"files/\"+'blah.json', 'wb') as file:\n # file.write(str.encode(json_formatted))\n\n \"\"\"\n\n Here is a smample of what the yConfig object looks like:\n\n json_object.keys() ====>\n ['cookies', 'gaConfig', 'adjustAndroidPaidTrafficUrl', 'webviewFlow', 'enabledSitRepChannels',\n isWebviewRequest', 'js_display', 'isLoggedIn', 'uaInfo', 'isSitRepEnabled', 'comscore', 'isBugsnagEnabled',\n 'support', 'deprecatedEncryptedYUV', 'vendorExternalURLs', 'smartBannerFallbackActive', 'version',\n 'recaptchaV3PublicKey', 'googlePlacesUrl', 'redesignActive', 'currentBaseLang', 'isClientErrorsEnabled',\n 'uniqueRequestId', 'yelpcodeTemplateVersion', 'appInstallDialogEnabled', 'smartBannerPersistent',\n 'imageUrls', 'siteUrl', 'referrer', 'webviewInfo', 'cookieDomain', 'recaptchaPublicKey',\n 'send_user_agent_to_ga', 'pGifUrl']\n\n\n json_object[\"js_display\"].keys() ===>\n ['polyglot_translations', 'raq_links', 'locale', 'hovercard_data', 'is_first_ad_hovercard_opened',\n 'zoom', 'centerLng', 'map_state', 'advertising_business_id_list', 'centerLat', 'pager']\n\n json_object[\"js_display\"][\"hovercard_data\"] ==>\n '1': {'resource_id': None,\n 'result_number': 1,\n 'biz': {'alias': 'lou-malnatis-pizzeria-chicago',\n 'review_count': 5998,\n 'name': \"Lou Malnati's Pizzeria\",\n 'rating': 4.07785928642881,\n 'url': 'https://m.yelp.com/biz/lou-malnatis-pizzeria-chicago',\n 'price': '$$',\n 'categories': 'Pizza, Italian, Sandwiches',\n 'distance': '2.5 mi'},\n 'lat': 41.890357,\n 
'lng': -87.633704,\n 'type': 'natural'},\n '2': {'resource_id': None,\n ....\n\n\n json_object[\"js_display\"][\"map_state\"][\"markers\"] ===>\n [{'resourceType': 'business',\n 'url': '/biz/lou-malnatis-pizzeria-chicago',\n 'resourceId': '8vFJH_paXsMocmEO_KAa3w',\n 'label': '1',\n 'shouldOpenInNewTab': False,\n 'location': {'latitude': 41.890357, 'longitude': -87.633704},\n 'key': 1,\n 'hovercardId': 'Q6nXAEw3UuAVFSztE4lPnA',\n 'icon': {'name': 'business',\n 'anchorOffset': [12, 32],\n 'activeOrigin': [24, 0],\n 'scaledSize': [48, 320],\n 'regularUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'size': [24, 32],\n 'activeUri': 'https://media0.fl.yelpcdn.com/mapmarkers/yelp_map_range/20160801/1/10.png',\n 'regularOrigin': [0, 0]}},\n {'resourceType': 'business',\n 'url': '/biz/pequods-pizzeria-chicago',\n 'resourceId': 'DXwSYgiXqIVNdO9dazel6w',\n 'label': '2',\n 'shouldOpenInNew\n ...\n\n \"\"\"\n #print(json_object[\"js_display\"][\"hovercard_data\"])\n\n\n\n return df",
"def rank_pre_extract(self, mention_data, predictions):\n mdata = pd.DataFrame(mention_data)\n\n\n\n\n pass",
"def get_answer(response):\n knowledge_base = load_knowledge_base()\n\n answer = {}\n intents, entities = [], []\n for k, v in response[\"entities\"].items():\n if k == \"intent\":\n print(\"Intent:\", str(v))\n intents.append(v[0][\"value\"])\n else:\n print(\"Entities:\", str(v))\n if \"suggested\" not in list(v[0].keys()):\n entities.append(v[0][\"value\"])\n else:\n print(\"Ignoring entity suggestion...\")\n\n intent = intents[0] if len(intents) > 0 else \"\"\n entity = entities[0] if len(entities) > 0 else \"\"\n\n answer_found = False\n for answer_object in knowledge_base[\"answers\"]:\n if answer_object[\"intent\"] == intent and answer_object[\"entity\"] == entity:\n print(\"Answer found:\", str(answer_object))\n answer = answer_object\n answer_found = True\n\n if not answer_found:\n print(\"Answer not found for intent \\\"{}\\\" and entities \\\"{}\\\"\".format(\n intent, str(entities)))\n\n return answer",
"def extract_details(df):\n df_RSinfo = df[['pentamer', 'Step details', 'RouteScore details',\n 'Isolated', 'RouteScore', 'log(RouteScore)']]\n\n last3_rxns = ['Buchwald_deprotection', 'Buchwald', 'SNAr']\n for rxn in last3_rxns:\n df_RSinfo[rxn] = [next(step for step in row[-3:] if step['reaction'] == rxn) for row in df['Step details']]\n\n for key in df_RSinfo['RouteScore details'][0].keys():\n df_RSinfo[key] = [row[key] for row in df['RouteScore details']]\n\n return df_RSinfo",
"def parse_lti_2_0_result_json(self, json_str):\r\n try:\r\n json_obj = json.loads(json_str)\r\n except (ValueError, TypeError):\r\n msg = \"Supplied JSON string in request body could not be decoded: {}\".format(json_str)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # the standard supports a list of objects, who knows why. It must contain at least 1 element, and the\r\n # first element must be a dict\r\n if type(json_obj) != dict:\r\n if type(json_obj) == list and len(json_obj) >= 1 and type(json_obj[0]) == dict:\r\n json_obj = json_obj[0]\r\n else:\r\n msg = (\"Supplied JSON string is a list that does not contain an object as the first element. {}\"\r\n .format(json_str))\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # '@type' must be \"Result\"\r\n result_type = json_obj.get(\"@type\")\r\n if result_type != \"Result\":\r\n msg = \"JSON object does not contain correct @type attribute (should be 'Result', is {})\".format(result_type)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # '@context' must be present as a key\r\n REQUIRED_KEYS = [\"@context\"] # pylint: disable=invalid-name\r\n for key in REQUIRED_KEYS:\r\n if key not in json_obj:\r\n msg = \"JSON object does not contain required key {}\".format(key)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n # 'resultScore' is not present. If this was a PUT this means it's actually a DELETE according\r\n # to the LTI spec. We will indicate this by returning None as score, \"\" as comment.\r\n # The actual delete will be handled by the caller\r\n if \"resultScore\" not in json_obj:\r\n return None, json_obj.get('comment', \"\")\r\n\r\n # if present, 'resultScore' must be a number between 0 and 1 inclusive\r\n try:\r\n score = float(json_obj.get('resultScore', \"unconvertable\")) # Check if float is present and the right type\r\n if not 0 <= score <= 1:\r\n msg = 'score value outside the permitted range of 0-1.'\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n except (TypeError, ValueError) as err:\r\n msg = \"Could not convert resultScore to float: {}\".format(err.message)\r\n log.info(\"[LTI] {}\".format(msg))\r\n raise LTIError(msg)\r\n\r\n return score, json_obj.get('comment', \"\")",
"def predict_intent():\n\n start_time = time()\n request.json[\"request_id\"] = uuid.uuid4().hex\n app.logger.info(f\"Request: {request.json['request_id']}. Processing request '/recommend': {request.json}\")\n\n # Prime filters\n uniq_id = request.json.get('uniq_id')\n if not uniq_id:\n message = f'Request: {request.json[\"request_id\"]}. Missing uniq_id in request'\n delta = time() - start_time\n app.logger.error(f\"{message} Elapsed time: {delta} secs\")\n return jsonify(message=message), 404\n \n\n result, code = recommender.get_recommendation(uniq_id)\n\n delta = time() - start_time\n app.logger.info(f\"Request: {request.json['request_id']}. Endpoint response '/recommend': {result}. Elapsed time: {delta} secs\")\n return jsonify(result), code",
"def parse(self, text):\n\n goal = NLUGoal()\n goal.text = str(text)\n self._nlu_client.send_goal_and_wait(goal)\n result = self._nlu_client.get_result()\n\n #no intent found, return None \n if result.intentName == \"\":\n return None, None, None\n else:\n #parse\n slot_info = json.loads(result.slot_json_string)\n return result.intentName, result.probability, slot_info"
] | [
"0.5717776",
"0.55163515",
"0.5311165",
"0.52464586",
"0.5228804",
"0.52271056",
"0.52187705",
"0.52037966",
"0.5090575",
"0.50477314",
"0.5046301",
"0.5021355",
"0.500153",
"0.49620596",
"0.49437866",
"0.4942395",
"0.4940954",
"0.49293894",
"0.4871135",
"0.4847061",
"0.48190314",
"0.48040915",
"0.47906458",
"0.47862527",
"0.4783378",
"0.47795853",
"0.47603855",
"0.47506627",
"0.47484866",
"0.47459525"
] | 0.6376146 | 0 |
Load the deduplicated comments; get the luis url; call the API in batches; aggregate the batched pickled files with 'luis_result' in the filename; extract the top scoring intent, entities and the sentiment of each row; save each of the three series as separate pickle files | def main():
data_dir = Path.cwd().joinpath('OUTPUT')
config_dir = Path.cwd().joinpath('CONFIG')
# Load deduplicated comments
data = utils.load(data_dir, 'student_comment_deduplicated')
# Get the luis API url
with open(config_dir.joinpath('luis_url.txt'), 'r') as f:
luis_url = f.readline()
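    # Call the LUIS API in batches of 1000 comments (the third argument is the chunk size)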
request_api(
data,
luis_url,
1000,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_api(\r\n student_comments: pd.Series, \r\n url: str, \r\n chunk_size: int = 50\r\n ) -> pd.Series:\r\n \r\n for i, chunk in enumerate(chunks(student_comments, chunk_size)):\r\n print(f'Processing batch {i} of size {len(chunk)}')\r\n \r\n response = chunk.apply(lambda x: requests.get(f'{url}&q={x}') if x is not None else None)\r\n response.to_pickle(Path.cwd().joinpath('OUTPUT').joinpath(f'luis_result_{str(i).zfill(4)}'))",
"def get_data(args, load_extracted=True):\n path = args.data_path1\n tokenizer_en = tokener()\n table = str.maketrans(\"\", \"\", '\"#$%&\\'()*+-/:;<=>@[\\\\]^_`{|}~')\n if load_extracted:\n df = load_pickle(\"df_unencoded.pkl\")\n else:\n logger.info(\"Extracting CNN stories...\")\n df = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df.iloc[idx][\"body\"] = body\n df.iloc[idx][\"highlights\"] = highlights\n \n if len(args.data_path2) > 2:\n path = args.data_path2\n logger.info(\"Extracting dailymail stories...\")\n df1 = pd.DataFrame(index=[i for i in range(len(os.listdir(path)))], columns=[\"body\", \"highlights\"])\n for idx, file in tqdm(enumerate(os.listdir(path)), total=len(os.listdir(path))):\n with open(os.path.join(path, file), encoding=\"utf8\") as csv_file:\n csv_reader = csv.reader(csv_file)\n text = \"\"\n for row in csv_reader:\n text += \"\".join(t for t in row)\n highlights = re.search(\"@highlight(.*)\", text).group(1)\n highlights = highlights.replace(\"@highlight\", \". \")\n body = text[:re.search(\"@highlight\", text).span(0)[0]]\n df1.iloc[idx][\"body\"] = body\n df1.iloc[idx][\"highlights\"] = highlights\n df = pd.concat([df, df1], ignore_index=True)\n del df1\n \n save_as_pickle(\"df_unencoded.pkl\", df)\n logger.info(\"Dataset length: %d\" % len(df)) \n \n if (args.level == \"word\") or (args.level == \"char\"):\n logger.info(\"Tokenizing and cleaning extracted text...\")\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en), \\\n axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n \n logger.info(\"Limiting to max features length, building vocab and converting to id tokens...\")\n df = df[df[\"body_length\"] <= args.max_features_length]\n v = vocab(level=args.level)\n v.build_vocab(df[\"body\"])\n v.build_vocab(df[\"highlights\"])\n df.loc[:, \"body\"] = df.apply(lambda x: v.convert_w2idx(x[\"body\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: v.convert_w2idx(x[\"highlights\"]), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], 0, 2), axis=1)\n save_as_pickle(\"df_encoded.pkl\", df)\n save_as_pickle(\"vocab.pkl\", v)\n \n elif args.level == \"bpe\":\n encoder = Encoder(vocab_size=args.bpe_vocab_size, pct_bpe=args.bpe_word_ratio, word_tokenizer=tokenizer_en.tokenize)\n df.loc[:, \"body\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"body\"], table, tokenizer_en, clean_only=True), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: clean_and_tokenize_text(x[\"highlights\"], table, tokenizer_en, clean_only=True), \\\n axis=1)\n logger.info(\"Training bpe, this might take a while...\")\n text_list = list(df[\"body\"])\n text_list.extend(list(df[\"highlights\"]))\n 
encoder.fit(text_list); del text_list\n \n logger.info(\"Tokenizing to ids and limiting to max features length...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform([x[\"body\"]])), axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform([x[\"highlights\"]])), axis=1)\n df.loc[:, \"body_length\"] = df.apply(lambda x: len(x['body']), axis=1)\n df.loc[:, \"highlights_length\"] = df.apply(lambda x: len(x['highlights']), axis=1)\n df = df[(df[\"body_length\"] > 0) & (df[\"highlights_length\"] > 0)]\n df = df[df[\"body_length\"] <= args.max_features_length]\n \n '''\n logger.info(\"Converting tokens to ids...\")\n df.loc[:, \"body\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"body\"])))),\\\n axis=1)\n df.loc[:, \"highlights\"] = df.apply(lambda x: next(encoder.transform(list(\" \".join(t for t in x[\"highlights\"])))),\\\n axis=1)\n '''\n df.loc[:, \"highlights\"] = df.apply(lambda x: pad_sos_eos(x[\"highlights\"], encoder.word_vocab[\"__sos\"], encoder.word_vocab[\"__eos\"]),\\\n axis=1)\n \n save_as_pickle(\"df_encoded.pkl\", df)\n encoder.save(\"./data/vocab.pkl\")\n return df",
"def get_dataset(FOLD, AR_PERCENTAGE, d_type='yelp', AUTHOR='inf', POST='inf'):\n global AR_TYPE\n\n # dataset = loader.load(d_type, AUTHOR, POST)\n first_dataset = loader.unimportant_load(AUTHOR, POST * FOLD, AR_TYPE)\n datasets = first_dataset.fold_to(FOLD)\n \n for i in range(0, len(datasets)):\n dataset = datasets[i]\n dataset.divide_ar_ir(AR_PERCENTAGE)\n texts = []\n\n # check if we have this dataset already calculated.\n \n ir_filename = 'processed/' + get_ir_identifier(d_type, i, AUTHOR, POST)\n ar_filename = 'processed/' + get_ar_identifier(d_type, i, AUTHOR, POST)\n\n ir_features = None\n if os.path.isfile(ir_filename):\n print '@get: we have the file', ir_filename, 'and going to load it.'\n with open(ir_filename, 'rb') as fp:\n ir_features = pickle.load(fp)\n \n ar_features = None\n if os.path.isfile(ar_filename):\n print '@get: we have the file', ar_filename, 'and going to load it.'\n with open(ar_filename, 'rb') as fp:\n ar_features = pickle.load(fp)\n\n\n if ir_features is not None:\n for author in dataset.authors:\n dataset.features[author][-1] = ir_features[author]\n\n if ar_features is not None:\n for author in dataset.authors:\n dataset.features[author][:-1] = ar_features[author]\n\n for author in dataset.authors:\n if ar_features is None:\n texts.extend(dataset.get_ars(author))\n if ir_features is None: \n texts.append(dataset.get_ir(author))\n\n print '@getting_features, #dataset'#, index_fold\n pool = Pool(processes=NUMBER_OF_CORES)\n it = pool.imap(get_dataset_features, texts)\n pool.close()\n pool.join()\n\n print '@getting_features FINISHED, adding features to dictionary'\n for author in dataset.authors:\n # for each ar + ir, get back the features\n if ar_features is None:\n for i in range(0, dataset.get_ar_size(author)):\n dataset.put_feature(author, i, it.next())\n if ir_features is None:\n dataset.put_feature(author, dataset.get_ar_size(author), it.next())\n\n if ir_features is None:\n print '@get: we DONOT have the file', ir_filename, 'is going to be created and saved.'\n with open(ir_filename, 'wb') as fp:\n tmp = dict()\n for key, value in dataset.features.iteritems():\n tmp[key] = value[-1]\n pickle.dump(tmp, fp)\n\n if ar_features is None:\n print '@get: we DONOT have the file', ar_filename, 'is going to be created and saved.'\n with open(ar_filename, 'wb') as fp:\n tmp = defaultdict(list)\n for key, value in dataset.features.iteritems():\n tmp[key] = value[:-1]\n pickle.dump(tmp, fp)\n\n return datasets",
"def getData():\n pathToData = '../../data.json'\n data = loadData(pathToData) # load Data\n tweets = [data[i]['text'] for i in range(len(data))] # tweets before preprocessing\n processed_tweets = [process(item, False) for item in tweets] # tweets after preprocessing\n processed_tweets_with_stopwords = [process(item, False, True) for item in\n tweets] # tweets after preprocessing without removing stopwords\n filtered_tweets_result = tweetsEntitiesMapping(processed_tweets)\n filtered_tweets = filtered_tweets_result[1] # tweets after filtering by target entity\n filtered_tweets_with_stopwords = tweetsEntitiesMapping(processed_tweets_with_stopwords)[\n 1] # tweets without removing stopwords after filtering by target entity\n tweets_entities_mapping = filtered_tweets_result[2] # map each tweet to its target entity\n ids_list = filtered_tweets_result[3] # keep track of the ids of filtered tweets from the complete list of tweets\n\n '''\n This part of code is for POS tagging the filtered tweets.\n Since the process of POS tagging is time consuming here, we used pickling to save the result once and use it\n multiple times later.\n \n # tagged_tweets = [posTag(sentence) for sentence in tweets]\n # with open(\"../resources/taggedTweets.txt\", \"wb\") as fp: # Pickling\n # pickle.dump(tagged_tweets, fp)\n '''\n\n with open(\"../resources/taggedTweets.txt\", \"rb\") as fp: # Unpickling\n tagged_tweets = pickle.load(fp) # load tagged tweets\n\n aspects = aspectsFromTaggedTweets(tagged_tweets) # extract the list of Aspects using a frequency-based method\n aspects += ['job', 'wirtschaft', 'politik']\n print(aspects)\n tweets_aspects_mapping = tweetAspectMapping(tagged_tweets, aspects) # map each tweet to its target aspect\n opinions = [\n Opinion(data[ids_list[i]]['user_screeen_name'], data[ids_list[i]]['created_at'], data[ids_list[i]]['text']) for\n i in range(len(\n filtered_tweets))] # create a list of Opinion object that has opinionHolder, postDate and text attributes\n for key in tweets_entities_mapping:\n opinions[key].setTargetEntity(tweets_entities_mapping[key]) # set the target entity for each tweet\n opinions[key].setTargetAspect(tweets_aspects_mapping[key]) # set the target aspect for each tweet\n\n return opinions, aspects, filtered_tweets, tagged_tweets, filtered_tweets_with_stopwords",
"def sina_weibo_emotion4(root):\n start = time.time()\n task_path = assert_dirs(root, 'chinese_reviews_sina_weibo_emotion4')\n url_json = 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4.json'\n url_txt = ['https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4_01.txt',\n 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4_02.txt',\n 'https://raw.githubusercontent.com/Hourout/datasets/master/nlp/chinese_reviews_sina_weibo_emotion4/chinese_reviews_sina_weibo_emotion4_03.txt',]\n rq.json(url_json, path_join(task_path, 'chinese_reviews_sina_weibo_emotion4.json'))\n data = pd.DataFrame()\n for url in url_txt:\n s = requests.get(url).content\n data = pd.concat([data, pd.read_csv(io.StringIO(s.decode('utf-8')))])\n data.to_csv(path_join(task_path, 'chinese_reviews_sina_weibo_emotion4.txt'), index=False)\n print('chinese_reviews_sina_weibo_emotion4 dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))\n return task_path",
"def previous_comment_features(posts, previous_comment_file=os.path.join(os.environ['NOBULL_PATH'], 'user2comment.json'), max_per_user=10):\n with open(previous_comment_file, \"r\", encoding=\"utf8\") as f:\n d = f.readlines()[0]\n user2previouscomment_map = json.loads(d)\n \n X_previous_comments = []\n posts_dict = {}\n comments_added = []\n for post in posts:\n #n_comment = get_hostile_indices(post)[0] + 1\n n_comment = post['n_comments_observed']\n create_at = post[\"create_at\"]\n userlist = post[\"users\"][:n_comment]\n current_X_text_list = []\n for user in userlist:\n if user in user2previouscomment_map:\n previouscomment = user2previouscomment_map[user]\n int_key = {float(k):v for k, v in previouscomment.items()}\n n_per_user = 0\n for ts, tx in sorted(int_key.items(), reverse = True):\n if ts < int(create_at):\n current_X_text_list.append(tx)\n n_per_user += 1\n if n_per_user > max_per_user:\n break\n \n if len(current_X_text_list) == 0:\n current_X_text_list.append('blank_comment')\n comments_added.append(len(current_X_text_list))\n current_X_text = \" \".join(current_X_text_list) \n cleaned_text = cleanText(current_X_text)\n X_previous_comments.append(cleaned_text) \n X_hatebase, header_hatebase = hatebase(X_previous_comments)\n X_profane, header_profane = profaneLexicon(X_previous_comments)\n X_aggregation, header_w2v = w2v_aggregation_letters(X_previous_comments)\n \n vec = CountVectorizer(min_df=2)\n X = vec.fit_transform(X_previous_comments)\n \n X_overall = sparse.hstack([X, X_aggregation, X_hatebase, X_profane]).tocsr()\n \n header_vec = vec.get_feature_names()\n header = header_vec + header_w2v + header_hatebase + header_profane\n header_ = ['pre_c_'+ h for h in header]\n return X_overall, header_",
"def purify_comments(csv_file, keep_stops=False, POS=False, lemmatize=False, popular=0):\r\n\r\n df = pd.read_csv(csv_file)\r\n df = df.loc[df[\"author\"] != \"[deleted]\"] # trim out comments whose authors have deleted their accounts\r\n df = df.loc[df[\"score\"] != \"score\"] # this is an error in the code when building new csv_files from dask\r\n\r\n # extracts only the popular comments\r\n if popular > 0:\r\n df = df.loc[pd.to_numeric(df[\"score\"]) > popular]\r\n\r\n comments = df[\"body\"]\r\n del df # no need for this anymore, and it'll merely eat up memory\r\n\r\n nlp = en_core_web_sm.load()\r\n\r\n revised_comments = []\r\n for comment in comments.astype('unicode').values:\r\n comment = comment[1:] # remove the initial 'b' bytes-representation character\r\n comment = comment.encode(\"utf-8-sig\").decode(\"utf-8-sig\") # get rid of BOM character\r\n comment = comment.lower().replace(r\"\\n\", r\"\").replace(r'\"', r'')\r\n\r\n tokens = nlp(comment)\r\n\r\n # actual specification section\r\n for sent in tokens.sents:\r\n\r\n if POS: # conversion of comments to tokens/lemmas-POS tags\r\n if lemmatize:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.lemma_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n else:\r\n if keep_stops:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [\"{}-{}\".format(token.orth_, token.tag_) for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n elif lemmatize: # just lemmatization\r\n if keep_stops:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.lemma_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n else: # nothing but removal of stop words (or not)\r\n if keep_stops:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_punct]\r\n else:\r\n revised_tokens = [token.orth_ for token in sent\r\n if not token.is_stop and not token.is_punct\r\n and not token.orth_ == \"n't\" and not token.orth_ == \"'s\"]\r\n\r\n revised_comments.append(\" \".join(revised_tokens))\r\n\r\n return pd.Series(revised_comments)",
"def save_csv(file_name, N_rows=None, N_rows_start = 0, file_url_list='artworks_urls_full.pkl', processes = 8):\n\n with open('../data/'+file_url_list, 'rb') as f:\n if N_rows is None:\n url_list = pickle.load(f)\n else:\n url_list = pickle.load(f)[N_rows_start:N_rows]\n pool = Pool(processes=processes)\n\n start = time.time()\n\n #header for problematic_urls file\n filename = 'problematic_urls.txt'\n f = open(filename, 'a')\n f.write('run on ' + time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.localtime()) +':\\n')\n f.close()\n\n # go through url list\n data = pool.map(job, url_list)\n list_data = [i[0] for i in data if i[0] != None]\n missed_urls = [i[1] for i in data if i[1] != None]\n # print(\"Successful:\",len(list_data))\n # print(\"Errors: \",len(missed_urls))\n del data\n\n # redo all missed urls until no one is left behind\n while len(missed_urls) >0:\n # print('another round')\n data = pool.map(job, missed_urls)\n list_data_add = [i[0] for i in data if i[0] != None]\n missed_urls = [i[1] for i in data if i[1] != None]\n del data\n list_data.extend(list_data_add)\n del list_data_add\n # print(\"Successful:\",len(list_data))\n # print(\"Errors: \",len(missed_urls))\n\n end = time.time()\n pd.DataFrame(list_data).to_csv(file_name,index=False)\n\n pool.close()\n\n return end-start",
"def Classify_Data(self):\n\n lem = lemmatization()\n\n # Get Mongo Client\n client = MongoClient()\n db = client['allMovies']\n collection = db['Movies']\n\n # Path to folder containing the training model files\n path = self.path\n\n # Get the list of doc ids trained\n trained_docs = []\n\n # Mongo queries to retrieve Horror, Romance and Crime movies\n qr1 = self.collection.find({\"content.genres.name\": \"Horror\"})\n qr2 = self.collection.find({\"content.genres.name\": \"Romance\"})\n qr3 = self.collection.find({\"content.genres.name\": \"Crime\"})\n qr4 = self.collection.find({\"content.genres.name\": \"Comedy\"})\n print(\"111\")\n print(qr3)\n\n myfile = open('doc_ids.pkl', 'rb')\n trained_docs = pickle.load(myfile)\n # Get 100 Horror, Romance and Crime movies each, which are not in the trained data set\n\n horr = []\n i = 0\n for rec in qr1:\n if rec['_id'] not in trained_docs:\n i = i + 1\n horr.append(rec)\n\n if i >= 333:\n break\n rom = []\n i = 0\n for rec in qr2:\n if rec['_id'] not in trained_docs:\n i = i + 1\n rom.append(rec)\n\n if i >= 333:\n break\n\n crime = []\n i = 0\n for rec in qr3:\n if rec['_id'] not in trained_docs:\n i = i + 1\n crime.append(rec)\n\n if i >= 334:\n break\n comedy = []\n i = 0\n for rec in qr4:\n if rec['_id'] not in trained_docs:\n i = i + 1\n comedy.append(rec)\n\n if i >= 334:\n break\n\n # Combine the query results\n query_results = []\n for rec in horr:\n query_results.append(rec)\n for rec in rom:\n query_results.append(rec)\n for rec in crime:\n query_results.append(rec)\n print(query_results)\n # Data to be classified\n test_data = []\n\n # Genres of records to be classified\n categories = []\n a = 0\n for movie in query_results:\n test_data.append(movie['content']['overview'])\n for genre in movie['content']['genres']:\n a = a + 1\n if ((genre['name'] == 'Horror') or (genre['name'] == 'Romance') or (genre['name'] == 'Crime') or (\n genre['name'] == 'Comedy') and a <= 80):\n categories.append(genre['name'])\n\n # Lists of training models and vectorizers\n models = [\"SVM\", \"LOGISTIC REGRESSION\", \"GAUSSIAN NB\",\n \"MULTINOMIAL NB\", \"BERNOULLI NB\", \"RANDOM FOREST\", \"BAGGING\", \"GRADIENT\",\n \"Voting\", \"Voting With Weights\"]\n\n vectorizers = [\"COUNT VECTORIZER\", \"TFIDF VECTORIZER\"]\n\n # Load dictionary containing terms appearing in genres\n dictionary = joblib.load(path + \"_Genre_Dictionary\")\n\n vec_1 = feature_extraction.text.CountVectorizer(vocabulary=dictionary)\n vec_2 = feature_extraction.text.TfidfVectorizer(vocabulary=dictionary)\n vec_list = [vec_1, vec_2]\n\n # List to store the classification stats for each model\n stats = []\n # Generate results\n for i in range(0, len(models)):\n for j in range(0, len(vectorizers)):\n time0 = time.process_time()\n model = joblib.load(path + models[i] + \"_\" + vectorizers[j].replace('-', '') + \".pkl\")\n vec = vec_list[j]\n Y = vec.fit_transform(test_data).toarray()\n print(\"y\", Y)\n predicted_genres = model.predict(Y)\n\n k = 0\n horror = 0\n romance = 0\n crime = 0\n\n # Keeps track of correct predictions\n y_correct = []\n\n # Keeps track of incorrect predictions\n y_predicted = []\n for pred in predicted_genres:\n if (categories[k] == \"Horror\"):\n if (pred == \"Horror\"):\n horror += 1\n y_predicted.append(0)\n elif (pred == \"Romance\"):\n y_predicted.append(1)\n else:\n y_predicted.append(2)\n y_correct.append(0)\n elif (categories[k] == \"Romance\"):\n if (pred == \"Romance\"):\n romance += 1\n y_predicted.append(1)\n elif (pred == \"Horror\"):\n 
y_predicted.append(0)\n else:\n y_predicted.append(2)\n y_correct.append(1)\n elif (categories[k] == \"Crime\"):\n if (pred == \"Crime\"):\n crime += 1\n y_predicted.append(2)\n elif (pred == \"Horror\"):\n y_predicted.append(0)\n else:\n y_predicted.append(1)\n y_correct.append(2)\n k = k + 1\n\n # Print results\n score = precision_recall_fscore_support(y_correct, y_predicted, average='weighted')\n # print(\"Number of records classified per second = %d\" % (round((1000/(time.process_time()-time0)),3)))\n print(\"________SCORES__________\")\n print(\"MODEL : \" + models[i])\n print(\"VECTORIZER : \" + vectorizers[j])\n print(\"Horror : %d/333\" % (horror))\n print(\"Romance : %d/333\" % (romance))\n print(\"Crime : %d/334\" % (crime))\n print(\"Precision : %.5f\" % (score[0]))\n print(\"Recall : %.5f\" % (score[1]))\n print(\"F(1) Score : %.5f\" % ((score[1] * score[0] / (score[1] + score[0])) * 2))\n print(\"F(W) Score : %.5f\" % (score[2]))\n print(\"Accuracy : %.5f\" % accuracy_score(y_correct, y_predicted))\n # print(confusion_matrix(y_correct, y_predicted))\n\n dic = {}\n dic['model'] = models[i].title()\n dic['vectorizer'] = vectorizers[j][:-11]\n dic['horror'] = str(horror) + '/' + '333'\n dic['romance'] = str(romance) + '/' + '333'\n dic['crime'] = str(crime) + '/' + '334'\n dic['precision'] = round(score[0], 3)\n dic['Recall'] = round(score[1], 3)\n dic['F(1) Score'] = round(((score[1] * score[0] / (score[1] + score[0])) * 2), 3)\n dic['F(W) Score'] = round(score[2], 3)\n dic['accuracy'] = round(accuracy_score(y_correct, y_predicted), 3)\n stats.append(dic)\n # Store stats in file\n joblib.dump(stats, path + \"classification_results.txt\")\n\n print(\"Done\")\n return stats",
"def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def parse_file(file_path, batch_size=100, how_many=-1):\n db = MySQLdb.connect(**login_info)\n\n # From http://stackoverflow.com/questions/3942888/unicodeencodeerror-latin-1-codec-cant-encode-character\n db.set_character_set('utf8')\n\n cursor = db.cursor()\n\n # From http://stackoverflow.com/questions/3942888/unicodeencodeerror-latin-1-codec-cant-encode-character\n cursor.execute('SET NAMES utf8;')\n cursor.execute('SET CHARACTER SET utf8;')\n cursor.execute('SET character_set_connection=utf8;')\n\n print \"Dropping indexes and clearing tables\"\n drop_indexes(cursor)\n clear_tables(cursor)\n row_count = 0\n\n list_of_yros = []\n print \"Processing Review File\"\n\n start_time = time.time()\n update_time = start_time\n with open(file_path) as the_file:\n for a_line in the_file:\n json_object = json.loads(a_line)\n list_of_yros.append(YelpReview(json_object))\n row_count += 1\n if row_count % batch_size == 0:\n persist_list_o_review_objects(list_of_yros, cursor)\n list_of_yros = []\n\n if row_count % 1000 == 0:\n total_time = (time.time() - start_time)\n time_since_last_post = time.time() - update_time\n update_time = time.time()\n print \"Up to row {:} in Review file. Total Time: {:.4g}; TimeSinceLastPost:{:.4g}\" \\\n .format(row_count, total_time, time_since_last_post)\n\n if how_many > 0 and row_count % how_many == 0:\n break\n\n # catch the stragglers\n persist_list_o_review_objects(list_of_yros, cursor)\n\n print \"Creating indexes\"\n create_indexes(cursor)\n\n db.commit()\n db.close()\n print \"Review File Complete. {0} rows processed\".format(row_count)",
"def fetch_data():\n for category in CHEATSHEETS.items():\n subprocess.call(f'curl -o {PATH}{category[0] + \".csv\"} {category[1]}', shell=True)\n\n index = -1\n for filename in os.listdir(PATH):\n for idx, row in pd.read_csv(PATH + filename, on_bad_lines='skip').replace(np.nan, '').iterrows():\n name = row['Model']\n url = REDIRECT_URL + name.lower()\n category = filename.split('.')[0]\n featurizers = row['Acceptable Featurizers'].split(' ') if row['Acceptable Featurizers'] != '' else []\n backends = ['PyTorch' if item in {\"PTorch\", \"Torch\", \"PyTorch \"} else item for item in row['Backend'].split('/')]\n types = row['Type'] if filename != 'general.csv' else row['Classifier/Regressor']\n types = types.split('/') if filename == 'material.csv' else types.split('/ ')\n index += 1\n\n backend_list.append(backends)\n type_list.append(types)\n featurizer_list.append(featurizers)\n model_list.append(Model(name, url, category, featurizers, backends, types, index))",
"def get_data_loaders_3sentences():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_3generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+3):]\n #history_complete.append(history)\n if len(history) > 6:\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def load_davis_dataset():\n trainn_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'folds', 'train_fold_setting1.txt')))\n train_fold = []\n for e in zip(*trainn_fold):\n for ee in e:\n train_fold.append(ee)\n #train_fold = [ee for e in trainn_fold for ee in e]\n test_fold = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'folds', 'test_fold_setting1.txt')))\n ligands = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'ligands_can.txt')),\n object_pairs_hook=OrderedDict)\n proteins = json.load(\n open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'proteins.txt')),\n object_pairs_hook=OrderedDict)\n \n affinity = pickle.load(open(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'Y'), \n 'rb'), encoding='latin1')\n smiles_lst, protein_lst = [], []\n\n for k in ligands.keys():\n smiles = ligands[k]\n smiles_lst.append(smiles)\n for k in proteins.keys():\n protein_lst.append(proteins[k])\n\n affinity = [-np.log10(y / 1e9) for y in affinity]\n affinity = np.asarray(affinity)\n \n os.makedirs(os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'processed'), exist_ok=True)\n train_test_dataset = []\n for split in ['train', 'test']:\n split_dir = os.path.join('dataset', 'regression', 'benchmark', 'DAVIStest', 'processed', split)\n os.makedirs(split_dir, exist_ok=True)\n fold = train_fold if split == 'train' else test_fold\n rows, cols = np.where(np.isnan(affinity) == False)\n rows, cols = rows[fold], cols[fold]\n \n data_lst = [[] for _ in range(1)]\n for idx in range(len(rows)):\n data = {}\n data['smiles'] = smiles_lst[rows[idx]]\n data['protein'] = protein_lst[cols[idx]]\n af = affinity[rows[idx], cols[idx]]\n data['aff'] = af\n\n data_lst[idx % 1].append(data)\n random.shuffle(data_lst)\n train_test_dataset.append(data_lst[0])\n return train_test_dataset",
"def load_one_batch(adapter, nipt_results_path:str):\n \n batch_data = parse_batch_file(nipt_results_path)\n for sample in batch_data:\n mongo_sample = build_sample(sample)\n adapter.add_or_update_document(mongo_sample, adapter.sample_collection)\n mongo_batch = build_batch(batch_data[0])\n adapter.add_or_update_document(mongo_batch, adapter.batch_collection)",
"def download_and_prepare():\n # set source twitter IDS\n user = 759251 # @CNN\n news1 = 807095 # @nytimes\n news2 = 1367531 # @FoxNews\n news3 = 1652541 # @Reuters\n news4 = 3108351 # @WSJ\n news5 = 2467791 # @washingtonpost\n\n # grab all tweets from user\n userHistory = []\n tu = threading.Thread(target=get_all_tweets, args=(user, userHistory))\n # get all tweets from context users\n news1History = []\n t1 = threading.Thread(target=get_all_tweets, args=(news1, news1History))\n news2History = []\n t2 = threading.Thread(target=get_all_tweets, args=(news2, news2History))\n news3History = []\n t3 = threading.Thread(target=get_all_tweets, args=(news3, news3History))\n news4History = []\n t4 = threading.Thread(target=get_all_tweets, args=(news4, news4History))\n news5History = []\n t5 = threading.Thread(target=get_all_tweets, args=(news5, news5History))\n\n # run threads\n threads = [tu, t1, t2, t3, t4, t5]\n for th in threads:\n th.start()\n for th in threads:\n th.join()\n\n # clean urls of all tweets\n allTweets = [userHistory, news1History, news2History, news3History, news4History, news5History]\n for i in range(len(allTweets)):\n allTweets[i] = cleanse_tweets(allTweets[i])\n\n # construct context dict for train and test\n context_dict, context_dict_valid = group_by_date(allTweets)\n\n ##############################################################################\n # some of the following code adapted from tensorflow example file data_utils #\n ##############################################################################\n\n # set paths for storing data\n data_dir = \"tweet_data\"\n train_dir = \"train_dir\"\n train_path = os.path.join(train_dir, \"train\")\n dev_path = os.path.join(train_dir, \"test1\")\n\n # paths for storing initial data\n user_file_path = os.path.join(data_dir, \"data.user\")\n context_file_path = os.path.join(data_dir, \"data.context\")\n\n # move data into expected directories/make data available\n data_to_file(context_dict, context_dict_valid, allTweets, user_file_path, context_file_path, dev_path + \".user\", dev_path + \".context\")\n\n user_path = os.path.join(data_dir, \"vocab%d.user\" % vocab_size)\n context_path = os.path.join(data_dir, \"vocab%d.context\" % vocab_size)\n create_vocabulary(context_path, context_file_path, vocab_size, None) # None: user default tokenizer\n create_vocabulary(user_path, user_file_path, vocab_size, None)\n\n # Create token ids for the training data.\n user_train_ids_path = train_path + (\".ids%d.user\" % vocab_size)\n context_train_ids_path = train_path + (\".ids%d.context\" % vocab_size)\n data_to_token_ids(user_file_path, user_train_ids_path, user_path, None)\n data_to_token_ids(context_file_path, context_train_ids_path, context_path, None)\n\n print(\"made it\")\n\n # Create token ids for the development data.\n user_dev_ids_path = dev_path + (\".ids%d.user\" % vocab_size)\n context_dev_ids_path = dev_path + (\".ids%d.context\" % vocab_size)\n data_to_token_ids(dev_path + \".user\", user_dev_ids_path, user_path, None)\n data_to_token_ids(dev_path + \".context\", context_dev_ids_path, context_path, None)\n\n # TODO return paths to directories of input and output\n return (user_train_ids_path, context_train_ids_path,\n context_dev_ids_path, user_dev_ids_path,\n context_path, user_path)",
"def merge_tweets_v3():\n filename_list = []\n for filename in os.listdir('.'):\n if filename.startswith(\"trecis\") and filename.endswith(\".json\"):\n filename_list.append(filename)\n filename_list = sorted(filename_list)\n\n formatted_tweet_list_train = []\n formatted_tweet_list_test = []\n count_inconsistent = 0\n for filename in filename_list:\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n content = json.loads(line)\n formatted_content = json.loads(content['allProperties']['srcjson'])\n formatted_content['full_text'] = formatted_content['text']\n\n if 'entities' not in formatted_content:\n count_inconsistent += 1\n entities = dict()\n entities[\"symbols\"] = formatted_content['symbolEntities']\n entities[\"urls\"] = formatted_content['urlEntities']\n entities[\"hashtags\"] = formatted_content['hashtagEntities']\n entities[\"user_mentions\"] = formatted_content['userMentionEntities']\n entities[\"media\"] = formatted_content['mediaEntities']\n # To make the \"start\" and \"end\" API consistent with others\n for entity_name in [\"hashtags\", \"user_mentions\", \"urls\"]:\n for iEntity, entity in enumerate(entities[entity_name]):\n entity['indices'] = [entity['start'], entity['end']]\n entities[entity_name][iEntity] = entity\n formatted_content['entities'] = entities\n # Some other API convert\n formatted_content['retweet_count'] = formatted_content['retweetCount']\n formatted_content['favorite_count'] = formatted_content['favoriteCount']\n formatted_content['user']['favourites_count'] = formatted_content['user']['favouritesCount']\n formatted_content['user']['followers_count'] = formatted_content['user']['followersCount']\n formatted_content['user']['statuses_count'] = formatted_content['user']['statusesCount']\n formatted_content['user']['geo_enabled'] = formatted_content['user']['isGeoEnabled']\n formatted_content['user']['verified'] = formatted_content['user']['isVerified']\n formatted_content['user']['listed_count'] = formatted_content['user']['listedCount']\n formatted_content['user']['friends_count'] = formatted_content['user']['friendsCount']\n\n if filename.startswith(\"trecis2019-B\"):\n formatted_tweet_list_test.append(formatted_content)\n else:\n formatted_tweet_list_train.append(formatted_content)\n\n if count_inconsistent > 0:\n print(\"There are {} tweets have inconsistent API about the entities, \"\n \"and they are automatically converted.\".format(count_inconsistent))\n print(\"There are {0} tweets for training and {1} tweets for testing\".format(\n len(formatted_tweet_list_train), len(formatted_tweet_list_test)))\n\n outfile = '../data/all-tweets.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_train:\n fout.write(json.dumps(tweet) + '\\n')\n\n outfile = '../data/all-tweets-2019.txt'\n with open(outfile, 'w', encoding='utf8') as fout:\n for tweet in formatted_tweet_list_test:\n fout.write(json.dumps(tweet) + '\\n')",
"def import_datasets(snli_path):\n print('extract data from snli directory..')\n train = dict(); dev = dict(); test = dict()\n gold_labels = {'entailment': 0, 'neutral': 1, 'contradiction': 2}\n\n for file_type in ['train', 'dev', 'test']:\n path = os.path.join(snli_path, 'snli_1.0_{}.jsonl'.format(file_type))\n with open(path) as file:\n data = [json.loads(line) for line in file]\n eval(file_type)['premise'] = [entry['sentence1'] for entry in data if entry['gold_label'] != '-']\n eval(file_type)['hypothesis'] = [entry['sentence2'] for entry in data if entry['gold_label'] != '-']\n g_labels = np.array([gold_labels[entry['gold_label']] for entry in data if entry['gold_label'] != '-'])\n eval(file_type)['label'] = g_labels\n print('extraction process was finished successfully!')\n return train, dev, test",
"def _download_librispeech(self) -> None:\n base_url = \"http://www.openslr.org/resources/12\"\n train_dir = \"train-960\"\n\n if not os.path.exists(self.dataset_path):\n os.mkdir(self.dataset_path)\n\n for part in self.librispeech_parts:\n self.logger.info(f\"Librispeech-{part} download..\")\n url = f\"{base_url}/{part}.tar.gz\"\n wget.download(url, self.dataset_path)\n\n self.logger.info(f\"Un-tarring archive {self.dataset_path}/{part}.tar.gz\")\n tar = tarfile.open(f\"{self.dataset_path}/{part}.tar.gz\", mode=\"r:gz\")\n tar.extractall()\n tar.close()\n os.remove(f\"{self.dataset_path}/{part}.tar.gz\")\n\n self.logger.info(\"Merge all train packs into one\")\n\n if not os.path.exists(os.path.join(self.dataset_path, self.librispeech_dir)):\n os.mkdir(os.path.join(self.dataset_path, self.librispeech_dir))\n if not os.path.exists(os.path.join(self.dataset_path, self.librispeech_dir, train_dir)):\n os.mkdir(os.path.join(self.dataset_path, self.librispeech_dir, train_dir))\n\n for part in self.librispeech_parts[:-3]: # dev, test\n shutil.move(\n os.path.join(self.librispeech_dir, part),\n os.path.join(self.dataset_path, self.librispeech_dir, part),\n )\n\n for part in self.librispeech_parts[-3:]: # train\n path = os.path.join(self.librispeech_dir, part)\n subfolders = os.listdir(path)\n for subfolder in subfolders:\n shutil.move(\n os.path.join(path, subfolder),\n os.path.join(self.dataset_path, self.librispeech_dir, train_dir, subfolder),\n )",
"def get_data_loaders_1sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1_sentence_final_generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 3:\n history_chatbot = history[1]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def fetch(url, header_path, id, ip, dbase, targets_table):\n # url = 'http://esimbad/testGSAV7/reslabo?FENID=resLaboPatDitep&NIP={}' \\\n # '&STARTDATE={}&ENDDATE={}'\n\n # header_path = '~/workspace/data/biology/header.csv'\n # constant names specific to our database\n KEY1 = 'id'\n KEY2 = 'NIP'\n C1J1 = 'C1J1'\n\n header = pd.read_csv(header_path, sep=';', encoding='latin1').columns\n\n\n engine = get_engine(id, ip, dbase)\n\n df_ids = sql2df(engine, targets_table)[[KEY1, 'nip', C1J1]]\n df_ids.rename({'nip': KEY2}, inplace=True, axis=1)\n df_ids['patient_id'] = df_ids[KEY1]\n\n cols = [KEY2, 'Analyse', 'Resultat', 'Date prelvt']\n df_res = pd.DataFrame(data=None, columns=cols)\n\n for index, row in df_ids.iterrows():\n nip = row[KEY2].replace(' ', '')\n patient_id = row['patient_id']\n c1j1_date = row[C1J1].date()\n start_date = c1j1_date - timedelta(weeks=8)\n\n c1j1 = str(c1j1_date).replace('-', '')\n start = str(start_date).replace('-', '')\n\n req = requests.get(url.format(nip, start, c1j1))\n values = BeautifulSoup(req.content, 'html.parser').body.text\n\n new_df = pd.read_csv(StringIO(values), sep=';', header=None,\n index_col=False, names=header)\n new_df = new_df.loc[:, cols + ['LC']] # remove LC\n\n # normalize nip\n new_df[KEY2] = row[KEY2]\n # new_df[KEY2] = new_df[KEY2].map(str)\n # new_df[KEY2] = [nip[:4] + '-' + nip[4:] for nip in new_df[KEY2]]\n\n new_df.drop('LC', axis=1, inplace=True)\n\n df_res = pd.concat([df_res, new_df], axis=0,\n sort=False, ignore_index=True)\n\n return df_res",
"def get_data_loaders_4sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"]\n #history_complete.append(history)\n if len(history_splitted) > (len(persona)-1):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def extract_json_to_files(input_dir,output_dir):\n files={}\n files['train']='train-v1.1.json'\n files['dev']='dev-v1.1.json'\n\n for file in files:\n filename=os.path.join(input_dir,files[file])\n with open(filename,'r',encoding='utf-8') as data_file:\n examples = []\n dataset=json.load(data_file)\n count_total=total_exs(dataset)\n count_mapping_problem=0\n count_token_problem=0\n count_ansspan_problem=0\n count_examples=0\n for article_id in tqdm(range(len(dataset['data'])), desc=\"Preprocessing {}\".format(file)):\n article_paragraph=dataset['data'][article_id]['paragraphs']\n for paragraph_id in range(len(article_paragraph)):\n context=article_paragraph[paragraph_id]['context']\n context=context.replace(\"''\",'\"').replace(\"``\",'\"')\n context = context.replace('\\u3000', ' ').replace('\\u202f',' ').replace('\\u2009', ' ')#.replace(\"'\",\"'\")\n context=context.replace('\\-',' ')\n context_tokens=tokenize_sequence(context)\n context=context.lower()\n qas=article_paragraph[paragraph_id]['qas']\n charloc2wordloc=get_char_word_loc_mapping(context, context_tokens)\n if charloc2wordloc is None:\n count_mapping_problem+=len(qas)\n continue\n for qa in qas:\n question=qa['question'].lower()\n question_tokens=tokenize_sequence(question)\n\n ans_text=qa['answers'][0]['text'].lower()\n ans_text=ans_text.replace('\\u3000', ' ').replace('\\u202f', ' ').replace('\\u2009', ' ')\n ans_start_loc=qa['answers'][0]['answer_start']\n if qa['id'] in ['5706baed2eaba6190074aca5','57269c73708984140094cbb5','57269c73708984140094cbb7','572a11661d04691400779721','572a11661d04691400779722','572a11661d04691400779723','572a11661d04691400779724','572a11661d04691400779725','572a2cfc1d0469140077981b','572a3a453f37b319004787e9','572a84d3f75d5e190021fb3c']:\n ans_start_loc+=1\n if qa['id'] in ['572a5df77a1753140016aedf','572a5df77a1753140016aee0','572a84d3f75d5e190021fb38','572a84d3f75d5e190021fb39','572a84d3f75d5e190021fb3a','572a84d3f75d5e190021fb3b','572a85df111d821400f38bad','572a85df111d821400f38bae','572a85df111d821400f38baf','572a85df111d821400f38bb0']:\n ans_start_loc+=2\n if qa['id'] in ['572a5df77a1753140016aee1','572a5df77a1753140016aee2']:\n ans_start_loc+=3\n if qa['id'] in ['57286bf84b864d19001649d6','57286bf84b864d19001649d5']:\n ans_start_loc-=1\n if qa['id'] in ['5726bee5f1498d1400e8e9f3','5726bee5f1498d1400e8e9f4']:\n ans_start_loc-=2\n ans_end_loc=ans_start_loc+len(ans_text)\n\n if context[ans_start_loc:ans_end_loc]!=ans_text:\n count_ansspan_problem+=1\n continue\n ans_start_wordloc = charloc2wordloc[ans_start_loc][1] # answer start word loc\n ans_end_wordloc = charloc2wordloc[ans_end_loc-1][1] # answer end word loc\n assert ans_start_wordloc <= ans_end_wordloc\n\n ans_tokens = context_tokens[ans_start_wordloc:ans_end_wordloc + 1]\n if \"\".join(ans_tokens) != \"\".join(ans_text.split()):\n count_token_problem += 1\n #print(ans_text)\n #print(ans_tokens)\n continue # skip this question/answer pair\n examples.append((' '.join(context_tokens),' '.join(question_tokens),' '.join(ans_tokens),' '.join([str(ans_start_wordloc),str(ans_end_wordloc)])))\n print(\"Number of (context, question, answer) triples discarded due to char -> token mapping problems: \", count_mapping_problem)\n print(\"Number of (context, question, answer) triples discarded because character-based answer span is unaligned with tokenization: \",count_token_problem)\n print(\"Number of (context, question, answer) triples discarded due character span alignment problems (usually Unicode problems): \",count_ansspan_problem)\n 
print(\"Processed %i examples of total %i\\n\" % (len(examples), len(examples)+count_mapping_problem+count_token_problem+count_ansspan_problem))\n indices = list(range(len(examples)))\n np.random.shuffle(indices)\n with open(os.path.join(output_dir,file+'.context'),'w',encoding='utf-8') as context_file, \\\n open(os.path.join(output_dir,file+'.question'),'w',encoding='utf-8') as question_file, \\\n open(os.path.join(output_dir,file+'.answer'),'w',encoding='utf-8') as answer_file, \\\n open(os.path.join(output_dir,file+'.span'),'w',encoding='utf-8') as span_file:\n for i in indices:\n (context,question,answer,span)=examples[i]\n context_file.write(context+'\\n')\n question_file.write(question+'\\n')\n answer_file.write(answer+'\\n')\n span_file.write(span+'\\n')",
"def create_dataset(json_data_filepath, dataset_filepath, drop_irrelevant_tweets):\n # Stupidity check.\n check_for_preexisting_output_file(dataset_filepath)\n\n global unknown_company_count_global, non_english_count_global\n log.info(f'\\tloading raw tweets from {json_data_filepath}')\n\n count = 0\n include_header = True\n\n # Load/save the file in chunks.\n for df_chunk in pd.read_json(json_data_filepath, orient='records', lines=True, chunksize=100):\n\n # Modify these to determine what to export to CSV/JSON.\n required_fields = ['retweeted_derived', 'company_derived', 'text_derived', # \"tweet_quoted_status_id\",\n 'tweet_url_link_derived', 'multiple_companies_derived_count', \"company_derived_designation\",\n 'tweet_text_length_derived', \"spaCy_language_detect_all_tweets\",\n \"user_description_text_length\", # \"polyglot_lang_detect_all_tweets\"\n ] + tweet_object_fields + user_object_fields + entities_object_fields\n\n # These fields are exported to a separate CSV/JSON file to cut down on file size.\n extra_fields = [\"tweet_id\"] + retweeted_status_object_fields\n\n # Rename main Tweet object fields.\n df_chunk[tweet_object_fields] = df_chunk[original_tweet_object_field_names]\n\n # FIXME - KeyError: ('quoted_status_id', 'occurred at index 0') - debug the issue.\n # df_chunk[\"tweet_quoted_status_id\"] = df_chunk.apply(rename_column, axis=1)\n\n # Extract Tweet \"user\" object fields.\n df_chunk[user_object_fields] = df_chunk.apply(compute_user_series, axis=1)\n\n # Determine the user profile description text length.\n df_chunk[\"user_description_text_length\"] = df_chunk.apply(\n lambda x: compute_user_description_text_length(x) if (pd.notnull(x[\"user_description\"])) else 0, axis=1)\n\n # Extract Tweet \"entities\" fields.\n df_chunk[\"tweet_entities_expanded_urls\"] = df_chunk.apply(compute_expanded_urls, axis=1)\n df_chunk['tweet_entities_hashtags'] = df_chunk.apply(compute_hashtags, axis=1)\n df_chunk[\"tweet_entities_user_mentions_id\"] = df_chunk.apply(compute_user_mentions_id, axis=1)\n df_chunk[\"tweet_entities_user_mentions_name\"] = df_chunk.apply(compute_user_mentions_name, axis=1)\n df_chunk[\"tweet_entities_user_mentions_screen_name\"] = df_chunk.apply(compute_user_mentions_screen_name, axis=1)\n df_chunk[\"tweet_entities_symbols\"] = df_chunk.apply(compute_symbols, axis=1)\n\n # Create/update/infer fields. (original extracted/derived fields)\n df_chunk['retweeted_derived'] = df_chunk.apply(compute_retweet, axis=1)\n df_chunk['text_derived'] = df_chunk.apply(compute_full_text, axis=1)\n df_chunk['company_derived'] = df_chunk.apply(compute_company, axis=1)\n df_chunk['tweet_url_link_derived'] = df_chunk.apply(compute_url_link, axis=1)\n\n # Count the # of companies each Tweet is associated with.\n df_chunk['multiple_companies_derived_count'] = \\\n df_chunk.apply(compute_number_of_associated_companies, axis=1)\n\n # Determine whether Tweet is associated with \"company_name\" or \"multiple\" companies.\n df_chunk[\"company_derived_designation\"] = df_chunk.apply(compute_company_designation, axis=1)\n\n # Compute Tweet text length.\n df_chunk[\"tweet_text_length_derived\"] = df_chunk.apply(compute_text_length, axis=1)\n\n # Extract Tweet object \"retweeted_status\" object fields.\n df_chunk[retweeted_status_object_fields] = df_chunk.apply(compute_flatten_retweeted_status_attribute, axis=1)\n\n # Flatten nested fields in \"retweeted_status_user\". 
FIXME - non-functional.\n # df_chunk[retweeted_status_user_object_fields] = df_chunk.apply(\n # compute_flatten_retweeted_status_user_attributes, axis=1)\n\n # Determine the Tweet text's language using spaCy natural language processing library. (note: slow)\n df_chunk[\"spaCy_language_detect_all_tweets\"] = df_chunk.apply(\n lambda x: spacy_language_detection(x) if (pd.notnull(x[\"tweet_full_text\"])) else \"none\", axis=1)\n\n # Remove irrelevant tweets (non-English or unknown-company).\n if drop_irrelevant_tweets:\n df_chunk = df_chunk[\n ((df_chunk['company_derived'] != 'none') &\n (df_chunk['tweet_lang'].str.startswith('en') |\n df_chunk['spaCy_language_detect_all_tweets'].str.startswith('en')\n ))\n ]\n\n # Write each chunk to the combined dataset file.\n df_chunk[required_fields].to_csv(f\"{dataset_filepath}.csv\", index=False, quoting=csv.QUOTE_NONNUMERIC,\n mode='a', header=include_header)\n\n # Write select attributes within each chunk to a separate dataset file to reduce file size.\n df_chunk[extra_fields].to_csv(f\"{dataset_filepath}-extra.csv\", index=False,\n quoting=csv.QUOTE_NONNUMERIC, mode='a', header=include_header)\n\n # Print a progress message.\n count += df_chunk.shape[0]\n # Only include the header once, at the top of the file.\n include_header = False\n log.info(f'\\t\\tprocessed {count} records...')\n\n # Debug purposes - test on a small subset by setting to small chunk size and breaking out of loop.\n break\n\n # Drop duplicate rows/examples/Tweets.\n df_full = pd.read_csv(f\"{dataset_filepath}.csv\", sep=',', encoding=\"utf-8\")\n df_full.drop_duplicates(inplace=True)\n # df_full.dropna(how=\"all\")\n df_full.to_csv(f\"{dataset_filepath}.csv\",\n index=False, header=True, quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')\n df_full.to_json(f\"{dataset_filepath}.json\", orient='records', lines=True)\n\n df_extra = pd.read_csv(f\"{dataset_filepath}-extra.csv\", sep=',', encoding=\"utf-8\")\n df_extra.drop_duplicates(inplace=True)\n df_extra.to_csv(f\"{dataset_filepath}-extra.csv\",\n index=False, header=True, quoting=csv.QUOTE_NONNUMERIC, encoding='utf-8')\n df_extra.to_json(f\"{dataset_filepath}-extra.json\", orient='records', lines=True)\n\n log.info(f'\\tsaved the dataset to {dataset_filepath}'\n f'\\n\\t\\tunknown company count: {unknown_company_count_global}'\n f'\\n\\t\\tnon-English count: {non_english_count_global}'\n )",
"def get_dldata(filepath, dlTrainCorpusPath, dlTestCorpusPath, seed=2018, batch_size=16):\r\n\tf = open(\"record/synthetic and academic datasets/testcases_train.pkl\",'rb') #get the testcase ids of train sets and test sets\r\n\ttestcases += pickle.load(f) \r\n\tf.close()\r\n\r\n\tf = open(\"record/synthetic and academic datasets/testcases_test.pkl\",'rb')\r\n\ttestcases += pickle.load(f)\r\n\tf.close()\r\n\t\r\n print(\"produce train dataset...\") \r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n train_set = [[], [], [], [], [], []]\r\n for folder_train in folders_train[int(i*len(folders_train)/N) : int((i+1)*len(folders_train)/N)]:\r\n if not folder_train in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_train), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_train)):\r\n f = open(filepath + folder_train + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n train_set[n].append(data[n])\r\n train_set[-1].append(folder_train+\"/\"+filename)\r\n f_train = open(dlTrainCorpusPath + \"train_\" + str(i)+ \"_0818.pkl\", 'wb')\r\n pickle.dump(train_set, f_train)\r\n f_train.close()\r\n\r\n del train_set \r\n gc.collect() \r\n\r\n print(\"\\nproduce test dataset...\")\r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n test_set = [[], [], [], [], [], []]\r\n for folder_test in folders_test[int(i*len(folders_test)/N) : int((i+1)*len(folders_test)/N)]:\r\n if not folder_test in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_test), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_test)):\r\n f = open(filepath + folder_test + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n test_set[n].append(data[n])\r\n test_set[-1].append(folder_test+\"/\"+filename)\r\n \r\n f_test = open(dlTestCorpusPath + \"test_\" + str(i)+ \"_0124.pkl\", 'wb')\r\n pickle.dump(test_set, f_test)\r\n f_test.close()\r\n\r\n del test_set\r\n gc.collect()\r\n return",
"def run(self, hash_list, issue_id_list, log_message_info_path,\n log_message_without_issueid_path, dsc_issue_dict, comment_issue_dict,\n output_dir):\n\n # extract train data\n if self.keyword_extraction_dict_path:\n keyword_extraction_dict = util.load_pickle(self.keyword_extraction_dict_path)\n else:\n ins = keyword_extraction.KeywordExtraction()\n keyword_extraction_dict = ins.run(hash_list, issue_id_list, log_message_info_path) # train data\n keyword_extraction_dict = generate_delete_data.main(keyword_extraction_dict, self.delete_rate)\n\n\n data_array, label_list, name_list, candidate_issue2hash_dict = self.extract_features(hash_list, issue_id_list,\n keyword_extraction_dict, log_message_info_path,\n log_message_without_issueid_path,\n dsc_issue_dict, comment_issue_dict,\n output_dir)\n\n pu = PUModel.PUModel(random_state=self.random_state)\n pu.fit(data_array, label_list)\n prediction_result = pu.predict(data_array)\n\n issue2hash_dict = {}\n for issue_id in candidate_issue2hash_dict.keys():\n for commit_hash in candidate_issue2hash_dict[issue_id]:\n idx = name_list.index(\"{0}:{1}\".format(issue_id, commit_hash))\n if prediction_result[idx]:\n if not issue_id in issue2hash_dict:\n issue2hash_dict[issue_id] = []\n issue2hash_dict[issue_id].append(commit_hash)\n\n return issue2hash_dict",
"def get_data_loaders_2sentences():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 4:\n history_chatbot = history[1::2]\n\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def main():\n datasets = {}\n for dataset_name in tqdm(SOURCE_DATASET_NAMES, desc=\"Processing datasets and fitting base models\"):\n logger.info(f\"processing dataset {dataset_name}\")\n clusters_path: Optional[str] = None\n if dataset_name not in PAIRWISE_ONLY_DATASETS:\n clusters_path = os.path.join(DATA_DIR, dataset_name, dataset_name + \"_clusters.json\")\n train_pairs_path = None\n val_pairs_path = None\n test_pairs_path = None\n else:\n train_pairs_path = os.path.join(DATA_DIR, dataset_name, \"train_pairs.csv\")\n val_pairs_path = os.path.join(DATA_DIR, dataset_name, \"val_pairs.csv\")\n if not os.path.exists(val_pairs_path):\n val_pairs_path = None\n test_pairs_path = os.path.join(DATA_DIR, dataset_name, \"test_pairs.csv\")\n\n logger.info(f\"loading dataset {dataset_name}\")\n anddata = ANDData(\n signatures=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_signatures.json\"),\n papers=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_papers.json\"),\n name=dataset_name,\n mode=\"train\",\n specter_embeddings=os.path.join(DATA_DIR, dataset_name, dataset_name + \"_specter.pickle\"),\n clusters=clusters_path,\n block_type=BLOCK_TYPE,\n train_pairs=train_pairs_path,\n val_pairs=val_pairs_path,\n test_pairs=test_pairs_path,\n train_pairs_size=N_TRAIN_PAIRS_SIZE,\n val_pairs_size=N_VAL_TEST_SIZE,\n test_pairs_size=N_VAL_TEST_SIZE,\n preprocess=True,\n )\n\n logger.info(f\"featurizing {dataset_name}\")\n train, val, test = featurize(\n anddata,\n FEATURIZER_INFO,\n n_jobs=N_JOBS,\n use_cache=True,\n chunk_size=100,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO,\n nan_value=NAN_VALUE,\n )\n X_train, y_train, nameless_X_train = train\n X_val, y_val, nameless_X_val = val\n X_test, y_test, nameless_X_test = test\n\n dataset = {}\n dataset[\"anddata\"] = anddata\n dataset[\"X_train\"] = X_train\n dataset[\"y_train\"] = y_train\n dataset[\"X_val\"] = X_val\n dataset[\"y_val\"] = y_val\n dataset[\"X_test\"] = X_test\n dataset[\"y_test\"] = y_test\n dataset[\"nameless_X_train\"] = nameless_X_train\n dataset[\"nameless_X_val\"] = nameless_X_val\n dataset[\"nameless_X_test\"] = nameless_X_test\n dataset[\"name\"] = anddata.name\n datasets[dataset_name] = dataset\n\n anddatas = [\n datasets[dataset_name][\"anddata\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in PAIRWISE_ONLY_DATASETS\n ]\n\n X_train = np.vstack([datasets[dataset_name][\"X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n y_train = np.hstack([datasets[dataset_name][\"y_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n X_val = np.vstack(\n [datasets[dataset_name][\"X_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n y_val = np.hstack(\n [datasets[dataset_name][\"y_val\"] for dataset_name in SOURCE_DATASET_NAMES if dataset_name not in {\"augmented\"}]\n )\n\n nameless_X_train = np.vstack([datasets[dataset_name][\"nameless_X_train\"] for dataset_name in SOURCE_DATASET_NAMES])\n nameless_X_val = np.vstack(\n [\n datasets[dataset_name][\"nameless_X_val\"]\n for dataset_name in SOURCE_DATASET_NAMES\n if dataset_name not in {\"augmented\"}\n ]\n )\n\n logger.info(\"fitting pairwise\")\n union_classifier = PairwiseModeler(n_iter=N_ITER, monotone_constraints=MONOTONE_CONSTRAINTS)\n union_classifier.fit(X_train, y_train, X_val, y_val)\n\n nameless_union_classifier = None\n if USE_NAMELESS_MODEL:\n logger.info(\"nameless fitting pairwise for \" + str(SOURCE_DATASET_NAMES))\n nameless_union_classifier = PairwiseModeler(\n n_iter=N_ITER,\n 
monotone_constraints=NAMELESS_MONOTONE_CONSTRAINTS,\n )\n nameless_union_classifier.fit(nameless_X_train, y_train, nameless_X_val, y_val)\n logger.info(\"nameless pairwise fit for \" + str(SOURCE_DATASET_NAMES))\n\n logger.info(\"fitting clusterer for\")\n union_clusterer = Clusterer(\n FEATURIZER_INFO,\n union_classifier.classifier,\n cluster_model=FastCluster(),\n search_space=search_space,\n n_jobs=N_JOBS,\n nameless_classifier=nameless_union_classifier.classifier if nameless_union_classifier is not None else None,\n nameless_featurizer_info=NAMELESS_FEATURIZER_INFO if nameless_union_classifier is not None else None,\n )\n union_clusterer.fit(anddatas)\n print(\n \"best clustering parameters:\",\n union_clusterer.best_params,\n )\n\n models = {}\n models[\"clusterer\"] = union_clusterer\n\n with open(\n f\"full_union_model_script_dump_average_{FEATURIZER_VERSION}.pickle\",\n \"wb\",\n ) as _pickle_file:\n pickle.dump(models, _pickle_file)\n logger.info(\"Done.\")",
"def load_batched_dataset(is_train, embeddings):\n tensorize_text_fn = build_tensorize_text_fn(embeddings)\n unbatched = load_data(is_train)\n\n def tensorize(x):\n x[\"premise\"] = tensorize_text_fn(x[\"premise\"])\n x[\"hypothesis\"] = tensorize_text_fn(x[\"hypothesis\"])\n return x\n\n unbatched = unbatched.map(tensorize)\n\n hist_bins = list(range(5, 500, 5))\n batched = unbatched.apply(\n ops.bucket_by_quantiles(lambda x: x[\"premise\"][\"len\"], FLAGS.batch_size,\n 10, hist_bins))\n if is_train:\n batched = batched.shuffle(1000, reshuffle_each_iteration=True)\n batched = batched.repeat()\n\n # Get (features, label) format for tf.estimator\n return batched.map(lambda x: (x, x[\"label\"]))",
"def main():\n\n df_links = pd.read_csv('data/tagged/ns-stories-full.csv', header=None)\n df_links.columns = ['source', 'date', 'label', 'url']\n\n df_dj = df_links[df_links['source'] == 'Digital Journal'].reset_index(drop=True)\n df_dj['date'] = pd.to_datetime(df_dj['date'].apply(lambda x: x[:9]))\n\n dj_csv = pd.DataFrame(columns = [\"url\",\"date\",\"title\", \"author\", \"content\",\"tag\", \"label\"])\n for i, row in tqdm(df_dj.iterrows()):\n try:\n url = row[\"url\"]\n html_soup = get_article(url)\n title = get_title(html_soup)\n tag = get_tags(html_soup)\n content = get_content(html_soup)\n author = get_author(html_soup)\n dj_csv = dj_csv.append({\"url\": url, \"date\": row[\"date\"], \"title\": title, \"author\": author, \n \"content\": content,\"tag\": tag, \n \"label\": row[\"label\"]}, \n ignore_index = True)\n except:\n print(i)\n time.sleep(10)\n try:\n url = row[\"url\"]\n html_soup = get_article(url)\n title = get_title(html_soup)\n tag = get_tags(html_soup)\n content = get_content(html_soup)\n author = get_author(html_soup)\n dj_csv = dj_csv.append({\"url\": url, \"date\": row[\"date\"], \"title\": title, \"author\": author, \n \"content\": content,\"tag\": tag, \n \"label\": row[\"label\"]}, \n ignore_index = True)\n except:\n continue\n \n dj_csv.to_csv(\"digital_journal.csv\", index=False)"
] | [
"0.6615366",
"0.5831273",
"0.57000536",
"0.56970143",
"0.5684071",
"0.5569833",
"0.5552695",
"0.5543285",
"0.55414546",
"0.55375046",
"0.5494917",
"0.5487292",
"0.5479934",
"0.54722214",
"0.54708064",
"0.54343843",
"0.54299",
"0.54090136",
"0.5354556",
"0.53511345",
"0.53494585",
"0.53471524",
"0.532359",
"0.5317925",
"0.52900916",
"0.5288966",
"0.52759105",
"0.52455384",
"0.5241353",
"0.5231784"
] | 0.59263885 | 1 |
Create a table from a CSV file and load it into the database. | def create_table(engine, csv_filename, tablename):
# Read the csv file and change all column names to lowercase
csv_df = pd.read_csv(f'./data/{csv_filename}.csv')
csv_df.columns = [c.lower() for c in csv_df.columns]
# Convert date columns to datetime
for c in csv_df.columns:
if "date" in c:
csv_df[c] = csv_df[c].astype('datetime64[ns]')
# Create/replace table with tablename in db
csv_df.to_sql(tablename, engine, if_exists='replace', index=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_table_from_csv (sqlite_db_file):\n files = [f for f in os.listdir(os.curdir) if f.endswith(\".csv\")]\n name_df = [re.findall('(.*)\\.csv',f)[0] for f in files ]\n engine = create_engine('sqlite:///' + sqlite_db_file)\n for n, f_n in zip(name_df, files):\n try:\n df = pd.read_csv(f\"{f_n}\", sep=',')\n df.to_sql(f\"{n}\", engine, if_exists=\"fail\")\n\n except Exception:\n pass",
"def populate_table_from_csv(csv_file, csv_encoding='iso-8859-15'):\n try:\n with open(file=csv_file, mode='r', encoding=csv_encoding) as input_file:\n # Could find a good place to add iterators/generators/comprehensions elsewhere, so made a new function\n # Also, yet another pylint false positive. The below line isn't supposed to be assigned to anything.\n [add_customer(*l.split(',')) for l in input_file if 'Id,Name,Last_name,' not in l] # pylint: disable=W0106\n except Exception as e:\n logger.error(\"Failed to load records from csv file %s into database %s: %s\", csv_file, customer_db.database, e)",
"def create_table_from_csv_sql(csv_file, non_number_column_pattern, table_name):\n header = read_header(csv_file)\n header_with_type = []\n for item in header:\n if match_in_pattern_list(non_number_column_pattern, item):\n header_with_type.append((item, 'varchar(50)'))\n else:\n header_with_type.append((item, 'real'))\n assert header_with_type[0][1] == 'real', 'Primary key must be number'\n header_with_type[0] = (header[0], 'int NOT NULL')\n sql_statement = \"CREATE TABLE \" + table_name + \"\\n(\\n\"\n for col, dbt in header_with_type:\n sql_statement += \"\\t\" + col + \" \" + dbt + \",\\n\"\n sql_statement += \"\\tPRIMARY KEY(\" + header[0] + \")\\n);\"\n return sql_statement",
"def main(csvfile, dbfile, verbose=False):\n CONN = sqlite3.connect(dbfile)\n cursor = CONN.cursor()\n create_schema(cursor)\n process_data(cursor, csvfile, verbose=verbose)\n CONN.commit()\n CONN.close()",
"def copy_csv_to_train_table(conn, csv_file):\n COPY_TRAIN = \"08_copy_train_to_table.psql\"\n copy_expert_psql_script(conn, COPY_TRAIN, csv_file)",
"def create_table_from_file():\n\n full_path = os.getcwd()\n file_name = full_path + \"/inventory/inventory.csv\"\n\n if os.path.exists(file_name):\n table = data_manager.get_table_from_file(file_name)\n\n else:\n ui.print_error_message(\"There is no file to read!\")\n table = []\n\n return table",
"def from_csv(self, path):\n for model, table in [(self.Dataset, 'dataset'),\n (self.Datarun, 'datarun'),\n (self.Hyperpartition, 'hyperpartition'),\n (self.Classifier, 'classifier')]:\n df = pd.read_csv(os.path.join(path, '%ss.csv' % table))\n\n # parse datetime columns. This is necessary because SQLAlchemy can't\n # interpret strings as datetimes on its own.\n # yes, this is the easiest way to do it\n for c in inspect(model).attrs:\n if type(c) != ColumnProperty:\n continue\n col = c.columns[0]\n if type(col.type) == DateTime:\n df[c.key] = pd.to_datetime(df[c.key],\n infer_datetime_format=True)\n\n for _, r in df.iterrows():\n # replace NaN and NaT with None\n for k, v in list(r.iteritems()):\n if pd.isnull(v):\n r[k] = None\n\n # insert the row into the database\n create_func = getattr(self, 'create_%s' % table)\n create_func(**r)",
"def seed_db_from_csv(csv):\n\n # Delete any existing rows\n Event.query.delete()\n db.session.commit()\n\n with open(csv, 'r') as csv_file:\n # Skip the first row of column headers\n rows = [row.strip().split(',')[:11] for row in csv_file.readlines()[1:]]\n\n for _, _, _, _, _, state, date, _, _, kind, title in rows:\n event = Event(kind, date=date[:10], state=state, title=title.strip('\"'))\n db.session.add(event)\n\n try:\n # Persist changes if entire table was imported successfully\n db.session.commit()\n return True\n except Exception as e:\n db.session.rollback()\n return False",
"def write_create_table(\n\tfile_name, delim, table_name, col_prefix=\"\", \n\tdefault_type=\"varchar(100)\"):\n print(\"CREATE TABLE \" + table_name + \" (\")\n with open(file_name) as csv_file:\n\treader = csv.reader(csv_file, delimiter=delim)\n\theader = next(reader)\n\tfor col in header:\n\t name = col_prefix + col\n\t print(\"\\t\" + name + \" \" + default_type + \",\")\n print(\");\")",
"def import_table_data(con, cur, tbl_name):\n\n # Read schema from external file and create table according to schema\n schemas = import_schemas_from_file()\n tbl_schema = schemas[tbl_name]\n create_table(cur, tbl_name, tbl_schema)\n\n # Loop through CSV file and prepare data for import\n file_records = []\n create_query_str = \"\"\"INSERT INTO {} VALUES {}\"\"\".format(tbl_name, '(' + ','.join(['%s'] * len(tbl_schema)) + ')')\n table_csv_path = CSV_PATH + tbl_name + '.csv'\n\n with open(table_csv_path) as csv_file:\n reader = csv.reader(csv_file, delimiter=',')\n for i, line in enumerate(reader):\n record = [schema_process(tbl_schema, j, item) for j, item in enumerate(line)]\n file_records.append(record)\n # Import records into the MySQL database table, 1,000 records at a time\n if i % 1000 == 0:\n print('inserting 1000 rows')\n cur.executemany(create_query_str, file_records)\n con.commit()\n file_records = []\n # Insert any remaining records.\n print('inserting {} rows'.format(len(file_records)))\n cur.executemany(create_query_str, file_records)\n con.commit()",
"def _create_tables_schema(self, csv_file, csv_file_idx):\n logger.info('Creating database tables')\n\n tmp = pd.read_csv(csv_file, index_col=0, header=0, nrows=1, low_memory=False)\n old_columns = tmp.columns.tolist()\n del tmp\n new_columns = [self._rename_columns(x) for x in old_columns]\n\n # Remove columns that were previously loaded in other datasets\n if 'existing_col_names' not in self._loading_tmp:\n # dictionary with data-field as key and csv file as value\n columns_and_csv_files = {}\n else:\n columns_and_csv_files = self._loading_tmp['existing_col_names']\n\n old_columns_clean = []\n new_columns_clean = []\n\n for old_col_name, new_col_name in tuple(zip(old_columns, new_columns)):\n if new_col_name in columns_and_csv_files:\n corresponding_csv_file = columns_and_csv_files[new_col_name]\n logger.warning(f'Column {new_col_name} already loaded from {corresponding_csv_file}. Skipping.')\n continue\n\n columns_and_csv_files[new_col_name] = csv_file\n\n old_columns_clean.append(old_col_name)\n new_columns_clean.append(new_col_name)\n\n self._loading_tmp['existing_col_names'] = columns_and_csv_files\n\n # keep only unique columns (not loaded in previous files)\n old_columns = old_columns_clean\n new_columns = new_columns_clean\n all_columns = tuple(zip(old_columns, new_columns))\n\n # FIXME: check if self.n_columns_per_table is greater than the real number of columns\n self._loading_tmp['chunked_column_names'] = tuple(enumerate(self._chunker(all_columns, self.n_columns_per_table)))\n self._loading_tmp['chunked_table_column_names'] = \\\n {self._get_table_name(col_idx, csv_file_idx): [col[1] for col in col_names]\n for col_idx, col_names in self._loading_tmp['chunked_column_names']}\n\n # get columns dtypes (for PostgreSQL and standard ones)\n db_types_old_column_names, all_fields_dtypes, all_fields_description, all_fields_coding = self._get_db_columns_dtypes(csv_file)\n db_dtypes = {self._rename_columns(k): v for k, v in db_types_old_column_names.items()}\n self._fields_dtypes.update(all_fields_dtypes)\n\n data_sample = pd.read_csv(csv_file, index_col=0, header=0, nrows=1, dtype=str)\n data_sample = data_sample.rename(columns=self._rename_columns)\n\n # create fields table\n if csv_file_idx == 0:\n create_table('fields',\n columns=[\n 'column_name text NOT NULL',\n 'table_name text',\n 'field_id text NOT NULL',\n 'description text',\n 'coding bigint',\n 'inst bigint',\n 'arr bigint',\n 'type text NOT NULL',\n ],\n constraints=[\n 'pk_fields PRIMARY KEY (column_name)'\n ],\n db_engine=self._get_db_engine(),\n drop_if_exists=True\n )\n\n current_stop = 0\n for column_names_idx, column_names in self._loading_tmp['chunked_column_names']:\n new_columns_names = [x[1] for x in column_names]\n\n fields_ids = []\n instances = []\n arrays = []\n fields_dtypes = []\n fields_descriptions = []\n fields_codings = []\n\n for col_name in new_columns_names:\n match = re.match(Pheno2SQL.RE_FIELD_INFO, col_name)\n\n fields_ids.append(match.group('field_id'))\n instances.append(int(match.group('instance')))\n arrays.append(int(match.group('array')))\n\n fields_dtypes.append(all_fields_dtypes[col_name])\n fields_descriptions.append(all_fields_description[col_name])\n\n if col_name in all_fields_coding:\n fields_codings.append(all_fields_coding[col_name])\n else:\n fields_codings.append(np.nan)\n\n # Create main table structure\n table_name = self._get_table_name(column_names_idx, csv_file_idx)\n logger.info('Table {} ({} columns)'.format(table_name, len(new_columns_names)))\n data_sample.loc[[], 
new_columns_names].to_sql(table_name, self._get_db_engine(), if_exists='replace', dtype=db_dtypes)\n\n with self._get_db_engine().connect() as conn:\n conn.execute(\"\"\"\n ALTER TABLE {table_name} ADD CONSTRAINT pk_{table_name} PRIMARY KEY (eid);\n \"\"\".format(table_name=table_name))\n\n with self._get_db_engine().connect() as conn:\n conn.execute('DROP INDEX ix_{table_name}_eid;'.format(table_name=table_name))\n\n # Create auxiliary table\n n_column_names = len(new_columns_names)\n current_start = current_stop\n current_stop = current_start + n_column_names\n\n aux_table = pd.DataFrame({\n 'column_name': new_columns_names,\n 'field_id': fields_ids,\n 'inst': instances,\n 'arr': arrays,\n 'coding': fields_codings,\n 'table_name': table_name,\n 'type': fields_dtypes,\n 'description': fields_descriptions\n })\n # aux_table = aux_table.set_index('column_name')\n aux_table.to_sql('fields', self._get_db_engine(), index=False, if_exists='append')",
"def convert_csv_to_SQLite3(self,\n csv_path: str=None, # Path to .csv \n destination: str=None, # Where to create .db\n db_name: str=None, # Database name\n table_name: str=None, # table name\n **kwargs # Custom arguments for reader and writter\n ):\n # With scribe reader, read a .csv \n # **kwargs, are used in params in the subclass Scibe_File_Writter\n # **Kwargs Over-write convert_csv_to_db params\n # Inherits from scribe_readers.Scribe_File_Reader\n self.read_from_csv(csv_path, **kwargs) # Inherits from scribe_readers.Scribe_File_Reader\n if db_name != None:\n destination = f\"{destination}\\{db_name}.db\"\n self.db_name = db_name\n conn = self.create_sqlite_connection(destination) # Inherits from scribe_writers_Scribe_Scribe_SQLite_Writer\n # Create connection also creates new db if it does not exist.\n self.create_new_sqlite_table(conn=conn,\n schema=self.dtypes,\n table_name=f\"tbl_{table_name}\",\n close_conn =False)\n \n \"\"\"Insert data into SQLite database\"\"\"\n\n table_name=f\"tbl_{table_name}\"\n self.insert_into_sqlite_table(conn,\n csv_path,\n table_name,\n self.shape,\n self.delimiter)",
"def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)",
"def process_file(cur, conn, table, filepath):\n\n taxi_table_insert = (\"\"\"\n INSERT INTO {} (trip_id, taxi_id, trip_sec, trip_mile)\n VALUES (%s, %s, %s, %s);\n \"\"\".format(table))\n\n # open csv file\n # https://stackoverflow.com/questions/17444679/reading-a-huge-csv-file\n df = pd.read_csv(filepath)\n\n df = df[['Trip ID', 'Taxi ID', 'Trip Seconds', 'Trip Miles']]\n\n df.dropna(inplace=True)\n\n # insert trip records\n for index, row in df.iterrows():\n cur.execute(taxi_table_insert, row)\n conn.commit()",
"def copy_csv_to_example_test_table(conn, csv_file):\n COPY_EXAMPLE_TEST = \"11_copy_example_test_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_TEST, csv_file)",
"def create_train_table(conn):\n execute_sql_script(conn, \"03_create_train_table.sql\")",
"def copy_csv_to_lectures_table(conn, csv_file):\n COPY_LECTURES = \"10_copy_lectures_to_table.psql\"\n copy_expert_psql_script(conn, COPY_LECTURES, csv_file)",
"def home_away_table_from_csv(file_path='../home_away/home_away.csv'):\n conn = connect_sql()\n df = pd.read_csv('../home_away/home_away.csv')\n df.to_sql('home_away', conn, if_exists='replace', index=False)",
"def copy_csv_to_questions_table(conn, csv_file):\n COPY_QUESTIONS = \"09_copy_questions_to_table.psql\"\n copy_expert_psql_script(conn, COPY_QUESTIONS, csv_file)",
"def load_records():\n\n with open('seed_data/records.csv', 'rb') as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n record_id, user_id, common_name, date_time, latitude, longitude, notes, seen, num_birds = row\n\n record = Record(record_id=record_id, user_id=user_id, common_name=common_name,\n date_time=date_time, latitude=latitude, longitude=longitude, \n notes=notes, seen=seen, num_birds=num_birds)\n\n db.session.add(record)\n\n db.session.commit()",
"def generate_table(input_file, delim=\",\", header=True):\n input_file = os.path.abspath(input_file)\n if not os.path.exists(input_file):\n sys.exit(\"%s does not exist.\" % input_file)\n\n # Read in rows with user specified delimiter\n rows = read_rows(input_file, delim=delim)\n\n # Generate tabulars expected format\n labels = [\"column %s\" % x for x in range(len(rows[0]))]\n if header:\n labels = rows.pop(0)\n\n # Generate Tabular table to output\n table = Tabular(\n # Note that columns are specified here, so we provide a row (list) later\n columns=labels,\n style=dict(\n header_=dict(bold=True, transform=str.upper),\n # Default styling could be provided from some collection of styling files\n default_=dict(\n color=dict(\n lookup={\n \"Trix\": \"green\",\n \"110\": \"red\",\n \"100\": \"green\", # since no grey for now\n }\n )\n ),\n ),\n )\n\n # Add row to table. If columns aren't specified on init, provide dict here\n for row in rows:\n table(row)",
"def csv(self, file, table=None):\n\n if table:\n table.import_from_csv_file(file)\n else:\n db = self.db\n # This is the preferred method as it updates reference fields\n db.import_from_csv_file(file)\n db.commit()",
"def copy_csv_to_example_sample_submission_table(conn, csv_file):\n COPY_EXAMPLE_SAMPLE_SUBMISSION = \"12_copy_example_sample_submission_to_table.psql\"\n copy_expert_psql_script(conn, COPY_EXAMPLE_SAMPLE_SUBMISSION, csv_file)",
"def insert_books_data():\n # Get data from csv file\n print(\"Getting data from csv..\")\n file = open(\"books.csv\")\n reader = csv.reader(file)\n\n # Insert csv data into table\n print(\"Inserting data into 'books' table..\")\n for isbn, title, author, year in reader:\n try:\n db.execute(\"INSERT INTO books (isbn, title, author, year)\\\n VALUES (:isbn, :title, :author, :year)\", {\n \"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year })\n except exc.DataError as err:\n print(\"Invalid entry in csv file\")\n db.commit()\n print(\"Data inserted\")",
"def create_table(self):\n values = []\n for key, field in self.COLUMN_TO_FILED.items():\n sql = ' '.join(\n [key, field.column_type, 'PRIMARY KEY' if field.primary_key else ''])\n values.append(sql)\n sql = 'CREATE TABLE IF NOT EXISTS {} ({})'.format(\n self.TABLE_NAME, ','.join(values))\n yield self._pool.execute(sql)",
"def insert_csv(self, file, tablename, sep=','):\n filehandel = open(file, 'r')\n self.cursor.copy_from(filehandel, tablename, sep)\n self.connection.commit()",
"def create_sqlite_table(self):\n self.print_datetime_output('Connect to data base %s' % self.db_name)\n con = sqlite3.connect(self.db_name)\n cur = con.cursor()\n\n # check if table exists\n cur.execute(\"select count(*) from sqlite_master where type='table' and name='%s'\" % self.db_table)\n if cur.fetchall()[0][0] == 1:\n self.print_datetime_output('Previous table %s was dropped' % self.db_table)\n cur.execute(\"DROP TABLE %s;\" % self.db_table)\n\n self.print_datetime_output('Create table %s and import data from csv file %s' % (self.db_table,\n self.time_series_file_name))\n cur.execute(\"CREATE TABLE %s (timestamp, close_USD);\" % self.db_table)\n\n with open(self.file_name, 'r') as fin:\n dr = csv.DictReader(fin)\n to_db = [(i['timestamp'], i['close (USD)']) for i in dr]\n\n cur.executemany(\"INSERT INTO %s (timestamp, close_USD) VALUES (?, ?);\" % self.db_table, to_db)\n con.commit()\n return con",
"def load_products_data(connection, csvfile):\n insert_sql = 'insert into products (id, description, genres) ' \\\n 'values (%s, %s, %s)'\n load_data(connection, insert_sql, get_data_from_file(csvfile))",
"def loadCSV(input_file):",
"def read_table_from_csv(\n self,\n path: str,\n header: Optional[bool] = None,\n columns: Optional[List[str]] = None,\n dialect: Optional[Union[str, Dialect]] = None,\n delimiters: Optional[str] = None,\n column_unknown: str = \"Unknown\",\n encoding: Optional[str] = None,\n ) -> Table:\n sniffer = csv.Sniffer()\n with open(path, newline=\"\", encoding=encoding) as fd:\n sample = fd.readline()\n\n if dialect is None:\n dialect_name = sniffer.sniff(sample, delimiters)\n elif isinstance(dialect, Dialect):\n dialect_name = dialect.value\n else:\n dialect_name = dialect\n\n if header is None:\n header = sniffer.has_header(sample)\n\n with open(path, newline=\"\", encoding=encoding) as fd:\n if header:\n reader = csv.DictReader(\n fd, dialect=dialect_name, restkey=str(column_unknown)\n )\n else:\n reader = csv.reader(fd, dialect=dialect_name)\n rows = list(reader)\n\n table = Table(rows, columns)\n notebook_table(self.table_head(table, 10))\n\n if header and column_unknown in table.columns:\n self.logger.warning(\n \"CSV file (%s) had fields not defined in header, \"\n \"which can be the result of a wrong dialect\",\n path,\n )\n\n return table"
] | [
"0.78636694",
"0.766007",
"0.74412394",
"0.7296567",
"0.72548956",
"0.71277964",
"0.6920125",
"0.6874775",
"0.6822775",
"0.68040115",
"0.678773",
"0.674541",
"0.67439973",
"0.6684536",
"0.66534555",
"0.66475165",
"0.6639408",
"0.6621663",
"0.66060764",
"0.65959024",
"0.6594661",
"0.6592354",
"0.6527366",
"0.6525425",
"0.6495382",
"0.64897805",
"0.6481355",
"0.64729005",
"0.64585334",
"0.6455105"
] | 0.77351403 | 1 |
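
A minimal usage sketch for the create_table document in the record above, assuming pandas and SQLAlchemy are installed; the SQLite URL, the ./data/orders.csv file, and the 'orders' table name are hypothetical stand-ins, not part of the original record:

# Hedged usage sketch for create_table (defined in the record above).
# Assumes pandas and SQLAlchemy are installed and that ./data/orders.csv exists;
# 'orders' and the SQLite URL below are made-up example names.
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine("sqlite:///example.db")      # any SQLAlchemy engine works here
create_table(engine, "orders", "orders")            # reads ./data/orders.csv, writes table 'orders'
print(pd.read_sql("SELECT * FROM orders LIMIT 5", engine))  # quick sanity check

Because the function passes if_exists='replace' to to_sql, rerunning the sketch rebuilds the table rather than appending duplicate rows.
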
Returns args including the etcd endpoint and certificates if necessary. As dcosetcdctl and etcdctl share the same arguments, such as endpoints and, where applicable, the certificates involved, we group these arguments to generate the basic items needed to execute either etcdctl or dcosetcdctl | def get_etcdctl_with_base_args(
cert_type: str = "root",
endpoint_ip: str = LOCAL_ETCD_ENDPOINT_IP,
) -> List[str]:
return [ETCDCTL_PATH, "--endpoints=http://{}:2379".format(endpoint_ip)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_args():\n\n parser = argparse.ArgumentParser(description=\"Get DC, Clusters, Hosts and VM in JSON.\")\n parser.add_argument('-H', '--host', nargs=1, required=True, help='The vCenter to connect to',\n dest='host', type=str)\n parser.add_argument('-p', '--password', nargs=1, required=False,\n help='The password with which to connect to the VC. If not specified, the user is prompted at runtime for a password',\n dest='password', type=str)\n parser.add_argument('-u', '--user', nargs=1, required=True, help='The username with which to connect to the host',\n dest='username', type=str)\n args = parser.parse_args()\n return args",
"def parse_args():\n\n import cdr_cleaner.args_parser as parser\n\n additional_arguments = [{\n parser.SHORT_ARGUMENT: '-e',\n parser.LONG_ARGUMENT: '--ehr_dataset_id',\n parser.ACTION: 'store',\n parser.DEST: 'ehr_dataset_id',\n parser.HELP: 'ehr_dataset_id',\n parser.REQUIRED: True\n }, {\n parser.SHORT_ARGUMENT: '-v',\n parser.LONG_ARGUMENT: '--validation_dataset_id',\n parser.ACTION: 'store',\n parser.DEST: 'validation_dataset_id',\n parser.HELP: 'validation_dataset_id',\n parser.REQUIRED: True\n }]\n args = parser.default_parse_args(additional_arguments)\n return args",
"def GenerateToolArgStrings(options):\n # Preparing dnstreexport\n dnstreeexport_array = [options.tree_export]\n dnstreeexport_array.extend(['-c', options.config_file])\n if( options.force ):\n dnstreeexport_array.append('--force')\n if( options.quiet ):\n dnstreeexport_array.append('--quiet')\n dnstreeexport_arg_string = ' '.join(dnstreeexport_array)\n\n # Preparing dnscheckconfig\n dnscheckconfig_array = [options.check_config]\n dnscheckconfig_array.extend(['-i', '%s' % options.id])\n dnscheckconfig_array.extend(['--config-file', options.config_file])\n if( options.named_checkzone ):\n dnscheckconfig_array.extend(['-z', options.named_checkzone])\n if( options.named_checkconf ):\n dnscheckconfig_array.extend(['-c', options.named_checkconf])\n if( not options.quiet ):\n dnscheckconfig_array.append('-v')\n dnscheckconfig_arg_string = ' '.join(dnscheckconfig_array)\n\n # Preparing dnsservercheck\n dnsservercheck_array = [options.server_check]\n dnsservercheck_array.extend(['--export-config'])\n dnsservercheck_array.extend(['-c', options.config_file])\n dnsservercheck_array.extend(['-i', '%s' % options.id])\n dnsservercheck_arg_string = ' '.join(dnsservercheck_array)\n\n # Preparing dnsconfigsync\n dnsconfigsync_array = [options.config_sync]\n dnsconfigsync_array.extend(['--export-config'])\n dnsconfigsync_array.extend(['-i', '%s' % options.id])\n dnsconfigsync_array.extend(['-c', options.config_file])\n if( options.ssh_id ):\n dnsconfigsync_array.extend(['--ssh-id', options.ssh_id])\n if( options.rndc_exec ):\n dnsconfigsync_array.extend(['--rndc-exec', options.rndc_exec])\n if( options.rndc_port ):\n dnsconfigsync_array.extend(['--rndc-port', options.rndc_port])\n if( options.rndc_key ):\n dnsconfigsync_array.extend(['--rndc-key', options.rndc_key])\n if( options.rndc_conf ):\n dnsconfigsync_array.extend(['--rndc-conf', options.rndc_conf])\n dnsconfigsync_arg_string = ' '.join(dnsconfigsync_array)\n\n # Preparing dnsquerycheck\n dnsquerycheck_array = [options.query_check]\n dnsquerycheck_array.extend(['--export-config'])\n dnsquerycheck_array.extend(['-c', options.config_file])\n dnsquerycheck_array.extend(['-i', '%s' % options.id])\n dnsquerycheck_array.extend(['-n', '%s' % options.number])\n dnsquerycheck_array.extend(['-p', '%s' % options.port])\n dnsquerycheck_arg_string = ' '.join(dnsquerycheck_array)\n\n return [dnstreeexport_arg_string,\n dnscheckconfig_arg_string,\n dnsservercheck_arg_string,\n dnsconfigsync_arg_string, \n dnsquerycheck_arg_string]",
"def prepare_args(self):\n args = []\n if self.login:\n args.extend(['-L', cfg['tools.hydra.loginfile']])\n if self._port.is_ipv6:\n args.append('-6')\n\n args.extend(['-P', cfg['tools.hydra.passwordfile'], '-s', str(self._port.number), str(self._port.node.ip),\n self.service, ])\n return args",
"def get_args():\n parser = argparse.ArgumentParser(\n description='Arguments for talking to vCenter')\n\n parser.add_argument('-s', '--host',\n required=True,\n action='store',\n help='vSpehre service to connect to')\n\n parser.add_argument('-o', '--port',\n type=int,\n default=443,\n action='store',\n help='Port to connect on')\n\n parser.add_argument('-u', '--user',\n required=True,\n action='store',\n help='Username to use')\n\n parser.add_argument('-p', '--password',\n required=False,\n action='store',\n help='Password to use')\n\n parser.add_argument('-v', '--vm-name',\n required=True,\n action='store',\n help='Name of the VM you wish to operate on')\n\n parser.add_argument('--no-ssl',\n action='store_true',\n help='Skip SSL verification')\n\n parser.add_argument('--operation',\n required=True,\n action='store',\n help='start, suspend, or stop')\n\n parser.add_argument('-f', '--force',\n required=False,\n action='store',\n default=None)\n \n args = parser.parse_args()\n\n if not args.password:\n args.password = getpass.getpass(\n prompt='Enter password')\n\n return args",
"def parse_args(self):\n defaults = {\n 'analytics_api_ip': '127.0.0.1',\n 'analytics_api_port': '8181',\n 'start_time': 'now-10m',\n 'end_time': 'now',\n 'select' : [],\n 'sort': [],\n 'admin_user': 'admin',\n 'admin_password': 'contrail123',\n 'conf_file': '/etc/contrail/contrail-keystone-auth.conf',\n 'is_service_instance': 0\n }\n\n conf_parser = argparse.ArgumentParser(add_help=False)\n conf_parser.add_argument(\"--admin-user\", help=\"Name of admin user\")\n conf_parser.add_argument(\"--admin-password\", help=\"Password of admin user\")\n conf_parser.add_argument(\"--conf-file\", help=\"Configuration file\")\n conf_parser.add_argument(\"--analytics-api-ip\", help=\"IP address of Analytics API Server\")\n conf_parser.add_argument(\"--analytics-api-port\", help=\"Port of Analytcis API Server\")\n args, remaining_argv = conf_parser.parse_known_args();\n\n configfile = defaults['conf_file']\n if args.conf_file:\n configfile = args.conf_file\n\n config = ConfigParser.SafeConfigParser()\n config.read(configfile)\n if 'KEYSTONE' in config.sections():\n if args.admin_user == None:\n args.admin_user = config.get('KEYSTONE', 'admin_user')\n if args.admin_password == None:\n args.admin_password = config.get('KEYSTONE','admin_password')\n\n if args.admin_user == None:\n args.admin_user = defaults['admin_user']\n if args.admin_password == None:\n args.admin_password = defaults['admin_password']\n\n if args.analytics_api_ip == None:\n args.analytics_api_ip = defaults['analytics_api_ip']\n if args.analytics_api_port == None:\n args.analytics_api_port = defaults['analytics_api_port']\n\n parser = argparse.ArgumentParser(\n # Inherit options from config_parser\n parents=[conf_parser],\n # print script description with -h/--help\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.set_defaults(**defaults)\n\n parser.add_argument(\n \"--start-time\", help=\"Logs start time (format now-10m, now-1h)\")\n parser.add_argument(\"--end-time\", help=\"Logs end time\")\n parser.add_argument(\n \"--last\", help=\"Logs from last time period (format 10m, 1d)\")\n parser.add_argument(\n \"--table\", help=\"SessionAPI to query\", required=True,\n choices=['SessionSeriesTable', 'SessionRecordTable'])\n parser.add_argument(\n \"--session-type\", help=\"Session Type\", required=True,\n choices=['client', 'server'])\n parser.add_argument(\n \"--is-service-instance\", help=\"Service Instance Sessions\", type=int)\n parser.add_argument(\n \"--select\", help=\"List of Select Terms\", nargs='+')\n parser.add_argument(\n \"--where\", help=\"List of Where Terms to be ANDed\", nargs='+')\n parser.add_argument(\n \"--filter\", help=\"List of Filter Terms to be ANDed\", nargs='+')\n parser.add_argument(\n \"--sort\", help=\"List of Sort Terms\", nargs='+')\n parser.add_argument(\n \"--limit\", help=\"Limit the number of results\")\n\n self._args = parser.parse_args(remaining_argv)\n\n self._args.admin_user = args.admin_user\n self._args.admin_password = args.admin_password\n self._args.analytics_api_ip = args.analytics_api_ip\n self._args.analytics_api_port = args.analytics_api_port\n\n try:\n self._start_time, self._end_time = \\\n OpServerUtils.parse_start_end_time(\n start_time = self._args.start_time,\n end_time = self._args.end_time,\n last = self._args.last)\n except:\n return -1\n\n return 0",
"def arg_list():\n arg_list = [\n ['-d', '--domain', 'Specify the domain you are using'],\n ['-t', '--template-path', 'Specify template path'],\n ['-s', '--secrets-path', 'Specify template path'],\n ['-p', '--project', 'Specify a project name'],\n ['-c', '--cloud-platform', 'Specify the platform used'],\n ['-so', '--secrets-only', 'Generate secrets only'],\n ['-db', '--database-host', 'Specify the database host'],\n ['-dbc', '--database-connection-name', 'Specify the database connection name (GCP)'],\n ['-sbn', '--storage-bucket-name', 'Specify storage bucket name'],\n ['-sb', '--storage-backend', 'Specify storage backend s3/gcp/filesystem'],\n ['--acm', '--aws-cert-arn', 'Specify AWS ACM'],\n ['--sg-id', '--aws-alg-sg-id', 'Specify AWS SG ID'],\n ['--sentry', '--senty-dsn', 'Specify Sentry DSN'],\n ['-e', '--environment', 'Specify environment'],\n ['-g', '--gather', 'enable Gather yes or no'],\n ['--cm', '--cert-manager', 'Using cert manager?'],\n ['-m', '--modules', 'Aether modules i.e odk,ui,sync'],\n ['-r', '--redis-url', 'Redis endpoint for CouchDB sync'],\n ['-cdb', '--couchdb-url', 'Redis endpoint for CouchDB sync'],\n ['-gc', '--google-client-id', ' Google client ID for CouchDB sync']\n ]\n return arg_list",
"def _Args(parser,\n include_l7_internal_load_balancing=False,\n support_managed_certs=False):\n parser.add_argument(\n '--description',\n help='An optional, textual description for the SSL certificate.')\n\n parser.display_info.AddCacheUpdater(\n flags.SslCertificatesCompleterBeta\n if include_l7_internal_load_balancing else flags.SslCertificatesCompleter)\n\n if support_managed_certs:\n managed_or_not = parser.add_group(\n mutex=True,\n required=True,\n help='Flags for managed or self-managed certificate. ')\n\n managed_or_not.add_argument(\n '--domains',\n metavar='DOMAIN',\n type=arg_parsers.ArgList(min_length=1),\n default=[],\n help=\"\"\"\\\n List of domains to create a managed certificate for.\n \"\"\")\n\n not_managed = managed_or_not.add_group('Flags for self-managed certificate')\n not_managed.add_argument(\n '--certificate',\n metavar='LOCAL_FILE_PATH',\n required=True,\n help=\"\"\"\\\n Path to a local certificate file to create a self-managed\n certificate. The certificate must be in PEM format. The certificate\n chain must be no greater than 5 certs long. The chain must include at\n least one intermediate cert.\n \"\"\")\n not_managed.add_argument(\n '--private-key',\n metavar='LOCAL_FILE_PATH',\n required=True,\n help=\"\"\"\\\n Path to a local private key file. The private key must be in PEM\n format and must use RSA or ECDSA encryption.\n \"\"\")\n else:\n parser.add_argument(\n '--certificate',\n required=True,\n metavar='LOCAL_FILE_PATH',\n help=\"\"\"\\\n Path to a local certificate file. The certificate must be in PEM\n format. The certificate chain must be no greater than 5 certs long. The\n chain must include at least one intermediate cert.\n \"\"\")\n\n parser.add_argument(\n '--private-key',\n required=True,\n metavar='LOCAL_FILE_PATH',\n help=\"\"\"\\\n Path to a local private key file. The private key must be in PEM\n format and must use RSA or ECDSA encryption.\n \"\"\")",
"def GetArgs():\n\n parser = argparse.ArgumentParser(description='Process args for connecting to vCenter')\n parser.add_argument('-v', '--vc', required=True, action='store', help='vCenter')\n parser.add_argument('-u', '--user', required=True, action='store', help='vCenter Administrator')\n parser.add_argument('-p', '--password', required=False, action='store', help='Password')\n args = parser.parse_args()\n return args",
"def _ParseCertificateArguments(client, args):\n self_managed = None\n managed = None\n certificate_type = None\n if args.certificate:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.SELF_MANAGED\n certificate = files.ReadFileContents(args.certificate)\n private_key = files.ReadFileContents(args.private_key)\n self_managed = client.messages.SslCertificateSelfManagedSslCertificate(\n certificate=certificate, privateKey=private_key)\n if args.domains:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.MANAGED\n managed = client.messages.SslCertificateManagedSslCertificate(\n domains=args.domains)\n return certificate_type, self_managed, managed",
"def parse_args():\n parser = argparse.ArgumentParser(\n description='''\n {nm}: TCP over TLS server to accept requests.\\n\n '''.format(nm=sys.argv[0]))\n parser.add_argument('-p',\n '--port',\n help='Server port to connect to, defaults to \"9999\".',\n required=False,\n default='9999')\n parser.add_argument('-c',\n '--cert',\n help='Server certificate file with path,'\n ' defaults to \"server.pem\" in current directory.',\n required=False,\n default='server.pem')\n parser.add_argument('-k',\n '--key',\n help='Server certificate key file with path,'\n ' defaults to \"server.key\" in current directory.',\n required=False,\n default='server.key')\n parser.add_argument('-ca',\n '--cert-auth',\n help='CA certificate file with path,'\n ' defaults to \"ca_cert.pem\" in current directory.',\n required=False,\n dest='ca_cert',\n default='ca_cert.pem')\n parser.add_argument('--log-level',\n help='Logger level, defaults to \"DEBUG\"',\n required=False,\n default='DEBUG')\n return vars(parser.parse_args())",
"def getTCSargs():\n try:\n nameIndex, portIndex = sys.argv.index(\"-n\"), sys.argv.index(\"-e\")\n if abs(nameIndex - portIndex) > 1:\n if isinstance(sys.argv[nameIndex+1],str) and isinstance(sys.argv[portIndex+1], int):\n return [sys.argv[nameIndex+1], int(sys.argv[portIndex+1])]\n except ValueError as error:\n return [\"localhost\", 58044]\n except IndexError as error:\n return [\"localhost\", 58044]\n return [\"localhost\", 58044]",
"def _set_arguments(self):\n cert_location = f\"dependencies{sep}certificates{sep}localuser.crt\"\n key_location = f\"dependencies{sep}certificates{sep}localuser.key\"\n assert Path(cert_location).exists(), (\n f\"The certificate isn't \"\n f\"present at location {Path(cert_location).absolute()}\"\n )\n assert Path(key_location).exists(), (\n f\"The certificate key isn't \"\n f\"present at location {Path(key_location).absolute()}\"\n )\n self._arguments = [\n (\n \"test-certificate-verify\",\n [\"-k\", key_location, \"-c\", cert_location],\n ),\n (\n \"test-sig-algs\",\n [],\n ),\n (\n \"test-clienthello-md5\",\n [],\n ),\n (\n \"test-tls13-pkcs-signature\",\n [],\n ),\n ]",
"def get_args():\n\n parser = argparse.ArgumentParser(description=\"Add a (sub)tree from a vCenter's structure to the Nuage vCenter Deployment Tool. This can be done by specifying the datacenters, clusters and hosts you want to add. You can also specify to include all datacenters and/or clusters and/or hosts, depending on your requirements. It is also possible to provide a CSV file containing the hosts to add and each hosts specific configuration. Creation will only happen if the entity doesn't exist yet in the vCenter Deployment Tool. Hosts will be updated with the new configuration if you run the script with already existsing hosts. This script is also capable of updating the ESXi Hosts Agent VM settings.\")\n parser.add_argument('--all-clusters', required=False, help='Configure all Clusters from the selected vCenter Datacenters', dest='all_clusters', action='store_true')\n parser.add_argument('--all-datacenters', required=False, help='Configure all vCenter Datacenters from the vCenter', dest='all_datacenters', action='store_true')\n parser.add_argument('--all-hosts', required=False, help='Configure all Hosts from the selected Clusters', dest='all_hosts', action='store_true')\n parser.add_argument('--cluster', required=False, help='Cluster that has to be present in the Nuage vCenter Deployment Tool (can be specified multiple times)', dest='clusters', type=str, action='append')\n parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')\n parser.add_argument('-f', '--allow-fqdn', required=False, help='Allow the use of FQDN in the CSV hosts file instead of IP', dest='allow_fqdn', action='store_true')\n parser.add_argument('--datacenter', required=False, help='Datacenter that has to be present in the Nuage vCenter Deployment Tool (can be specified multiple times)', dest='datacenters', type=str, action='append')\n parser.add_argument('--host', required=False, help='Host IPs that has to be present in the Nuage vCenter Deployment Tool (can be specified multiple times)', dest='hosts', type=str, action='append')\n parser.add_argument('--host-configure-agent', required=False, help='Configure the VM Agent settings of the vCenter Hosts. It will configure the Management network you specify as an argument with --hv-management-network, or the one in the CSV file if specified. For datastore it will use the first available local datastore, or the one specified in the CSV file if provided.', dest='host_configure_agent', action='store_true')\n parser.add_argument('--hosts-file', required=False, help='CSV file which contains the configuration for each hypervisor', dest='hosts_file', type=str)\n parser.add_argument('--hv-user', required=True, help='The ESXi (default) hosts username', dest='hv_username', type=str)\n parser.add_argument('--hv-password', required=False, help='The ESXi hosts password. 
If not specified, the user is prompted at runtime for a password', dest='hv_password', type=str)\n parser.add_argument('--hv-management-network', required=True, help='The ESXi hosts management network', dest='hv_management_network', type=str)\n parser.add_argument('--hv-data-network', required=True, help='The ESXi hosts data network', dest='hv_data_network', type=str)\n parser.add_argument('--hv-vm-network', required=True, help='The ESXi hosts VM network', dest='hv_vm_network', type=str)\n parser.add_argument('--hv-mc-network', required=True, help='The ESXi hosts Multicast Source network', dest='hv_mc_network', type=str)\n parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)\n parser.add_argument('--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)\n parser.add_argument('--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)\n parser.add_argument('--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)\n parser.add_argument('--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)\n parser.add_argument('--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)\n parser.add_argument('--nuage-vrs-ovf', required=False, help='The URL of the VRS OVF file', dest='nuage_vrs_ovf', type=str)\n parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect', dest='nosslcheck', action='store_true')\n parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')\n parser.add_argument('--vcenter-host', required=True, help='The vCenter server to connect to, use the IP', dest='vcenter_host', type=str)\n parser.add_argument('--vcenter-name', required=False, help='The name of the vCenter you want in the vCenter Deployment Tool', dest='vcenter_name', type=str)\n parser.add_argument('--vcenter-http-port', required=False, help='The vCenter server HTTP port to connect to (default = 80)', dest='vcenter_http_port', type=int, default=80)\n parser.add_argument('--vcenter-https-port', required=False, help='The vCenter server HTTPS port to connect to (default = 443)', dest='vcenter_https_port', type=int, default=443)\n parser.add_argument('--vcenter-password', required=False, help='The password with which to connect to the vCenter host. If not specified, the user is prompted at runtime for a password', dest='vcenter_password', type=str)\n parser.add_argument('--vcenter-user', required=True, help='The username with which to connect to the vCenter host', dest='vcenter_username', type=str)\n args = parser.parse_args()\n return args",
"def read_arguments(argv):\n\tif argv[0] in ('1', '2'):\n\t\tconos_config['endpoint'] = endpoint[argv[0]]\n\telse:\n\t\tusage()\n\n\tif argv[1] in ('dev', 'test', 'int', 'prod'):\n\t\tconos_config['environment'] = argv[1]\n\t\tconos_config['sts_url'] = eval(argv[1] + '_sts_url')\n\t\tconos_config['aicuu_url'] = eval(argv[1] + '_aicuu_url')\n\telse:\n\t\tusage()\n\n\tif len(argv) == 6:\n\t\tconos_config['number_threads'] = '1'\n\telse:\n\t\tif argv[6] in ('1', '2', '3', '4', '5', '6', '7', '8'):\n\t\t\tconos_config['number_threads'] = argv[6]\n\t\telse:\n\t\t\tusage()\n\n\tconos_config['client_id'] = argv[2]\n\tconos_config['client_secret'] = argv[3]\n\tconos_config['input_file'] = argv[4]\n\tconos_config['output_file'] = argv[5]",
"def get_args():\n parser = argparse.ArgumentParser(\n description='Standard Arguments for talking to Distributed Index Server')\n parser.add_argument('-c', '--config',\n required=True,\n action='store',\n help='Config file of the network')\n parser.add_argument('-i', '--index',\n type=int,\n required=True,\n action='store',\n help='key range start index')\n parser.add_argument('-e', '--end',\n type=int,\n required=True,\n action='store',\n help='key range end index')\n parser.add_argument('-o', '--operation',\n type=int,\n required=True,\n action='store',\n help='operation: 1.Register & Search ops 2.Obtain ops')\n args = parser.parse_args()\n return args",
"def ParseCommandArguments(args):\n\n\n\n import argparse\n from google.appengine.tools import boolean_action\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-A', '--application', required=True)\n parser.add_argument('--api_host', default='')\n\n parser.add_argument('--api_port', default=8000, type=int)\n parser.add_argument('--trusted',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n parser.add_argument('--application_root', default=None)\n parser.add_argument('--application_host', default='localhost')\n parser.add_argument('--application_port', default=None)\n\n\n parser.add_argument('--blobstore_path', default=None)\n\n\n parser.add_argument('--datastore_path', default=None)\n\n parser.add_argument('--auto_id_policy', default='scattered',\n type=lambda s: s.lower(),\n choices=(datastore_stub_util.SEQUENTIAL,\n datastore_stub_util.SCATTERED))\n\n parser.add_argument('--use_sqlite',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n parser.add_argument('--high_replication',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n parser.add_argument('--require_indexes',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n parser.add_argument('--clear_datastore',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n\n\n parser.add_argument('--logs_path', default=None)\n\n\n parser.add_argument('--enable_sendmail',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n parser.add_argument('--smtp_host', default='')\n\n parser.add_argument('--smtp_port', default=25, type=int)\n parser.add_argument('--smtp_user', default='')\n parser.add_argument('--smtp_password', default='')\n parser.add_argument('--show_mail_body',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n\n\n parser.add_argument('--prospective_search_path', default=None)\n parser.add_argument('--clear_prospective_search',\n action=boolean_action.BooleanAction,\n const=True,\n default=False)\n\n\n parser.add_argument('--enable_task_running',\n action=boolean_action.BooleanAction,\n const=True,\n default=True)\n\n parser.add_argument('--task_retry_seconds', default=30, type=int)\n\n\n parser.add_argument('--user_login_url', default=None)\n parser.add_argument('--user_logout_url', default=None)\n\n return parser.parse_args(args)",
"def parse_args():\n\n kwargs = {\n \"description\": \"A simple utility that leverages the AWS IoT SDK publish and subscribe to MQTT topics\",\n \"formatter_class\": argparse.RawDescriptionHelpFormatter,\n }\n parser = argparse.ArgumentParser(**kwargs)\n\n parser.add_argument(\n \"--endpoint\",\n required=True,\n help=\"Your AWS IoT custom endpoint, not including a port. \"\n + 'Ex: \"abcd123456wxyz-ats.iot.us-east-1.amazonaws.com\"',\n )\n parser.add_argument(\n \"--cert\",\n help=\"File path to your client certificate, in PEM format.\",\n )\n parser.add_argument(\"--key\", help=\"File path to your private key, in PEM format.\")\n parser.add_argument(\n \"--root-ca\",\n help=\"File path to root certificate authority, in PEM format. \"\n + \"Necessary if MQTT server uses a certificate that's not already in \"\n + \"your trust store.\",\n )\n parser.add_argument(\n \"--client-id\",\n default=\"test-\" + str(uuid4()),\n help=\"Client ID for MQTT connection.\",\n )\n parser.add_argument(\n \"--subscribe_topic\",\n default=\"IOOS/#\",\n help=\"Topic to subscribe to.\",\n )\n # parser.add_argument('--message', default=\"Hello World!\", help=\"Message to publish. \" +\n # \"Specify empty string to publish nothing.\")\n parser.add_argument(\n \"--count\",\n default=0,\n type=int,\n help=\"Number of messages to publish/receive before exiting. \"\n + \"Specify 0 to run forever.\",\n )\n parser.add_argument(\n \"--use-websocket\",\n default=False,\n action=\"store_true\",\n help=\"To use a websocket instead of raw mqtt. If you \"\n + \"specify this option you must specify a region for signing, you can also enable proxy mode.\",\n )\n parser.add_argument(\n \"--signing-region\",\n default=\"us-east-1\",\n help=\"If you specify --use-web-socket, this \"\n + \"is the region that will be used for computing the Sigv4 signature\",\n )\n # parser.add_argument('--proxy-host', help=\"Hostname for proxy to connect to. Note: if you use this feature, \" +\n # \"you will likely need to set --root-ca to the ca for your proxy.\")\n # parser.add_argument('--proxy-port', type=int, default=8080, help=\"Port for proxy to connect to.\")\n parser.add_argument(\n \"--verbosity\",\n choices=[x.name for x in io.LogLevel],\n default=io.LogLevel.NoLogs.name,\n help=\"Logging level\",\n )\n\n args = parser.parse_args()\n return args",
"def getOptions(args=sys.argv[1:]):\n\n # Create the top-level parser\n parser = argparse.ArgumentParser(prog=sys.argv[0],\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"Creates a one-off ECS task from a task definition already created\",\n epilog=textwrap.dedent(f'''\\\n Usage samples:\n --------------\n Run a one-off task on EC2 instances:\n {sys.argv[0]} --task-name <TASK_NAME> --from-task <REFERENCE_TASK_NAME> --cluster <ECS_CLUSTER_NAME> \\\\\n --image <OCI_IMAGE> --entrypoint <ENTRYPOINT> --command <COMMAND>\n\n Run a one-off task on Fargate:\n {sys.argv[0]} --task-name <TASK_NAME> --from-task <REFERENCE_TASK_NAME> --cluster <ECS_CLUSTER_NAME> \\\\\n --image <OCI_IMAGE> --entrypoint <ENTRYPOINT> --command <COMMAND> \\\\\n --launch-type FARGATE --networks-id <NET_ID1 NET_ID2 ...> --security-groups-id <SG_ID1 SG_ID2...>\n ''')\n )\n\n # Group for required arguments\n group = parser.add_argument_group('required arguments')\n\n # Required arguments\n group.add_argument(\"--task-name\", required=True, help=\"the name for one-off task\")\n group.add_argument(\"--from-task\", required=True, help=\"the name of the reference task to create the one-off task\")\n group.add_argument(\"--cluster\", required=True, help=\"the ECS cluster name to connect\")\n group.add_argument(\"--image\", required=True, help=\"the image URI for the one-off task\")\n group.add_argument(\"--command\", required=True, nargs='+', help=\"the command for the one-off task\")\n\n # Optional arguments\n parser.add_argument(\"-p\", \"--profile\", help=\"a valid AWS profile name to perform the tasks\")\n parser.add_argument(\"-r\", \"--region\", help=\"a valid AWS region to perform the tasks\")\n parser.add_argument(\"--entrypoint\", help=\"the entrypoint for the one-off task, e.g.: 'sh -c'\")\n parser.add_argument(\"--launch-type\", default='EC2', choices=[\"EC2\", \"FARGATE\"], help=\"the launch type on which to run the one-off task\")\n parser.add_argument(\n \"--networks-id\",\n nargs='*',\n help=\"the IDs of the subnets associated with the one-off task. All specified subnets must be from the same VPC\"\n )\n parser.add_argument(\n \"--security-groups-id\",\n nargs='*',\n help=\"the IDs of the security groups associated with the one-off task. All specified security groups must be from the same VPC.\"\n )\n\n # Print usage and exit if not arguments are supplied\n if not args:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n # Parse the args\n options = parser.parse_args(args)\n\n # Return the parsed args\n return options",
"def parse_command_line_args():\n parser = argparse.ArgumentParser(description=(\n 'HYAKUYOBAKO Data sender.'))\n parser.add_argument(\n '--project_id', required=True, help='GCP cloud project name')\n parser.add_argument(\n '--registry_id', required=True, help='Cloud IoT Core registry id')\n parser.add_argument(\n '--device_id', required=True, help='Cloud IoT Core device id')\n parser.add_argument(\n '--private_key_file',\n required=True,\n help='Path to private key file.')\n parser.add_argument(\n '--algorithm',\n choices=('RS256', 'ES256'),\n required=True,\n help='The encryption algorithm to use to generate the JWT.')\n parser.add_argument(\n '--cloud_region', default='us-central1', help='GCP cloud region')\n parser.add_argument(\n '--ca_certs',\n default='roots.pem',\n help=('CA root from https://pki.google.com/roots.pem'))\n parser.add_argument(\n '--message_type',\n choices=('event', 'state'),\n default='event',\n required=True,\n help=('Indicates whether the message to be published is a '\n 'telemetry event or a device state message.'))\n parser.add_argument(\n '--base_url',\n default=_BASE_URL,\n help=('Base URL for the Cloud IoT Core Device Service API'))\n parser.add_argument(\n '--jwt_expires_minutes',\n default=20,\n type=int,\n help=('Expiration time, in minutes, for JWT tokens.'))\n parser.add_argument(\n '--id',\n default=999,\n type=int,\n help=('Device id, not IoT Core device id for unique key.'))\n parser.add_argument(\n '--location_logitude',\n default=0.0,\n type=float,\n help=('Logitude of this deice. ex)35.658581'))\n parser.add_argument(\n '--location_latitude',\n default=0.0,\n type=float,\n help=('Latitude of this deice. ex)139.745433'))\n\n return parser.parse_args()",
"def setup_cmd_args():\n parser = argparse.ArgumentParser(description=\"This program will query G-POD and COPHUB on the same datasets, in order to obtain the number of data results, compare them compile a report with the differences.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n # parser.add_argument(\"root_dir\", help=\"The root directory containing data to check\")\n # parser.add_argument(\"--workspace\", help=\"Set Workspace manually\")\n parser.add_argument(\"--outputlist\", help=\"Folder to write the output lists with the un-synced products.\", default=\"c:\\\\temp\\\\\")\n parser.add_argument(\"--daysback\", help=\"Report with a given number of days back from today\", default=0)\n parser.add_argument(\"--dataset\", help=\"Set which dataset to query (chose S3A_SR_1_SRA_A_PREOPS or S3B_SR_1_SRA_A_NTC)\")\n parser.add_argument(\"--startdate\", help=\" The Start Date (format: YYYY-MM-DD) \", default=\"2016-06-01\")\n parser.add_argument(\"--enddate\",help=\" The End Date (format: YYYY-MM-DD)\")\n parser.add_argument(\"--cphubuser\",help=\"COPHUB username\", required=True)\n parser.add_argument(\"--cphubpw\",help=\"COPHUB password\", required=True)\n parser.add_argument(\"-email\", type=str, help=\"Email to send the results\", action=\"append\")\n parser.add_argument('-t', action='store_true', help=\"Today as enddate. Otherwise the last day of the previous month is considered.\")\n parser.add_argument('-n', action='store_true', help=\"Normal numeric check\")\n parser.add_argument('-m', action='store_true', help=\"Monthly check with product listing.\")\n return parser.parse_args()",
"def process_options(args):\n subcmds = dict() # each key(cmd) can take on a val of 0, or 1\n subcmds_wo_arg = [ 'clean', 'list' ]\n subcmds_with_args = [ 'add', 'remove' ]\n\n for cmd in subcmds_wo_arg:\n subcmds[cmd] = 0\n for cmd in subcmds_with_args:\n subcmds[cmd] = 1\n\n if (len(args) == 0):\n usage(\"ERROR. must have one sub-command available\")\n\n cmd = args.pop(0)\n argc = len(args)\n\n def bad_args(cmd, argc):\n return True if argc < subcmds[cmd] else False\n\n env_var = ''\n # determine what kind of cmd was given and arguments\n if cmd not in subcmds:\n usage(\"ERROR. Unrecognized cmd \" + cmd + \"! cmd must be from appropriate list\")\n elif bad_args(cmd, argc):\n usage(\"Must enter at least one argument for \" + cmd)\n elif argc > subcmds[cmd]: # determine if it defaults to PATH or anything else\n if os.getenv(args[0]) != None:\n env_var = args.pop(0)\n elif os.getenv(args[0].upper()) != None:\n env_var = args.pop(0).upper()\n else: # first argument is NOT a known env variable\n if (cmd == 'remove'):\n env_var = 'PATH'\n elif (cmd == 'add') and ('/' not in args[0]) and (len(args) > 1): # not like a path & has at least one other argument\n env_var = args.pop(0) # assume new env variable to be created\n else:\n usage(\"Unrecognized environment variable \" + args[0])\n else:\n env_var = 'PATH'\n\n return (cmd, env_var, args)",
"def get_args(command):\n super_args = ClientPlugin.get_args(command)\n parser = argparse.ArgumentParser(description='HTTP Client', prog=\"http/client.py\")\n\n parser.add_argument('--host-header', action='store', default=\"\", help='specifies host header for HTTP request')\n parser.add_argument('--injected-http-contains', action='store',\n default=\"\", help='checks if injected http response contains string')\n parser.add_argument('--valid-http-contains', action='store',\n default=\"\", help='checks if http response contains the given string. '\n 'if not, the connection is evaluated as broken')\n\n args, _ = parser.parse_known_args(command)\n args = vars(args)\n\n super_args.update(args)\n return super_args",
"def __common_args_handler(parser):\n parser.add_argument(\"-netloc\", help=\"<host>:<port>\", default=\"[::]:50051\", type=str)\n parser.add_argument(\"-debug\", help=\"Print debug messages.\", action=\"store_true\")\n args = parser.parse_args(sys.argv[2:])\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO)\n return args",
"def build_args(self, job, private_data_dir, passwords):\n creds = job.machine_credential\n\n ssh_username, become_username, become_method = '', '', ''\n if creds:\n ssh_username = creds.get_input('username', default='')\n become_method = creds.get_input('become_method', default='')\n become_username = creds.get_input('become_username', default='')\n else:\n become_method = None\n become_username = \"\"\n # Always specify the normal SSH user as root by default. Since this\n # task is normally running in the background under a service account,\n # it doesn't make sense to rely on ansible-playbook's default of using\n # the current user.\n ssh_username = ssh_username or 'root'\n args = []\n if job.job_type == 'check':\n args.append('--check')\n args.extend(['-u', sanitize_jinja(ssh_username)])\n if 'ssh_password' in passwords:\n args.append('--ask-pass')\n if job.become_enabled:\n args.append('--become')\n if job.diff_mode:\n args.append('--diff')\n if become_method:\n args.extend(['--become-method', sanitize_jinja(become_method)])\n if become_username:\n args.extend(['--become-user', sanitize_jinja(become_username)])\n if 'become_password' in passwords:\n args.append('--ask-become-pass')\n\n # Support prompting for multiple vault passwords\n for k, v in passwords.items():\n if k.startswith('vault_password'):\n if k == 'vault_password':\n args.append('--ask-vault-pass')\n else:\n # split only on the first dot in case the vault ID itself contains a dot\n vault_id = k.split('.', 1)[1]\n args.append('--vault-id')\n args.append('{}@prompt'.format(vault_id))\n\n if job.forks:\n if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:\n logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')\n args.append('--forks=%d' % settings.MAX_FORKS)\n else:\n args.append('--forks=%d' % job.forks)\n if job.force_handlers:\n args.append('--force-handlers')\n if job.limit:\n args.extend(['-l', job.limit])\n if job.verbosity:\n args.append('-%s' % ('v' * min(5, job.verbosity)))\n if job.job_tags:\n args.extend(['-t', job.job_tags])\n if job.skip_tags:\n args.append('--skip-tags=%s' % job.skip_tags)\n if job.start_at_task:\n args.append('--start-at-task=%s' % job.start_at_task)\n\n return args",
"def CommandArgs(args):\n if len(args) > 1:\n if args[1].startswith('--'):\n option = args[1] [2:]\n if len(args) > 2:\n content = args[2]\n return option, content\n return True, None\n return False, None",
"def parse_args():\n\n parser = ArgumentParser()\n parser.add_argument(\"config\", help=\"Path to config file\")\n parser.add_argument(\"-ncdc\", \"--download-ncdc\", action=\"store_true\", dest=\"d_ncdc\",\n help=\"Download new NCDC data (overwrites existing)\")\n arguments = parser.parse_args()\n\n return arguments",
"def get_args():\n parser = cli.build_arg_parser()\n\n parser.add_argument('-d', '--datastore',\n required=True,\n action='store',\n help='Datastore name where disk is located')\n\n parser.add_argument('-v', '--vdisk',\n required=False,\n action='store',\n help='First Class Disk name to delete snapshot for')\n\n # because -s is reserved for 'service', we use -n for snapshot name\n parser.add_argument('-n', '--snapshot',\n required=True,\n action='store',\n help='Snapshot name to be deleted')\n\n parser.add_argument('-y', '--yes',\n action='store_true',\n help='Confirm disk deletion.')\n\n my_args = parser.parse_args()\n return cli.prompt_for_password(my_args)",
"def parseArgs():\n\n def getInput(name: str, *, required=False):\n val = os.environ.get(f'INPUT_{name.replace(\" \", \"_\").upper()}', \"\")\n if not val and required:\n raise ValueError(f\"Missing required parameter: {name}\")\n return val\n\n certificate = getInput(\"certificate\", required=True)\n private_key = getInput(\"private_key\", required=True)\n connectorId = getInput(\"connector_id\", required=True)\n host = getInput(\"host\", required=True)\n body = yaml.load(getInput(\"args\", required=True), yaml.Loader)\n\n with string_to_tempfile(certificate) as cert_file, string_to_tempfile(\n private_key\n ) as key_file:\n yield ActionArgs(\n host=host,\n auth=AuthCert(cert=Path(cert_file.name), private_key=Path(key_file.name)),\n args=AddOrUpdateIncident2Args(**body, connectorId=connectorId),\n )",
"def build_args(self, ad_hoc_command, private_data_dir, passwords):\n creds = ad_hoc_command.credential\n ssh_username, become_username, become_method = '', '', ''\n if creds:\n ssh_username = creds.get_input('username', default='')\n become_method = creds.get_input('become_method', default='')\n become_username = creds.get_input('become_username', default='')\n else:\n become_method = None\n become_username = \"\"\n # Always specify the normal SSH user as root by default. Since this\n # task is normally running in the background under a service account,\n # it doesn't make sense to rely on ansible's default of using the\n # current user.\n ssh_username = ssh_username or 'root'\n args = []\n if ad_hoc_command.job_type == 'check':\n args.append('--check')\n args.extend(['-u', sanitize_jinja(ssh_username)])\n if 'ssh_password' in passwords:\n args.append('--ask-pass')\n # We only specify sudo/su user and password if explicitly given by the\n # credential. Credential should never specify both sudo and su.\n if ad_hoc_command.become_enabled:\n args.append('--become')\n if become_method:\n args.extend(['--become-method', sanitize_jinja(become_method)])\n if become_username:\n args.extend(['--become-user', sanitize_jinja(become_username)])\n if 'become_password' in passwords:\n args.append('--ask-become-pass')\n\n if ad_hoc_command.forks: # FIXME: Max limit?\n args.append('--forks=%d' % ad_hoc_command.forks)\n if ad_hoc_command.diff_mode:\n args.append('--diff')\n if ad_hoc_command.verbosity:\n args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))\n\n if ad_hoc_command.limit:\n args.append(ad_hoc_command.limit)\n else:\n args.append('all')\n\n return args"
] | [
"0.5863232",
"0.57427984",
"0.57362777",
"0.5711436",
"0.5671684",
"0.5665155",
"0.56511855",
"0.5629644",
"0.5586884",
"0.55581784",
"0.5516893",
"0.5515851",
"0.5509923",
"0.5477983",
"0.5474129",
"0.546462",
"0.5454525",
"0.54045254",
"0.5360767",
"0.53578204",
"0.53573877",
"0.5350753",
"0.53212917",
"0.5320189",
"0.53082997",
"0.528231",
"0.52776784",
"0.527561",
"0.52738017",
"0.52712446"
] | 0.7312782 | 0 |
assigns the value to the key. etcd is not exposed outside of the DC/OS cluster, so we have to execute etcdctl inside the DC/OS cluster, on a master in our case. | def put(self, key: str, value: str) -> None:
master = list(self.masters)[0]
etcdctl_with_args = get_etcdctl_with_base_args(endpoint_ip=MASTER_DNS)
etcdctl_with_args += ["put", key, value]
master.run(args=etcdctl_with_args, output=Output.LOG_AND_CAPTURE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set(self, key, value, cb=None):\n _log.analyze(self.node.id, \"+ CLIENT\", {'key': key, 'value': value})\n self.send(cmd='SET',msg={'key':key, 'value': value}, cb=cb)",
"async def set(self, key, value):\n trace_log(\"PersistantStorage: setting key \", key, \" to value \", value)\n self.dict[key] = value\n #self.log_set(key, value)",
"def set_value(self, key, value):\n self._version[key] = value",
"def _put(self, key: str, value):\n pass",
"def setValue(self, key, value, default=False):\n self.local[key.value] = value\n if default:\n self.system.setValue(key.value, value)",
"def _put(self, key, value, current_node):\n pass",
"def set(self, key, value):",
"def set(self, key, value):",
"def put(self, key, val):\n pass",
"def set(self, key, value):\n #try to lock the tree. If we succeed make sure\n #we dont lose updates from any other process\n if self._storage.lock():\n self._refresh_tree_ref()\n #get current top-level node and make a value-ref\n node = self._follow(self._tree_ref)\n value_ref = ValueRef(value)\n #insert and get new tree ref\n self._tree_ref = self._insert(node, key, value_ref)\n self._tree_ref = self._blacken(self._follow(self._tree_ref))",
"async def _set(self, key, value, ttl=0):\n value = str.encode(value) if isinstance(value, str) else value\n return await self.client.set(key, value, exptime=ttl or 0)",
"def set(self, key, value):\n self.log.debug(\"setting '%s' = '%s' on network\" % (key, value))\n dkey = digest(key)\n node = Node(dkey)\n\n def store(nodes):\n self.log.info(\"setting '%s' on %s\" % (key, list(map(str, nodes))))\n # if this node is close too, then store here as well\n if self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]):\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n d = defer.DeferredList(ds)\n d.addCallback(self._anyRespondSuccess)\n d.addErrback(self.onError)\n return d\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n d = spider.find()\n d.addCallback(store)\n d.addErrback(self.onError)\n return d",
"def PutConfig(self, key, value):\n try:\n if self.etcd_key_prefix is not None:\n key = self.etcd_key_prefix + key\n self.etcd.put(key, value)\n except Exception as e:\n self.logger.error(\"Exception raised in PutConfig \\\n with error:{}\".format(e))\n raise e",
"def set(self, key, value):\n self.data[key] = value\n logger.debug('Setting value \"%s\" for variable \"%s\"', value, key)",
"def put(self, key, value):\n self.container[key] = value",
"def set_value(self, key, value):\n self.data[key] = value\n self.save_data()",
"def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None, client=None, nx=False):\r\n if client is None:\r\n key = self.make_key(key, version=version)\r\n client = self.get_server(key)\r\n\r\n return super(ShardClient, self).set(key=key, value=value,\r\n timeout=timeout, version=version,\r\n client=client, nx=nx)",
"def set(self, key, value):\n _log.debug(\"setting '%s' = '%s' on network\" % (key, value))\n dkey = digest(key)\n node = Node(dkey)\n\n def store(nodes):\n _log.debug(\"setting '%s' to %s on %s\" % (key, value, map(str, nodes)))\n # if this node is close too, then store here as well\n if (not nodes or self.node.distanceTo(node) < max([n.distanceTo(node) for n in nodes]) or\n dkey in self.storage):\n _log.debug(\"setting '%s' to %s locally\" % (key, value))\n self.storage[dkey] = value\n ds = [self.protocol.callStore(n, dkey, value) for n in nodes]\n return defer.DeferredList(ds).addCallback(self._anyRespondSuccess)\n\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n _log.warning(\"There are no known neighbors to set key %s\" % key)\n return defer.succeed(False)\n spider = NodeSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find().addCallback(store)",
"def set(self, key, value, ttl=0):\n pass",
"def store(self, key, value):\n pass",
"def set_to_redis(self, key: str, value):\n self.redis_client.hset(self.root_path, key, value)",
"def setKey(self, key, value ):\n self.conf[key] = value",
"def set(self, key, value):\n return self.redis_handler.set(key, value)",
"def set(self, key, value):\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n if self.get(key) is None:\n self.bucket_list[number % self.bucket_number].insert(stored_key, value)",
"def set(self, key, value):\n raise NotImplementedError",
"def set(self, key, value):\n task = Task.current_task()\n try:\n context = task._context\n except AttributeError:\n task._context = context = {}\n context[key] = value",
"def __setitem__(self, key, value):\n self.tree[key] = value",
"def _set(self, key, value):\n self._data[key] = value\n return self._data[key]",
"def setKeyAndValue(self, key, value):\n if key in self.keys:\n self.keyToValue[key] = value\n print \"SET OK\"\n elif self.curSize < self.MAXSIZE:\n # Always append the new key to the end of the list\n self.keys.append(key)\n self.keyToValue[key] = value\n self.curSize = len(self.keys)\n print \"SET OK\"\n elif self.curSize == self.MAXSIZE:\n # If maximal size reached, remove the first key in the list\n # since it is the least recently called.\n # Always append the new key to the end of the list\n keyToRemoved = self.keys.pop(0)\n del self.keyToValue[keyToRemoved]\n self.keys.append(key)\n self.keyToValue[key] = value\n self.curSize = len(self.keys)\n print \"SET OK\"\n else:\n print \"ERROR\"",
"def set(self, key, value):\n try:\n self.status[key] = value\n log.info('updated %s to %s' %(key, value))\n ret = 0\n except KeyError as err:\n log.error('could not update %s to %s: %s' %(key, value, err))\n ret = 1\n \n return ret"
] | [
"0.6925788",
"0.6801668",
"0.6539463",
"0.6499346",
"0.64941585",
"0.64688677",
"0.6442703",
"0.6442703",
"0.64351684",
"0.6388835",
"0.6360207",
"0.6353308",
"0.6348998",
"0.6324563",
"0.6311069",
"0.6296948",
"0.6296282",
"0.62657654",
"0.6252633",
"0.6233048",
"0.62180454",
"0.62114614",
"0.6201286",
"0.6198357",
"0.6188705",
"0.61717784",
"0.6167966",
"0.6167537",
"0.61285037",
"0.6117185"
] | 0.7933066 | 0 |
gets the value of the key on the given master node | def get_key_from_node(
self,
key: str,
master_node: Node,
) -> str:
etcdctl_with_args = get_etcdctl_with_base_args(
endpoint_ip=str(master_node.private_ip_address))
etcdctl_with_args += ["get", key, "--print-value-only"]
result = master_node.run(
args=etcdctl_with_args,
output=Output.LOG_AND_CAPTURE,
)
value = result.stdout.strip().decode()
return str(value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(self, key):\n dkey = digest(key)\n _log.debug(\"Server:get %s\" % base64.b64encode(dkey))\n # if this node has it, return it\n exists, value = self.storage.get(dkey)\n if exists:\n return defer.succeed(value)\n node = Node(dkey)\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to get key %s\" % key)\n return defer.succeed(None)\n spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find()",
"def get(self, key):\n node = self._get_node(key)\n\n if node:\n return node.data",
"def get(self, key):\n\n node = self._get_node(key) # Get the node with the key (if it exists)\n\n if node is None:\n return None\n else:\n return node.value",
"def _get(self, key, current_node):\n pass",
"def get(self,root,key):\n node = root\n for digit in key:\n node = node.children[ord(digit)-ord('0')]\n if(node==None):\n return None\n return node.value.value",
"def get(self, key):\n if key is None:\n return None # None is not a valid key\n return get_from_subtree(self.root, key)",
"def get(self, key):",
"def get(self, key):",
"def get(self, key):\r\n if not isinstance(key, str):\r\n raise TypeError(\"Key must be a string\")\r\n\r\n node = self._find_node(key)\r\n if node is None:\r\n return None\r\n else:\r\n return node.value[1]",
"def get_value(self, key):\n pass",
"def retrieve(self, key):\n index = self._hash_mod(key)\n node = self.storage[index]\n while node is not None:\n if node.key == key:\n return node.value\n node = node.next\n return None",
"def get_node(self, key: str) -> Node:",
"def get_node(self, key):\n pos = self._get_node_pos(key)\n if pos is None:\n return None\n return self._hashring[self._sorted_keys[pos]]",
"def get(self, key):\n\t\treturn self.__get(key, key[1:])",
"def get(self, key):\n dkey = digest(key)\n # if this node has it, return it\n if self.storage.get(dkey) is not None:\n return defer.succeed(self.storage.get(dkey))\n node = Node(dkey)\n nearest = self.protocol.router.findNeighbors(node)\n if len(nearest) == 0:\n self.log.warning(\"There are no known neighbors to get key %s\" % key)\n return defer.succeed(None)\n spider = ValueSpiderCrawl(self.protocol, node, nearest, self.ksize, self.alpha)\n return spider.find()",
"def get_value(self, key):\r\n if self.hash_table[self.horner_hash(key)] is not None:\r\n if self.hash_table[self.horner_hash(key)].key == key:\r\n return self.hash_table[self.horner_hash(key)].value\r\n else:\r\n return None",
"def get(self, key):\n # Your code here\n\n idx = self.hash_index(key)\n\n # check if the index is in range\n if idx >= 0 and idx < self.capacity:\n curr_node = self.hash_table[idx]\n\n # check if any node at index exists\n if curr_node is None:\n return None\n\n # if there's already something at this index\n while curr_node is not None:\n \n # check to see if there is an entry at this index whose key matches the provided key\n while curr_node.key is not key:\n curr_node = curr_node.next\n \n # if we never found an entry with a matching key, return None\n if curr_node.key is not key or curr_node is None:\n return None\n else:\n return curr_node.value\n \n \n # otherwise return None if the index is not in range\n else:\n return None",
"def query(key):\n r = requests.get(\"http://127.0.0.1:2379/v2/keys/service/batman/{0}\".format(key))\n if r.ok:\n content = r.json()\n if content:\n return content.get('node', {}).get('value')\n return None",
"def get(self, key):\n hash_key = key % self.key_space\n return self.hash_table[hash_key].get(key)",
"def get(self, key):\n return self.execute_command(self.GET_CMD, key)",
"def __getitem__(self, key):\n if self._root:\n node = self._getItemHelper(key, self._root)\n if node:\n return node.value\n else:\n return None\n else:\n return None",
"def get_master_key():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<show><system><masterkey-properties></masterkey-properties></system></show>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def get(self, key):\n index = key % self.size\n curr_node = self.hash_table[index]\n\n while curr_node:\n if curr_node.key == key:\n return curr_node.value\n else:\n curr_node = curr_node.next\n\n return -1",
"def get_value(self, key):\n return self[key]",
"def get(self, nodename: str, key: str):\n if not self.cfg['auto_clean_in_new_thread']:\n self.clean(nodename)\n if key in self._d[nodename][0]:\n return self._d[nodename][0][key][0]\n else:\n return None",
"def get(self, key):\n if type(key) != str:\n raise TypeError(\"This is not the string you're looking for!\")\n number = self._hash(key)\n stored_key = number if self.function == 'fnv' else key\n try:\n return self.bucket_list[number % self.bucket_number].search(stored_key).stored_value\n except AttributeError:\n return None",
"def get(self, key):\n # Your code here \n index = self.hash_index(key) \n cur = self.data[index].head \n\n if cur==None:\n print(\"linked list is empty\")\n elif cur.key== key:\n return cur.value\n else:\n while cur.next:\n cur= cur.next\n if cur.key ==key: \n return cur.value",
"def get_master_key():\n\n get_master_query = 'SELECT master.master_key ' \\\n 'FROM master ' \\\n 'WHERE master.master_key_id = 1'\n\n my_cursor.execute(get_master_query)\n master_key_found = my_cursor.fetchone()\n decrypted_master = fk.decrypt(master_key_found[0].encode())\n\n return decrypted_master",
"def get(self, key):\n return self.sp.get(key)",
"def get(self, key):\n pass"
] | [
"0.7126619",
"0.70644236",
"0.700838",
"0.7002999",
"0.69473535",
"0.68663204",
"0.68588144",
"0.68588144",
"0.68452597",
"0.677081",
"0.672349",
"0.6679185",
"0.6676274",
"0.6670105",
"0.663204",
"0.66137815",
"0.66084397",
"0.6582573",
"0.6569191",
"0.65402824",
"0.65248156",
"0.65203774",
"0.6514318",
"0.65113646",
"0.65108037",
"0.6502858",
"0.6496273",
"0.6478821",
"0.6474746",
"0.6459105"
] | 0.79159415 | 0 |
Separate train or validation annotations into single-video annotations. | def separate_annotations():
data_root = '/home/ubuntu/datasets/YT-VIS/'
ann_file = data_root + 'annotations/instances_train_sub.json'
import json
with open(ann_file, 'r') as f:
ann = json.load(f)
# ann['videos'] = ann['videos'][15]
# video_id = [0]
from tqdm import tqdm
for id in tqdm(range(len(ann['videos']))):
videos = []
anns = []
video = ann['videos'][id]
video['id'] = 1
videos.append(video)
i = 1
for a in ann['annotations']:
if a['video_id'] == id + 1:
anno = a
anno['id'] = i
anno['video_id'] = 1
anns.append(anno)
i += 1
# anno = ann['annotations'][id]
# anno['id'] = 1
# anno['video_id'] = 1
# anns.append(anno)
file_name = videos[0]['file_names'][0].split('/')[0]
ann_new = dict()
ann_new['info'] = ann['info']
ann_new['licenses'] = ann['licenses']
ann_new['categories'] = ann['categories']
ann_new['videos'] = videos
ann_new['annotations'] = anns
with open(data_root + 'train/Annotations/{}/{}_annotations.json'.format(file_name, file_name), 'w') as f:
json.dump(ann_new, f, ensure_ascii=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def AnnotateVideo(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']+'.avi') \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']] \n else:\n if not self.test_ret:\n video_info['text'] = [rnd.choice(video_info['text'])]\n else:\n video_info['clip_text_candidate'] = list(range(len(video_info['text'])))\n\n video_infos.append(video_info) \n del ann_info\n\n return video_infos",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n pass\n video_infos.append(info) \n del ann_info\n\n return video_infos",
"def load_annotations(self):\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(dict(filename=filename, label=label))\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos",
"def process_video(data_info, name, mode, is_training=True):\r\n data = Action_Dataset(name, mode, [data_info])\r\n if is_training:\r\n clip_seq, label_seq = data.next_batch(1, _CLIP_SIZE)\r\n else:\r\n clip_seq, label_seq = data.next_batch(\r\n 1, _EACH_VIDEO_TEST_SIZE+1, shuffle=False, data_augment=False)\r\n clip_seq = 2*(clip_seq/255) - 1\r\n clip_seq = np.array(clip_seq, dtype='float32')\r\n return clip_seq, label_seq",
"def get_annotations(self, frame):\n # self.annotations_timestamp = (self.vid.frame_number + self.annotations_offset) / self.vid.fps\n self.annotations_timestamp = self.vid.frame_number / self.vid.fps\n frame = self.video_annotations.get_frame(frame_num=self.vid.frame_number).show(image=frame,\n height=frame.shape[0],\n width=frame.shape[1],\n with_text=self.show_label)\n return frame",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n data = hload_pkl(self.ann_file)\n\n video_infos = []\n for video_info in data:\n filename = video_info['filename']\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_info['filename'] = filename\n label = video_info['label']\n if self.multi_class and isinstance(label, np.ndarray):\n video_info['label'] = label.astype(np.float32)\n\n video_infos.append(video_info)\n\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos",
"def get_video_intelligence(gs_uri):\n video_client = videointelligence.VideoIntelligenceServiceClient(credentials=credentials)\n features = [videointelligence.enums.Feature.LABEL_DETECTION]\n\n mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE\n config = videointelligence.types.LabelDetectionConfig(label_detection_mode=mode)\n context = videointelligence.types.VideoContext(label_detection_config=config)\n\n operation = video_client.annotate_video(\n input_uri=gs_uri, features=features, video_context=context\n )\n print(\"\\nProcessing video for label annotations:\")\n\n result = operation.result(timeout=180)\n print(\"\\nFinished processing.\")\n\n # Process video/segment level label annotations\n segment_labels = result.annotation_results[0].segment_label_annotations\n labels = []\n for i, segment_label in enumerate(segment_labels):\n\n for i, segment in enumerate(segment_label.segments):\n start_time = (\n segment.segment.start_time_offset.seconds\n + segment.segment.start_time_offset.nanos / 1e9\n )\n end_time = (\n segment.segment.end_time_offset.seconds\n + segment.segment.end_time_offset.nanos / 1e9\n )\n positions = \"{}s to {}s\".format(start_time, end_time)\n confidence = segment.confidence\n\n labels.append('{} : {}'.format(segment_label.entity.description, confidence))\n break\n\n # Process frame level label annotations\n frame_labels = result.annotation_results[0].frame_label_annotations\n frame_lab = []\n for i, frame_label in enumerate(frame_labels):\n frame = frame_label.frames[0]\n time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9\n\n frame_lab.append(\n (\n int(frame.time_offset.seconds),\n {\"label\": frame_label.entity.description, \"confidence\": frame.confidence}\n )\n )\n return labels, frame_lab",
"def run(self):\n\n \"\"\" Detects labels given a GCS path. \"\"\"\n video_client = videointelligence.VideoIntelligenceServiceClient()\n features = [videointelligence.enums.Feature.LABEL_DETECTION]\n operation = video_client.annotate_video(self.input()[0].path, \n features=features)\n print('\\nProcessing video for label annotations:\\n')\n \n result = operation.result(timeout=900)\n \n print(result)\n print('\\nFinished processing.')\n \n segment_labels = result.annotation_results[0].shot_label_annotations\n \n output_csv = \"\"\n for i, segment_label in enumerate(segment_labels):\n print('Video label description: {}'.format(\n segment_label.entity.description))\n for category_entity in segment_label.category_entities:\n print('\\tLabel category description: {}'.format(\n category_entity.description))\n \n for i, segment in enumerate(segment_label.segments):\n start_time = (segment.segment.start_time_offset.seconds +\n segment.segment.start_time_offset.nanos / 1e9)\n end_time = (segment.segment.end_time_offset.seconds +\n segment.segment.end_time_offset.nanos / 1e9)\n positions = '{}s to {}s'.format(start_time, end_time)\n confidence = segment.confidence\n print('\\tSegment {}: {}'.format(i, positions))\n print('\\tConfidence: {}'.format(confidence))\n \n output_csv_line = '{},{},{},{}\\n'.format(\n segment_label.entity.description, \n category_entity.description,\n start_time, \n end_time)\n output_csv = output_csv + output_csv_line\n print(output_csv_line)\n print('\\n')\n print('\\n\\n-------\\n') \n print(output_csv) \n \n # output data\n f = self.output().open('w')\n f.write(output_csv)\n f.close()",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos",
"def AnnotateVideo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name'] if 'filename' not in video_info else video_info['filename']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers'] if 'answers' in video_info else video_info['text']\n info_dict['question'] = video_info['question'] if 'question' in video_info else \"\"\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos",
"def main(path):\n video_client = (video_intelligence_service_client.\n VideoIntelligenceServiceClient())\n features = [enums.Feature.LABEL_DETECTION]\n video_context = video_intelligence_pb2.VideoContext()\n video_context.stationary_camera = True\n video_context.label_detection_mode = video_intelligence_pb2.FRAME_MODE\n operation = video_client.annotate_video(path, features, video_context=video_context)\n print('\\nProcessing video for label annotations:')\n\n while not operation.done():\n sys.stdout.write('.')\n sys.stdout.flush()\n time.sleep(10)\n\n print('\\nFinished processing.')\n\n results = operation.result().annotation_results[0]\n\n return(results)",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos",
"def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)",
"def process_video(input_file, output_file):\n # video = VideoFileClip(input_file).subclip(40,44) # from 38s to 46s\n video = VideoFileClip(input_file)\n annotated_video = video.fl_image(process_pipeline)\n annotated_video.write_videofile(output_file, audio=False)",
"def detect_from_video(config: Dict):\n video = config['inference']['video_input']['video_input_path']\n vp = VideoProcessing(video=video)\n vp.generate_frames(export_path=config['inference']['video_input']['video_to_frames_export_path'])\n if config['inference']['video_input']['video_to_frames_export_path'] == config['inference']['predicted_frames_export_path']:\n print(\"[Warning]... You have given Video to frame path same as prediction output path /nPredicted output will overwrite video to frame\")\n img_height = config['inference']['img_height']\n img_width = config['inference']['img_width']\n model = ssd_300(image_size=(img_height, img_width, 3),\n n_classes=config['inference']['n_classes'],\n mode='inference',\n l2_regularization=0.0005,\n scales=[0.1, 0.2, 0.37, 0.54, 0.71, 0.88, 1.05], # The scales for MS COCO are [0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05]\n aspect_ratios_per_layer=[[1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5, 3.0, 1.0/3.0],\n [1.0, 2.0, 0.5],\n [1.0, 2.0, 0.5]],\n two_boxes_for_ar1=True,\n steps=[8, 16, 32, 64, 100, 300],\n offsets=[0.5, 0.5, 0.5, 0.5, 0.5, 0.5],\n clip_boxes=False,\n variances=[0.1, 0.1, 0.2, 0.2],\n normalize_coords=True,\n subtract_mean=[123, 117, 104],\n swap_channels=[2, 1, 0],\n confidence_thresh=0.5,\n iou_threshold=0.45,\n top_k=200,\n nms_max_output_size=400)\n\n # Load the trained weights into the model.\n weights_path = config['inference']['weights_path']\n\n model.load_weights(weights_path, by_name=True)\n \n # Working with image\n all_images = glob.glob(f\"{config['inference']['video_input']['video_to_frames_export_path']}/*/*\")\n \n # Setting Up Prediction Threshold\n confidence_threshold = config['inference']['confidence_threshold']\n \n # Setting Up Classes (Note Should be in same order as in training)\n classes = config['inference']['classes']\n \n vp.existsFolder(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}\")\n # Working with image\n for current_img in tqdm(all_images):\n current_img_name = current_img.split('/')[-1]\n orig_image = cv2.imread(current_img)\n input_images = [] # Store resized versions of the images here\n img = image.load_img(current_img, target_size=(img_height, img_width))\n img = image.img_to_array(img) \n input_images.append(img)\n input_images = np.array(input_images)\n \n # Prediction\n y_pred = model.predict(input_images)\n\n # Using threshold\n y_pred_thresh = [y_pred[k][y_pred[k,:,1] > confidence_threshold] for k in range(y_pred.shape[0])]\n \n # Drawing Boxes\n for box in y_pred_thresh[0]:\n xmin = box[2] * orig_image.shape[1] / img_width\n ymin = box[3] * orig_image.shape[0] / img_height\n xmax = box[4] * orig_image.shape[1] / img_width\n ymax = box[5] * orig_image.shape[0] / img_height\n \n label = f\"{classes[int(box[0])]}: {box[1]:.2f}\"\n cv2.rectangle(orig_image, (int(xmin), int(ymin)), (int(xmax),int(ymax)), (255, 0, 0), 2)\n cv2.putText(orig_image, label, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2, cv2.LINE_AA)\n cv2.imwrite(f\"{config['inference']['predicted_frames_export_path']}/{video.split('.')[0]}/{current_img_name}\", orig_image)\n \n # Creating video\n vp.generate_video(import_path=config['inference']['predicted_frames_export_path'],\n export_path=config['inference']['video_input']['video_output_path'])",
"def generateDataFromVideo(path):\n video = cv2.VideoCapture(path)\n success, frame = video.read()\n cnt = 1\n wiperExist = 0\n file = open(file='annotation.txt', mode='w')\n\n while success:\n cv2.imwrite(filename='./data/{0}.jpg'.format(cnt), img=frame)\n cnt += 1\n success, frame = video.read()\n if (cnt - 4) % 37 == 0 or (wiperExist > 0):\n wiperExist = (wiperExist + 1) % 21\n file.write('./Dataset/data/{0}.jpg 1\\n'.format(cnt))\n else:\n file.write('./Dataset/data/{0}.jpg 0\\n'.format(cnt))",
"def get_video_annotations(self, file_name):\n sql = f\"SET role {self.write_role}; \" \\\n + f\"SELECT * FROM validation.cvat_frames_interpmotion \" \\\n + f\"WHERE name = '{file_name}'; \"\n return sql",
"def adorn_video(self, iterator):\n return iterator \\\n | select(lambda f: f + (\n video_to_npy(f[1],\n # note weird thing here, width doesn't work they appear to be inverted\n height=self.video_size,\n squarecrop=self.squarecrop,\n fps=self.framerate,\n maxlength=self.max_length,\n # save a npy replacement\n outfile=self.get_numpy_filename(f[1]),\n use_cache=self.use_cache\n ),))",
"def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers']\n info_dict['question'] = video_info['q']\n info_dict['subtitle'] = video_info['located_sub_text']\n info_dict['frame_ind'] = video_info['located_frame']\n info_dict['total_frames'] = video_info.get('total_frames', -1)\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos",
"def log_video(self, trajectory_records, epoch):\n trajectory_rendering = trajectory_records\n video = np.transpose(trajectory_rendering, [0, 3, 1, 2])\n self.training_logger.log_video(\n np.expand_dims(video, axis=0),\n 'what_the_policy_looks_like',\n epoch)",
"def filter_video_data(encode_video, image_file_path, encoded_text, encode_label):\n video_frame_number = tf.shape(encode_video)[0]\n return tf.math.equal(video_frame_number, max_video_frame_number)",
"def demo_video(sess, net, im, csv_file, csv, frame_id):\n timer = Timer()\n timer.tic()\n scores, boxes = im_detect(sess, net, im)\n timer.toc()\n # print ('Detection took {:.3f}s for '\n # '{:d} object proposals').format(timer.total_time, boxes.shape[0])\n\n # Visualize detections for each class\n CONF_THRESH = 0.75\n\n NMS_THRESH = 0.2\n for cls_ind, cls in enumerate(CLASSES[1:]):\n cls_ind += 1 # because we skipped background\n cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\n\n cls_scores = scores[:, cls_ind]\n dets = np.hstack((cls_boxes,\n cls_scores[:, np.newaxis])).astype(np.float32)\n keep = nms(dets, NMS_THRESH)\n dets = dets[keep, :]\n if(cls == 'person'):\n im=vis_detections_video(im, cls, dets, csv_file, csv, frame_id, thresh=CONF_THRESH)\n #cv2.imwrite(os.path.join('output',str(time.time())+'.jpg'),im)\n cv2.imshow('ret',im)\n \n cv2.waitKey(20)",
"def tagVideo(modelpath, videopath, outputPath=None): \n model = get_model_instance_segmentation(3)\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # model.load_state_dict(torch.load(modelpath, map_location=device), strict=False)\n model.load_state_dict(torch.load(modelpath, map_location=device))\n model = model.to(device)\n model.eval()\n\n \n data_transform = transforms.Compose([\n ToPILImage(),\n transforms.ToTensor(), \n ])\n\n\n if outputPath:\n writer = FFmpegWriter(str(outputPath))\n \n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.namedWindow('main', cv2.WINDOW_NORMAL)\n labels = ['No mask', 'Mask']\n labelColor = [(10, 0, 255), (10, 255, 0)]\n img_count = 0\n outputDir = os.path.dirname(os.path.realpath(outputPath))\n frame_count = 0\n boundingBoxes = []\n for frame in vreader(str(videopath)):\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n print('Frame:', frame_count)\n\n if frame_count%30==0:\n frameTensor = data_transform(frame)\n frameTensor = torch.unsqueeze(frameTensor, 0).to(device)\n output = model(frameTensor)\n boundingBoxes = plot_image_new(frame, frameTensor[0], output[0]) \n \n if len(boundingBoxes)>0:\n for bb in boundingBoxes:\n cv2.rectangle(frame,\n (bb[0], bb[1]),\n (bb[2], bb[3]),\n (54, 66, 227),\n thickness=2)\n\n cv2.imshow('main', frame)\n if outputPath:\n writer.writeFrame(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n frame_count += 1\n if outputPath:\n writer.close()\n cv2.destroyAllWindows()",
"def video_feed(self):\r\n model.video.link(self.link)\r\n age_net, gender_net = model.video.caffe_models()\r\n return Response(model.video.video_detector(age_net, gender_net),mimetype='multipart/x-mixed-replace; boundary=frame')",
"def setup_annotations(self):\n sbd_path = get_data_path('sbd')\n target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')\n if not os.path.exists(target_path): os.makedirs(target_path)\n path = pjoin(sbd_path, 'dataset/train.txt')\n sbd_train_list = tuple(open(path, 'r'))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files['train'] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in \\\n sorted(np.unique(train_aug, return_index=True)[1])]\n self.files['train_aug'] = train_aug\n set_diff = set(self.files['val']) - set(train_aug) # remove overlap\n self.files['train_aug_val'] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, '*.png'))\n expected = np.unique(self.files['train_aug'] + self.files['val']).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')\n data = io.loadmat(lbl_path)\n lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + '.png'), lbl)\n\n for ii in tqdm(self.files['trainval']):\n fname = ii + '.png'\n lbl_path = pjoin(self.root, 'SegmentationClass', fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, 'unexpected dataset sizes'",
"def run_video(self, video_path):\n file, ext = os.path.splitext(video_path)\n video_name = file.split('/')[-1]\n out_filename = video_name + '_out' + '.avi'\n\n cap = cv2.VideoCapture(video_path)\n wi = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n he = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n print(wi, he)\n\n vwriter = cv2.VideoWriter(out_filename, cv2.VideoWriter_fourcc(*'MJPG'), 10, (wi, he))\n counter = 0\n fac = 2\n start = time.time()\n while True:\n ret, image = cap.read()\n\n if ret:\n counter += 1\n\n ## resize image\n\n height, width, channels = image.shape\n resize_ratio = 1.0 * self.INPUT_SIZE / max(width, height)\n target_size = (int(resize_ratio * width), int(resize_ratio * height))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n resized_image = cv2.resize(image, target_size, interpolation=cv2.INTER_AREA)\n output = resized_image.copy()\n\n ## get segmentation map\n batch_seg_map = self.sess.run(\n self.OUTPUT_TENSOR_NAME,\n feed_dict={self.INPUT_TENSOR_NAME: [np.asarray(resized_image)]})\n seg_map = batch_seg_map[0]\n\n ## visualize\n seg_image = label_to_color_image(seg_map).astype(np.uint8)\n\n ## overlay on image\n alpha = 0.7\n cv2.addWeighted(seg_image, alpha, output, 1 - alpha, 0, output)\n\n output = cv2.resize(output, (wi, he), interpolation=cv2.INTER_AREA)\n # outimg = 'image_' + str(counter) + '.jpg'\n # cv2.imwrite(os.path.join(os.getcwd(), 'test_out', outimg),output)\n vwriter.write(output)\n else:\n break\n\n end = time.time()\n print(\"Frames and Time Taken: \", counter, end - start)\n cap.release()\n vwriter.release()",
"def __init__(self, data_path, batch_size, video_size, mode=\"first80\"):\n self._batch_size = batch_size\n self._video_size = video_size\n\n\n # KTH video splits \n splits = [[11, 12, 13, 14, 15, 16, 17, 18], # train\n [19, 20, 21, 23, 24, 25, 1, 4], # validation\n [22, 2, 3, 5, 6, 7, 8, 9, 10]] # test\n \n label_mapping = {\"boxing\":0,\n \"handclapping\":1, \n \"handwaving\":2,\n \"jogging\":3,\n \"running\":4,\n \"walking\":5}\n self._num_classes = len(label_mapping)\n\n # file containing KTH video frame clip intervals\n sequence_list = os.path.join(data_path, \"00sequences.txt\")\n sequences = self._read_sequence_list(sequence_list)\n \n \n # clip and labels for each split, will be converted into [np.arrays()] format\n self._clips = [[] for _ in range(3)] # resized videos\n self._labels = [[] for _ in range(3)] # labels\n self._fns = [[] for _ in range(3)] # file names\n # read video into np array and create label according to splits \n for video_file in glob.glob(os.path.join(data_path, \"*.avi\")):\n fn = os.path.basename(video_file)\n fn = fn[0:len(fn) - 4]\n \n video = load_video(video_file, self._video_size)\n person_index = int(fn.split(\"_\")[0][-2:len(fn.split(\"_\")[0])])\n split = [i for i, j in enumerate(splits) if person_index in j][0]\n label = label_mapping[fn.split(\"_\")[1]]\n\n # obtain clips from video\n video_key_in_sequences = \"_\".join(fn.split(\"_\")[0:len(fn.split(\"_\")) - 1])\n print video_key_in_sequences\n\n if mode == \"episodes\":\n for clip_index, clip_range in enumerate(sequences[video_key_in_sequences]):\n self._labels[split].append(np.eye(len(label_mapping))[label]) \n self._clips[split].append(video[clip_range[0] - 1:clip_range[1] - 1, :, :, :])\n self._fns[split].append(fn + \"_\" + str(clip_index))\n elif mode == \"first80\":\n self._labels[split].append(np.eye(len(label_mapping))[label]) \n self._clips[split].append(video[0:80, :, :, :])\n self._fns[split].append(fn) \n else:\n raise NotImplementedError(\"Unknown preprocess mode.\")\n\n # maximum length for all clips, limit for padding\n self._clip_length = np.array(\\\n reduce(lambda a, b: a + [elem.shape[0] for elem in b], \n self._clips, [])).max() \n\n for split in range(3):\n for clip_index, (clip, label) in \\\n enumerate(zip(self._clips[split], self._labels[split])):\n self._clips[split][clip_index] = np.pad(clip, \\\n ((0, self._clip_length - clip.shape[0]), (0, 0), (0, 0), (0, 0)),\\\n mode=\"constant\", constant_values=0)\n # shuffling\n shuffle_index = range(len(self._clips[split]))\n random.shuffle(shuffle_index)\n self._clips[split] = [self._clips[split][i] for i in shuffle_index]\n self._labels[split] = [self._labels[split][i] for i in shuffle_index]\n self._fns[split] = [self._fns[split][i] for i in shuffle_index]\n \n self._clips[split] = np.concatenate(\\\n [np.expand_dims(i, axis=0) for i in self._clips[split]]) \n self._labels[split] = np.concatenate(\\\n [np.expand_dims(i, axis=0) for i in self._labels[split]])\n\n print self._clips[0].shape\n print self._labels[0].shape\n self._batch_index = [0 for _ in range(3)]"
] | [
"0.65230966",
"0.6502994",
"0.6373923",
"0.6358761",
"0.63245064",
"0.62168384",
"0.60723543",
"0.5980911",
"0.5978949",
"0.59094423",
"0.5880484",
"0.5861228",
"0.5837445",
"0.5835894",
"0.5835894",
"0.58234775",
"0.58234775",
"0.58179194",
"0.581608",
"0.5808023",
"0.5778187",
"0.5764704",
"0.569205",
"0.55964404",
"0.5525332",
"0.54985195",
"0.54822195",
"0.54820704",
"0.5480295",
"0.54748446"
] | 0.7296973 | 0 |