query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (listlengths 30–30) | negative_scores (listlengths 30–30) | document_score (stringlengths 4–10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Test that we fallback to cn if there is no displayname nor gcos | def test_user_no_displayname_no_gcos(dummy_user_dict):
del dummy_user_dict["displayname"]
del dummy_user_dict["gecos"]
dummy_user_dict["cn"] = ["CN"]
user = User(dummy_user_dict)
assert user.name == "CN" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_user_no_displayname_no_gcos_no_cn(dummy_user_dict):\n del dummy_user_dict[\"displayname\"]\n del dummy_user_dict[\"gecos\"]\n del dummy_user_dict[\"cn\"]\n user = User(dummy_user_dict)\n assert user.name is None",
"def ValidateDisplayName(display_name):\n if display_name is not None and not display_name:\n raise exceptions.InvalidArgumentException(\n '--display-name',\n 'Display name can not be empty.')",
"def testUseDefaultNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 0)\n self.assertEqual(expected, actual)",
"def test_user_no_displayname(dummy_user_dict):\n del dummy_user_dict[\"displayname\"]\n dummy_user_dict[\"gecos\"] = [\"GCOS\"]\n user = User(dummy_user_dict)\n assert user.name == \"GCOS\"",
"def testUseAltNamingOne(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex-rc/' +\n '0.12.433.269', '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 1)\n self.assertEqual(expected, actual)",
"def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def check_for_cname(record):\n CNAME = cydns.cname.models.CNAME\n if hasattr(record, 'label'):\n if CNAME.objects.filter(domain=record.domain,\n label=record.label).exists():\n raise ValidationError(\"A CNAME with this name already exists.\")\n else:\n if CNAME.objects.filter(label='', domain=record.domain).exists():\n raise ValidationError(\"A CNAME with this name already exists.\")",
"def check_display_option(display):\n display_options = get_display_options(verbose=False)\n if display not in display_options:\n err_str = \"The display value (%s) does not correspond to a possible \\\n display value in ENA\" % (display)\n raise ValueError(err_str)",
"def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return t",
"def test_cn_ids_are_used_as_fallback(self):\n with pytest.warns(SubjectAltNameWarning):\n rv = extract_ids(X509_CN_ONLY)\n assert [\n DNSPattern(b\"www.microsoft.com\")\n ] == rv",
"def _check_name(self):\n\t\tpass",
"def provider(provider):\n if provider in (\"alditalk\", \"netzclub\", \"congstar\"):\n return True\n else:\n return False",
"def testRecoveryUseDefaultNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n ['chromeos', '0.12.433.269', 'x86-alex', 'recovery',\n 'stable-channel', 'mp', '.bin'])\n actual = cb_name_lib.GetRecoveryName(self.board,\n self.version_string,\n 0)\n self.assertEqual(expected, actual)",
"def testDNUserName(self):\n testDn = \"/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=jha/CN=618566/CN=Manoj Jha\"\n testUserName = \"jha\"\n userName = self.mySiteDB.dnUserName(dn=testDn)\n self.assertTrue(testUserName == userName)",
"def require_cn(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_cn\")",
"def require_cn(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_cn\")",
"def testUseAltNamingThree(self):\n expected = (IMAGE_GSD_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 3)\n self.assertEqual(expected, actual)",
"def require_cn(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"require_cn\")",
"def test_unknown_device_name(self):\n search_response = SearchResponse()\n assert search_response.device_name == \"UNKNOWN\"",
"def testRecoveryUseAlternativeNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex-rc/' +\n '0.12.433.269',\n ['chromeos', '0.12.433.269', 'x86-alex', 'recovery',\n 'stable-channel', 'mp', '.bin'])\n actual = cb_name_lib.GetRecoveryName(self.board,\n self.version_string,\n 1)\n self.assertEqual(expected, actual)",
"def test_useraccount_display(self):\n self.assertEqual(\"Partagez, échangez\", self.driver.title)",
"def _get_primary_cn(tls_cert):\n return cert_parser.get_host_names(tls_cert)['cn']",
"def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")",
"def on_nicknameinuse(self, c, e):\n c.nick(c.get_nickname() + \"_\")",
"def provider_display_name(self) -> Optional[str]:\n return pulumi.get(self, \"provider_display_name\")"
] | [
"0.6524058",
"0.58852845",
"0.5666493",
"0.5665791",
"0.5654227",
"0.54735464",
"0.5469024",
"0.5469024",
"0.5469024",
"0.5469024",
"0.5469024",
"0.54659855",
"0.5449569",
"0.53934294",
"0.5368459",
"0.5359428",
"0.53472394",
"0.533857",
"0.533636",
"0.5329113",
"0.5329113",
"0.53185475",
"0.53128",
"0.5292893",
"0.5287877",
"0.52701265",
"0.52573085",
"0.52514017",
"0.5239639",
"0.52347684"
] | 0.6621293 | 0 |
Test that we fallback to cn if there is no displayname nor gcos | def test_user_no_displayname_no_gcos_no_cn(dummy_user_dict):
del dummy_user_dict["displayname"]
del dummy_user_dict["gecos"]
del dummy_user_dict["cn"]
user = User(dummy_user_dict)
assert user.name is None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_user_no_displayname_no_gcos(dummy_user_dict):\n del dummy_user_dict[\"displayname\"]\n del dummy_user_dict[\"gecos\"]\n dummy_user_dict[\"cn\"] = [\"CN\"]\n user = User(dummy_user_dict)\n assert user.name == \"CN\"",
"def ValidateDisplayName(display_name):\n if display_name is not None and not display_name:\n raise exceptions.InvalidArgumentException(\n '--display-name',\n 'Display name can not be empty.')",
"def testUseDefaultNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 0)\n self.assertEqual(expected, actual)",
"def test_user_no_displayname(dummy_user_dict):\n del dummy_user_dict[\"displayname\"]\n dummy_user_dict[\"gecos\"] = [\"GCOS\"]\n user = User(dummy_user_dict)\n assert user.name == \"GCOS\"",
"def testUseAltNamingOne(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex-rc/' +\n '0.12.433.269', '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 1)\n self.assertEqual(expected, actual)",
"def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def display_name(self):\n if \"displayName\" in self._prop_dict:\n return self._prop_dict[\"displayName\"]\n else:\n return None",
"def check_for_cname(record):\n CNAME = cydns.cname.models.CNAME\n if hasattr(record, 'label'):\n if CNAME.objects.filter(domain=record.domain,\n label=record.label).exists():\n raise ValidationError(\"A CNAME with this name already exists.\")\n else:\n if CNAME.objects.filter(label='', domain=record.domain).exists():\n raise ValidationError(\"A CNAME with this name already exists.\")",
"def check_display_option(display):\n display_options = get_display_options(verbose=False)\n if display not in display_options:\n err_str = \"The display value (%s) does not correspond to a possible \\\n display value in ENA\" % (display)\n raise ValueError(err_str)",
"def __CheckType(self, t):\n t = string.upper(t)\n \"\"\" convert lower letters to upper letters \"\"\"\n if not t in ['MX', 'CNAME', 'A', 'NS', 'PTR']:\n return None\n else:\n return t",
"def test_cn_ids_are_used_as_fallback(self):\n with pytest.warns(SubjectAltNameWarning):\n rv = extract_ids(X509_CN_ONLY)\n assert [\n DNSPattern(b\"www.microsoft.com\")\n ] == rv",
"def _check_name(self):\n\t\tpass",
"def provider(provider):\n if provider in (\"alditalk\", \"netzclub\", \"congstar\"):\n return True\n else:\n return False",
"def testRecoveryUseDefaultNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n ['chromeos', '0.12.433.269', 'x86-alex', 'recovery',\n 'stable-channel', 'mp', '.bin'])\n actual = cb_name_lib.GetRecoveryName(self.board,\n self.version_string,\n 0)\n self.assertEqual(expected, actual)",
"def testDNUserName(self):\n testDn = \"/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=jha/CN=618566/CN=Manoj Jha\"\n testUserName = \"jha\"\n userName = self.mySiteDB.dnUserName(dn=testDn)\n self.assertTrue(testUserName == userName)",
"def require_cn(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_cn\")",
"def require_cn(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"require_cn\")",
"def testUseAltNamingThree(self):\n expected = (IMAGE_GSD_PREFIX + '/stable-channel/x86-alex/0.12.433.269',\n '0.12.433.269', 'stable-channel', 'mp')\n actual = cb_name_lib.GetNameComponents(self.board, self.version_string, 3)\n self.assertEqual(expected, actual)",
"def require_cn(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"require_cn\")",
"def test_unknown_device_name(self):\n search_response = SearchResponse()\n assert search_response.device_name == \"UNKNOWN\"",
"def testRecoveryUseAlternativeNaming(self):\n expected = (IMAGE_SERVER_PREFIX + '/stable-channel/x86-alex-rc/' +\n '0.12.433.269',\n ['chromeos', '0.12.433.269', 'x86-alex', 'recovery',\n 'stable-channel', 'mp', '.bin'])\n actual = cb_name_lib.GetRecoveryName(self.board,\n self.version_string,\n 1)\n self.assertEqual(expected, actual)",
"def test_useraccount_display(self):\n self.assertEqual(\"Partagez, échangez\", self.driver.title)",
"def _get_primary_cn(tls_cert):\n return cert_parser.get_host_names(tls_cert)['cn']",
"def test_display_name(self):\r\n def verify_name(source_usage_key, parent_usage_key, expected_name, display_name=None):\r\n usage_key = self._duplicate_item(parent_usage_key, source_usage_key, display_name)\r\n duplicated_item = self.get_item_from_modulestore(usage_key, draft=True)\r\n self.assertEqual(duplicated_item.display_name, expected_name)\r\n return usage_key\r\n\r\n # Display name comes from template.\r\n dupe_usage_key = verify_name(self.problem_usage_key, self.seq_usage_key, \"Duplicate of 'Multiple Choice'\")\r\n # Test dupe of dupe.\r\n verify_name(dupe_usage_key, self.seq_usage_key, \"Duplicate of 'Duplicate of 'Multiple Choice''\")\r\n\r\n # Uses default display_name of 'Text' from HTML component.\r\n verify_name(self.html_usage_key, self.seq_usage_key, \"Duplicate of 'Text'\")\r\n\r\n # The sequence does not have a display_name set, so category is shown.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"Duplicate of sequential\")\r\n\r\n # Now send a custom display name for the duplicate.\r\n verify_name(self.seq_usage_key, self.chapter_usage_key, \"customized name\", display_name=\"customized name\")",
"def on_nicknameinuse(self, c, e):\n c.nick(c.get_nickname() + \"_\")",
"def provider_display_name(self) -> Optional[str]:\n return pulumi.get(self, \"provider_display_name\")"
] | [
"0.66205984",
"0.5885735",
"0.5667276",
"0.56651855",
"0.565568",
"0.547335",
"0.5470524",
"0.5470524",
"0.5470524",
"0.5470524",
"0.5470524",
"0.54652536",
"0.5450915",
"0.5394224",
"0.5367775",
"0.53575623",
"0.5349093",
"0.53388125",
"0.53359413",
"0.53286374",
"0.53286374",
"0.5319622",
"0.5312745",
"0.5291632",
"0.5288717",
"0.5270675",
"0.52585477",
"0.5252257",
"0.5240533",
"0.5235611"
] | 0.65224785 | 1 |
Gets the 95th percentile of bleakest_eval from bigtable | def get_95_percentile_bleak(n_back=500):
end_game = int(bigtable_input._games_nr.latest_game_number())
start_game = end_game - n_back if end_game >= n_back else 0
moves = bigtable_input._games_nr.bleakest_moves(start_game, end_game)
evals = np.array([m[2] for m in moves])
return np.percentile(evals, 5) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ninetieth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.90)]/60",
"def tenth_percentile(y_test, y_pred) :\n return np.abs(y_test - y_pred).sort_values().iloc[int(len(y_test)*0.10)]/60",
"def compute_percentile(value, cutoffs):\n\tif value < cutoffs[0]:\n\t\treturn 0.0\n\n\tfor i, cutoff in enumerate(cutoffs):\n\t\tif value < cutoff:\n\t\t\treturn math.floor(100 * (float(i)/(len(cutoffs))))\n\t\t\tbreak\n\treturn 100.0",
"def get_percentile(N, percent, key=lambda x:x):\r\n if not N:\r\n return 0\r\n k = (len(N)-1) * percent\r\n f = math.floor(k)\r\n c = math.ceil(k)\r\n if f == c:\r\n return key(N[int(k)])\r\n d0 = key(N[int(f)]) * (c-k)\r\n d1 = key(N[int(c)]) * (k-f)\r\n return d0+d1",
"def get_percentile(N, percent, key=lambda x:x):\r\n if not N:\r\n return 0\r\n k = (len(N)-1) * percent\r\n f = math.floor(k)\r\n c = math.ceil(k)\r\n if f == c:\r\n return key(N[int(k)])\r\n d0 = key(N[int(f)]) * (c-k)\r\n d1 = key(N[int(c)]) * (k-f)\r\n return d0+d1",
"def percentile(scores, student_score):\n scores = np.array(sorted(scores))\n num_scores = len(scores)\n return round(sum(scores <= student_score) / float(num_scores) * 100, 2)",
"def get_percentile(before, level):\n snr = np.array(before.getColumnByName('snr')[:])\n return np.percentile(snr, level)",
"def calcPercentile(percent, set): #TESTED\r\n\r\n # check for 100%\r\n if percent == Decimal('1.0'):\r\n return max(set)\r\n\r\n # convert percent to the appropriate index\r\n pValue = percent * len(set)\r\n\r\n set = sorted(set)\r\n\r\n # check for 0%\r\n if percent == Decimal('0'):\r\n return set[0]\r\n\r\n # check if percent is an integer\r\n if pValue % 1 == 0:\r\n\r\n # cast pValue as int so it can be used as an index\r\n pValue = int(pValue)\r\n\r\n # take average of values at indices percent and percent - 1\r\n return (set[pValue - 1] + set[pValue]) / Decimal('2')\r\n\r\n # if percentage needs to be rounded\r\n else:\r\n # round number up to nearest integer\r\n print pValue # DELETE\r\n pValue = pValue.to_integral_exact(rounding=ROUND_CEILING) # WHAT'S UP WITH THIS FUNCTION?\r\n print pValue # DELETE\r\n pValue = int(pValue)\r\n\r\n return set[pValue - 1]",
"def get_IQR(lst):\n return (float(np.percentile(lst, 75)) - float(np.percentile(lst, 25)))",
"def get_percentile(self, q):\n return None",
"def get_percentile(self, q):\n return None",
"def percentile(N, percent):\n N.sort()\n if not N:\n return None\n k = (len(N) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return N[int(k)]\n d0 = N[int(f)] * (c - k)\n d1 = N[int(c)] * (k - f)\n return d0 + d1",
"def percentile(self, values, percent):\r\n if not values:\r\n return None\r\n k = (len(values)-1) * percent\r\n f = math.floor(k)\r\n c = math.ceil(k)\r\n if f == c:\r\n return values[int(k)]\r\n d0 = values[int(f)] * (c-k)\r\n d1 = values[int(c)] * (k-f)\r\n return d0+d1",
"def percentile(self, pct):\n return percentile(self.results, pct, interpolation='nearest')",
"def lscoreatpercentile (inlist, percent):\r\n if percent > 1:\r\n print \"\\nDividing percent>1 by 100 in lscoreatpercentile().\\n\"\r\n percent = percent / 100.0\r\n targetcf = percent*len(inlist)\r\n h, lrl, binsize, extras = histogram(inlist)\r\n cumhist = cumsum(copy.deepcopy(h))\r\n for i in range(len(cumhist)):\r\n if cumhist[i] >= targetcf:\r\n break\r\n score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)\r\n return score",
"def save_loss_percentile(\n col1,\n sensor_name,\n percentile=99.5,\n file_path=\"./test_env_loss_percentiles/\",\n):\n\n file_name = sensor_name + \"_loss_percentile.pkl\"\n\n loss_percentile = np.percentile(col1, percentile)\n\n dump(loss_percentile, open(file_path + file_name, \"wb\"))\n\n return loss_percentile",
"def get_statistics_percentile(self,table,field):\n dict = {}\n for x in xrange(1,11):\n dict[x] = db.session.execute(\"select statistics_viewCount as percentile from meta order by percentile asc limit 1 OFFSET 19346*\"+str(x)+\"/10-1\").first().percentile",
"def test_small_round_numbers_95_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 95\r\n expected_result = 9.12680\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)",
"def _percentile(self, data, percent):\n if not data:\n return None\n k = (len(data) - 1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return data[int(k)]\n d0 = data[int(f)] * (c - k)\n d1 = data[int(c)] * (k - f)\n return d0 + d1",
"def scar(res):\n\n bcut = res['s2n']> np.percentile(res['s2n'],90)\n x = res['Pcad'][bcut]\n x -= min(x)\n x /= max(x)\n y = (res['t0cad']/res['Pcad'])[bcut]\n\n D = np.vstack([x,y]).T\n tree = cKDTree(D)\n d,i= tree.query(D,k=2)\n return np.percentile(d[:,1],90)",
"def get_90_percentile(times):\n times.sort()\n length = len(times)\n last_5_percent = int(math.ceil(length * 0.90))\n return times[last_5_percent - 1]",
"def percentile(N, percent):\n if not N:\n return None\n k = (len(N)-1) * percent\n f = math.floor(k)\n c = math.ceil(k)\n if f == c:\n return N[int(k)]\n d0 = N[int(f)] * (c-k)\n d1 = N[int(c)] * (k-f)\n return d0+d1",
"def percentile(t: torch.tensor, q: float):\n # Note that ``kthvalue()`` works one-based, i.e. the first sorted value\n # indeed corresponds to k=1, not k=0! Use float(q) instead of q directly,\n # so that ``round()`` returns an integer, even if q is a np.float32.\n k = 1 + round(.01 * float(q) * (t.numel() - 1))\n result = t.view(-1).kthvalue(k).values.item()\n return result",
"def get_percentile(self, q):\n raise NotImplementedError(\"This is an abstract method and needs to be implemented in derived classes.\")",
"def test_profiled_quantiles(self):\n\n # this data has 4 bins, range of 3\n # with equal bin size, each bin has the width of 0.75\n\n data = [\"1.0\", \"2.0\", \"3.0\", \"4.0\"]\n df = pd.Series(data)\n profiler = FloatColumn(df.name)\n profiler.update(df)\n profile = profiler.profile\n\n est_quartiles = profile['quantiles']\n est_Q1 = est_quartiles[249]\n est_Q2 = est_quartiles[499]\n est_Q3 = est_quartiles[749]\n\n data_to_num = [float(item) for item in data]\n exact_Q1 = np.percentile(data_to_num, 25)\n exact_Q2 = np.percentile(data_to_num, 50)\n exact_Q3 = np.percentile(data_to_num, 75)\n\n self.assertEqual(est_Q1, exact_Q1)\n self.assertEqual(est_Q2, exact_Q2)\n self.assertEqual(est_Q3, exact_Q3)",
"def percentile(self, percentile):\n assert(percentile >= 0 and percentile <= 100)\n assert(self.kind in [\"exponential\", \"linear\", \"enumerated\", \"boolean\"])\n\n fraction = percentile/100\n to_count = fraction*self.buckets.sum()\n percentile_bucket = 0\n\n for percentile_bucket in range(len(self.buckets)):\n freq = self.buckets.values[percentile_bucket]\n if to_count - freq <= 0:\n break\n to_count -= freq\n\n percentile_lower_boundary = self.buckets.index[percentile_bucket]\n percentile_frequency = self.buckets.values[percentile_bucket]\n\n if percentile_bucket == len(self.buckets) - 1 or percentile_frequency == 0:\n return percentile_lower_boundary\n\n width = self.buckets.index[percentile_bucket + 1] - self.buckets.index[percentile_bucket]\n return percentile_lower_boundary + width*to_count/percentile_frequency",
"def _calculate_percentile_cutoff(run_numbers):\n mcp_values = []\n andor_values = []\n for run_number in run_numbers:\n current_data_path = ''.join([DATA_PATH, 'run', str(run_number), 'allevts.h5'])\n f = h5py.File(current_data_path, 'r')\n current_phot = _get_photon_energy(f, run_number)\n current_mcp = np.array(f['Acqiris2']['acq'])\n current_mcp = current_mcp[(current_phot > 781) & (current_phot < 782)]\n mcp_values.extend(current_mcp)\n current_andor = np.array(f['Andor']['signal'])\n current_andor = current_andor[(current_phot > 781) & (current_phot < 782)]\n andor_values.extend(current_andor)\n #plt.figure()\n #plt.scatter(mcp_values, andor_values)\n mcp_percentile_cutoff = min([percentileofscore(andor_values, 4000), 99.9])\n return mcp_percentile_cutoff",
"def tail_ratio(returns):\n\n return np.abs(np.percentile(returns, 95)) / \\\n np.abs(np.percentile(returns, 5))",
"def test_small_round_numbers_98_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 98\r\n expected_result = 7.67748\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)",
"def test_run_simulations_and_get_percentile_allele_length_1():\n simulator = genotype_confidence_simulator.GenotypeConfidenceSimulator(\n 50, 300, 0.01, iterations=5\n )\n simulator.run_simulations()\n expected_confidence_scores_percentiles = {\n 193: 20.0,\n 221: 40.0,\n 271: 60.0,\n 278: 80.0,\n 303: 100.0\n }\n assert (\n simulator.confidence_scores_percentiles\n == expected_confidence_scores_percentiles\n )\n assert simulator.get_percentile(193) == 20.00\n assert simulator.get_percentile(221) == 40.00\n # Try getting number that is not in the dict and will have to be inferred\n assert simulator.get_percentile(207) == 30.0\n # Try values outside the range of what we already have\n simulator.get_percentile(192) == 0.00\n simulator.get_percentile(191) == 0.00\n simulator.get_percentile(304) == 100.00\n simulator.get_percentile(305) == 100.00"
] | [
"0.66710836",
"0.64393985",
"0.6213645",
"0.61712694",
"0.61712694",
"0.61691296",
"0.6116343",
"0.60878766",
"0.6070936",
"0.6057565",
"0.6057565",
"0.5990582",
"0.5941295",
"0.59283644",
"0.592614",
"0.5912521",
"0.5911487",
"0.5907796",
"0.5905875",
"0.5864946",
"0.5858764",
"0.58160836",
"0.5807287",
"0.5794529",
"0.57501394",
"0.57424515",
"0.57391876",
"0.5725077",
"0.5721499",
"0.57212377"
] | 0.7476899 | 0 |
Updates the flagfile at `flags_path`, changing the value for `resign_threshold` to `new_threshold` | def update_flagfile(flags_path, new_threshold):
if abs(new_threshold) > 1:
raise ValueError("Invalid new percentile for resign threshold")
with tf.gfile.GFile(flags_path) as f:
lines = f.read()
if new_threshold > 0:
new_threshold *= -1
if not RESIGN_FLAG_REGEX.search(lines):
print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path))
sys.exit(1)
old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1)
lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines)
print("Updated percentile from {} to {:.3f}".format(old_threshold, new_threshold))
with tf.gfile.GFile(flags_path, 'w') as f:
f.write(lines) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ctrl_update_flags(self, flags, old_dst, old_src, new_val, sub_op):\n of_cond = self.create_overflow_condition(old_dst, old_src, new_val, sub_op)\n cf_cond = self.create_carry_condition(new_val, sub_op)\n\n valid_flags = {'C': cf_cond is True,\n 'Z': new_val & 0xFF == 0,\n 'V': of_cond is True,\n 'N': ((new_val & 0x80) != 0)}\n\n for flag in flags:\n self.set_flag(flag, valid_flags[flag])",
"def update_feature_masks(distance_from_feature, feature, mask_resolution, ipm_distance_range=20):\n pass",
"def update(self, flags=''):\n _load = not self.san_interface.runmode\n self._update_params()\n if 'cachesize' in self._updatedattr or _load or 'f' in flags:\n (e,r) = self._update_cachesize()\n if e:\n if not _load: # reset cachesize on create but not on load\n self.cachesize=0\n return (e,r)\n## self._flush()\n return (0,'')",
"def set_flag(self, new):\n self.flag = new",
"def apply_additional_mask(\n old_mask_file=None,\n new_mask_file=None,\n new_thresh=0.0,\n operation='AND'\n):\n if root_mask == None:\n logger.info(\"Specify a cube root file name.\")\n return\n\n myia = au.createCasaTool(casaStuff.iatool)\n myia.open(new_mask_file)\n new_mask = myia.getchunk()\n myia.close()\n\n myia.open(old_mask_file)\n mask = myia.getchunk()\n if operation == \"AND\":\n mask *= (new_mask > new_thresh)\n else:\n mask = (mask + (new_mask > new_thresh)) >= 1.0\n myia.putchunk(mask)\n myia.close()\n\n return",
"def write_flag(path): \r\n f = open(path, \"r+\")\r\n line = f.readlines()\r\n line[1] = 'flag = 1'\r\n s=''.join(line) \r\n f.seek(0)\r\n f.write(s)\r\n f.close()",
"def set_status(flag, irods_fname):\n\n if flag:\n where, param = __make_sel({'fn' : irods_fname})\n if where:\n query = \"UPDATE file_restore_requests SET status=%s {}\".format(where)\n param.insert(0, flag)\n #print(query, param)\n __do_sql(query, param)\n return\n\n print(\"No value for status, update failed\")",
"def update(self,update_flags):\n pass",
"def update_exam_flags(sender, instance, **kwargs):\n exam = Exam.objects.get(pk=instance.exam.pk)\n exam.flags = ExamFlag.objects.filter(exam=exam, resolved=False).count()\n exam.save()",
"def setFlag(self, flag, value) -> None:\n ...",
"def edit_flag(\n flag,\n type_,\n start,\n finish,\n instrument,\n description,\n user,\n freq,\n inputs,\n metadata,\n force,\n):\n if type_:\n flag.type = type_\n\n if start:\n flag.start_time = start.int_timestamp\n\n if finish:\n flag.finish_time = finish.int_timestamp if finish != \"null\" else None\n\n if metadata:\n flag.metadata.update(metadata)\n\n # Edit any optional metadata\n if description:\n flag.metadata[\"description\"] = description\n if user:\n flag.metadata[\"user\"] = user\n if instrument:\n flag.metadata[\"instrument\"] = instrument\n if inputs:\n flag.metadata[\"inputs\"] = inputs\n if freq:\n flag.metadata[\"freq\"] = freq\n\n if force:\n flag.save()\n else:\n click.echo(\"Edited flag:\\n\")\n click.echo(format_flag(flag))\n if click.confirm(\"Commit changed flag?\"):\n flag.save()\n click.echo(\"Success.\")\n else:\n click.echo(\"Aborted.\")",
"def update_bool(file_path):\n with open(\n file_path, 'r'\n ) as the_result_file_from_spark_for_read_and_abbr_not_allowed_by_pylint:\n content = the_result_file_from_spark_for_read_and_abbr_not_allowed_by_pylint.read(\n )\n update = content.replace('true', 'True').replace('false', 'False')\n with open(\n file_path,\n 'w') as the_result_file_from_spark_for_write_and_abbr_not_allowed:\n the_result_file_from_spark_for_write_and_abbr_not_allowed.write(update)",
"def on_flags_update(self, event):\n self.entity.on_flags_update(event)",
"def refresh(self):\n \n ffm = FlagFileManager(basedir=self.basedir)\n flagfiles = ffm.search(projectname=self.projectname)\n if flagfiles:\n self.tag = flagfiles[0].tag # we assume only 1 flagfile per project\n self.filename = '%s.%s.%s' %(self.projectname, self.timestamp, self.tag)",
"def generate_minimization_flags_file(pdb_object):\n complex_path = pdb_object.complex.pdb.path\n complex = complex_path.name\n name = complex_path.stem\n params = pdb_object.ligand.params.path.name\n template = Template(storage.read_plain(constants.flags_relax_path))\n substitution = {'complex' : complex,\n 'name' : name,\n 'params' : params}\n output = template.substitute(substitution)\n pdb_object.flags_relax.write(output)",
"def adjust_threshold(file_bw, buffer_db, threshold_base):\n rf_decimation = int(constants.FILE_FS / file_bw)\n fft_bin_decimation = constants.OBS_INPUT_NFFT / rf_decimation / constants.OBS_OUTPUT_NFFT\n\n threshold_scale_factor = constants.OBS_INPUT_NFFT / constants.OBS_OOB_NFFT / fft_bin_decimation\n\n threshold = (threshold_base + 10 * np.log10(constants.FILE_FS)\n - 20*np.log10(threshold_scale_factor) + buffer_db)\n\n return threshold",
"def test_setFlags(self):\n self._flagsTest('setFlags', b'FLAGS')",
"def updateThreshold(self, t):\n\n budget = self.budget\n self.threshold = self.init_threshold * self.diameter * ((budget-t) / self.budget)**self.decay_factor",
"def set_ThresholdValue(self, value):\n super(UpdateTriggerInputSet, self)._set_input('ThresholdValue', value)",
"def _reapply_bsdflags_to_image(mysettings):\n\tif bsd_chflags:\n\t\tos.system(\"mtree -e -p %s -U -k flags < %s > /dev/null\" % \\\n\t\t\t(_shell_quote(mysettings[\"D\"]),\n\t\t\t_shell_quote(os.path.join(mysettings[\"T\"], \"bsdflags.mtree\"))))",
"def setThreshold(self, threshold): # real signature unknown; restored from __doc__\n pass",
"def set_flags(self, flags):\n\n if isinstance(flags, int):\n if flags not in (0, 1, 2, 3):\n raise ValueError(\"Invalid flags: {}\".format(flags))\n\n self.raw.flags = flags\n\n elif isinstance(flags, Iterable):\n valid_flags = {\"DF\", \"MF\"}\n flags = set(flags)\n invalid_flags = flags.difference(valid_flags)\n\n if len(invalid_flags) > 0:\n raise ValueError(\"Invalid flags: {}\".format(invalid_flags))\n\n raw_flags = 0\n\n if \"DF\" in flags:\n raw_flags += 0b010\n\n if \"MF\" in flags:\n raw_flags += 0b001\n\n self.raw.flags = raw_flags\n\n else:\n msg = \"Expected flags to be int or iterable, got: {}\"\n raise TypeError(msg.format(type(flags).__name__))",
"def _flag(self, test=False):\n\n if test:\n flag = self.test_flag\n else:\n flag = self._get_flag()\n\n # For each of the temperature add the threshold mask\n for var in TEMPERATURE_VARIABLES:\n self.add_mask(\n var, flag, 'discrepancy threshold exceeded',\n ('The discrepancy between the deiced and non-deiced temperature '\n f'sensors is greater than {TEMPERATURE_THRESHOLD} K.')\n )",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def setThreshold(self, v):\n self._set(threshold=v)\n return self",
"def set_restart_mode(restart_file, flag=\"reload\"):\n with open(restart_file, 'w') as f:\n f.write(str(flag))",
"def getFlags(acre_threshold=10, min_acre_diff=40):\n acre_threshold = float(acre_threshold)\n min_acre_diff = float(min_acre_diff)\n if acre_threshold > 100 or acre_threshold == 0:\n raise ValueError('Acre threshold must be between 1-100!')\n\n if acre_threshold > 1:\n acre_threshold *= .01\n\n # run summary stats on breakdown table\n gdb = utils.Geodatabase()\n stats ='ACRES SUM;BENEFIT SUM;ASSESSMENT SUM;SEC_TWN_RNG FIRST'\n case_field='CODE;LANDOWNER_NAME;PIN;COUNTY'\n tmp_stats = r'in_memory\\tmp_stats'\n #tmp_stats = os.path.join(gdb.path, 'tmp_stats') #testing only\n arcpy.analysis.Statistics(gdb.breakdown_table, tmp_stats, stats, case_field)\n\n # create new table\n if not arcpy.Exists(gdb.flag_table):\n flag_table_exists = False\n path, name = os.path.split(gdb.flag_table)\n arcpy.management.CreateTable(path, name)\n\n for fld, alias, ftype in FLAG_FIELDS:\n arcpy.management.AddField(gdb.flag_table, fld, ftype, field_alias=alias, field_length=255)\n\n else:\n # just clear out the rows\n flag_table_exists = True\n arcpy.management.DeleteRows(gdb.flag_table)\n\n # read summarized breakdown table\n sum_d = {}\n s_fields = ['PIN', 'CODE', 'LANDOWNER_NAME', 'SUM_ACRES', 'SUM_BENEFIT', 'SUM_ASSESSMENT', 'FIRST_SEC_TWN_RNG']\n with arcpy.da.SearchCursor(tmp_stats, s_fields) as rows:\n for r in rows:\n sum_d[r[0]] = r[1:]\n\n # read summary table from gdb\n summary_fields = ['PIN', 'OWNER_CODE', 'OWNER', 'ASSESSED_ACRES', 'TOT_BENEFIT',\n 'TOT_ASSESSMENT', 'SECTION', 'TOWNSHIP', 'RANGE', 'COUNTY']\n\n # generate flags\n flagCount = 0\n flag_pins = []\n pin_error_msg = 'PIN not found in Breakdown Table'\n with utils.InsertCursor(gdb.flag_table, [f[0] for f in FLAG_FIELDS[:-1]]) as irows:\n with arcpy.da.SearchCursor(gdb.summary_table, summary_fields) as rows:\n for r in rows:\n newRow = [None] * len(FLAG_FIELDS[:-1])\n par = None\n if r[0] in sum_d:\n plss = '-'.join(['{:0>2}'.format(p) if p else '99' for p in r[6:9]])\n par = sum_d[r[0]]\n newRow[0] = r[0]\n\n # check owner code\n if r[1] != par[0]:\n newRow[2] = 'Owner Code \"{}\" does not macth \"{}\" in breakdown table\"'.format(r[1] if r[1] else '', par[0] if par[0] else '')\n own = r[2]\n\n # check owner last name only\n if own and par[1]:\n ownLast = own.split()[0].upper().rstrip(',')\n bownLast = par[1].split()[0].upper().rstrip(',')\n if ownLast != bownLast:\n newRow[3] = 'Last name \"{}\" in summary table does not match \"{}\" in breakdown table'.format(ownLast, bownLast)\n\n # check acres based on pecent threshold\n acres = r[3]\n bacres = par[2]\n diff = acres - bacres\n perc_diff = (acres * acre_threshold)\n\n if abs(diff) >= perc_diff and abs(diff) >= min_acre_diff:\n newRow[4] = diff\n newRow[5] = perc_diff\n\n # check benefits and assessments, these should be exact matches!\n ben_diff = r[4] - par[3]\n if ben_diff:\n if ben_diff > 0.1:\n newRow[6] = ben_diff\n\n assess_diff = r[5] - par[4]\n if assess_diff:\n if assess_diff > 0.1:\n newRow[7] = assess_diff\n\n # verify plss info\n if plss != par[5]:\n newRow[8] = 'Section \"{}\" does not match \"{}\" from breakdown table'.format(plss, par[5])\n\n else:\n newRow[:2] = [r[0], pin_error_msg]\n\n if len(filter(None, newRow)) >= 2:\n # add county\n newRow[9] = r[-1]\n irows.insertRow(newRow)\n flagCount += 1\n\n if newRow[1] != pin_error_msg:\n flag_pins.append(newRow[0])\n\n # flag PINs in breakdown table, PINs keep getting set to NULL from relationship table??\n with utils.UpdateCursor(gdb.breakdown_table, [utils.PIN, 'FLAG']) as urows:\n for row in urows:\n if row[0] in 
flag_pins:\n row[1] = 'Y'\n else:\n row[1] = 'N'\n urows.updateRow(row)\n\n # flag PINs in summary table\n with utils.UpdateCursor(gdb.summary_table, [utils.PIN, 'FLAG']) as rows:\n for row in urows:\n if row[0] in flag_pins:\n row[1] = 'Y'\n else:\n row[1] = 'N'\n rows.updateRow(row)\n\n## # set up relationship classes, this is killing GDB performance, will just have to go with table joins :(\n## sum_rel = os.path.join(gdb.path, 'Summary_Relationship')\n## brk_rel = os.path.join(gdb.path, 'Breakdown_Relationship')\n## if not arcpy.Exists(sum_rel):\n## arcpy.management.CreateRelationshipClass(gdb.summary_table, gdb.flag_table, sum_rel, 'SIMPLE', 'Flags', 'Summary', 'BOTH', 'ONE_TO_ONE', 'NONE','PIN', 'PIN')\n## utils.Message('created ' + os.path.basename(sum_rel))\n##\n## if not arcpy.Exists(brk_rel):\n## arcpy.management.CreateRelationshipClass(gdb.flag_table, gdb.breakdown_table, brk_rel, 'SIMPLE', 'Breakdown', 'Flags', 'BOTH', 'ONE_TO_MANY', 'NONE', 'PIN', 'PIN')\n## utils.Message('created ' + os.path.basename(brk_rel))\n\n # compact gdb\n arcpy.management.Compact(gdb.path)\n\n # report message\n utils.Message('Found {} flags between summary and breakdown tables'.format(flagCount))\n return"
] | [
"0.5232395",
"0.50572014",
"0.50224495",
"0.5003035",
"0.4994549",
"0.4984667",
"0.4941392",
"0.48769605",
"0.48551136",
"0.48338896",
"0.48318133",
"0.48245242",
"0.4822055",
"0.48098576",
"0.47942355",
"0.47704318",
"0.47618973",
"0.47563523",
"0.46765515",
"0.46269655",
"0.4625937",
"0.46175054",
"0.45834365",
"0.4562915",
"0.4562915",
"0.4562915",
"0.4562915",
"0.4562915",
"0.45555532",
"0.45256114"
] | 0.8416566 | 0 |
Authenticate with SoundCloud API. Cache access token in the secrets file. | def init_api():
global soundcloud
import json
SECRETS_VERSION = 1
# Load secrets file
if os.path.exists(config.token_cache):
with open(config.token_cache, 'r', encoding='utf-8') as f:
secrets = json.load(f)
else:
secrets = {}
# Try to reuse the cached access token
if secrets\
and secrets['version'] == SECRETS_VERSION\
and secrets['access_token_acquired_at'] + secrets['access_token_expires_in'] > time() - 5 * 60\
and secrets['username'] == config.username:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
access_token=secrets['access_token']
)
return
# Get a new access token
logging.info('Getting a new access token')
try:
soundcloud = Soundcloud(
client_id=config.client_id,
client_secret=config.client_secret,
username=config.username,
password=config.password
)
except HTTPError as e:
if e.response.status_code == 401:
logging.critical('Incorrect API key, login or password. Please, edit config.py.')
sys.exit(1)
else:
raise
# Save the token
secrets = {
'version': SECRETS_VERSION,
'username': config.username,
'access_token': soundcloud.access_token,
'access_token_acquired_at': time(),
'access_token_expires_in': soundcloud.token.expires_in,
}
with open(config.token_cache, 'w', encoding='utf-8') as f:
secrets = json.dump(secrets, f, indent='\t', ensure_ascii=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authenticate(self):\n try:\n self._token = self._lookup_token()\n except:\n raise HTTPError(\n \"Unable to get short-lived access token for cyberark storage\"\n )",
"def authenticate():\n\n # We are uploading and then downloading so we want Musicmanager\n api = Musicmanager()\n\n # Attempt to authenticate and log in\n logged_in = api.login()\n\n # If login() returns false, you have not performed oauth yet, or did not\n # write your credentials to your disk. Using oauth allows authentication\n # without providing plaintext credentials to the application\n if not logged_in:\n print('No oauth credentials found, please authenticate your account')\n\n # Performs oauth and stores generated credentials to Appdirs \n # 'user_data_dir' by default. oauth only needs to be performed once per \n # machine if the credentials are stored, which is the default behavior.\n authenticated = api.perform_oauth(open_browser=True)\n else:\n print('Successfully logged in.\\n')\n\n return api",
"def _authenticate(self):\n if self.creds().consumer_key() is None or \\\n self.creds().app_secret() is None:\n self.logger.error(\"You need a consumer key and app secret, yo\")\n else:\n self._access_token = self._request_access_token()",
"def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth",
"def authenticate():\n auth = OAuthHandler(config.CONSUMER_API_KEY, config.CONSUMER_API_SECRET)\n auth.set_access_token(config.ACCESS_TOKEN, config.ACCESS_TOKEN_SECRET)\n\n return auth",
"def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']",
"def authenticate(self):\n # Check if we already have access token and secret\n if not os.path.exists(self.sTOKEN_FILE):\n # 1) Obtain Request token\n oauth = OAuth1(self.apiKey, client_secret=self.apiKeySecret, callback_uri='oob')\n r = requests.post(url=self.sREQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # 2) Obtain authorization for the user to access resources\n # Redirect the user to /authorize and get the callback\n authorize_url = self.sAUTHORIZE_URL + '?oauth_token=' + resource_owner_key + \\\n '&oauth_consumer_key=' + self.apiKey + \\\n '&Access=Full&Permissions=Modify'\n\n print 'Please go here and authorize,', authorize_url\n verifier = raw_input('Please enter the six-digit PIN code: ')\n\n # 3) Obtain final access token\n oauth = OAuth1(self.apiKey, client_secret = self.apiKeySecret,\n resource_owner_key = resource_owner_key,\n resource_owner_secret = resource_owner_secret,\n verifier=verifier)\n r = requests.post(url=self.sACCESS_TOKEN_URL, auth=oauth)\n\n credentials = parse_qs(r.content)\n access_token = credentials.get('oauth_token')[0]\n access_token_secret = credentials.get('oauth_token_secret')[0]\n\n # Store access token so we can use it later\n with open(self.sTOKEN_FILE, 'w') as f:\n json.dump({'access_token': access_token,\n 'access_token_secret': access_token_secret}, f)\n\n else:\n with open(self.sTOKEN_FILE, 'r') as f:\n tokens = json.load(f)\n access_token = tokens.get('access_token')\n access_token_secret = tokens.get('access_token_secret')\n\n # store the file access token details for use in other methods\n self.accessToken = access_token\n self.accessTokenSecret = access_token_secret",
"def authenticate():\n auth = OAuthHandler(config.TW_API_KEY, config.TW_API_SECRET)\n auth.set_access_token(config.TW_ACC_TOKEN, config.TW_ACC_SECRET)\n\n return auth",
"def authentication(): \n pathToConfig = os.path.join(prefix, \"twitterConfig\")\n config = json.load(open(pathToConfig))\n consumer_key = config['consumer_key']\n consumer_secret = config['consumer_secret']\n access_token = config['access_token']\n access_token_secret = config['access_token_secret']\n api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret,\n access_token_key=access_token, access_token_secret=access_token_secret)\n return api",
"def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):\r\n auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, \r\n client_secret=SPOTIPY_CLIENT_SECRET)\r\n \r\n return spotipy.Spotify(auth_manager=auth_manager)",
"def authenticate(self):\n auth = tw.OAuthHandler(self.consumer_key, self.consumer_secret)\n auth.set_access_token(self.access_token, self.access_secret)\n return tw.API(auth)",
"def authorize_credentials():\n credentials = STORAGE.get()\n # If the credentials doesn't exist in the storage location then run the flow\n if credentials is None or credentials.invalid:\n flow = flow_from_clientsecrets(CREDENTIAL_JSON, scope=SCOPE)\n http = httplib2.Http()\n credentials = run_flow(flow, STORAGE, http=http)\n return credentials",
"def init_auth_client(self):\n with open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile)\n token = util.prompt_for_user_token(\n cfg['username'],\n scope=cfg['scope'],\n client_id=cfg['spotipy_client_id'],\n client_secret=cfg['spotipy_client_secret'],\n redirect_uri=cfg['spotipy_redirect_uri'])\n sp = spotipy.Spotify(auth=token)\n return sp, cfg['username']",
"def vault_auth():\n # Check if vault is sealed\n if client.sys.is_sealed() == True:\n # if the vault is SEALED, UNSEAL IT using the unseal_key\n unseal_response = client.sys.submit_unseal_key(vault_unseal_key)\n\n # [Uncomment line below only if you want to generate a new API token for the application your ROOT admin registered]\n # Keep in mind you need Application Role ID and Secret ID\n client_data = client.auth_approle(vault_role_id, vault_secret_id)\n # print(client_data['auth']['client_token'])\n\n # Authenticate against the VAULT using the new CLIENT TOKEN conatained in the new dict object\n client.token = client_data['auth']['client_token']",
"def authenticate(self, username, password, consumerKey, consumerSecret):\r\n pass",
"def __init__(self):\n self.authurl = Config().auth\n self.baseurl = Config().api\n self.s = Session()\n self.s.headers = {'Accept': 'application/json'}\n data = {\"grant_type\": \"client_credentials\", \"scope\": \"/read-public\", \"client_id\": Config().client_id,\n \"client_secret\": Config().client_secret}\n r = self.s.request(method=\"post\", url=self.authurl, data=data)\n self.s.headers = {'Accept': 'application/json', \"Access token\": r.json()[\"access_token\"]}",
"def connect(self):\n r = authentication.token(connection=self)\n\n\n self.auth_token = r.json().get('token')",
"def api_authentication(token, secret):\n\n api = twitter.Twitter(auth=twitter.OAuth(token, secret,\n CONSUMER_KEY, CONSUMER_SECRET))\n if not api:\n print(\"Authentication failed, try running the script one more time\")\n\n # delete data from the configuration file to force a new\n # authentication next time\n os.remove(TOKEN_FILE)\n return None\n\n return api",
"def oauth():\n return {\"consumer_key\": \"Insert consumer key HERE\",\n \"consumer_secret\": \"Insert consumer secret HERE\",\n \"token_key\": \"Insert token key HERE\",\n \"token_secret\": \"Insert token secret HERE\"}",
"def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()",
"def authorize():\n encoded_auth = base64.b64encode(\n (os.environ[\"SPOTIFY_CLIENT_ID\"] + ':' + os.environ[\"SPOTIFY_CLIENT_SECRET\"]).encode())\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_auth.decode(\"utf-8\"))\n }\n\n response = requests.post(os.environ['SPOTIFY_AUTH_URL'], data={'grant_type': 'client_credentials'},\n headers=headers).text\n return json.loads(response)",
"def __init__(self):\n self.api = Api(consumer_key=credentials[\"consumer_key\"],\n consumer_secret=credentials[\"consumer_secret\"],\n access_token_key=credentials[\"access_token_key\"],\n access_token_secret=credentials[\"access_token_secret\"])",
"def authorize(self) -> None:\n\n if not self.login_secret:\n #TODO trigger error\n self.login()\n \n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.access_token = sObj.getAccessToken(\n self.oauth_token,\n self.login_secret,\n self.oauth_verifier\n )",
"def t_auth():\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, token_secret)\n return tweepy.API(auth)",
"def authorise(consumer_key, consumer_secret, access_token, access_token_secret):\r\n # Authorisation:\r\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n auth.set_access_token(access_token, access_token_secret)\r\n\r\n # Creating api object:\r\n api = tweepy.API(auth)\r\n\r\n # Finally:\r\n return api",
"def auth(self):\n return self.api(self.token)",
"def authenticate(self):\n\n headers = {\n 'Authorization': 'Bearer ' + self.access_token,\n 'ClientId': self.client_id,\n }\n self.headers.update(headers)",
"def authenticate(redirect_uri, client_cred_manager, username, scope,client_id,client_secret):\r\n\r\n sp = spotipy.Spotify(client_credentials_manager = client_cred_manager)\r\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\r\n if token:\r\n sp = spotipy.Spotify(auth=token)\r\n else:\r\n print(\"Can't get token for\", username)\r\n return sp",
"def auth(access_token, access_token_secret, consumer_key, consumer_secret):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token,access_token_secret)\n return auth",
"def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='[email protected]', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)"
] | [
"0.69103223",
"0.6770919",
"0.65899146",
"0.6582983",
"0.6582983",
"0.6426025",
"0.64257234",
"0.6412163",
"0.63717526",
"0.63108236",
"0.62029386",
"0.61904144",
"0.6170871",
"0.61696583",
"0.6121056",
"0.6096839",
"0.60418415",
"0.60281444",
"0.60094726",
"0.6006562",
"0.59891003",
"0.59873533",
"0.5908705",
"0.5890671",
"0.58650535",
"0.58639",
"0.5863631",
"0.58554643",
"0.5843255",
"0.5817573"
] | 0.7882311 | 0 |
Return true if the respost exists, according to soundcloud. Also update the database if a repost is already deleted on soundcloud, but is not marked as deleted in the db. | def check_repost_exists(type, id):
try:
soundcloud.get('/e1/me/{}_reposts/{}'.format(type, id))
return True
except HTTPError as e:
if e.response.status_code == 404:
db.mark_as_deleted(type, id)
return False
else:
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True",
"def is_new_post(self, post):\n return self.last_post != post['id']",
"def check_repost(post, user):\n if not user.is_authenticated():\n return 'not_auth' # no user to repost as\n\n if user.pk == post.author.pk:\n return 'own_post' # don't repost own post\n\n existing_repost = Post.objects.filter(author=user, repost_original=post).exists()\n if existing_repost:\n # don't repost more than once\n return 'already_reposted_as'\n\n return 'ok'",
"async def exists(self, payload: TPayload) -> bool:",
"def test_response_reusage_after_replied(self):\n\n post1 = self._create_tweet(\n content=\"I need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n\n resp1 = Response.objects.upsert_from_post(post1)\n\n support = UserProfile.objects.upsert('Twitter', dict(screen_name='@test2'))\n self._create_tweet(\n user_profile=support,\n content=\"We cant help you right now. Sorry.\",\n channel=self.outbound,\n demand_matchables=True,\n in_reply_to=post1)\n\n post2 = self._create_tweet(\n content=\"I still need a foo.\",\n channel=self.inbound,\n demand_matchables=True)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertNotEqual(resp1.id, resp2.id)",
"def test_post_deletion_success(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n self.client.credentials(\n HTTP_AUTHORIZATION = 'Token ' + self.user1.auth_token.key\n )\n response = self.client.delete(url)\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n deleted_post = Post.objects.filter(\n id=self.post1.id,\n )\n self.assertFalse(deleted_post.exists())",
"def test_response_reusage(self):\n\n post1 = self._create_db_post(content=\"@test I need a foo.\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n self.assertTrue(self.sc.inbound_channel.is_assigned(post1))\n\n conv1 = self.sc.upsert_conversation(post1)\n post2 = self._create_db_post(content=\"I still need a foo!\",\n channel=self.sc.inbound,\n demand_matchables=True,\n user_profile={'screen_name': 'customer'})\n conv2 = self.sc.upsert_conversation(post2)\n\n resp1 = Response.objects.upsert_from_post(post1)\n resp2 = Response.objects.upsert_from_post(post2)\n self.assertEqual(conv1.id, conv2.id)\n self.assertEqual(resp1.id, resp2.id)\n self.assertTrue(resp2.post_date > resp1.post_date)",
"def test_post_update_sucess(self):\n url = reverse(\n 'post-detail',\n args=[\n self.topic1.url_name,\n self.post1.id\n ]\n )\n payload = {\n 'title': 'Updated title',\n 'content': 'Updated content'\n }\n self.client.credentials(\n HTTP_AUTHORIZATION = 'Token ' + self.user1.auth_token.key\n )\n response = self.client.patch(url, payload)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n updated_post = Post.objects.filter(\n id=self.post1.id,\n author=self.user1,\n title=payload.get('title'),\n content=payload.get('content')\n )\n self.assertTrue(updated_post.exists())",
"def exists(self):\n print \"exists called: \", self.path\n d = self._get()\n d.addCallback(self._handleResponse)\n\n return d",
"def pingback_post(response, target_uri, slug):\n post = Post.query.filter_by(slug=slug).first()\n if post is None:\n return False\n\n if post is None or not post.pings_enabled:\n raise PingbackError(33, 'no such post')\n elif not post.can_read():\n raise PingbackError(49, 'access denied')\n title, excerpt = get_excerpt(response, target_uri)\n if not title:\n raise PingbackError(17, 'no title provided')\n elif not excerpt:\n raise PingbackError(17, 'no useable link to target')\n old_pingback = Comment.query.filter(\n (Comment.is_pingback == True) &\n (Comment.www == response.url)\n ).first()\n if old_pingback:\n raise PingbackError(48, 'pingback has already been registered')\n Comment(post, title, excerpt, '', response.url, is_pingback=True,\n submitter_ip=get_request().remote_addr, parser='text')\n db.commit()\n return True",
"def test_exists_true(self):\n self.assertTrue(PrepSample.exists(self.sample_id, self.prep_template))",
"def should_update(self) -> bool:\n if CoronaCaseRaw.objects.all().count() == 0:\n return True\n last_updated = CoronaCaseRaw.objects.latest('date_received').date_received\n return timezone.now() >= last_updated + timezone.timedelta(seconds=self.interval)",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False",
"def test_exists_true(self):\n self.assertTrue(Sample.exists(self.sample_id, self.sample_template))",
"def remove_post(request):\n if request.method == \"POST\":\n if \"token\" in request.data and request.data[\"token\"] != \"\" and request.data[\"token\"] is not None:\n if Token.objects.filter(key=request.data[\"token\"]).exists():\n if Post.objects.filter(pk=request.data[\"post_id\"]).exists():\n Post.objects.get(pk=request.data[\"post_id\"]).delete()\n return Response({\"success\": 91})\n else:\n return Response({\"error\": 32})\n else:\n return Response({\"error\": 17})",
"def create(self):\n return (True == self.client.put(self.name).getBodyData(\"ok\"))",
"def exists(self, answer):\n return self.find(answer) is not None",
"def doc_exist(self, docid):\n doc = Document(self.cloudant_database, docid)\n return doc.exists()",
"def check_response_valid_update(response: HTTPResponse) -> bool:\n return response.status_code == 200",
"def exists(self):\n return bool(self.get())",
"def is_exist(self, status_code):\n if status_code == 200:\n return True\n return False",
"def exist(self):",
"def document_exists(self, docid):\n raise NotImplementedError",
"def check_if_duplicate(self, data):\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND location = '{}'\\\n \".format(self.table, data['topic'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup with same topic at the same venue\\\n already exists'\n\n query = \"SELECT * FROM {} WHERE happening_on = '{}' AND location = '{}'\\\n \".format(self.table, data['happening_on'], data['location'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date at the same venue \\\n already exists'\n\n query = \"SELECT * FROM {} WHERE topic = '{}' AND happening_on = '{}'\\\n \".format(self.table, data['topic'], data['happening_on'])\n\n result = self.fetch_one(query)\n if result:\n return True, 'Meetup happening the same date with same topic \\\n already exists'\n\n return False, None",
"def user_response_to_post(self, request, pk):\n post_objects_count = Post.objects.filter(id=pk, liked_users__id=request.user.id).count()\n post_objects = Post.objects.get(id=pk)\n if post_objects_count !=0:\n post_objects.liked_users.remove(request.user)\n response_msg = \"You disliked the post\"\n else:\n post_objects.liked_users.add(request.user)\n response_msg = \"You have liked the post\"\n return Response({'data': response_msg}, status=status.HTTP_200_OK)",
"def save(self):\n self.save_to_db()\n if hasattr(self, 'id'):\n self.status_code = 201\n return True\n else:\n self.errors['messages'].append(\"DataBase Error, Please Try again\")\n self.status_code = 500\n return False",
"def checkIfExists(dbconnection, title):\n cursor = dbconnection.cursor()\n output = \"\"\n title = title.replace(\"'\", \"''\")\n try:\n cursor.execute(\"SELECT * FROM transcriptions WHERE title = '\" + title + \"';\")\n dbconnection.commit()\n output = cursor.fetchone()\n cursor.close()\n if(output is None):\n return False\n else:\n return True\n except:\n dbconnection.rollback()\n cursor.execute(\"SELECT * FROM transcriptions WHERE title = '\" + title + \"';\")\n dbconnection.commit()\n output = cursor.fetchone()\n cursor.close()\n if(output is None):\n return False\n else:\n return True",
"def verify_post(post_id, connection):\n\n database = connection['test']\n collection = database['posts']\n\n try:\n post = collection.find_one({\"_id\" : ObjectId(post_id)})\n except InvalidId:\n post = None\n \n if post is None:\n return False\n\n return True"
] | [
"0.6219022",
"0.59738153",
"0.59722084",
"0.59081906",
"0.5828247",
"0.5757684",
"0.5751083",
"0.5581836",
"0.556682",
"0.556317",
"0.55112916",
"0.5496816",
"0.54759526",
"0.54759526",
"0.54688525",
"0.54517484",
"0.5447654",
"0.54395247",
"0.5435582",
"0.5389179",
"0.53644985",
"0.5342778",
"0.533283",
"0.53301585",
"0.53248775",
"0.5323773",
"0.5322133",
"0.53150445",
"0.53122556",
"0.5308354"
] | 0.7226926 | 0 |
Repost a resource into the group and update the database. | def group_repost(user_id, resource_type, resource_id):
logging.info('Reposting %s %d...', resource_type, resource_id)
soundcloud.put('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_repost(user_id, resource_type, resource_id)
db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_post_resource(self, req, resp, *args, **params):\n instance = self.get_object(**params)\n self.save_object(req.params, req, resp, instance, **params)",
"def post_security_group_update(self, resource_id, resource_dict):\n pass",
"def _resubmit(self, *args, **kwargs):\n self.retry()",
"def post_database_node_update(self, resource_id, resource_dict):\n pass",
"def post_project_update(self, resource_id, resource_dict):\n pass",
"def post(self, post):\n\n self._post = post",
"def post(self, post):\n\n self._post = post",
"def test_post_recreate(self):\n user = self.make_user()\n school_year = SchoolYearFactory(school__admin=user)\n bundle = BundleFactory(school_year=school_year, status=Bundle.Status.COMPLETE)\n data = {\"recreate\": \"true\"}\n\n with self.login(user):\n response = self.post(\"reports:bundle\", school_year.pk, data=data)\n\n self.response_302(response)\n bundle.refresh_from_db()\n assert bundle.status == Bundle.Status.PENDING",
"def post_domain_update(self, resource_id, resource_dict):\n pass",
"def save(self, context=None):\n updates = self.obj_get_changes()\n self.dbapi.update_nodegroup(self.cluster_id, self.uuid, updates)\n\n self.obj_reset_changes()",
"def test_update_resource_group(self):\n pass",
"def post_update():\n\n\n user_id = session['user_id']\n post = request.form.get('post')\n\n Update.add_update(user_id, post)\n\n return \"Updated Post\"",
"def post_routing_instance_update(self, resource_id, resource_dict):\n pass",
"def put(self):\n status = ErrorCode.SUCCESS\n try:\n data = DotDict(json_decode(self.request.body))\n cid = self.current_user.cid\n tid = self.current_user.tid\n gid = data.gid\n name = data.name\n logging.info(\"[UWEB] Modify group request: %s, cid: %s\",\n data, self.current_user.cid)\n except Exception as e:\n status = ErrorCode.ILLEGAL_DATA_FORMAT\n logging.exception(\"[UWEB] Invalid data format. body:%s, Exception: %s\",\n self.request.body, e.args)\n self.write_ret(status)\n return\n\n try: \n group = self.get_group_by_cid(cid, name)\n if group:\n status = ErrorCode.GROUP_EXIST\n self.write_ret(status)\n return\n\n self.db.execute(\"UPDATE T_GROUP\"\n \" SET name = %s\"\n \" WHERE id = %s\",\n name, gid)\n\n # NOTE: wspush to client \n if status == ErrorCode.SUCCESS:\n WSPushHelper.pushS3(tid, self.db, self.redis)\n\n self.write_ret(status)\n except Exception as e:\n logging.exception(\"[UWEB] Modify group failed. cid: %s, Exception: %s\",\n self.current_user.cid, e.args)\n status = ErrorCode.SERVER_BUSY\n self.write_ret(status)",
"def test_update_item_using_post(self):\n pass",
"def _post(self, request_obj):\n return self._execute_action(request_obj, [CreateAction, EditAction], 'POST')",
"def resubmit(self):\n self.keep_data = True\n ManagedJob.submit(self)",
"def commit(self):",
"def save(self):\n updates = self.obj_get_changes()\n db_obj = self.dbapi.update_action_plan(self.uuid, updates)\n obj = self._from_db_object(\n self.__class__(self._context), db_obj, eager=False)\n self.obj_refresh(obj)\n\n def _notify():\n notifications.action_plan.send_update(\n self._context, self, old_state=self.old_state)\n\n _notify()\n\n self.obj_reset_changes()",
"def update_group():\n _id = request.form['_id']\n name = request.form['name']\n data, code, message = FIELD_SERVICE.update_group(_id, name)\n return __result(data, code, message)",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass",
"def post(self):\n pass"
] | [
"0.6204693",
"0.6052708",
"0.59446126",
"0.59445024",
"0.5799984",
"0.57475936",
"0.57475936",
"0.57396287",
"0.5733804",
"0.5732188",
"0.5626466",
"0.5625362",
"0.5611247",
"0.5603161",
"0.55945265",
"0.55783963",
"0.55557644",
"0.552884",
"0.54994965",
"0.5487721",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469",
"0.5483469"
] | 0.70165044 | 0 |
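A minimal sketch of the storage side assumed by the `group_repost` record above: `db.record_repost` and `db.commit` are not shown in the dataset, so this sqlite3-backed stand-in (class name, table name and columns are all assumptions) only illustrates the insert-then-commit pattern the function relies on.

```python
# Hypothetical stand-in for the `db` module used by group_repost() above;
# the real implementation is not part of this record.
import sqlite3

class RepostStore:
    def __init__(self, path=":memory:"):
        self.conn = sqlite3.connect(path)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS reposts ("
            "user_id INTEGER, resource_type TEXT, resource_id INTEGER)"
        )

    def record_repost(self, user_id, resource_type, resource_id):
        # One row per repost; group_repost() calls commit() separately.
        self.conn.execute(
            "INSERT INTO reposts VALUES (?, ?, ?)",
            (user_id, resource_type, resource_id),
        )

    def commit(self):
        self.conn.commit()

db = RepostStore()
db.record_repost(1, "track", 42)
db.commit()
```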
Delete a resource from the group and update the database. | def group_delete(user_id, resource_type, resource_id):
logging.info('Deleting %s %d...', resource_type, resource_id)
soundcloud.delete('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
db.record_deletion(user_id, resource_type, resource_id)
db.commit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self):\n import labstep.entities.resource.repository as resourceRepository\n\n return resourceRepository.editResource(self, deleted_at=getTime())",
"def test_delete_resource_group(self):\n pass",
"def delete(openstack_resource):\n openstack_resource.delete()",
"def delete():\n name = request.json['name']\n group = models.user.Group.get(name)\n if not group:\n raise Absent('Group does not exists.', deletion=False)\n else:\n models.db.session.delete(group)\n models.db.session.commit()\n return response(200, deletion=True)",
"def post_security_group_delete(self, resource_id, resource_dict):\n pass",
"def pre_security_group_delete(self, resource_id):\n pass",
"def delete():",
"def delete(self, identifier):\n self.get(identifier)\n conn = self.get_connector()\n cursor = conn.cursor()\n\n query = \"delete from {0} where {2}={1}\".format(\n self.ressource_config[\"table\"],\n identifier,\n self.model.pk_field.name)\n try:\n cursor.execute(query)\n except sqlite3.IntegrityError, e:\n message = \"\"\n if \"foreign\" in e.message:\n message = \"\"\"another ressource depends on this\n object. Cloud not delete before all ressources\n depending on it are also deleted\"\"\"\n\n raise BadRequest(message)\n\n conn.commit()\n conn.close()",
"def do_del_group(dbsync, group):\n pass",
"def delete(self, context, id_):\n try:\n db_resource_data = self.db_api.get_resource(\n context, id_)\n\n if db_resource_data['type'] == (eon_const.\n EON_RESOURCE_TYPE_ESX_CLUSTER):\n msg = _(\"Delete operation not supported for type %s\"\n % db_resource_data['type'])\n raise exception.DeleteException(err=msg)\n\n _resource_data = _make_response(\n db_resource_data)\n _resource_data_log = deepcopy(_resource_data)\n _resource_data_log.pop(\"meta_data\", None)\n LOG.info(\"Details for the ID %s is: %s\" % (\n id_, logging.mask_password(_resource_data_log)))\n driver_obj = driver.load_resource_driver(\n db_resource_data['type'])\n driver_obj.validate_delete(db_resource_data)\n driver_obj.delete(context, id_)\n self.db_api.delete_resource(context, id_)\n # delete the data from hlm input model\n try:\n LOG.info(\"[%s] remove resource from input model\" % id_)\n hux_obj = HLMFacadeWrapper(context)\n resource_id = db_resource_data[eon_const.EON_RESOURCE_ID]\n hux_obj.delete_server(resource_id)\n hux_obj.commit_changes(resource_id, \"Delete compute resource\")\n except facade_excep.NotFound:\n # log and do nothing\n LOG.warn(\"[%s] resource not found in hlm input model\" % id_)\n LOG.info(\"[%s]: Deleted resource from eon\" % id_)\n # Notify the message to consumers\n try:\n message = {\"resource_id\": id_,\n \"resource_state\": eon_const.EON_RESOURCE_STATE_REMOVED,\n \"resource_details\": _resource_data,\n }\n message_notifier.notify(context,\n message_notifier.EVENT_PRIORITY_INFO,\n message_notifier.EVENT_TYPE[\n 'removed'],\n message)\n except Exception as ex:\n LOG.exception(\n \"Exception while notifying the message : %s\" % ex)\n except exception.NotFound as e:\n msg = (\"Failed to delete resource %s. Error: %s\") % (\n _resource_data['name'], e.message)\n LOG.exception(msg)\n raise e",
"def delete(self, resource, id):\n self.request('/' + resource + '/' + str(id), 'DELETE')\n return True",
"def delete(self, _id):",
"def remove_resource(self, graph_db):\n with mutex:\n neo_resource.delete_node(graph_db, self.index)",
"def delete(self):\n ...",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n pass",
"def delete(self):\n # gid must be specified for deletion\n gid = self.get_query_argument('gid')\n self.write(self._rpc.aria2.remove(self._token, gid))",
"def delete(self):\n DATABASE_CONNECTION.delete(self.__class__.__name__, self.id)",
"def delete_group(gid):\n if request.method == 'POST':\n hl.deleteGroup(gid)\n return redirect('/users')",
"def delete(self):\n query = \"DELETE FROM \" + self.table + \" WHERE \" + self.idfield + \"=%s\"\n dbh = dbstuff.getRW(self.dbh_key)\n try:\n c = dbh.cursor()\n c.execute(query, self.id)\n c.close()\n dbh.commit()\n finally:\n dbstuff.release(dbh,self.dbh_key)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n self.manager.delete(self.name)",
"def delete(self):\n\n\n try:\n db = getDatabase()\n connection = db.connect()\n\n connection.delete(self)\n except Exception as e:\n raise e\n finally:\n db.dispose()",
"def after_delete(self, record):\n debug = logging.getLogger(__name__).debug\n debug('deleted group %r (%r)', record['name'], record['group_id'])\n audit('delete group', record['name'])",
"def delete_group(self, group):\n raise NotImplementedError('delete_group')",
"def delete(self):\n DBSESSION.delete(self)\n DBSESSION.commit()\n LOG.info(f\"Register of {self.str_representation} with id = {self.id} was successfully deleted.\")",
"def delete(self):\n if not self.isNew:\n #We do not check the hasData property, so we can use this function to delete records\n #without reading them first.\n #TODO: this is stupid and unclean, change it\n try:\n CFG.CX.delete ( CFG.DB.SCHEMA + \".object\", { 'objectid' : self._objectid } )\n self.clearRecord()\n self.raiseEvent ( \"record_deleted\", self )\n except pg.DatabaseError, e:\n raise Record.DataManipulationError ( \"Deleting record {1} of '{0}'\".format(self._table.name, self._objectid),\n \"\",\n e)",
"def delete(self):\n url = util.join_url(self.path, str(self['id']))\n new_attributes = self.api.delete(url)\n self.error = None\n self.merge(new_attributes)\n return self.success()"
] | [
"0.7100132",
"0.7027492",
"0.6920822",
"0.6880339",
"0.6865927",
"0.67449135",
"0.6699044",
"0.6655716",
"0.6652711",
"0.66081554",
"0.6541622",
"0.6539928",
"0.6534143",
"0.65040797",
"0.6388483",
"0.6388483",
"0.6388483",
"0.6388483",
"0.6381689",
"0.6329746",
"0.6328542",
"0.6321519",
"0.63193905",
"0.63193905",
"0.6316429",
"0.6315946",
"0.63140017",
"0.6286616",
"0.6239005",
"0.6230574"
] | 0.72901237 | 0 |
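Both the `group_repost` and `group_delete` records build the API path with the same `format()` call; a quick illustration (resource types and IDs here are made up) shows the endpoint strings it produces.

```python
# Illustration only: the endpoint paths produced by the format() call shared
# by group_repost() and group_delete() above.
for resource_type, resource_id in [("track", 42), ("playlist", 7)]:
    print('/e1/me/{}_reposts/{}'.format(resource_type, resource_id))
# /e1/me/track_reposts/42
# /e1/me/playlist_reposts/7
```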
Set a flag to update the description once all comments are processed. | def request_description_update():
global should_update_description
should_update_description = True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_description_debounced(self, value):\n self.update_description(value)",
"def set_description(desc):\n global last_description\n last_description = desc",
"def set_description(self, description):\r\n self.__description = description",
"def description(self, new_description):\r\n self.set({\"description\": new_description})",
"def set_description(self, description):\n self.description = description",
"def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding empty descriptions to root')\n self.data['description'] = ''",
"def set_description(self, description):\n self.__description = description",
"def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e",
"def set_description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def set_description(self, desc: str) -> None:\n self.metadata.data[\"description\"] = desc",
"def set_desc(self, item_desc):\r\n self.description = item_desc",
"def SetDescription(self, description):\n self.description = str(description)",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description"
] | [
"0.67883927",
"0.6781191",
"0.6655528",
"0.66096985",
"0.65891665",
"0.65389127",
"0.6520836",
"0.64286363",
"0.6415596",
"0.641495",
"0.641495",
"0.641495",
"0.641495",
"0.6397862",
"0.63956046",
"0.63900524",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967",
"0.6380967"
] | 0.75725305 | 0 |
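The `request_description_update` record above only sets a module-level flag; a hedged sketch of how such a flag is typically consumed after a batch of comments is shown below. Everything besides the flag-setting helper (the processing loop, `update_description`, the comment shape) is an assumption, not part of the record.

```python
# Illustrative consumer of the should_update_description flag; the processing
# loop and update_description() are assumptions, not part of the record above.
should_update_description = False

def request_description_update():
    # Same flag-setting helper as in the record above.
    global should_update_description
    should_update_description = True

def update_description():
    print("description rebuilt")

def process_comments(comments):
    global should_update_description
    for comment in comments:
        if comment.get("mentions_description"):
            request_description_update()
    if should_update_description:
        update_description()
        should_update_description = False

process_comments([{"mentions_description": True}, {}])
```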
`dict` of group configurations keyed by name | def groups(self):
group_config = {}
        # legacy way of treating any dict as a potential
# group config (pre #44 implementation)
# supported until vaping 2.0
for k,v in list(self.config.items()):
if isinstance(v, collections.Mapping):
group_config[k] = v
# explicit groups object (#44 implementation)
for _group_config in self.config.get("groups",[]):
group_config[_group_config["name"]] = _group_config
return group_config | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _init_group_dicts(self):\n\n all_groups = set()\n\n for detection in config['detections'].values():\n if 'action' in detection and detection['action'] == 'buy':\n if 'groups' in detection:\n for group in detection['groups']:\n all_groups.add(group)\n\n for group in all_groups:\n self.trade_sizes[group] = config['trade_min_size']\n self.trade_proceeds[group] = {}\n\n self.trade_sizes['default'] = config['trade_min_size']\n self.trade_proceeds['default'] = {}",
"def set_up_groups(self):\n groups = []\n groups.append({'groupname': 'th',\n 'grouptitle': 'TH',\n 'path': '/'})\n groups.append({'groupname': 'neutronics',\n 'grouptitle': 'Neutronics',\n 'path': '/'})\n groups.append({'groupname': 'metadata',\n 'grouptitle': 'Simulation Metadata',\n 'path': '/'})\n return groups",
"def _initGroups(self):\n defaults = self._getGroupDefaults()\n ddict = self._getDefaultGroupDict(defaults)\n\n for group in self._config.sections():\n ddict[\"_name\"] = group\n container = self.getGroupContainer(**ddict)\n self._passConfig(container, group)\n self.groups.append(container)\n\n if not self.groups:\n self.groups.append(self.getGroupContainer(**defaults._dict_))",
"def _getDefaultGroupDict(self, container):\n ddict = dict(container._dict_)\n ddict.update({\n \"_def_for_repos\": container.for_repos,\n \"_def_for_paths\": container.for_paths,\n })\n\n return ddict",
"def __init__(self, configGroups):\r\n self.config = {cls:configGroup[classes] for configGroup in configGroups for classes in configGroup for cls in IterWrapper(classes)}",
"def define_group_properties(self):\n\n # PropertyGroup\n self.propertygroup['debug']['x86'] = get_propertygroup(\n 'debug', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['debug']['x64'] = get_propertygroup(\n 'debug', 'x64', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x86'] = get_propertygroup(\n 'release', 'x86', ' and @Label=\"Configuration\"'\n )\n self.propertygroup['release']['x64'] = get_propertygroup(\n 'release', 'x64', ' and @Label=\"Configuration\"'\n )\n\n # ItemDefinitionGroup\n self.definitiongroups['debug']['x86'] = get_definitiongroup('debug', 'x86')\n self.definitiongroups['debug']['x64'] = get_definitiongroup('debug', 'x64')\n self.definitiongroups['release']['x86'] = get_definitiongroup('release', 'x86')\n self.definitiongroups['release']['x64'] = get_definitiongroup('release', 'x64')",
"def get_configs_ids(self, name=None):\n\n m_return = {}\n\n for x in self.get_configs().findall(\"config\"):\n m_return[x.find(\"name\").text] = x.get(\"id\")\n\n if name:\n return {name : m_return[name]}\n else:\n return m_return",
"def get_groups(self) -> dict:\n return dict(self._groups)",
"def config_section_map(configer):\n conf_dict = {}\n for section in configer.sections():\n conf_dict[section] = {}\n for key, val in configer.items(section):\n conf_dict[section][key] = val\n return conf_dict",
"def build_groupings(idir: str) -> dict:\n bkg_group = {key: [ifile for ifile in glob(f'{idir}/*_{key}_*.root')] for key in bkgs}\n pw_group = {key: [ifile for ifile in glob(f'{idir}/{key}*.root')] for key in powhegs}\n wh_pw_group = [ifile for name in wh_powhegs for ifile in glob(f'{idir}/{name}*.root')]\n ungrouped = [ifile for ifile in glob(f'{idir}/*.root') if 'madgraph' in ifile or 'JHU' in ifile]\n\n group = {}\n for key, files in bkg_group.items():\n if len(files) > 0:\n group[key] = files\n\n for key, files in pw_group.items():\n if len(files) > 0:\n group[key] = files\n\n for ifile in ungrouped:\n name = ifile.split('/')[-1].replace('.root', '')\n name = name.split('_SYST')[0].replace('-', '_')\n name = name.replace('_ggH125', '').replace('_VBF125', '').replace('_WH125', '').replace('_ZH125', '')\n group[name] = [ifile]\n\n if len(wh_pw_group) > 0:\n group['wh125_powheg'] = wh_pw_group\n\n return group",
"def load_group_from_config(self):\n\n group_file_name = \"cicada/config/group.yaml\"\n if os.path.isfile(group_file_name):\n self.group_data = dict()\n with open(group_file_name, 'r') as stream:\n self.group_data = yaml.safe_load(stream)\n self.all_groups = deepcopy(self.group_data)\n if self.group_data:\n keys_to_del = []\n for key, value in self.group_data.items():\n missing_file = False\n for file in value:\n if file not in self.nwb_path_list.values():\n missing_file = True\n if missing_file:\n keys_to_del.append(key)\n for key in keys_to_del:\n self.group_data.pop(key)\n self.grouped_labels = []\n if self.group_data:\n self.grouped = True\n for value in self.group_data.values():\n nwb_file_list = []\n for file in value:\n io = NWBHDF5IO(file, 'r')\n nwb_file = io.read()\n self.data_dict[nwb_file.identifier] = nwb_file\n nwb_file_list.append(nwb_file.identifier)\n self.grouped_labels.append(nwb_file_list)\n self.showGroupMenu.setEnabled(True)\n self.addGroupDataMenu.setEnabled(True)\n self.populate_menu()\n else:\n self.showGroupMenu.setEnabled(False)\n self.addGroupDataMenu.setEnabled(False)\n self.showGroupMenu.clear()\n self.addGroupDataMenu.clear()",
"def _build_config_group(self, config_name: str):\n # TODO: consider adding calibration sub-groups\n # create configuration group\n gname = config_name\n self.create_group(gname)\n\n # -- set attributes for configuration group ----\n brd_slot_num = [\n 3,\n ]\n brd_types = [\n 4,\n ]\n brd_config_indices = [\n 0,\n ]\n brd_address = [\n self.slot_info[brd_slot_num[0]][2],\n ]\n for field in (\"SIS 3305\", \"SIS 3302\"):\n config_index = 0\n brd_bool_arr = np.any(self._active_brdch[field], axis=1)\n brd_index = np.where(brd_bool_arr)[0]\n for brd in brd_index:\n # determine slot number\n slot = self.get_slot(brd + 1, field)\n if slot is None:\n warn(f\"Got no slot number for board number {brd+1}\")\n continue\n\n # update lists\n brd_slot_num.append(slot)\n brd_types.append(3 if field == \"SIS 3305\" else 2)\n brd_config_indices.append(config_index)\n brd_address.append(self.slot_info[slot][2])\n\n # increment config index\n config_index += 1\n\n # update attributes\n self[gname].attrs.update(\n {\n \"SIS crate base addresses\": np.array(brd_address, dtype=np.uint32),\n \"SIS crate board types\": np.array(brd_types, dtype=np.uint32),\n \"SIS crate config indices\": np.array(brd_config_indices, dtype=np.uint32),\n \"SIS crate max average shots\": np.int32(1),\n \"SIS crate slot numbers\": np.array(brd_slot_num, dtype=np.uint32),\n }\n )\n\n # -- Create and Populate Configuration Sub-Groups ----\n for slot, index in zip(brd_slot_num, brd_config_indices):\n adc = self.slot_info[slot][1]\n if adc == \"SIS 3820\":\n self._build_config_sis3820_subgroup(config_name, slot, index)\n elif adc == \"SIS 3302\":\n self._build_config_sis3302_subgroup(config_name, slot, index)\n elif adc == \"SIS 3305\":\n self._build_config_sis3305_subgroup(config_name, slot, index)",
"def get_case_list_by_group(config):\n # Identity = namedtuple('Identity', ['service', 'id'])\n groups = config.get('groups')\n full_case_lists = {}\n for group_name, group in groups.items():\n cases = group['cases']\n if group.get('dependencies'):\n for dep in group.get('dependencies'):\n dependencies_tests = groups.get(dep).get('cases')\n cases += dependencies_tests\n full_case_lists[group_name] = cases\n return full_case_lists",
"def settings_grp(self):\n settings_grp = self.h5[SETTINGS]\n return settings_grp",
"def _process_group(self, **config_kwargs) -> RobotGroupConfig:\n return RobotGroupConfig(self.sim_scene, **config_kwargs)",
"def make_groups(self):\n for g in self.groups:\n self.add_group(groupname=g['groupname'],\n grouptitle=g['grouptitle'],\n path_to_group=g['path'])",
"def get_new_config_group(self):\n filename = \"%(config_dir)s/%(group)s.%(time)s\" % \\\n { \"config_dir\": self.config_dir(),\n \"group\": self.group_name(),\n \"time\": common.time_suffix(),}\n common.write_file(\"w\", 0o644, filename, self.get_match_criteria())",
"def proc_group(inp):\n dic = {}\n dic.update(proc_attr(inp))\n for key in inp.keys():\n if isinstance(inp[key], h5py.Group):\n dic.update({key:proc_group(inp[key])})\n else:\n dic[key] = inp[key][()]\n pass\n return dic",
"def configure_groups(mods, apps):\n specs = configure_dd_spec_list(mods, apps)\n groups = [\n ScratchPad('scratch', config_dropdowns(specs)),\n Group('1', label='', layout='verticaltile'),\n Group('1a', label='', layout='monadthreecol'),\n Group('2', label='', layout='verticaltile'),\n Group('2a', label='', layout='maximize'),\n Group('3', label='', layout='treetab'),\n Group('3a', label='', layout='treetab'),\n Group('4', label='', layout='monadtall'),\n Group('4a', label='', layout='monadtall'),\n Group('5', label='', layout='max',\n matches=[Match(wm_class=['emacs'])]),\n Group('5a', label='', layout='max'),\n Group('6', layout='treetab', label=''),\n Group('6a', label='', layout='max'),\n Group('7', label=''),\n Group('7a', label='', layout='treetab'),\n Group('8', label='', layout='max'),\n Group('8a', label='', layout='max'),\n Group('9', label='', layout='treetab', matches=[\n Match(wm_class=['microsoft teams - preview']),\n Match(wm_class=['msoutlook-nativefier-9dd141']),\n ]),\n Group('9a', label='', layout='treetab', matches=[\n Match(wm_class=['jira-nativefier-894f7c'])\n ]),\n Group('0', label='', layout='floating'),\n ]\n keys = keymap.bind_keys(mods, apps, groups, specs)\n return (groups, keys)",
"def grouped_backend_names(self):\n groups = {}\n for provider in self.providers:\n groups.update(provider.grouped_backend_names())\n for pair in combinations(groups.values(), r=2):\n if not set.isdisjoint(set(pair[0]), set(pair[1])):\n raise ValueError('duplicate backend group definition')\n\n return groups",
"def grouping_configuration(self) -> Optional['outputs.GroupingConfigurationResponse']:\n return pulumi.get(self, \"grouping_configuration\")",
"def configSectionMap(self, sectionName):\n\n dict1 = {}\n configObj = self.config\n try:\n options = configObj.options(sectionName)\n except Exception as e:\n info(\"**** [G2]: ConfigParser exception; exiting....\\n\", e, \"\\n\")\n return {}\n\n for option in options:\n try:\n dict1[option] = configObj.get(sectionName, option)\n if dict1[option] == -1:\n info(\"**** [G2]: skip: %s\" % option, \"\\n\")\n except:\n info(\"**** [G2]: exception on %s!\" % option, \"\\n\")\n dict1[option] = None\n return dict1",
"def getAllConfigInfo(self):\r\n self._update('getAllConfigInfo')\r\n\r\n configinfo = []\r\n for gconfig in self.supervisord.options.process_group_configs:\r\n inuse = gconfig.name in self.supervisord.process_groups\r\n for pconfig in gconfig.process_configs:\r\n configinfo.append(\r\n { 'name': pconfig.name,\r\n 'group': gconfig.name,\r\n 'inuse': inuse,\r\n 'autostart': pconfig.autostart,\r\n 'group_prio': gconfig.priority,\r\n 'process_prio': pconfig.priority })\r\n\r\n configinfo.sort(key=lambda r: r['name'])\r\n return configinfo",
"def get_storable_dict(self):\n d = super().get_storable_dict()\n d.update(grp=turn_keys_into_str(self._grp), grp_order=self._grp_order)\n return d",
"def group_by(self, key, sort_key=None, repopulate=True):\n\n self.log.info('Grouping sims by: %s', str(key))\n # This works by storing the different values of the specifed parameter\n # as keys, and a list of sims whose value matches the key as the value\n pdict = {}\n for conf in self.sim_confs:\n if conf[key] in pdict:\n pdict[conf[key]].append(conf)\n else:\n pdict[conf[key]] = [conf]\n # Now all the sims with matching values for the provided key are just\n # the lists located at each key. We sort the groups in increasing order\n # of the provided key\n groups = sorted(pdict.values(), key=lambda group: group[0][key])\n # If specified, sort the sims within each group in increasing order of\n # the provided sorting key\n if sort_key:\n if callable(sort_key):\n for group in groups:\n group.sort(key=sort_key)\n else:\n for group in groups:\n group.sort(key=lambda sim: conf[sort_key])\n # groups = [SimulationGroup(sims) for sims in groups]\n if repopulate:\n self.sim_groups = groups\n self.grouped_by = key\n return groups",
"def groups(self):\n return []",
"def group_models(model_list):\n grouped_models = {}\n for item in model_list:\n grouped_models.setdefault((item['window'], item['binary_labels'], item['n_dim'], item['screwdriver_only']), []).append(item)\n\n return grouped_models",
"def create_default_groups():\n from flaskbb.fixtures.groups import fixture\n result = []\n for key, value in fixture.items():\n group = Group(name=key)\n\n for k, v in value.items():\n setattr(group, k, v)\n\n group.save()\n result.append(group)\n return result",
"def fromFile(filename, config=None, **kwargs):\n\n # overwrite existing values?\n overwrite = kwargs.pop('overwrite', False)\n\n # Config files can have grouped arguments\n # the variable to store in groups\n groups_name = kwargs.pop('groups_name', 'groups')\n # the name of the grouping key=value pair\n group_on = kwargs.pop('group_on', None)\n # the target group to extract\n primary_group = kwargs.pop('group', None)\n\n # If no config object was passed in, create one\n if config is not None:\n self = config\n else:\n self = Config(**kwargs)\n self._filename = filename\n self._path = os.path.abspath(os.path.dirname(filename))\n\n self[group_on] = primary_group\n\n # current group\n group = self # start with the base config object as the group\n group_name = None\n groups = {}\n self[groups_name] = groups\n\n if filename is not None:\n file = open(filename, 'r')\n for line in file:\n line = line.strip()\n # skip comments\n if line == '' or line[0] in ('#', '%') or line[:2] in ('//',):\n continue\n key, value = line.split('=', 1)\n key = key.strip()\n value = value.strip()\n\n # using eval() is inherently insecure, but allows for nice options\n # for setting options in the config file\n\n # first we attempt to evaluate the value without using the\n # config object as the locals\n no_locals_val = None\n try:\n no_locals_val = eval(value)\n except:\n pass\n\n # now we evaluate the value with the config object as the locals\n locals_val = None\n try:\n locals_val = eval(value, {}, self.__dict__)\n except:\n locals_val = value\n\n # if the key equals the group tag, start a new grouping\n if key == group_on:\n group_name = locals_val\n if group is not None:\n self[locals_val] = group\n group = Config(**kwargs)\n groups[locals_val] = group\n\n # start at the next line now that we have a group object\n continue\n\n if type(locals_val) is str:\n # try string replacement using the config object as the dict\n try:\n locals_val = locals_val % self\n except KeyError:\n pass\n try:\n locals_val = locals_val % group\n except KeyError:\n pass\n\n # if their string representations are not equal then the config\n # object, used as locals, was actually need to evaluate the value\n # so store the original string, it will be needed to reconstruct things\n if str(no_locals_val) != str(locals_val):\n group.__orig[key] = value\n\n if overwrite:\n group[key] = locals_val\n else:\n cur_val = group.get(key, None)\n group[key] = locals_val if cur_val is None else cur_val\n\n # if the current group is the target/primary group the add the\n # key=value directly to the config\n if group_name == primary_group:\n if overwrite:\n self[key] = locals_val\n else:\n cur_val = self.get(key, None)\n self[key] = locals_val if cur_val is None else cur_val\n\n file.close()\n\n # if there is only one group, extract it outwards to the top level\n # if len(groups) == 1:\n # self.__dict__[group_on] = groups.iterkeys().next()\n return self",
"def _build_config(data):\n defaults = data['defaults']\n clusters = data['clusters']\n\n # set each cluster settings\n for cluster in clusters:\n\n # set each setting for each group if not present\n for group_key, group_val in defaults.items():\n if group_key in cluster:\n for key, val in group_val.items():\n cluster[group_key].setdefault(key, val)\n else:\n cluster[group_key] = copy.deepcopy(group_val)\n\n # set default endpoint if nothing else is set for kibana\n cluster['kibana'].setdefault('url', cluster['es']['url'])\n\n # by default, exclude nothing!\n cluster.setdefault('exclude', [])\n cluster['exclude'] = set(cluster['exclude'])\n\n return data"
] | [
"0.64101",
"0.6327717",
"0.63224345",
"0.61177534",
"0.61143225",
"0.6107522",
"0.6007326",
"0.58925676",
"0.58895403",
"0.5835795",
"0.582298",
"0.580441",
"0.5785304",
"0.56956583",
"0.5687126",
"0.5680097",
"0.5602832",
"0.5600695",
"0.55983984",
"0.5597506",
"0.5594908",
"0.5583851",
"0.55822206",
"0.55762947",
"0.5542788",
"0.5527331",
"0.5490602",
"0.5481316",
"0.54777235",
"0.54757214"
] | 0.7403048 | 0 |
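The `groups` record above accepts two config shapes; a standalone rendition (assuming `config` is a plain `dict`, with example configs that are not from the dataset) shows both forms producing the same name-keyed mapping. Note that `collections.Mapping`, used in the record, was removed in Python 3.10, so this sketch uses `collections.abc.Mapping`.

```python
# Standalone version of the grouping logic above; config shapes are examples.
from collections.abc import Mapping

def group_configs(config):
    group_config = {}
    # legacy form: any mapping value is treated as a group config
    for k, v in config.items():
        if isinstance(v, Mapping):
            group_config[k] = v
    # explicit form: a "groups" list of dicts keyed by their "name" field
    for g in config.get("groups", []):
        group_config[g["name"]] = g
    return group_config

legacy = {"interval": "3s", "dns": {"hosts": ["1.1.1.1"]}}
explicit = {"groups": [{"name": "dns", "hosts": ["1.1.1.1"]}]}
print(group_configs(legacy))    # {'dns': {'hosts': ['1.1.1.1']}}
print(group_configs(explicit))  # {'dns': {'name': 'dns', 'hosts': ['1.1.1.1']}}
```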
creates and returns a new message `dict`, setting `type`, `source`, `ts`, `data`; `data` is initialized to an empty array. Returns message (`dict`) | def new_message(self):
msg = {}
msg['data'] = []
msg['type'] = self.plugin_type
msg['source'] = self.name
msg['ts'] = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
return msg | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, msg: dict):\n\n for key, value in msg.items():\n setattr(self, key, value)\n self.data = msg\n self.dt = datetime.fromisoformat(self.timestamp)",
"def transformMessage(self):\n\n message = json.loads(self.message)\n\n call_data = {\n 'call_id': message.get('call_id')\n }\n\n if message.get('type') == 'start':\n call_data['start_timestamp'] = message.get('timestamp')\n call_data['source'] = message.get('source')\n call_data['destination'] = message.get('destination')\n else:\n call_data['stop_timestamp'] = message.get('timestamp')\n\n self.data = call_data\n return self.data",
"def parseMsg(self):\n # These 4 elements are always present\n # \"ToUserName\"\n # \"FromUserName\"\n # \"CreateTime\"\n # \"MsgType\"\n\n # Following elements depends on MsgType\n # \"MsgId\"\n # \"Content\"\n # \"MediaId\"\n # \"PicUrl\"\n # \"Format\"\n # \"ThumbMediaId\"\n # \"Location_X\"\n # \"Location_Y\"\n # \"Scale\"\n # \"Label\"\n # \"Title\"\n # \"Description\"\n # \"Url\"\n # \"Event\"\n # \"EventKey\"\n # \"Ticket\"\n # \"Latitude\"\n # \"Longitude\"\n # \"Precision\"\n # \"Recognition\"\n\n def getField(req, key):\n if req.find(key) != None:\n return req.find(key).text\n\n\n msg = {}\n req = et.fromstring(self.request.body.decode(\"utf-8\"))\n\n # These 4 elements are always present\n msg[\"ToUserName\"] = getField(req, \"ToUserName\")\n msg[\"FromUserName\"] = getField(req, \"FromUserName\")\n msg[\"CreateTime\"] = getField(req, \"CreateTime\")\n msg[\"MsgType\"] = getField(req, \"MsgType\")\n\n # Following elements depends on MsgType\n msg[\"MsgId\"] = getField(req, \"MsgId\")\n msg[\"Content\"] = getField(req, \"Content\")\n msg[\"MediaId\"] = getField(req, \"MediaId\")\n msg[\"PicUrl\"] = getField(req, \"PicUrl\")\n msg[\"Format\"] = getField(req, \"Format\")\n msg[\"ThumbMediaId\"] = getField(req, \"ThumbMediaId\")\n msg[\"Location_X\"] = getField(req, \"Location_X\")\n msg[\"Location_Y\"] = getField(req, \"Location_Y\")\n msg[\"Scale\"] = getField(req, \"Scale\")\n msg[\"Label\"] = getField(req, \"Label\")\n msg[\"Title\"] = getField(req, \"Title\")\n msg[\"Description\"] = getField(req, \"Description\")\n msg[\"Url\"] = getField(req, \"Url\")\n msg[\"Event\"] = getField(req, \"Event\")\n msg[\"EventKey\"] = getField(req, \"EventKey\")\n msg[\"Ticket\"] = getField(req, \"Ticket\")\n msg[\"Latitude\"] = getField(req, \"Latitude\")\n msg[\"Longitude\"] = getField(req, \"Longitude\")\n msg[\"Precision\"] = getField(req, \"Precision\")\n msg[\"Recognition\"] = getField(req, \"Recognition\")\n return msg",
"def _build_common_message(msg_title,msg_text,msg_type):\n data = {\n 'message': {\n \"topic\" : '',\n 'data': {\n 'title': '',\n 'message': '',\n 'type' : ''\n }\n }\n }\n data['message']['topic'] = FCM_TOPIC\n data['message']['data']['title'] = msg_title\n data['message']['data']['message'] = datetime.datetime.now().strftime(\"%H:%M:%S\") + \" \" + msg_text\n data['message']['data']['type'] = msg_type\n return data",
"def _build_common_message(msg_title,msg_text,msg_type):\n data = {\n 'message': {\n \"topic\" : '',\n 'data': {\n 'title': '',\n 'message': '',\n 'type' : ''\n }\n }\n }\n data['message']['topic'] = FCM_TOPIC\n data['message']['data']['title'] = msg_title\n data['message']['data']['message'] = datetime.datetime.now().strftime(\"%H:%M:%S\") + \" \" + msg_text\n data['message']['data']['type'] = msg_type\n return data",
"def set_message_data(self) -> None:\n if PrimaryFlight.MESSAGETYPE == self.type:\n self.message_data = PrimaryFlight(self.data, self.config)\n elif GPS.MESSAGETYPE == self.type:\n self.message_data = GPS(self.data, self.config)\n elif Attitude.MESSAGETYPE == self.type:\n self.message_data = Attitude(self.data, self.config)\n elif EngineData.MESSAGETYPE == self.type:\n self.message_data = EngineData(self.data, self.config)\n else:\n self.message_data = MessageData(self.data, self.config)",
"def __init__(self):\n self._msg_dict = {}",
"def create(self, data):\n\n\t\tmessage = data\n\t\tmessage[\"id\"] = self.count = self.count + 1\n\t\tself.messages.append(message)\n\n\t\treturn(message)",
"def create_message(message):\n return {\n \"id\": message.id,\n \"from\": message.sender,\n \"preview\": create_preview(message),\n \"subject\": message.subject,\n \"date\": message.date_created,\n }",
"def NewFromJsonDict(data):\n return DirectMessage(created_at=data.get('created_at', None),\n recipient_id=data.get('recipient_id', None),\n sender_id=data.get('sender_id', None),\n text=data.get('text', None),\n sender_screen_name=data.get('sender_screen_name', None),\n id=data.get('id', None),\n recipient_screen_name=data.get('recipient_screen_name', None))",
"def __init__(self):\n self.msg_dict = dict() # msg: last_print_time_stamp",
"def data_to_msg(self, data):\n fields_names = [self.primary_key] + [field.name for field in self.fields]\n data_dict = {}\n for idx, field in enumerate(fields_names):\n data_dict[field] = data[idx]\n return MsgWithTag.from_dict(data_dict)",
"def ReceivedMessageCatalog():\n aio_updates = numpy.zeros((len(AIO_NODE_HELPER), len(MESSAGE_TYPE_HELPER)),\n dtype=numpy.ulonglong)\n # aio_updates[source][message_type] > 0 if any message is received.\n aio_util.GetAioUpdates(aio_updates)\n received_messages = {}\n for message_type in range(len(MESSAGE_TYPE_HELPER)):\n aio_nodes = numpy.nonzero(aio_updates[:, message_type])[0]\n if aio_nodes.size:\n received_messages[message_type] = aio_nodes\n return received_messages",
"def message_parser(msg):\n # Start a new message\n new_msg = {\n \"messageType\": msg[\"messageType\"],\n \"messageID\": msg[\"messageID\"],\n \"messageURL\": msg[\"messageURL\"],\n \"messageIssueTime\": msg[\"messageIssueTime\"],\n 'messageBody': {}\n }\n # Break down the incoming message's messageBody and save to new message\n sections = msg[\"messageBody\"].split(\"\\n## \")\n for part in sections:\n try:\n header, body = part.split(\":\", 1) # only split on first occurrence of colon, not all occurrences (ie dates)\n header = header.strip(\"##\").replace(\" \", \"_\").lower() # clean up headers\n body = body.lstrip(\" \").replace(\"\\n\", \" \").replace(\"#\", \"\")\n if header:\n new_msg[\"messageBody\"][header] = body\n except ValueError:\n continue\n # Break down notes if present and save to new message\n if \"notes\" in new_msg[\"messageBody\"] and new_msg[\"messageBody\"][\"notes\"]:\n try:\n notes_wo_dsc = new_msg[\"messageBody\"][\"notes\"].split(\"Disclaimer\")[0] # First set the important stuff to a var\n new_msg[\"messageBody\"][\"notes\"] = {} # now turn notes into an object\n parent_header, children = notes_wo_dsc.split(\":\", 1)\n parent_header = parent_header.lstrip(\" \")\n new_msg[\"messageBody\"][\"notes\"][parent_header] = {} # make a new object for more children\n child_parts = children.split(\" \")\n child_header = None\n new_body = \"\"\n for part in child_parts:\n if part.endswith(\":\"):\n child_header = part.strip(\":\")\n else:\n new_body += part + \" \"\n if child_header:\n new_msg[\"messageBody\"][\"notes\"][parent_header][child_header] = new_body\n except ValueError:\n pass\n # We don't need the disclaimers taking up memory\n if \"disclaimer\" in new_msg[\"messageBody\"]:\n del new_msg[\"messageBody\"][\"disclaimer\"]\n return new_msg",
"def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body",
"def build_message(self, src, dst, typee, msg):\n my_dict = {\n SRCE: src,\n DEST: dst,\n TYPE: typee,\n MESG: msg\n }\n\n return json.dumps(my_dict).encode()",
"def _create_add_message_from_order(order):\n message = {}\n message.update({\"message-type\": \"A\"})\n message.update({\"instrument\": order.instrument})\n message.update({\"order-id\": order.order_id})\n message.update({\"price\": int(order.price)})\n message.update({\"quantity\": int(order.quantity)})\n message.update({\"side\": side_to_str(order.side)})\n message.update({\"timestamp\": order.timestamp})\n message.update({\"snapshot\": 1})\n return message",
"def into_data(self) -> Dict[str, Any]:\n data = dict(producer=self.producer)\n if self.mtime_ns > 0:\n data[\"mtime\"] = str(_datetime_from_nanoseconds(self.mtime_ns))\n return data",
"def _populate_message(template_dictionary):\n message_type = remove_from_session(KEY_MESSAGE_TYPE)\n if message_type is None:\n message_type = TYPE_INFO\n template_dictionary[KEY_MESSAGE_TYPE] = message_type\n\n message = remove_from_session(KEY_MESSAGE)\n if message is None:\n message = \"\"\n template_dictionary[KEY_MESSAGE] = message\n return template_dictionary",
"def __create_msg(self, ping):\n now = rospy.get_rostime()\n output = {\n \"info\": {},\n \"timestamp\": int(now.secs * 1e3 + now.nsecs * 1e-6),\n \"data\": ping.T.tolist()\n }\n return json.dumps(output)",
"def createMessage(self, sender: str, to: str, subject: str, message_text: str):\n message = MIMEText(message_text)\n message['to'] = to\n message['from'] = sender\n message['subject'] = subject\n raw_message = {'raw': base64.urlsafe_b64encode(message.as_bytes())}\n raw_message['raw']=raw_message['raw'].decode('utf-8')\n return raw_message",
"def handle_message(self, data, task_type, msgtype):\n data['message'] = data['message'].upper()\n return data",
"def _construct_message(self):\n self.message[\"text\"] = \"\"\n if self.from_:\n self.message[\"text\"] += \"From: \" + self.from_ + \"\\n\"\n if self.subject:\n self.message[\"text\"] += \"Subject: \" + self.subject + \"\\n\"\n\n self.message[\"text\"] += self.body\n self._add_attachments()",
"def __init__(self):\n self.recent_messages= {}",
"def process_message(message):\n return {\n \"subject\": message.subject,\n \"sender\": message.sender_name,\n \"header\": message.transport_headers,\n \"body\": message.plain_text_body,\n \"creation_time\": message.creation_time,\n \"submit_time\": message.client_submit_time,\n \"delivery_time\": message.delivery_time,\n \"attachment_count\": message.number_of_attachments,\n }",
"def generate_dict(self):\n # verify preferred timestamp exists in the structure...\n if not self._check_preferred_timestamps():\n raise SampleException(\"Preferred timestamp not in particle!\")\n\n # build response structure\n self._encoding_errors = []\n if self._values is None:\n self._values = self._build_parsed_values()\n result = self._build_base_structure()\n result[DataParticleKey.STREAM_NAME] = self.data_particle_type()\n result[DataParticleKey.VALUES] = self._values\n\n return result",
"def __init__(self, msgtype,precision,maxFreqError,originateNanos,receiveNanos,transmitNanos,originalOriginate=None):\n super(WCMessage,self).__init__()\n self.log = logging.getLogger(\"dvbcss.protocol.wc.WCMessage\")\n\n self.msgtype = msgtype #: (read/write :class:`int`) Type of message. 0=request, 1=response, 2=response-with-followup, 3=followup\n self.precision = precision #: (read/write :class:`int`) Precision encoded in log base 2 seconds between -128 and +127 inclusive. For example: -10 encodes a precision value of roughly 0.001 seconds.\n self.maxFreqError = maxFreqError #: (read/write :class:`int`) Maximum frequency error in units of 1/256ths ppm. For example: 12800 encodes a max freq error of 50ppm.\n self.originateNanos = originateNanos #: (read/write :class:`int`) Originate timevalue in integer number of nanoseconds\n self.receiveNanos = receiveNanos #: (read/write :class:`int`) Receive timevalue in integer number of nanosecond\n self.transmitNanos = transmitNanos #: (read/write :class:`int`) Transmit timevalue in integer number of nanosecond\n self.originalOriginate = originalOriginate #: (read/write :obj:`None` or (:class:`int`, :class:`int`)) Optional original encoding of the originate timevalue as (seconds, nanos). Overrides `originateNanos` when the message is packed if the value is not `None`. ",
"def makeAMQPmsg(self):\n msg = {'msgType' : 'AgentUpdate',\n 'AgentType': 'Bus',\n 'Extnum':self.Extnum,\n 'Vm': self.cv['Vm'],\n 'Va': self.cv['Va'],\n }\n return msg",
"def parse_message(msg):\n # the message number, increments with each message\n msg_number = msg[0][0]\n # the message type\n msg_type = msg[0][1][0]\n return {\n 'noop': parse_noop_message,\n 'c': parse_content_message,\n }[msg_type](msg, msg_number)",
"def populate_data_from_message(self, msg):\n for field in self:\n try:\n setattr(field, 'data', getattr(msg, field.name))\n except:\n continue"
] | [
"0.657394",
"0.64370185",
"0.6369165",
"0.6215842",
"0.6215842",
"0.6197032",
"0.61927515",
"0.6148582",
"0.6145553",
"0.61364484",
"0.60306203",
"0.5966783",
"0.5927442",
"0.58727574",
"0.5813844",
"0.5785403",
"0.57741964",
"0.57672644",
"0.57665217",
"0.57646066",
"0.57078",
"0.56897306",
"0.5653105",
"0.5650547",
"0.56343186",
"0.5628319",
"0.5621228",
"0.56078804",
"0.55921644",
"0.5564701"
] | 0.74926597 | 0 |
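The `ts` computed in the `new_message` record above is Unix epoch seconds built by subtracting the epoch from `utcnow()`; a small check (illustrative only) confirms it matches `time.time()` up to clock resolution. `utcnow()` is deprecated as of Python 3.12 in favor of `datetime.now(timezone.utc)`, but behaves the same here.

```python
# Illustration: the manual epoch subtraction used in new_message() yields the
# same POSIX timestamp as time.time(), give or take clock resolution.
import datetime
import time

ts = (datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds()
print(abs(ts - time.time()) < 1.0)  # True
```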
creates a subprocess with the passed args. Returns a Popen instance | def popen(self, args, **kwargs):
self.log.debug("popen %s", ' '.join(args))
return vaping.io.subprocess.Popen(args, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_process(self, args=[], *popenargs, **kwargs):\n try:\n startupinfo = subprocess.STARTUPINFO()\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n kwargs.setdefault('startupinfo', startupinfo)\n except:\n pass\n kwargs.setdefault('universal_newlines', True)\n kwargs.setdefault('stdin', sys.stdin)\n return subprocess.Popen(self.build_args(args), *popenargs, **kwargs)",
"def create_subprocess(command, args):\n\n proc = subprocess.Popen([command] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, _ = proc.communicate()\n\n return proc.returncode, output",
"def Popen(self, *unargs, **kwargs):\r\n cmdline = None\r\n if 'args' in kwargs:\r\n cmdline = kwargs['args']\r\n else:\r\n cmdline = unargs[0]\r\n return PopenWrapper.WaitWrapper(subprocess_.Popen(*unargs, **kwargs), self, cmdline)",
"def make_subprocess(cmdline, stdout=False, stderr=False, stdin=False,\n universal_newlines=False, close_fds=True, env=None):\n LOG.info(\"Running cmd '%s'\" % \" \".join(cmdline))\n kwargs = {}\n kwargs['stdout'] = stdout and subprocess.PIPE or None\n kwargs['stderr'] = stderr and subprocess.PIPE or None\n kwargs['stdin'] = stdin and subprocess.PIPE or None\n kwargs['universal_newlines'] = universal_newlines\n kwargs['close_fds'] = close_fds\n kwargs['env'] = env\n try:\n proc = subprocess.Popen(cmdline, **kwargs)\n except OSError, e: # noqa\n if e.errno == errno.ENOENT:\n raise CommandNotFound\n else:\n raise\n return proc",
"def popen(self, args, bufsize=0, stdin=None, stdout=None, stderr=None, cwd=None, env=None, tty=False, compress=False): \n return subprocess.Popen(args, bufsize=bufsize, cwd=cwd, env=env, stdin=stdin, stdout=stdout, stderr=stderr)",
"def popener(\n args: models.CommandArgs,\n *,\n stdin: Optional[int] = DEVNULL,\n stdout: Optional[int] = DEVNULL,\n stderr: Optional[int] = DEVNULL,\n shell: Optional[bool] = None,\n text: bool = False,\n) -> Popen:\n return Popen(\n args,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n shell=_get_shell(args, shell),\n universal_newlines=text,\n )",
"def run_subprocess(self, *cmd_and_args):\n\n command_line = \" \".join(cmd_and_args)\n self.logger.debug(\"Running: %s\", command_line)\n\n return subprocess.Popen(command_line, shell=True, close_fds=True)",
"def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )",
"def spawn(*args):\n # Adapted from ranger.ext.spawn\n process = Popen(args, stdout=PIPE, shell=True)\n stdout, stderr = process.communicate()\n return stdout.decode('utf-8')",
"def execute(args):\n print '################################'\n print 'args: ', args\n p = subprocess.Popen(args, shell=True, executable='/bin/bash')\n # p = subprocess.call(args, shell=True, executable='/bin/bash')\n p.wait()\n return p\n print '################################'",
"def Popen(self, args, **kwargs):\n # Invoke subprocess.check_output\n if self.command.verbosity >= 2:\n print(\">>> {cmdline}\".format(\n cmdline=' '.join(shlex.quote(arg) for arg in args)\n ))\n\n return self._subprocess.Popen(\n [\n str(arg) for arg in args\n ],\n **self.final_kwargs(**kwargs)\n )",
"def StartCmd(args, cwd=None, shell=False, env=None):\n _ValidateAndLogCommand(args, cwd, shell)\n return Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=shell,\n cwd=cwd,\n env=env)",
"def call(*args, **kwargs):\n return Popen(*args, **kwargs).wait()",
"def open_subprocess(self, args_, subprocess_key=None):\n\n if subprocess_key in self.subprocess and self.subprocess[subprocess_key].poll is not None:\n # TODO better error class\n\n raise AssertionError(\"process '%s'(pid:%s) already exist and still running\" % (\n subprocess_key, self.subprocess[subprocess_key].pid))\n\n child_process = subprocess.Popen(args_)\n if subprocess_key is None:\n subprocess_key = str(child_process.pid)\n self.subprocess[subprocess_key] = child_process\n str_args = \" \".join(map(str, args_))\n self.log(\"open subprocess pid:%s, cmd='%s'\" % (child_process.pid, str_args))\n\n return child_process.pid",
"def spawn(self, arguments=None, environment=None):\n return subprocess.Popen(\n args=[self.executable] + ([] or arguments),\n # do not redirect std streams\n # this fakes the impression of having just one program running\n stdin=None,\n stdout=None,\n stderr=None,\n env=environment,\n )",
"def create_process(cmd, root_helper=None, addl_env=None):\n if root_helper:\n cmd = shlex.split(root_helper) + cmd\n cmd = map(str, cmd)\n\n env = os.environ.copy()\n if addl_env:\n env.update(addl_env)\n\n obj = subprocess_popen(cmd, shell=False,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=env)\n\n return obj, cmd",
"def start_process(self, args):\n try:\n with open(os.devnull, 'w') as devnull:\n popenObj = subprocess.Popen(\n args, stdout=devnull, stderr=subprocess.PIPE, cwd=\"/tmp/\")\n popenObj.name = args\n return popenObj\n except Exception as e:\n self.logger.error(\n \"Cannot start process %s due to reason:%s\", args, e)\n raise e",
"def subprocess_Popen(command, **params):\r\n startupinfo = None\r\n if os.name == 'nt':\r\n startupinfo = subprocess.STARTUPINFO()\r\n try:\r\n startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW\r\n except AttributeError:\r\n startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW\r\n\r\n # Anaconda for Windows does not always provide .exe files\r\n # in the PATH, they also have .bat files that call the corresponding\r\n # executable. For instance, \"g++.bat\" is in the PATH, not \"g++.exe\"\r\n # Unless \"shell=True\", \"g++.bat\" is not executed when trying to\r\n # execute \"g++\" without extensions.\r\n # (Executing \"g++.bat\" explicitly would also work.)\r\n params['shell'] = True\r\n\r\n # Using the dummy file descriptors below is a workaround for a\r\n # crash experienced in an unusual Python 2.4.4 Windows environment\r\n # with the default None values.\r\n stdin = None\r\n if \"stdin\" not in params:\r\n stdin = open(os.devnull)\r\n params['stdin'] = stdin.fileno()\r\n\r\n try:\r\n proc = subprocess.Popen(command, startupinfo=startupinfo, **params)\r\n finally:\r\n if stdin is not None:\r\n del stdin\r\n return proc",
"def process_run(cmd_string, stdin=None):\n process_object=subprocess.Popen(shlex.split(cmd_string),\n stdin=stdin,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n return process_object",
"def run(*args, **kwargs):\n _patch_popen(Popen)\n assert len(args) > 0\n\n arguments = []\n\n input = kwargs.pop('input', None)\n fail_on_error = kwargs.pop('fail_on_error', False)\n encoding = kwargs.pop('encoding', None) \n\n if len(args) == 1:\n if isinstance(args[0], string):\n arguments = args[0].split()\n else:\n for i in args:\n if isinstance(i, (list, tuple)):\n for j in i:\n arguments.append(j)\n else:\n arguments.append(i)\n\n def set_default_kwarg(key, default): \n kwargs[key] = kwargs.get(key, default)\n\n set_default_kwarg('stdin', PIPE)\n set_default_kwarg('stdout', PIPE)\n set_default_kwarg('stderr', PIPE)\n\n\n proc = Popen(arguments, **kwargs)\n stdout, stderr = proc.communicate(input)\n \n if encoding is not None:\n stdout = stdout.decode(encoding=encoding)\n stderr = stderr.decode(encoding=encoding)\n\n result = RunResult(proc.returncode, stdout, stderr)\n \n if fail_on_error and proc.returncode != 0:\n raise ProcessError(' '.join(arguments), result)\n\n return result",
"def subprocess(cls, cmd, **kwargs):\r\n def call(args):\r\n return subprocess.call(cmd + args, **kwargs)\r\n return cls(call)",
"def __init__(self, proc_args: Optional[List[str]]):\n if proc_args:\n self.proc = subprocess.Popen(\n proc_args,\n universal_newlines=True,\n stdin=subprocess.PIPE, # pipe STDIN and STDOUT to send and receive messages\n stdout=subprocess.PIPE\n )\n self.outward_comm_stream = self.proc.stdin\n self.inward_comm_stream = self.proc.stdout\n else:\n self.proc = None\n self.outward_comm_stream = sys.stdout\n self.inward_comm_stream = sys.stdin",
"def subprocess(cls, cmd, **kwargs):\n def call(args):\n return subprocess.call(cmd + args, **kwargs)\n return cls(call)",
"def popenAndCall(onExit, *popenArgs, **popenKWArgs):\n def runInThread(onExit, popenArgs, popenKWArgs):\n global proc\n proc = subprocess.Popen(*popenArgs, **popenKWArgs)\n print(type(proc))\n proc.wait()\n onExit()\n return\n\n thread = threading.Thread(target=runInThread,\n args=(onExit, popenArgs, popenKWArgs))\n thread.start()\n\n return thread # returns immediately after the thread starts",
"def shell_cmd(*args):\n proc = subprocess.run(args)\n returncode = proc.returncode\n if returncode != 0:\n raise RuntimeError(\n f\"Command {args} failed with return code {returncode}\")\n return proc",
"def subprocess_run(args, **kwargs_in):\n kwargs = kwargs_in.copy()\n kwargs['stdout'] = subprocess.PIPE\n kwargs['stderr'] = subprocess.PIPE\n logger.debug(f'running a subprocess {args} {kwargs}')\n output = subprocess.run(args, **kwargs)\n logger.debug(f' returned: {output.stdout}')\n return output",
"def spawn_subprocess(args, loop=None):\n if not _IS_XOS_ASYNC:\n return spawn_subprocess_not_xos(args, loop=loop)\n else:\n return spawn_subprocess_xos(args, loop=loop)",
"def _launch_command(args, out_cb, err_cb, done=None, **kwargs):\n\n def pump_stream(callback, stream):\n \"\"\"Pump the stream\"\"\"\n for line in stream:\n callback(line)\n callback(None)\n\n def joiner():\n \"\"\"Wait for streams to finish, then call done callback\"\"\"\n for th in threads:\n th.join()\n done(process)\n\n kwargs = kwargs.copy()\n in_data = kwargs.get(\"input\")\n if \"input\" in kwargs:\n del kwargs[\"input\"]\n assert kwargs.get(\"stdin\") is None, kwargs[\"stdin\"]\n kwargs[\"stdin\"] = PIPE\n elif \"stdin\" not in kwargs:\n kwargs[\"stdin\"] = DEVNULL\n kwargs.setdefault(\"stdout\", PIPE)\n kwargs.setdefault(\"stderr\", PIPE)\n kwargs[\"universal_newlines\"] = True # Text streams, not byte streams\n process = Popen(args, **kwargs)\n threads = []\n if process.stdout:\n thread = Thread(\n target=pump_stream, args=(out_cb, process.stdout), daemon=True\n )\n thread.start()\n threads.append(thread)\n if process.stderr:\n thread = Thread(\n target=pump_stream, args=(err_cb, process.stderr), daemon=True\n )\n thread.start()\n threads.append(thread)\n if done and threads:\n Thread(target=joiner, daemon=True).start()\n if in_data:\n process.stdin.write(str(in_data, \"utf-8\"))\n process.stdin.close()\n return process",
"async def arun(\n args: models.CommandArgs,\n *,\n stdin: Optional[int] = DEVNULL,\n input: Optional[AnyStr] = None,\n stdout: Optional[int] = DEVNULL,\n stderr: Optional[int] = DEVNULL,\n capture_output: bool = False,\n shell: Optional[bool] = None,\n text: bool = False,\n **other_subprocess_kwargs,\n) -> CompletedProcess:\n if capture_output:\n stdout = PIPE\n stderr = PIPE\n\n if _get_shell(args, shell):\n assert isinstance(args, str)\n process = await create_subprocess_shell(\n args,\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n **other_subprocess_kwargs,\n )\n else:\n assert isinstance(args, Iterable)\n process = await create_subprocess_exec(\n args[0],\n *args[1:],\n stdin=stdin,\n stdout=stdout,\n stderr=stderr,\n **other_subprocess_kwargs,\n )\n\n result_stdout, result_stderr = await process.communicate(\n input=input.encode() if (input and text) else input, # type: ignore\n )\n\n return CompletedProcess(\n args=args,\n returncode=process.returncode or 0,\n stdout=result_stdout.decode() if (result_stdout and text) else result_stdout,\n stderr=result_stderr.decode() if (result_stderr and text) else result_stderr,\n )",
"def _spawn(self, protocol, args, env=None):\n return reactor.spawnProcess(protocol, self.cmd, args, env=env)"
] | [
"0.76723176",
"0.7587004",
"0.73669636",
"0.72725964",
"0.72679687",
"0.70070004",
"0.69992423",
"0.69684494",
"0.6902572",
"0.6820378",
"0.67948174",
"0.67915964",
"0.678975",
"0.678923",
"0.67641646",
"0.66676784",
"0.66353923",
"0.65361035",
"0.65184724",
"0.6518346",
"0.6507334",
"0.6501121",
"0.6491504",
"0.64549226",
"0.6453996",
"0.63685805",
"0.6355541",
"0.63520306",
"0.6310531",
"0.63029766"
] | 0.80149055 | 0 |
logger instance for plugin type | def log(self):
    if not self._logger:
        self._logger = logging.getLogger('vaping.plugins.' + self.plugin_type)
    return self._logger | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_logger(self):",
"def logger(self):\n pass",
"def __init__(self):\n self.logger = logger()",
"def __init__(self):\n\n self.log = logger.getLogger(name=\"directord\")",
"def __init__(self):\n self.logger = logging.getLogger(FeatureEngineeringLogger.__name__)",
"def build_logger(self):\n pass",
"def __add_logger(self):\n #FIXME: adapt to the settings that are proper for you\n self.__logger = logging.getLogger('lib-autopilot')\n self.__logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n self.__logger.addHandler(ch) \n # TODO: CHANGE from Console to file handler\n # fh = logging.FileHandler('lib-autopilot.log')\n # fh.setLevel(logging.DEBUG)\n #fh.setFormatter(formatter)\n #self.__logger.addHandler(fh)",
"def __init__(self):\n self.log = logging.getLogger()",
"def __init__(self):\n\n self._logger = logging.getLogger(__name__)",
"def __init__(self):\r\n self.logger = dict()",
"def log():\n return logging.getLogger(\"vodka\")",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.log = logging.getLogger(logger_name(__name__))",
"def logger(self):\n return logging",
"def get_logger(plugin_name):\n return logging.getLogger('sopel.externals.%s' % plugin_name)",
"def __init__(self, log):\n self.log = log\n self.logger = logging.getLogger(self.__class__.__name__)",
"def __init__(self, logger=logging.getLogger(\"dummy\")):\n super(RemoteObserver, self).__init__()\n self.logger = logger",
"def __init__(self):\n super().__init__()\n self.register_as_type(DefaultLoggerFactory.NullLoggerDescriptor, NullLogger)\n self.register_as_type(DefaultLoggerFactory.ConsoleLoggerDescriptor, ConsoleLogger)\n self.register_as_type(DefaultLoggerFactory.CompositeLoggerDescriptor, CompositeLogger)",
"def load():\n return SyslogOutOutputPlugin",
"def get_logger(args):\n logger_kind = 'tensorboard' if 'logger' not in args.__dict__ else args.logger\n if logger_kind == 'tensorboard':\n logger = pl.loggers.tensorboard.TensorBoardLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.dataset,\n )\n\n elif logger_kind == 'wandb':\n logger = pl.loggers.WandbLogger(\n save_dir=os.path.join(os.getcwd(), 'tmp'),\n name=args.backbone,\n )\n\n else:\n raise Exception(f'Error. Logger \"{lokker_kind}\" is not supported.')\n return logger",
"def _logger():\n return logging.getLogger(module_name)",
"def getLogger(self, *args, **kwargs):\r\n return loggers.getLogger(*args, **kwargs)",
"def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())",
"def get_logger(self, ast_ctx, log_type, *arg, **kw):\n\n name = ast_ctx.get_logger_name()\n if name not in self.loggers:\n #\n # Maintain a cache for efficiency.\n #\n self.loggers[name] = ast_ctx.get_logger()\n return getattr(self.loggers[name], log_type)",
"def logger(self, value):\n pass",
"def __init__(self,\n faciliity=None,\n level='warning',\n name=None,\n logfmt='%(name)s[%(process)d] %(levelname).1s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S ',\n child=None,\n logger=None\n ):\n\n if logger:\n self.level=logging.getLevelName(logger.level)\n dc(f\"{logger.handlers}\")\n dc(f\"self.level={self.level!r}\")\n self.log=logger\n else:\n self.level=level\n self.log=getlogger(facility=facility,level=level,name=name,logfmt=logfmt,datefmt=datefmt,child=child)",
"def __init__(self, plugin, hostname, projectname, sensorMac):\n Logger.__init__(self, plugin, hostname, projectname)\n self.sensor = sensorMac\n\n self.logFiles = ['scan', 'rssi']\n self.logs = dict(zip(self.logFiles, [open('/'.join([\n self.logDir, '%s-%s-%s.log' % (self.hostname, self.sensor, i)]),\n 'a') for i in self.logFiles]))\n\n self.enableLagLog(plugin.config.getValue('enable_lag_logging'))",
"def __init__(self, logger=logging.getLogger(\"dummy\")):\n super(ShowMap, self).__init__()\n self.logger = logger",
"def __init__(self, logger, level):\n self.logger = logger\n self.level = level",
"def __init__(self, logger, level):\n self.logger = logger\n self.level = level",
"def __init__(self, logger, level):\n self.logger = logger\n self.level = level"
] | [
"0.73108566",
"0.71119756",
"0.68729246",
"0.68288493",
"0.6814063",
"0.6643799",
"0.6631724",
"0.6627801",
"0.66231793",
"0.65565336",
"0.6532955",
"0.6517596",
"0.6504724",
"0.64651346",
"0.6436994",
"0.63748336",
"0.6320188",
"0.6318142",
"0.63155955",
"0.630599",
"0.62634593",
"0.62550163",
"0.62538975",
"0.62528026",
"0.6246403",
"0.61917406",
"0.6187917",
"0.61832887",
"0.61832887",
"0.61832887"
] | 0.7757826 | 0 |
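As a side note, a minimal runnable sketch of the lazily-cached logger property shown in the row above; the `PluginBase` class name and its `plugin_type` value are made up for illustration:

```python
import logging


class PluginBase:
    plugin_type = "fping"  # hypothetical plugin type

    def __init__(self):
        self._logger = None  # logger is created lazily on first access

    @property
    def log(self):
        # build a namespaced logger once, then reuse the cached instance
        if not self._logger:
            self._logger = logging.getLogger("vaping.plugins." + self.plugin_type)
        return self._logger


logging.basicConfig(level=logging.DEBUG)
PluginBase().log.debug("created on first access, cached afterwards")
```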
queue an emission of a message for all output plugins | def queue_emission(self, msg):
    if not msg:
        return
    for _emitter in self._emit:
        if not hasattr(_emitter, 'emit'):
            continue
        def emit(emitter=_emitter):
            self.log.debug("emit to {}".format(emitter.name))
            emitter.emit(msg)
        self.log.debug("queue emission to {} ({})".format(
            _emitter.name, self._emit_queue.qsize()))
        self._emit_queue.put(emit) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output_function(**kwargs):\n\n\t\toutput_queue = kwargs['q']\n\t\twhile True:\n\t\t\titem = output_queue.get()\n\t\t\t# expects to get a string or None\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\toutfile.write(item)\n\t\t\t# outfile.write(\"output_function:: {item}\".format(item=item)+\"\\n\")\n\t\t\toutput_queue.task_done()",
"def send_emission(self):\n if self._emit_queue.empty():\n return\n emit = self._emit_queue.get()\n emit()",
"def emit(self, message):",
"def write(self, *args, **kwargs):\n for arg in args:\n self.output.append(arg)",
"def send(metadata, output, minio_client, debug_mode):\n\n # log some info about what the send function has been given\n logger.info(\"LETTING MONITOR KNOW PROCESSING HAS BEEN DONE\")\n\n if isinstance(output, str):\n err = output\n output = {}\n elif isinstance(output, dict):\n err = None\n elif output == None:\n output = {}\n err = \"NO OUTPUT WAS RETURNED\"\n\n # send the info from this plugin to the next one in the pipeline\n send_result(output, metadata, err)",
"def emit_all(self):\n while not self._emit_queue.empty():\n self.send_emission()",
"def send(self):\n for output in self.outputs:\n output.send(self.logger)",
"def logDispatch(self, *msg, **args):\n args['sessionno'] = 'T{0}'.format(str(args['sessionno']))\n for output in self.tac.output_plugins:\n output.logDispatch(*msg, **args)",
"def output(self, msg):",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def _get_output_queue(self):\n return self.__output_queue",
"def create_output(self, messages):",
"def putting_on_queue(*args):\n results.put(main_func(*args))",
"def planner_cmd_callback(self, msg):\n\n # Use planner input directly\n self.mix_cmd_pub.publish(msg)",
"def write_queued_output(self):\n for stream in [\"stdout\", \"stderr\"]:\n while True:\n output, queue_size = getattr(self, stream).readline(timeout=0.1)\n if not (output is None or len(output) == 0):\n self.log(output, self.log_level[stream])\n if queue_size == 0:\n break",
"def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input = arguments[\"input\"]\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution: '{0}' . \".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input)\n\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))",
"def agent_behaviour(queue):\n\n gr = register_message()",
"def _qprocess(self):\n while 1:\n t, args, kw = self.inq.get()\n ret = self.__call__(*args, **kw)\n self.outq.put((t, ret))",
"def processOutput(self, plugin, output, command, isReport=False):\n output_queue = JoinableQueue()\n plugin.set_actions_queue(self.pending_actions)\n\n self.plugin_process = PluginProcess(\n plugin, output_queue, isReport)\n\n getLogger(self).debug(\n \"Created plugin_process (%d) for plugin instance (%d)\" %\n (id(self.plugin_process), id(plugin)))\n\n self.pending_actions.put((Modelactions.PLUGINSTART, plugin.id, command.getID()))\n output_queue.put((output, command.getID()))\n plugin_commiter = PluginCommiter(\n output_queue,\n output,\n self.pending_actions,\n plugin,\n command,\n self._mapper_manager,\n self.end_event,\n )\n plugin_commiter.start()\n # This process is stopped when plugin commiter joins output queue\n self.plugin_process.start()",
"def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)",
"def _queue_output(arguments, pidq, outputq):\n kwargs = arguments[\"process\"]\n input_data = arguments[\"input\"].encode(\"utf-8\") if arguments[\"input\"] else None\n\n try:\n proc = Popen(**kwargs)\n except OSError as e:\n # pid None is read by the main thread as a crash of the process\n pidq.put(None)\n\n outputq.put((\n \"\",\n (\"Unexpected exception caught during execution of taskw: '{0}' . \"\n \"If you are running out-of-tree tests set TASK_USE_PATH=1 \"\n \"in shell env before execution and add the \"\n \"location of the task(d) binary to the PATH\".format(e)),\n 255)) # false exitcode\n\n return\n\n # Put the PID in the queue for main process to know.\n pidq.put(proc.pid)\n\n # Send input and wait for finish\n out, err = proc.communicate(input_data)\n\n if sys.version_info > (3,):\n out, err = out.decode('utf-8'), err.decode('utf-8')\n\n # Give the output back to the caller\n outputq.put((out, err, proc.returncode))",
"def _emited(self, *args):\n\t\tdebug(\"OnEventDeferred : event catched\")\n\t\tself.callback(*args)\n\t\tself._clean()",
"def output_raw_message(text):\n database.messages_output_queue.put(text)",
"def msg(*args):\n if messages_on:\n print(*args)",
"def on_task_output(cls, task: Task, config: dict) -> None:",
"def command_output_updated(self, logger):"
] | [
"0.60872614",
"0.5945144",
"0.59260297",
"0.59092003",
"0.58465993",
"0.5714343",
"0.5665754",
"0.56151253",
"0.5593914",
"0.5585872",
"0.5585872",
"0.5585872",
"0.5585872",
"0.5585872",
"0.5585872",
"0.5554473",
"0.5533875",
"0.549778",
"0.54847586",
"0.5459334",
"0.54484266",
"0.5437732",
"0.53810894",
"0.5380237",
"0.52983266",
"0.5277377",
"0.52647567",
"0.525417",
"0.5249867",
"0.5234706"
] | 0.62826663 | 0 |
emit and remove the first emission in the queue | def send_emission(self):
    if self._emit_queue.empty():
        return
    emit = self._emit_queue.get()
    emit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_first(self):\n self.deque.pop(0)",
"def dequeue(self):\r\n return self.queue.pop(0)",
"def pop(self):\r\n return self.queue.pop(0)",
"def dequeue(self):\n return self.queue.pop(0)",
"def queue_emission(self, msg):\n if not msg:\n return\n for _emitter in self._emit:\n if not hasattr(_emitter, 'emit'):\n continue\n def emit(emitter=_emitter):\n self.log.debug(\"emit to {}\".format(emitter.name))\n emitter.emit(msg)\n self.log.debug(\"queue emission to {} ({})\".format(\n _emitter.name, self._emit_queue.qsize()))\n self._emit_queue.put(emit)",
"def pop(self):\n return self.queue.pop(0)",
"def _dequeue(self) -> Optional[torch.cuda.Event]:\n if self._queue:\n event = self._queue.popleft()\n return event\n return None",
"def pop_from_deque(self):",
"def _dequeue(self):\n return self._queue.popleft()",
"def dequeue(self):\n return self.the_queue.pop(0)",
"def dequeue(self):",
"def dequeue(self):\n\n # del self._queue[0]\n return self._queue.pop(0)",
"def dequeue(self):\n pass",
"def dequeue(self):\n pass",
"def pop(self):\n self.queue.insert(len(self.queue), self.queue[0])\n self.queue.remove(self.queue[0])\n return self.queue.pop()",
"def dequeue(self):\n return self.queue.popleft()",
"def remove(self) -> T:\n if not self.is_empty():\n return self._queue.pop()",
"def remove(self):\n return self.queue.popleft()",
"def popitem(self):\r\n result = super(EmittingWeakKeyDefaultDict, self).popitem()\r\n if self.emitter:\r\n self.emitter.emit()\r\n return result",
"def pop_first(self):\n self.pop_item(0)",
"def AdvanceQueue(self):\r\n self.data.pop(0)\r\n return",
"def dequeue(self):\n return self.__queue.pop()",
"def discard(self):\r\n self.pushes.pop()",
"def dequeue(self):\n if len(self) == 1:\n self.tail = None\n return self.pop()",
"def dequeue(self):\n\n return self._data.pop(0)",
"def pop(self):\n self.move()\n return self.queue2.pop()",
"def pop(self):\n return self.q1.dequeue()",
"def dequeue(self):\n\n item = self.__items__.pop(0)\n return item",
"def dequeue(self):\n return self.items.pop()",
"def dequeue(self):\n return self.items.pop()"
] | [
"0.65136707",
"0.61705273",
"0.6156415",
"0.6083466",
"0.60746723",
"0.60541797",
"0.60505503",
"0.59951586",
"0.5936325",
"0.5912753",
"0.5911044",
"0.58870256",
"0.5882075",
"0.5882075",
"0.58683664",
"0.58458984",
"0.5829686",
"0.58193946",
"0.5809367",
"0.5806965",
"0.579555",
"0.5795395",
"0.5787614",
"0.5777981",
"0.577138",
"0.5725777",
"0.5712736",
"0.5694213",
"0.5672431",
"0.5672431"
] | 0.6335579 | 1 |
emit and remove all emissions in the queue | def emit_all(self):
    while not self._emit_queue.empty():
        self.send_emission() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def send_emission(self):\n if self._emit_queue.empty():\n return\n emit = self._emit_queue.get()\n emit()",
"def queue_emission(self, msg):\n if not msg:\n return\n for _emitter in self._emit:\n if not hasattr(_emitter, 'emit'):\n continue\n def emit(emitter=_emitter):\n self.log.debug(\"emit to {}\".format(emitter.name))\n emitter.emit(msg)\n self.log.debug(\"queue emission to {} ({})\".format(\n _emitter.name, self._emit_queue.qsize()))\n self._emit_queue.put(emit)",
"def clearQueueAll():",
"def _drain_queue(self):\n while self.queue:\n self._export_batch()",
"def clearQueue(targets):",
"def pop_all(self):\n with self.lock:\n output = list(self.queue)\n self.queue.clear()\n\n return output",
"def flushMsgs(self):\n\n self.queue = self.pre_queue[:]\n self.pre_queue = []",
"def clear_queue(self):\n while not self.queue.empty():\n self.queue.get()",
"def clear(self):\n self.queue.clear()",
"def clear_queue(self):\n self.queue = deque()",
"def drain(queue):\n while not queue.is_empty():\n queue.remove()",
"def clean_queue(self):\n self._stdin_queue.put_nowait(None) # Release thread",
"def clear(self):\n self.queue = Queue()",
"def flush(self) -> None:\n\n self.event_queue.put(self._FLUSH_SIGNAL)",
"def drain_call_queue(self):\n if len(self.call_queue) == 0:\n return\n self.apply(lambda x: x)",
"def remove_all_outputs(self):\n self._outs.clear()",
"def drain_call_queue(self):\n pass",
"def discard(self):\r\n self.pushes.pop()",
"def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)",
"def process_queue_fast(self):\n while self.queue:\n self.queue.popleft()()",
"def remove_to_deletes(self):\n go = True\n while go:\n go = False\n for op in self.queue:\n if op.delete:\n self.queue.remove(op)\n go = True\n break",
"def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()",
"def flush(self):\n size = self.queue.qsize()\n self.queue.join()\n logger.debug(f'[Analytics Client] Forcefully flushed {size} events')",
"def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []",
"def abort(self):\n for key in self.listeners:\n sem = self.listeners[key]\n self.listeners[key] = None\n\n # TODO: Received data and semahore should be stored separately\n if isinstance(sem, asyncio.Semaphore):\n sem.release()",
"def queueOff() -> None:\n\t\tLogging.enableQueue = False",
"def spam( self, node ):\n map( lambda queue: queue.put( node ), self.state[ 'qout' ] )",
"def purge(self):\n while self.bus.inWaiting() > 0:\n self.bus.read(self.bus.inWaiting())",
"def clearDownloadQueue(self):\n #print(\"CLEAR DOWNLOAD QUEUE\")\n self.downloadQueue = []\n self.clearEvents()",
"def _gc(self):\n remove_before = time.time() - self._keep_for\n for item in self._queue:\n # Time for the sequence to be removed?\n if item[1] < remove_before:\n # Sequence data is old, so remove it\n self._queue.remove(item)\n else:\n # Sequence number was added recently, so don't remove it. Also\n # stop processing the queue because all later items will be\n # newer\n break"
] | [
"0.6907306",
"0.6813454",
"0.6777606",
"0.65254974",
"0.6431948",
"0.63691956",
"0.63535166",
"0.63029647",
"0.62848836",
"0.6125597",
"0.6116918",
"0.6115065",
"0.6113113",
"0.60741436",
"0.60656524",
"0.60627425",
"0.60400844",
"0.60000175",
"0.59864897",
"0.5860363",
"0.58467793",
"0.5845593",
"0.5827919",
"0.5821347",
"0.5820629",
"0.5808999",
"0.5805465",
"0.57879066",
"0.5787153",
"0.5783901"
] | 0.7361969 | 0 |
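Taken together, the last three rows (queue_emission, send_emission, emit_all) describe one queue-and-drain pattern; below is a small self-contained sketch of it, with the `Probe` and `StdoutEmitter` classes invented for illustration:

```python
import queue


class StdoutEmitter:
    """Stand-in emitter: anything exposing a ``name`` and an ``emit(msg)`` method."""
    name = "stdout"

    def emit(self, msg):
        print("emit:", msg)


class Probe:
    def __init__(self, emitters):
        self._emit = emitters
        self._emit_queue = queue.Queue()

    def queue_emission(self, msg):
        # wrap each emitter call in a closure and queue it for later
        if not msg:
            return
        for _emitter in self._emit:
            if not hasattr(_emitter, "emit"):
                continue

            def emit(emitter=_emitter):
                emitter.emit(msg)

            self._emit_queue.put(emit)

    def send_emission(self):
        # pop and run the oldest queued emission, if any
        if self._emit_queue.empty():
            return
        self._emit_queue.get()()

    def emit_all(self):
        # drain the queue one emission at a time
        while not self._emit_queue.empty():
            self.send_emission()


probe = Probe([StdoutEmitter()])
probe.queue_emission({"type": "ping", "data": [{"host": "10.0.0.1", "avg": 1.2}]})
probe.emit_all()
```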
Here we validate that our file handler is pointing to an existing file. If it doesn't, because the file has been deleted, we close the file handler and try to reopen it. | def validate_file_handler(self):
    if self.fh.closed:
        try:
            self.fh = open(self.path, "r")
            self.fh.seek(0, 2)
        except OSError as err:
            logging.error("Could not reopen file: {}".format(err))
            return False
    open_stat = os.fstat(self.fh.fileno())
    try:
        file_stat = os.stat(self.path)
    except OSError as err:
        logging.error("Could not stat file: {}".format(err))
        return False
    if open_stat != file_stat:
        self.log
        self.fh.close()
        return False
    return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_file_deleted(self):\n try:\n with get_temp_file() as (fd, name):\n os.unlink(name)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))",
"def close(self) -> None:\n if self.file_handler:\n self.file_handler.close()",
"def __del__(self):\n if self.file is None:\n return\n try:\n self.file.close()\n del self.file\n self.file = None\n except:\n getLogger(__name__).warning('Error on file close', exc_info=True)",
"def efile_handle(self):\n if not self.status == \"finished\":\n raise NameError(\"redhawk: unfinished efile check\")\n\n tries = 0\n while not self.efile_exists() and ties < self.file_limit:\n time.sleep(self.file_delay)\n tries = tries+1\n \n if os.path.isfile(self.efile_name()):\n return open(self.efile_name(), \"r\")\n\n raise NameError(\"redhawk: unfinished efile check\")",
"def test_file_closed(self):\n try:\n with get_temp_file() as (fd, name):\n os.close(fd)\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)",
"def close_file_handle(self):\n if self.file_handle and self.output_file:\n self.file_handle.close()",
"def close(self):\r\n if self._filename and self._fh:\r\n self._fh.close()\r\n self._fh = None",
"def _close_file_logger(self):\n if self._file_log_handler is not None:\n self._file_log_handler.flush()\n self._file_log_handler.close()\n self.logger.removeHandler(self._file_log_handler)\n self._file_log_handler = None\n self.logger.propagate = True",
"def _filehandle(self):\n if not self._fh or self._is_closed():\n filename = self._rotated_logfile or self.filename\n if filename.endswith('.gz'):\n self._fh = gzip.open(filename, 'r')\n else:\n self._fh = open(filename, \"r\", 1)\n self._fh.seek(self._offset)\n\n return self._fh",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\r\n if instance.path:\r\n if os.path.isfile(instance.path.path):\r\n os.remove(instance.path.path)",
"def removed(self, filename):\r\n self.__close_and_reload(filename)",
"def _close(self):\n self.fh.close()",
"def __del__(self):\r\n self.filename.close()",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.file:\n if os.path.isfile(instance.file.path):\n os.remove(instance.file.path)",
"def reopen_files(self):\r\n for log in (self.error_log, self.access_log):\r\n for h in log.handlers:\r\n if isinstance(h, logging.FileHandler):\r\n h.acquire()\r\n h.stream.close()\r\n h.stream = open(h.baseFilename, h.mode)\r\n h.release()",
"def __del__(self):\n self.file.close()",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n if instance.document:\n if os.path.isfile(instance.document.path):\n os.remove(instance.document.path)",
"def unlink(self,):\n self._wait()\n self.fd.close()\n self.fd = None\n os.unlink(self.fname)",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\n\n if instance.file:\n if os.path.isfile(instance.file.path):\n os.remove(instance.file.path)",
"def test_is_delicious_file(self):\r\n good_file = self._get_del_file()\r\n\r\n self.assertTrue(\r\n DelImporter.can_handle(good_file),\r\n \"DelImporter should handle this file\")\r\n\r\n good_file.close()",
"def WriteAbort(self):\n if self._file_object:\n self._file_object.close()\n self._file_object = None\n\n if os.path.exists(self.name):\n os.remove(self.name)",
"def auto_delete_file_on_delete(sender, instance, **kwargs):\r\n if instance.image:\r\n if pathlib.Path(instance.image.path).is_file():\r\n pathlib.Path(instance.image.path).unlink()",
"def __del__(self):\n self.f.close()",
"def test_file_unused(self):\n try:\n with get_temp_file() as (fd, name):\n pass\n except Exception as err:\n self.fail('Failed with exception \"{}\"'.format(err))\n else:\n file_exists = os.access(name, os.F_OK)\n self.assertFalse(file_exists)",
"def close(self, verify_file=True):\n if self.fp:\n do_close(self.fp)\n self.fp = None",
"def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False",
"def ofile_handle(self):\n if not self.status == \"finished\":\n raise NameError(\"redhawk: unfinished ofile check\")\n tries = 0\n while not self.ofile_exists() and tries < self.file_limit:\n time.sleep(self.file_delay)\n tries = tries+1\n \n if os.path.isfile(self.ofile_name()):\n return open(self.ofile_name(), \"r\")\n\n raise NameError(\"redhawk: unfound ofile\")",
"def finalize(self):\n if self._file:\n toLog(\"Closing file `{0}`\".format(self._fname), True)\n self._file.close()\n self._file = None",
"def reopen(self):\n self.close()\n self._fileobj = os.fdopen(os.open(str(self.path), os.O_CREAT | os.O_RDWR, 384), \"r+b\", 0)",
"def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None"
] | [
"0.6699122",
"0.65485364",
"0.63995856",
"0.6399519",
"0.6396869",
"0.6383093",
"0.6045455",
"0.6025384",
"0.5983456",
"0.59830105",
"0.597813",
"0.5963656",
"0.5961477",
"0.5950006",
"0.59438777",
"0.5907139",
"0.5896517",
"0.58615994",
"0.58526576",
"0.5834653",
"0.5818726",
"0.5798499",
"0.57981",
"0.57776856",
"0.5774414",
"0.5773867",
"0.57725817",
"0.5770512",
"0.57677835",
"0.57558143"
] | 0.7340528 | 0 |
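The same deleted-or-rotated-file check can be sketched on its own: compare `os.fstat` of the open handle with `os.stat` of the path and reopen when they no longer match (the log path below is hypothetical):

```python
import os


def handle_still_valid(fh, path):
    """Return True if the open handle still refers to the file at ``path``."""
    try:
        open_stat = os.fstat(fh.fileno())
        file_stat = os.stat(path)
    except OSError:
        return False
    # matching device and inode numbers mean the handle and the path are the same file
    return (open_stat.st_dev, open_stat.st_ino) == (file_stat.st_dev, file_stat.st_ino)


# usage sketch (hypothetical path); reopen before continuing to tail the file
# with open("/var/log/example.log") as fh:
#     if not handle_still_valid(fh, "/var/log/example.log"):
#         fh.close()
```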
accept message to emit | def emit(self, message): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def emit(data):",
"def receive(self, message):",
"def receive(self):\n pass",
"def receive(self, msg):\n pass",
"def handle_accept(self):\r\n pass",
"def handle(self, message):",
"def accept(self, event):\n raise NotImplementedError()",
"def receive():\n pass",
"def accept(self):\n pass",
"def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg",
"def transmit(self, message):\n pass",
"def on_receive(self, msg):\n raise NotImplementedError",
"def _handle_message(self, msg):\n self.event('message', msg)",
"def send(self, event, message):\n pass",
"def received(self, message):\n raise NotImplementedError()",
"def OnAccept(self, event):\n pass",
"def process_sink_msg(self):\n logging.debug('Received message on the sink socket')\n \n msg = self.sink_socket.recv_json()\n \n logging.debug('Message: %s', msg)\n\n # Publish the results to the clients using the\n # request id of the service request as the topic\n self.result_pub_socket.send_unicode(msg['uuid'], zmq.SNDMORE)\n self.result_pub_socket.send_json(msg)",
"def on_message(self, event):\n self.response = event.message\n self.connection.container.yield_() # Wake up the wait() loop to handle the message.",
"def emit(self, record):\n pass",
"def process(self, message, callback):\n\n\t\tif message.type == message_type.EMIT:\n\t\t\t# We are in the server, the message has just been built.\n\t\t\t# Forward it nearly \"as is\". Only the message type is changed,\n\t\t\t# to make us know it has been processed one time since emission,\n\t\t\t# and thus the next hop will be the client, which has the task\n\t\t\t# to display it, and eventually get an interactive answer.\n\n\t\t\tassert ltrace(TRACE_MESSAGING, ' MessageProcessor.process(EMIT)')\n\n\t\t\tif message.interaction:\n\n\t\t\t\tif message.interaction == interactions.ASK_FOR_REPAIR:\n\n\t\t\t\t\tmessage.answer = ttyutils.interactive_ask_for_repair(message.data,\n\t\t\t\t\t\tauto_answer=message.auto_answer)\n\n\t\t\t\telif message.interaction == interactions.GET_PASSWORD:\n\n\t\t\t\t\tmessage.answer = getpass.getpass(message.data)\n\n\t\t\t\telse:\n\t\t\t\t\tassert ltrace(TRACE_MESSAGING,\n\t\t\t\t\t\t'unsupported interaction type in message %s.' % message)\n\t\t\t\t\tmessage.answer = None\n\n\t\t\t\tmessage.type = message_type.ANSWER\n\t\t\t\treturn callback.process(message, self.getAttrProxy())\n\n\t\t\telse:\n\t\t\t\tif message.clear_terminal:\n\t\t\t\t\tttyutils.clear_terminal(MessageProcessor.channels[message.channel])\n\n\t\t\t\tchan_flush = MessageProcessor.channels[message.channel].flush\n\t\t\t\tchan_write = MessageProcessor.channels[message.channel].write\n\n\t\t\t\tif message.word_delay:\n\t\t\t\t\tdelay = message.word_delay\n\t\t\t\t\tfor word in message.data.split(' '):\n\t\t\t\t\t\tchan_write(word + ('' if word.endswith('\\n') else ' '))\n\t\t\t\t\t\tchan_flush()\n\t\t\t\t\t\ttime.sleep(delay)\n\n\t\t\t\telif message.char_delay:\n\t\t\t\t\tdelay = message.char_delay\n\t\t\t\t\tfor char in message.data:\n\t\t\t\t\t\tchan_write(char)\n\t\t\t\t\t\tchan_flush()\n\t\t\t\t\t\ttime.sleep(min(delay*4, 0.4) if char == ' ' else delay)\n\n\t\t\t\telse:\n\t\t\t\t\tchan_write(message.data)\n\n\t\t\t\tmessage.answer = None\n\n\t\telif message.type == message_type.ANSWER:\n\t\t\t# We are on the server, this is the answer from the client to\n\t\t\t# ourquestion. Return it directly to the calling process. The\n\t\t\t# message loop ends here.\n\n\t\t\tassert ltrace(TRACE_MESSAGING, ' MessageProcessor.process(ANSWER)')\n\n\t\t\t#message.channel.write(message.data)\n\t\t\treturn message.answer\n\t\telif message.type == message_type.PUSH_STATUS:\n\n\t\t\t# FIXME: is this really needed ? will the status be really pushed by this way ?\n\t\t\tfrom licorn.core import LMC\n\t\t\tLMC.machines.update_status(mid=message.sender,\n\t\t\t\tstatus=message.status)\n\n\t\telse:\n\t\t\traise exceptions.LicornRuntimeException('''Unrecognized message '''\n\t\t\t\t'''type %s for message %s.''' % (message.type, message))",
"def receive_message(self, message):",
"def handle_write(self):\n self.initiate_send()",
"def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))",
"def message_handler(self, dest, source, message):\n pass",
"def server_do(self,input, connstream):\r\n pass",
"def connection_handler(self):\n\t\tline = yield self.read_line()\n\t\tyield self.sendall(line + \"\\r\\n\")",
"def emit (self, signal):\n for room in self.transmissionarea :\n for listener in room.listeners:\n listener.signalReceived(signal)",
"def on_message(data):\n pass",
"def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!",
"def receive_message(self, message):\r\n return"
] | [
"0.699732",
"0.6608894",
"0.65500426",
"0.65143687",
"0.64708894",
"0.6423427",
"0.63328993",
"0.6292642",
"0.62430483",
"0.62041724",
"0.61830676",
"0.6182339",
"0.61788404",
"0.61708754",
"0.609756",
"0.6090819",
"0.60893077",
"0.6083992",
"0.6030369",
"0.60288525",
"0.60244817",
"0.6011015",
"0.600925",
"0.6008581",
"0.5970214",
"0.5928003",
"0.590727",
"0.5899672",
"0.58973056",
"0.5887605"
] | 0.8129066 | 0 |
Returns a dict containing the various filename formatter values. Values are taken from the vaping data message as well as the currently processed row in the message. | def filename_formatters(self, data, row):
    r = {
        "source" : data.get("source"),
        "field" : self.field,
        "type" : data.get("type")
    }
    r.update(**row)
    return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filenames(self) -> dict[str, str]:\r\n ...",
"def map_file_format_info(file_format_event, file_validation_event):\n event_info = {}\n if not file_format_event:\n return\n try:\n event_info.update(\n {\n \"dct:FileFormat\": file_format_event.event_outcome_detail,\n \"prov:softwareAgent\": file_format_event.event_detail.split(\";\")[0],\n \"premis:version\": file_format_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_format_event.event_detail,\n )\n if file_validation_event:\n event_info.update(\n {\n \"dct:FileFormat\": file_validation_event.event_outcome_detail,\n }\n )\n return event_info",
"def format_filename(self, data, row):\n return self.filename.format(**self.filename_formatters(data, row))",
"def for_popen(self):\n return {compat.filename_str(k): compat.filename_str(v) for k, v in self.items()}",
"def _filename_pre_data(self) -> dict:\n key = []\n remainder = \"\"\n prework = {}\n for i in self.draft_file:\n if i == \"{\":\n remainder = \"\"\n elif i == \"}\":\n key.append(remainder)\n else:\n remainder += i\n list_filename = self.filename.split(\"_\")\n for key, value in zip(key, list_filename):\n prework[key] = value\n self.pre_data = prework",
"def _get_info_from_filename(filename: str) -> dict:\n *parts, suffix = filename.split('.')\n dct = re.match(r'^(?P<name>[A-z0-9.]*)(-(?P<num_rows>[0-9]+))?$', '.'.join(parts)).groupdict()\n return {\n 'name': dct['name'],\n 'num_rows': int(dct['num_rows']) if dct['num_rows'] else None,\n 'format': suffix,\n }",
"def _read_params_txt(self) -> dict:\n df = pd.read_csv(self.file_path, sep=\" \", header=None, index_col=0).T\n\n sources_info = {\n \"sample_rate\": float(df[\"samplerate\"].iloc[0]),\n \"data_format\": df[\"dataformat\"].str.replace(\"'\", \"\").iloc[0],\n \"n_samples\": None,\n \"path\": self.file_path,\n }\n\n return sources_info",
"def files_context_transformer(row_content: dict) -> dict:\n\n return {\n 'Action': row_content.get('action', {}).get('value'),\n 'App': row_content.get('app'),\n 'AppCategory': row_content.get('app_category'),\n 'AppSubcategory': row_content.get('app_sub_category'),\n 'CharacteristicOfApp': row_content.get('characteristics_of_app'),\n 'DestinationIP': row_content.get('dest_ip', {}).get('value'),\n 'CloudHostname': row_content.get('cloud_hostname'),\n 'CountOfRepeats': row_content.get('count_of_repeats'),\n 'CustomerID': row_content.get('customer_id'),\n 'DestinationLocation': row_content.get('dest_location'),\n 'DestinationPort': row_content.get('dest_port'),\n 'DirectionOfAttack': row_content.get('direction_of_attack', {}).get('value'),\n 'FileID': row_content.get('file_id'),\n 'FileName': row_content.get('file_name'),\n 'FileType': row_content.get('file_type'),\n 'Flags': row_content.get('flags'),\n 'FromZone': row_content.get('from_zone'),\n 'Http2Connection': row_content.get('http2_connection'),\n 'InboundIf': row_content.get('inbound_if', {}).get('value'),\n 'IngestionTime': human_readable_time_from_epoch_time(row_content.get('ingestion_time', 0)),\n 'IsCaptivePortal': row_content.get('is_captive_portal'),\n 'IsClientToServer': row_content.get('is_client_to_server'),\n 'IsContainer': row_content.get('is_container'),\n 'IsDecryptMirror': row_content.get('is_decrypt_mirror'),\n 'IsDupLog': row_content.get('is_dup_log'),\n 'IsExported': row_content.get('is_exported'),\n 'IsForwarded': row_content.get('is_forwarded'),\n 'IsMptcpOn': row_content.get('is_mptcp_on'),\n 'IsNat': row_content.get('is_nat'),\n 'IsNonStdDestPort': row_content.get('is_non_std_dest_port'),\n 'IsPacketCapture': row_content.get('is_packet_capture'),\n 'IsPhishing': row_content.get('is_phishing'),\n 'IsPrismaBranch': row_content.get('is_prisma_branch'),\n 'IsPrismaMobile': row_content.get('is_prisma_mobile'),\n 'IsProxy': row_content.get('is_proxy'),\n 'IsReconExcluded': row_content.get('is_recon_excluded'),\n 'IsSaasApp': row_content.get('is_saas_app'),\n 'IsServerToClient': row_content.get('is_server_to_client'),\n 'IsSymReturn': row_content.get('is_sym_return'),\n 'IsTransaction': row_content.get('is_transaction'),\n 'IsTunnelInspected': row_content.get('is_tunnel_inspected'),\n 'IsUrlDenied': row_content.get('is_url_denied'),\n 'LogSet': row_content.get('log_set'),\n 'LogSource': row_content.get('log_source'),\n 'LogSourceID': row_content.get('log_source_id'),\n 'LogSourceName': row_content.get('log_source_name'),\n 'LogTime': human_readable_time_from_epoch_time(row_content.get('log_time', 0)),\n 'LogType': row_content.get('log_type', {}).get('value'),\n 'NatDestination': row_content.get('nat_dest', {}).get('value'),\n 'NatSource': row_content.get('nat_source', {}).get('value'),\n 'NatDestinationPort': row_content.get('nat_dest_port'),\n 'NatSourcePort': row_content.get('nat_source_port'),\n 'OutboundIf': row_content.get('outbound_if', {}).get('value'),\n 'PcapID': row_content.get('pcap_id'),\n 'Protocol': row_content.get('protocol', {}).get('value'),\n 'RecordSize': row_content.get('record_size'),\n 'ReportID': row_content.get('report_id'),\n 'RiskOfApp': row_content.get('risk_of_app'),\n 'RuleMatched': row_content.get('rule_matched'),\n 'RuleMatchedUuid': row_content.get('rule_matched_uuid'),\n 'SanctionedStateOfApp': row_content.get('sanctioned_state_of_app'),\n 'SequenceNo': row_content.get('sequence_no'),\n 'SessionID': row_content.get('session_id'),\n 'Severity': row_content.get('severity'),\n 'SourceIP': 
row_content.get('source_ip', {}).get('value'),\n 'Subtype': row_content.get('sub_type', {}).get('value'),\n 'TechnologyOfApp': row_content.get('technology_of_app'),\n 'TimeGenerated': human_readable_time_from_epoch_time(row_content.get('time_generated', 0)),\n 'ToZone': row_content.get('to_zone'),\n 'Tunnel': row_content.get('tunnel', {}).get('value'),\n 'TunneledApp': row_content.get('tunneled_app'),\n 'URLCategory': row_content.get('url_category', {}).get('value'),\n 'FileSHA256': row_content.get('file_sha_256'),\n 'Vsys': row_content.get('vsys'),\n 'VsysID': row_content.get('vsys_id'),\n 'VendorName': row_content.get('vendor_name'),\n 'VendorSeverity': row_content.get('vendor_severity', {}).get('value')\n }",
"def map_file_normalization_info(file_normalization_event):\n event_info = {}\n if not file_normalization_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_normalization_event.event_outcome_detail,\n }\n )\n if file_normalization_event.event_detail:\n event_info.update(\n {\n \"prov:softwareAgent\": file_normalization_event.event_detail.split(\n \";\"\n )[0],\n \"premis:version\": file_normalization_event.event_detail.split(\";\")[\n 1\n ],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file normalization tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_normalization_event.event_detail,\n )\n return event_info",
"def format_file_event(event):\n event_dict = {\n \"premis:eventIdentifier\": event.event_id,\n \"event_name\": event.event_type,\n \"prov:softwareAgent\": event.event_detail,\n \"premis:outcome\": event.event_outcome,\n \"event_outcome_detail\": event.event_outcome_detail,\n }\n return event_dict",
"def map_file_data(file_obj, file_events):\n file_as_dict = {\n \"premis:originalName\": file_obj.currentlocation,\n \"original_name\": escape(file_obj.originallocation),\n # needs investigation\n \"sanitized_file_name\": get_sanitized_file_name(\n get_file_name_cleanup(file_events)\n ),\n \"prov:generatedAtTime\": file_obj.modificationtime.strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n ),\n \"premis:fixity\": {\n \"checksum_type\": convert_to_premis_hash_function(file_obj.checksumtype),\n \"Checksum\": file_obj.checksum,\n },\n \"premis:identifier\": file_obj.uuid,\n \"premis:size\": file_obj.size,\n \"file_name\": file_obj.label,\n # not sure if this is the file name or if we should stick with\n \"dct:FileFormat\": map_file_format_info(\n get_file_format_event(file_events), get_file_validation_event(file_events)\n ),\n \"file_validation\": map_file_validation_info(\n get_file_validation_event(file_events)\n ),\n \"file_normalization\": map_file_normalization_info(\n get_file_normalization_event(file_events)\n ),\n \"events\": list_file_events(file_events),\n }\n return file_as_dict",
"def format(self, filename, timestamp, line):\n return self._formatter({\n '@source': \"file://{0}{1}\".format(self._current_host, filename),\n '@type': self._file_config.get('type', filename),\n '@tags': self._file_config.get('tags', filename),\n '@fields': self._file_config.get('fields', filename),\n '@timestamp': timestamp,\n '@source_host': self._current_host,\n '@source_path': filename,\n '@message': line.strip(os.linesep),\n })",
"def map_file_validation_info(file_validation_event):\n event_info = {}\n if not file_validation_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_validation_event.event_outcome_detail,\n \"prov:softwareAgent\": file_validation_event.event_detail.split(\";\")[0],\n \"premis:version\": file_validation_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file validation tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_validation_event.event_detail,\n )\n return event_info",
"def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None",
"def process_message(message):\n return {\n \"subject\": message.subject,\n \"sender\": message.sender_name,\n \"header\": message.transport_headers,\n \"body\": message.plain_text_body,\n \"creation_time\": message.creation_time,\n \"submit_time\": message.client_submit_time,\n \"delivery_time\": message.delivery_time,\n \"attachment_count\": message.number_of_attachments,\n }",
"def to_dict(self):\n return {'file_name': self.file_name,\n 'raw_file_name': self.raw_file_name,\n 'metadata': self.metadata,\n 'pre_file_name': self.pre_file_name,\n }",
"def get_format_data(self, message_type: str, data: dict) -> CredFormatAttachment:",
"def filename(self, *args) -> \"PyObject *\":\n return _ida_fpro.qfile_t_filename(self, *args)",
"def create_file_meta_data(vk4_container, args):\n log.debug(\"Entering create_file_meta_data()\")\n\n header_list = list()\n header_list.append(args.layer)\n header_list.append('\\n')\n header_list.append('File name')\n header_list.append(args.input)\n header_list.append('Title')\n header_list.append(args.input[:-4])\n header_list.append('Measurement date')\n header_list.append(str(vk4_container.measurement_conditions['month']) + '\\\\' +\n str(vk4_container.measurement_conditions['day']) + '\\\\' +\n str(vk4_container.measurement_conditions['year']))\n header_list.append('Measurement time')\n header_list.append(str(vk4_container.measurement_conditions['hour']) + ':' +\n str(vk4_container.measurement_conditions['minute']) + ':' +\n str(vk4_container.measurement_conditions['second']))\n # User mode?\n header_list.append('Objective lens')\n header_list.append(vk4_container.string_data['lens_name'] + ' ' +\n str(vk4_container.measurement_conditions['lens_magnification'] / 10.0) + 'x')\n header_list.append('Numerical Aperture')\n header_list.append(vk4_container.measurement_conditions['num_aperture'] / 1000.0)\n # Size? Standard?\n # Mode? Surface profile?\n # RPD? OFF?\n header_list.append('Quality')\n header_list.append('Skip 4 lines')\n header_list.append('Pitch (um)')\n header_list.append(vk4_container.measurement_conditions['pitch'] / 1000.0)\n header_list.append('Z measurement distance (um)')\n header_list.append(vk4_container.measurement_conditions['distance'] / 1000.0)\n # Double scan? OFF?\n header_list.append('Brightness 1')\n header_list.append(vk4_container.measurement_conditions['PMT_gain'])\n header_list.append('Brightness 2')\n br_2 = vk4_container.measurement_conditions['PMT_gain_2']\n header_list.append('---') if br_2 == 0 else header_list.append(br_2)\n # Not sure how they got ND filter to 30% in example csv\n header_list.append('ND filter (%)')\n header_list.append(vk4_container.measurement_conditions['ND_filter'] * 30)\n header_list.append('Optical zoom')\n header_list.append(vk4_container.measurement_conditions['optical_zoom'] / 10.0)\n # Average count? 1 time?\n # Filter? OFF?\n # Fine mode? 
ON?\n header_list.append('Line count')\n l_count = vk4_container.measurement_conditions['number_of_lines']\n header_list.append(l_count)\n\n header_list.append('Line position1')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][0])\n\n header_list.append('Line position2')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][1])\n\n header_list.append('Line position3')\n if l_count == 0:\n header_list.append('---')\n else:\n header_list.append(vk4_container.measurement_conditions['reserved_1'][2])\n\n header_list.append('Camera gain (db)')\n header_list.append(vk4_container.measurement_conditions['camera_gain'] * 6)\n header_list.append('Shutter speed')\n header_list.append(vk4_container.measurement_conditions['shutter_speed'])\n header_list.append('White balance mode')\n wb_mode = vk4_container.measurement_conditions['white_balance_mode']\n header_list.append('Auto') if wb_mode == 1 else header_list.append(wb_mode)\n header_list.append('White balance R')\n header_list.append(vk4_container.measurement_conditions['white_balance_red'])\n header_list.append('White balance B')\n header_list.append(vk4_container.measurement_conditions['white_balance_blue'])\n header_list.append('Intensity correction mode')\n header_list.append('Gamma correction')\n header_list.append('Gamma correction value')\n header_list.append(vk4_container.measurement_conditions['gamma'] / 100.0)\n header_list.append('Gamma offset (%)')\n header_list.append(vk4_container.measurement_conditions['gamma_correction_offset'] /\n 65536.0)\n # W/B inversion? OFF?\n # Head type? VK-X110?\n # Correct intensity eccentricity? OFF?\n # Correct field curvature? OFF?\n header_list.append('XY calibration (nm/pixel)')\n header_list.append(vk4_container.measurement_conditions['x_length_per_pixel'] / 1000.0)\n header_list.append('Z calibration (nm/digit)')\n header_list.append(vk4_container.measurement_conditions['z_length_per_digit'] / 1000.0)\n # Saturation?\n # Contrast?\n # Brightness?\n # AI noise elimination? Auto(ON)?\n # Angled surface noise filter? Auto(OFF)?\n header_list.append('Width')\n header_list.append(vk4_container.image_width)\n header_list.append('Height')\n header_list.append(vk4_container.image_height)\n # Skip amount? 1?\n\n out_type = args.type\n if out_type == 'hcsv':\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return np.reshape(header_list, (len(header_list) // 2, 2))\n else:\n # Can use a dict to attach info to an image using PILs Image module\n meta_dict = dict()\n for n in range(0, len(header_list), 2):\n meta_dict[header_list[n]] = header_list[n + 1]\n\n log.debug(\"Exiting create_file_meta_data() where out_type == %s\" % out_type)\n return meta_dict",
"def getStepLogData(self) -> Dict:\n query = \"\"\" SELECT file_process_id,\n fp_id,\n filename,\n bucket_name\n FROM file_process_log\n WHERE file_process_id IN(SELECT file_process_id\n FROM file_process_step_log\n WHERE step_name = 'File Registration'\n AND step_status = 'DONE'\n AND step_end_ts > (SELECT\n COALESCE(MAX(step_start_ts),'1970-01-01 00:00:00')\n FROM file_process_step_log\n WHERE step_name = 'Data Loader'\n AND step_status = 'DONE')) \n \"\"\"\n data = self.getDataAsDict(query)\n return data",
"def _parse_proxy_files(self):\n out = dict()\n\n # name may be something like \"ssp1_[YEAR].tif\", which actually refers to multiple files\n # such as \"ssp1_2010.tif\" and \"ssp1_2020.tif\" when info['years'] == [2010, 2020]\n for name, info in self.proxy_files.items():\n # promote strs to list\n if isinstance(info['variables'], str):\n info['variables'] = [info['variables']]\n\n if isinstance(info['years'], int):\n info['years'] = [info['years']]\n\n # flags are optional\n if 'flags' in info:\n if isinstance(info['flags'], str):\n info['flags'] = [info['flags']]\n else:\n info['flags'] = []\n\n for variable in info['variables']:\n\n # file name may use an abbreviation of the variable name\n # if info['variables'] is a dict of form {variable: abbreviation}\n abbreviation = info['variables'][variable] if isinstance(info['variables'], dict) else variable\n\n for year in info['years']:\n # determine the actual name of the file containing variable variable for year year\n filename = name.replace('{variable}', abbreviation).replace('{year}', str(year))\n\n if filename not in out:\n out[filename] = {'variables': [], 'years': [], 'flags': info['flags']}\n\n if variable not in out[filename]['variables']:\n out[filename]['variables'].append(variable)\n if year not in out[filename]['years']:\n out[filename]['years'].append(year)\n\n self.proxy_files = out",
"def prepare_misc_data(self):\n if 'misc_input_files' in self.man:\n data = self.man['misc_input_files']\n else:\n return None\n\n data_new_format = {}\n for item in data:\n token = item['token']\n if not token in data_new_format:\n data_new_format[token] = []\n data_new_format[token].append(item['file'])\n return data_new_format",
"def _options(self):\r\n xmi_file = self.tb_xmi_file_name.GetValue()\r\n topic = self.tb_pragma.GetValue()\r\n package = self.tb_package.GetValue()\r\n header = self.tb_file_header.GetValue()\r\n target_folder = self.tb_target_folder.GetValue()\r\n encoding = self.tb_encoding.GetValue()\r\n \r\n return {\"topic\" : topic, \r\n \"package\" : package, \r\n \"header\" : header, \r\n \"target_folder\" : target_folder,\r\n \"encoding\" : encoding,\r\n \"xmi_file\" : xmi_file}",
"def format(self, record):\n\n\n if not hasattr(record, 'filename_'):\n record.file_indicator = '-'\n else:\n record.file_indicator = os.path.relpath(record.filename_.strip(),\n self.study_dir)\n record.line_indicator = self.format_aggregated(\n record,\n 'line_number',\n ' line %d:',\n ' lines [%s]:',\n optional=True)\n record.column_indicator = self.format_aggregated(\n record,\n 'column_number',\n ' column %d:',\n ' columns [%s]:',\n optional=True)\n record.cause_indicator = self.format_aggregated(\n record,\n 'cause',\n \"; value encountered: '%s'\",\n \"; values encountered: ['%s']\",\n join_string=\"', '\",\n optional=True)\n\n # format the string based on these fields\n formatted_result = super(LogfileStyleFormatter, self).format(record)\n\n # prepend an empty line if the filename is different than before\n current_filename = getattr(record, 'filename_', '')\n if (self.previous_filename is not None and\n current_filename != self.previous_filename):\n formatted_result = '\\n' + formatted_result\n self.previous_filename = current_filename\n\n return formatted_result",
"def __read_job_params_file__(self):\n # | - __read_job_params_file__\n job_params = {}\n\n # file_path = self.full_path + \"/\" + \"job_parameters.json\"\n\n file_exists = False\n\n file_path = os.path.join(\n self.full_path,\n \"job_parameters.json\")\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n ind_i = self.full_path.rfind(self.full_path.split(\"/\")[-1])\n path_i_rt = self.full_path[:ind_i - 1]\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_parameters.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_params.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n if not file_exists:\n print(\"No job_params file found for following job:\")\n print(self.full_path)\n\n return(job_params)\n # __|",
"def get_args(self):\n return {\n 'contents': self.get_formatted_code()\n }",
"def get_files(message):\n message_deserialized = json.loads(message)\n filename = message_deserialized['contents'] #returns unicode\n return str(filename)",
"def args_to_params(args: list) -> dict:\n found = {}\n\n # Setup the dictionary identifying the parameters\n found['sensor'] = args.sensor\n found['filename'] = args.filename\n found['working_space'] = args.working_space\n if args.userid:\n found['userid'] = args.userid\n\n # Note: Return an empty dict if we're missing mandatory parameters\n return found",
"def resource_file_format(value):\n try:\n file_name, blob_source = value.split('=')\n except ValueError:\n message = (\"Incorrectly formatted resource reference. \"\n \"Argmuent values should be in the format filename=blobsource\")\n raise ValueError(message)\n return {'file_path': file_name, 'blob_source': blob_source}",
"def _reformat_load_versions(ctx, param, value) -> Dict[str, str]:\n load_version_separator = \":\"\n load_versions_dict = {}\n\n for load_version in value:\n load_version_list = load_version.split(load_version_separator, 1)\n if len(load_version_list) != 2:\n raise ValueError(\n \"Expected the form of `load_version` to be \"\n \"`dataset_name:YYYY-MM-DDThh.mm.ss.sssZ`,\"\n \"found {} instead\".format(load_version)\n )\n load_versions_dict[load_version_list[0]] = load_version_list[1]\n\n return load_versions_dict"
] | [
"0.5899487",
"0.586862",
"0.58116794",
"0.5770152",
"0.575695",
"0.55263674",
"0.5516201",
"0.5448604",
"0.5429166",
"0.5357836",
"0.5328083",
"0.53153014",
"0.5263745",
"0.51659256",
"0.50826246",
"0.50714976",
"0.5064152",
"0.5055028",
"0.5017024",
"0.50022215",
"0.49784836",
"0.4957016",
"0.49275097",
"0.4925857",
"0.49224886",
"0.4921266",
"0.49154347",
"0.49095696",
"0.49037898",
"0.4903773"
] | 0.6851664 | 0 |
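For context, a formatter dict like the one in the row above is usually fed straight into `str.format` on a filename template; the template and sample values below are made up:

```python
def format_filename(template, data, row, field):
    # merge message-level values with the row being processed
    formatters = {
        "source": data.get("source"),
        "field": field,
        "type": data.get("type"),
    }
    formatters.update(**row)
    return template.format(**formatters)


print(format_filename(
    "{source}-{host}-{field}.{type}.log",     # hypothetical template
    {"source": "probe1", "type": "latency"},  # vaping data message
    {"host": "10.0.0.1", "avg": 1.2},         # currently processed row
    "avg",
))
```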
Label the provided ICA components with the ICLabel neural network. ICLabel is designed to classify ICs fitted with an extended infomax ICA decomposition algorithm on EEG datasets referenced to a common average and filtered between [1., 100.] Hz. It is possible to run ICLabel on datasets that do not meet those specifications, but the classification performance might be negatively impacted. Moreover, the ICLabel paper did not study the effects of these preprocessing steps. | def iclabel_label_components(
    inst: Union[BaseRaw, BaseEpochs],
    ica: ICA,
    inplace: bool = True,
    backend: Optional[str] = None,
):
    features = get_iclabel_features(inst, ica)
    labels_pred_proba = run_iclabel(*features, backend=backend) # type: ignore
    if inplace:
        from mne_icalabel.config import ICA_LABELS_TO_MNE
        ica.labels_scores_ = labels_pred_proba
        argmax_labels = np.argmax(labels_pred_proba, axis=1)
        # add labels to the ICA instance
        for idx, (_, mne_label) in enumerate(ICA_LABELS_TO_MNE.items()):
            auto_labels = list(np.argwhere(argmax_labels == idx).flatten())
            if mne_label not in ica.labels_:
                ica.labels_[mne_label] = auto_labels
                continue
            for comp in auto_labels:
                if comp not in ica.labels_[mne_label]:
                    ica.labels_[mne_label].append(comp)
    return labels_pred_proba | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _run_iclabel(images: ArrayLike, psds: ArrayLike, autocorr: ArrayLike) -> NDArray:\n # load weights\n network_file = files(\"mne_icalabel.iclabel.network\") / \"assets\" / \"ICLabelNet.pt\"\n iclabel_net = ICLabelNet()\n iclabel_net.load_state_dict(torch.load(network_file))\n # format inputs and run forward pass\n labels = iclabel_net(\n *_format_input_for_torch(*_format_input(images, psds, autocorr))\n )\n return labels.detach().numpy()",
"def test_basic_labeling(self):\n # data with only 1 feature\n data = array([[-1], [1], [0.5], [0.25], [-0.33], [0]])\n # give 1 if feature value >= 0; otherwise 0\n labels = array([0, 1, 1, 1, 0, 1])\n cdata = LabeledCData(data, labels)\n\n # ensure that labelling is correct\n assert array_equal(cdata.labels, labels)",
"def create_coco_label(is_training):\n from pycocotools.coco import COCO\n\n coco_root = config.coco_root\n data_type = config.val_data_type\n if is_training:\n data_type = config.train_data_type\n\n # Classes need to train or test.\n train_cls = config.coco_classes\n train_cls_dict = {}\n for i, cls in enumerate(train_cls):\n train_cls_dict[cls] = i\n\n anno_json = os.path.join(coco_root, config.instances_set.format(data_type))\n\n coco = COCO(anno_json)\n classs_dict = {}\n cat_ids = coco.loadCats(coco.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"id\"]] = cat[\"name\"]\n\n image_ids = coco.getImgIds()\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n\n for img_id in image_ids:\n image_info = coco.loadImgs(img_id)\n file_name = image_info[0][\"file_name\"]\n anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anno = coco.loadAnns(anno_ids)\n image_path = os.path.join(coco_root, data_type, file_name)\n annos = []\n iscrowd = False\n for label in anno:\n bbox = label[\"bbox\"]\n class_name = classs_dict[label[\"category_id\"]]\n iscrowd = iscrowd or label[\"iscrowd\"]\n if class_name in train_cls:\n x_min, x_max = bbox[0], bbox[0] + bbox[2]\n y_min, y_max = bbox[1], bbox[1] + bbox[3]\n annos.append(list(map(round, [y_min, x_min, y_max, x_max])) + [train_cls_dict[class_name]])\n\n if not is_training and iscrowd:\n continue\n if len(annos) >= 1:\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = np.array(annos)\n\n return images, image_path_dict, image_anno_dict",
"def build_label_transform():\n\n return NALabelEncoder()",
"def pca_visual(X_data, Y_data, dict_CLnames, comp=False, clusters=None,):\n pca = PCA(2) # project from 72 to 2 dimensions\n X_pca = pca.fit_transform(X_data)\n\n #encode class labels into numeric values\n le = preprocessing.LabelEncoder()\n label_encoder = le.fit(Y_data)\n y = label_encoder.transform(Y_data)\n\n Xax=X_pca[:,0] #First Principal Component\n Yax=X_pca[:,1] #Second Principal Component\n labels= y\n cdict={0:'red',1:'green'} #dict with colors\n labl=dict_CLnames\n labl_cl = {0:'cluster 1',1:'cluster 2'}\n if comp == False:\n fig,ax=plt.subplots(figsize=(7,5))\n fig.patch.set_facecolor('white')\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n plt.xlabel(\"First Principal Component\",fontsize=14)\n plt.ylabel(\"Second Principal Component\",fontsize=14)\n plt.legend()\n plt.show()\n \n if comp == True:\n fig,axs =plt.subplots(nrows=1, ncols=2, figsize=(15,5))\n fig.patch.set_facecolor('white')\n ax = axs[0]\n for l in np.unique(labels):\n ix=np.where(labels==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Original data')\n ax.legend()\n\n \n ax = axs[1]\n for l in np.unique(clusters):\n ix=np.where(clusters==l)\n ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40, label=labl_cl[l])\n # for loop ends\n ax.set_xlabel(\"First Principal Component\",fontsize=14)\n ax.set_ylabel(\"Second Principal Component\",fontsize=14)\n ax.set_title('Clustered data')\n ax.legend()\n plt.show()",
"def transform_with_label(aug):\n\n geometric_tfx = get_geometric_transformer(aug)\n intensity_tfx = get_intensity_transformer(aug)\n\n def transform(comp, c_label, c_img, use_onehot, nclass, **kwargs):\n \"\"\"\n Args\n comp: a numpy array with shape [H x W x C + c_label]\n c_label: number of channels for a compact label. Note that the current version only supports 1 slice (H x W x 1)\n nc_onehot: -1 for not using one-hot representation of mask. otherwise, specify number of classes in the label\n\n \"\"\"\n comp = copy.deepcopy(comp)\n if (use_onehot is True) and (c_label != 1):\n raise NotImplementedError(\"Only allow compact label, also the label can only be 2d\")\n assert c_img + 1 == comp.shape[-1], \"only allow single slice 2D label\"\n\n # geometric transform\n _label = comp[..., c_img ]\n _h_label = np.float32(np.arange( nclass ) == (_label[..., None]) )\n comp = np.concatenate( [comp[..., :c_img ], _h_label], -1 )\n comp = geometric_tfx(comp)\n # round one_hot labels to 0 or 1\n t_label_h = comp[..., c_img : ]\n t_label_h = np.rint(t_label_h)\n assert t_label_h.max() <= 1\n t_img = comp[..., 0 : c_img ]\n\n # intensity transform\n t_img = intensity_tfx(t_img)\n\n if use_onehot is True:\n t_label = t_label_h\n else:\n t_label = np.expand_dims(np.argmax(t_label_h, axis = -1), -1)\n return t_img, t_label\n\n return transform",
"def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')",
"def test_labels(self):\n self.compliance_tester.test_labels(self.oi)",
"def test_func_labeling(self):\n # Define a labeling function\n def label(x):\n return 1 if x >= 0 else 0\n\n # Create (arbitrary) data\n data = array([[500], [-17], [12], [0], [-.002], [.001]])\n\n # Manually create the labels\n labels = array([label(x) for x in data])\n\n # Create a labelled cdata object by passing in the labeling function\n cdata = LabeledCData(data, label)\n\n # Make sure the data is labelled correctly\n self.assertTrue(array_equal(labels, cdata.labels))",
"def labelComponents26(cube):\n x,y,z = np.where(cube);\n label = np.zeros(cube.shape, dtype = 'uint8');\n ncomp = 0;\n for xp,yp,zp in zip(x,y,z):\n if label[xp,yp,zp] == 0:\n ncomp += 1;\n label = labelNeighbours26(cube, label, xp,yp,zp, ncomp);\n return ncomp, label",
"def setContourLabelString(text=''):\n dislin.conlab(text)",
"def old_ideal_label(I):\n a, c, d = ideal_HNF(I)\n return \"%s.%s.%s\" % (a * d, c, d)",
"def label(self):\r\n raise NotImplementedError",
"def _gather_components(self):\n comps = set()\n for data in self._collection:\n for c in data.components:\n if c in comps:\n continue\n label = \"%s (%s)\" % (c, data.label)\n label = disambiguate(label, self._labels)\n self._labels[label] = c\n comps.add(c)",
"def label(image,**kw):\n # default connectivity in OpenCV: 8 (which is equivalent to...)\n # default connectivity in scikit-image: 2\n n, labels = cv2.connectedComponents(image.astype(uint8), connectivity=4)\n #n, labels = cv2.connectedComponentsWithAlgorithm(image.astype(uint8), connectivity=4, ltype=2, ccltype=cv2.CCL_DEFAULT)\n return labels, n-1\n # try: return measurements.label(image,**kw)\n # except: pass\n # types = [\"int32\",\"uint32\",\"int64\",\"uint64\",\"int16\",\"uint16\"]\n # for t in types:\n # try: return measurements.label(array(image,dtype=t),**kw)\n # except: pass\n # # let it raise the same exception as before\n # return measurements.label(image,**kw)",
"def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)",
"def labeling_func(df_clus):\n\n df_all_labeled = df_all_columns.copy()\n df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()\n df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)\n for i in range(0, clus_params['n_components']):\n df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()\n\n return df_all_labeled",
"def label(filenames, train_path='../data/train_molecules_30.mat'):\n unlabeled = [scipy.io.loadmat(fname) for fname in filenames]\n unlabeled_X = np.vstack([data['X'] for data in unlabeled])\n X, Y = load_data(train_path, shape=(-1, 30, 30, 30))\n\n num_unlabeled = unlabeled_X.shape[0]\n unlabeled_Y = np.zeros(num_unlabeled) - 1\n unlabeled_Y = unlabeled_Y.reshape((-1, 1))\n Y = Y.reshape((-1, 1))\n Y_all = np.vstack((Y, unlabeled_Y))\n\n X_all = np.vstack((X, unlabeled_X))\n X_all = X_all.reshape((-1, 27000))\n\n label_prop_model = LabelSpreading()\n label_prop_model.fit(X_all, Y_all)\n Y_all = label_prop_model.transduction_\n unlabeled_Y = Y_all[num_unlabeled:]\n return (unlabeled_X, unlabeled_Y), (X_all, Y_all)",
"def __init__(self, i1=None, i2=None, axial=None, label=None):\n self.i1 = i1 # will be 1 for center.\n self.i2 = i2\n self.setAxial(axial)\n if not label:\n self.label = \"ExCore\"\n self.makeLabel()\n else:\n self.fromLabel(label)\n self.firstChar = None",
"def biplot(score,coeff,pcax,pcay,labels=None,nm=None):\n pca1=pcax-1\n pca2=pcay-1\n xs = score[:,pca1]\n ys = score[:,pca2]\n n=score.shape[1]\n if nm == None:\n nm = n\n #construct scales to constrain data between -1 and 1\n scalex = 1.0/(xs.max()- xs.min())\n scaley = 1.0/(ys.max()- ys.min())\n \n #scatter data points in the new basis span by pca1 and pca2\n plt.scatter(xs*scalex,ys*scaley, marker='.',edgecolor='none')\n vectors = []\n \n #overlay transforms of original features in the new basis\n for i in range(n):\n #calculate length of vectors in new basis\n vectors.append((labels[i],np.sqrt(coeff[i,pca1]**2 + coeff[i,pca2]**2)))\n #plot arrow vectors\n plt.arrow(0, 0, coeff[i,pca1], coeff[i,pca2],color='g',alpha=0.5) \n #add labels\n if labels is None:\n plt.text(coeff[i,pca1]* 1.15, coeff[i,pca2] * 1.15, \"Var\"+str(i+1), color='k', ha='center', va='center')\n else:\n plt.text(coeff[i,pca1]* 1.15, coeff[i,pca2] * 1.15, labels[i], color='k', ha='center', va='center')\n plt.xlim(-1,1)\n plt.ylim(-1,1)\n plt.xlabel(\"PC{}\".format(pcax))\n plt.ylabel(\"PC{}\".format(pcay))\n plt.grid()\n plt.show()\n #print \"Feature import (PCA)\"\n #print \"--------------------\"\n vectors = sorted(vectors,key=lambda x:x[1],reverse=False)\n \n plt.barh(range(len(vectors)),map(lambda x:x[1],vectors),edgecolor='none')\n plt.yticks(np.arange(len(vectors))+0.4,map(lambda x:x[0],vectors))\n plt.xlabel('Feature importance')\n plt.grid()\n plt.show()\n #pprint(vectors)\n return vectors",
"def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. (default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]",
"def test_labels(self):\n classes = np.array([\"a\", \"b\", \"c\", \"d\", \"e\"])\n y = classes[np.random.randint(0, 5, 100)]\n\n oz = ClassificationScoreVisualizer(GaussianNB, classes=classes)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = dict(zip(range(len(classes)), classes))\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)\n\n encoder = LabelEncoder().fit(y)\n oz = ClassificationScoreVisualizer(GaussianNB, encoder=encoder)\n npt.assert_array_equal(oz._labels(), classes)",
"def SAMT_labels(self):\n \t\t#find basic labels\n \t\tlabels_basic = self.dependency_labels()\n \t\tlabels = Labels(labels_basic)\n \t\treturn labels.SAMT_labels()",
"def get_labels(self):\r\n return [\"X\", \"O\", \"B-a\", \"I-a\", \"B-b\", \"I-b\", \"B-c\", \"I-c\", \"S-a\", \"S-b\", \"S-c\", \"[CLS]\", \"[SEP]\"]",
"def make_coco_labels(real_c):\n y = np.eye(real_c.size(1))\n\n fixed_c_list = []\n\n # single object addition and removal\n for i in range(2*real_c.size(1)):\n fixed_c = real_c.clone()\n for c in fixed_c:\n if i%2:\n c[i//2] = 0.\n else:\n c[i//2] = 1.\n fixed_c_list.append(Variable(fixed_c, volatile=True).cuda())\n\n # multi-attribute transfer (H+G, H+A, G+A, H+G+A)\n #if self.dataset == 'CelebA':\n # for i in range(4):\n # fixed_c = real_c.clone()\n # for c in fixed_c:\n # if i in [0, 1, 3]: # Hair color to brown\n # c[:3] = y[2]\n # if i in [0, 2, 3]: # Gender\n # c[3] = 0 if c[3] == 1 else 1\n # if i in [1, 2, 3]: # Aged\n # c[4] = 0 if c[4] == 1 else 1\n # fixed_c_list.append(self.to_var(fixed_c, volatile=True))\n return fixed_c_list",
"def get_labels_comp(F, is_p, is_m):\n labels = [\"C\"+str(idx+1)+\"|P\" if is_p[idx]\n else \"C\"+str(idx+1)+\"|M\" if is_m[idx]\n else \"C\"+str(idx+1) for idx in range(F.shape[0])]\n return labels",
"def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels",
"def test_basic_labeled_cdata(self):\n data = array([[1, 0, 0], [0, 1, 0]])\n labels = array([1, 0])\n lcdata = LabeledCData(data, labels)\n self.assertEqual(lcdata.num_features, 3)\n self.assertEqual(lcdata.num_samples, 2)",
"def find_ica_components(self, fit_on_epochs=False, high_pass_freq=1, decim=3, n_components=20, **kwargs):\n if fit_on_epochs and self.epochs is None:\n raise AttributeError('No epochs found. You have to create epochs using `get_epochs()` before '\n 'you can fit the ICA on them.')\n\n # Checking whether the ICA needs to be re-run:\n these_params = dict(fit_on_epochs=fit_on_epochs, high_pass_freq=high_pass_freq, decim=decim)\n these_params.update(kwargs)\n if self.ica is None or these_params != self.ica_fit_params:\n\n fit_data = self.epochs if fit_on_epochs else self.raw\n\n self.ica = mne.preprocessing.ICA(n_components=n_components, **kwargs)\n self.ica.fit(fit_data.filter(high_pass_freq, None), decim=decim)\n self.ica_fit_params = dict(fit_on_epochs=fit_on_epochs, high_pass_freq=high_pass_freq, decim=decim)\n self.ica_fit_params.update(kwargs)\n\n print('Plotting found components. If you plan to exclude components manually make sure to create a file '\n 'listing the numbers of those you wish to exclude.')\n self.ica.plot_components()",
"def input_features_labels(device, signal, subject_ID):\n\n directory = f'data/feature_label_tables/feature_{device}_{signal}/feature_S{subject_ID}_all_axis_{device}_{signal}'\n data = pd.read_csv(directory)\n data = data.dropna()\n\n # since all zero_crossing and mean_crossing metrics are zero and 200, respectively,\n # regardless of the signal and the activity, we ignore this feature.\n features = data.drop(columns=[f'x_{signal}_zero_crossing', f'x_{signal}_mean_crossing',\n f'y_{signal}_zero_crossing', f'y_{signal}_mean_crossing',\n f'z_{signal}_zero_crossing', f'z_{signal}_mean_crossing',\n 'Activity_ID'])\n\n all_labels = data[['Activity_ID']]\n\n feature_train, feature_test, label_train, label_test = train_test_split(\n features, all_labels, test_size=0.2, shuffle=True)\n # feature normalization\n scalar = StandardScaler().fit(feature_train)\n normalized_feature_train = scalar.transform(feature_train)\n normalized_feature_test = scalar.transform(feature_test)\n normalized_all_feature = scalar.transform(features)\n # convert 'numpy.ndarray' to pandas dataframe\n normalized_feature_train = pd.DataFrame(normalized_feature_train)\n normalized_feature_test = pd.DataFrame(normalized_feature_test)\n normalized_all_feature = pd.DataFrame(normalized_all_feature)\n\n return normalized_feature_train, normalized_feature_test, label_train, label_test, normalized_all_feature, all_labels"
] | [
"0.62045825",
"0.6137364",
"0.60607266",
"0.5875136",
"0.5775551",
"0.5774383",
"0.5720462",
"0.567829",
"0.5673847",
"0.5587884",
"0.55063105",
"0.5473702",
"0.54581785",
"0.5449399",
"0.5438388",
"0.542551",
"0.54028636",
"0.53972393",
"0.53966945",
"0.5395051",
"0.53771865",
"0.53722537",
"0.5370307",
"0.53614813",
"0.5358048",
"0.53430957",
"0.5325858",
"0.5306452",
"0.52728885",
"0.5264964"
] | 0.72729254 | 0 |
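A minimal usage sketch for the ICLabel record above (not part of the dataset): it assumes MNE-Python is installed, that the iclabel_label_components function shown in the document field is in scope, and that the EEG file path is a placeholder; the preprocessing mirrors the 1-100 Hz band-pass filter and common average reference that the query names as ICLabel's intended input.

import mne
from mne.preprocessing import ICA

# Load and preprocess a recording the way ICLabel expects (placeholder path).
raw = mne.io.read_raw_fif("eeg_recording_raw.fif", preload=True)
raw.filter(1.0, 100.0)            # band-pass between 1 and 100 Hz
raw.set_eeg_reference("average")  # common average reference

# Extended infomax decomposition, matching the algorithm ICLabel was designed for.
ica = ICA(n_components=15, method="infomax", fit_params=dict(extended=True))
ica.fit(raw)

# Per-component class probabilities; with inplace=True, ica.labels_ is also filled.
proba = iclabel_label_components(raw, ica, inplace=True)
print(proba.shape)  # (n_components, n_classes)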
Test if path is correctly recognized as not hidden. | def test_is_not_hidden(self) -> None:
path = "home"
result = is_hidden(path)
self.assertFalse(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_hidden(self, path):\n return False",
"def _isPathHidden(self, path: Path) -> bool:\n if os.name == 'nt': # on Windows, check Windows flags of the path\n try:\n fileAttrs = win32api.GetFileAttributes(str(path))\n return fileAttrs & (win32con.FILE_ATTRIBUTE_HIDDEN | win32con.FILE_ATTRIBUTE_SYSTEM)\n except pywintypes.error:\n return False\n else:\n return path.name.startswith('.')",
"def path_is_hidden(path):\n for p in path.parts:\n if p != '..' and p[0] == '.':\n return True\n return False",
"def is_hidden(path_like):\n item_path = pathlib.Path(path_like)\n # check for POSIX-like dot convention\n if item_path.name.startswith(\".\"):\n return True\n # check for windows hidden attribute\n # \"st_file_attributes\" only available on windows\n file_attrs = item_path.stat()\n st_file_attrs = getattr(file_attrs, \"st_file_attributes\", 0)\n return bool(st_file_attrs & stat.FILE_ATTRIBUTE_HIDDEN)",
"def test_is_hidden(self) -> None:\n path = \".ssh\"\n result = is_hidden(path)\n self.assertTrue(result)",
"def is_hidden_file(path:str)->bool:\n if os.name == 'nt':\n # For Windows use file attribute.\n attrs = ctypes.windll.kernel32.GetFileAttributesW(path)\n return bool(attrs & FILE_ATTRIBUTE_HIDDEN)\n\n # For *nix use a '.' prefix.\n return os.path.basename(path).startswith('.')",
"def _assert_vhd_not_hidden(path):\n query_cmd = [\"vhd-util\", \"query\", \"-n\", path, \"-f\"]\n out = run_command(query_cmd)\n\n for line in out.splitlines():\n if line.lower().startswith('hidden'):\n value = line.split(':')[1].strip()\n if value == \"1\":\n raise Exception(\n \"VHD %s is marked as hidden without child\" % path)",
"def is_hidden(self, path):\n if path.endswith(self.extension):\n return True\n return super().is_hidden(path)",
"def is_hidden(self, path):\n\t\tos_path = self._get_os_path(path=path)\n\t\treturn is_hidden(self.bucket, os_path)",
"def hidden(p):\n return not (p == \".\" or p == \"..\") and p.startswith(\".\")",
"def is_hidden(bucket, path):\n\t# TODO: Implement metadata on S3 to mark as hidden\n\tparts = path.split('/')\n\treturn parts[-1].startswith('.')",
"def is_hidden():\n return False",
"def is_hidden():\n return False",
"def is_visible(self, path):\n return True",
"def test_folderhelper_ishidden(self):\n import assetbox.base.helpers\n\n txt_file = os.path.abspath('../content/.test_file.txt')\n fh = assetbox.base.helpers.FolderHelper()\n\n self.assertTrue(fh.is_hidden(txt_file))",
"def isBasedInHiddenFile(self):\n #type: () -> Optional[bool]\n return (\n None if self.realFileName is None #if before\n else self.realFileName != self.fileName\n )",
"def hidden():\n return False",
"def is_hidden(self):\n return self.has_label(HIDDEN_LABEL)",
"def test_ignore_hidden_dotfiles(tmp_path):\n os.chdir(tmp_path)\n Path(\".bar\").write_text(\"bar stuff\")\n Path(\"foo\").write_text(\"foo stuff\")\n assert _ls_visiblefile_paths() == [str(Path(\"foo\").resolve())]\n assert str(Path(tmp_path) / \"foo\") == str(Path(\"foo\").resolve())",
"def verify_hidden_path(self) -> None:\n path = \"/home/user/.ssh/secret_key\"\n with self.assertRaises(AccessDeniedException):\n verify_file_path(path)",
"def field_is_not_hidden_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n log_to_file('Hidden field displayed test failed', 'WARNING')\n return False\n else:\n print \"Hidden field displayed = true\"\n return True",
"def field_is_hidden_xpath(driver, locator):\n elem = driver.find_element_by_xpath(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n print \"Hidden field = true\"\n return True\n else:\n log_to_file('Hidden field test failed', 'WARNING')\n return False",
"def test_hiddenpart(self):\n testfile='hiddenpart.eml'\n try:\n tmpfile = tempfile.NamedTemporaryFile(\n suffix='hidden', prefix='fuglu-unittest', dir='/tmp')\n shutil.copy(\"%s/%s\" % (TESTDATADIR, testfile), tmpfile.name)\n\n user = '[email protected]'\n conffile = self.tempdir + \"/%s-filetypes.conf\" % user\n # the largefile in the test message is just a bunch of zeroes\n open(conffile, 'w').write(\n \"deny application\\/zip no zips allowed\")\n self.rulescache._loadrules()\n suspect = Suspect(\n '[email protected]', user, tmpfile.name)\n\n result = self.candidate.examine(suspect)\n if type(result) is tuple:\n result, message = result\n self.assertEqual(\n result, DELETE, 'hidden message part was not detected')\n\n finally:\n tmpfile.close()\n os.remove(conffile)",
"def on_wrong_path_visibility(self):\n self._set_filter_value(\n 'vizWrongPathState', self.wrong_path_visibility_btn.isChecked())",
"def IsHidden(self):\n return self._common_type.IsHidden()",
"def field_is_not_hidden_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n log_to_file('Hidden field displayed test failed', 'WARNING')\n return False\n else:\n print \"Hidden field displayed = true\"\n return True",
"def hidden(self) -> bool:\n return False",
"def hidden(self) -> bool:\n return False",
"def field_is_hidden_css(driver, locator):\n elem = driver.find_element_by_css_selector(locator)\n is_hidden = elem.get_attribute(\"style\")\n if is_hidden == 'display: none;':\n print \"Hidden field = true\"\n return True\n else:\n log_to_file('Hidden field test failed', 'WARNING')\n return False",
"def is_invisible(self, name):\n return self.q(css=\"div.{}\".format(name)).first.invisible"
] | [
"0.82105416",
"0.8147075",
"0.8074287",
"0.79748076",
"0.79527706",
"0.78247166",
"0.7760722",
"0.76462585",
"0.7565071",
"0.70671797",
"0.70537364",
"0.6908462",
"0.6908462",
"0.6827894",
"0.66952556",
"0.6647296",
"0.65984875",
"0.65974015",
"0.6504221",
"0.64817345",
"0.6468007",
"0.6403826",
"0.6148563",
"0.6118816",
"0.6116334",
"0.61151904",
"0.61001813",
"0.61001813",
"0.6017032",
"0.58388025"
] | 0.82911503 | 0 |
Test getting dataset path. | def test_get_dataset_path(self) -> None:
framework = "tensorflow"
domain = "image_recognition"
result = get_dataset_path(framework, domain)
self.assertEqual(result, "examples/test/dataset/imagenet") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass",
"def test_data_path(self):\n path = self._api.GetDatapath()\n self._api.End()\n self.assertRaises(\n RuntimeError, self._api.Init, path=(self._test_dir + os.path.sep)\n ) # no tessdata\n if _TESSERACT_VERSION >= 0x3999800:\n new_path = path\n else:\n new_path = os.path.abspath(os.path.join(path, os.path.pardir)) + os.path.sep\n self._api.End()\n self._api.Init(new_path)\n self.assertEqual(self._api.GetDatapath(), path)",
"def _getDatasetPath(self):\n return self.__dataset_path",
"def dataset_path(cls):\n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(\n os.path.join(basepath, \"..\", \"datasets\", get_env('DATA_FILENAME')))\n return filepath",
"def get_dataset_path(dataset: str = \"MVTec\") -> str:\n # Initially check if `datasets` directory exists locally and look\n # for the `dataset`. This is useful for local testing.\n path = os.path.join(\"./datasets\", dataset)\n\n # For docker deployment or a CI that runs on server, dataset directory\n # may not necessarily be located in the repo. Therefore, check anomalib\n # dataset path environment variable.\n if not os.path.isdir(path):\n path = os.path.join(os.environ[\"ANOMALIB_DATASET_PATH\"], dataset)\n return path",
"def test_record_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.record\"\n result = is_dataset_file(path)\n self.assertTrue(result)",
"def dataset_path(dataset, work_dir=consts.WORK_DIR):\r\n return join_path(work_dir, consts.DATA_DIR, dataset)",
"def get_test_data_path():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"data\") + os.path.sep)",
"def _is_dataset_path(ds_path: github_api.GithubPath) -> bool:\n return ds_path.is_dir() and (ds_path / f'{ds_path.name}.py').exists()",
"def test_datasets_in_custom_path(tmpdir_factory):\n\n tmpdir1 = tmpdir_factory.mktemp('datasets1')\n tmpdir2 = tmpdir_factory.mktemp('datasets2')\n os.environ['CTAPIPE_SVC_PATH'] = \":\".join([str(tmpdir1),str(tmpdir2)])\n\n # create a dummy dataset to search for:\n\n dataset_name = \"test_dataset_1.txt\"\n dataset_path = str(tmpdir1.join(dataset_name))\n\n with open(dataset_path, \"w\") as fp:\n fp.write(\"test test test\")\n\n # try to find dummy dataset\n path = datasets.get_dataset(dataset_name)\n assert path == dataset_path\n\n with pytest.raises(FileNotFoundError):\n badpath = datasets.get_dataset(\"does_not_exist\")\n\n\n # try using find_all_matching_datasets:\n\n ds = datasets.find_all_matching_datasets(\"test.*\",\n searchpath=os.environ['CTAPIPE_SVC_PATH'])\n assert dataset_name in ds",
"def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')",
"def data_path(self):\n raise NotImplementedError",
"def test_pbtxt_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.pbtxt\"\n result = is_dataset_file(path)\n self.assertFalse(result)",
"def test_dataset_from_dir(dir_dataset):\n dummy = \"malesuada. Integer id magna et ipsum cursus vestibulum. Mauris magna.\"\n assert dir_dataset[0][0] == dummy\n assert dir_dataset[0][1] == '8'\n\n dummy = \"Sed molestie. Sed id risus quis diam luctus lobortis. Class\"\n assert dir_dataset[100][0] == dummy\n assert dir_dataset[100][1] == '6'",
"def check_data(data_set):\n print (\"Checking dataset {0}\".format(data_set))\n data_dir, data_file = os.path.split(data_set)\n # When a directory is not provided, check if dataset is in the data directory\n if data_dir == \"\" and not os.path.isfile(data_set):\n new_path = os.path.join(os.path.split(__file__)[0], \"data\", data_set)\n if os.path.isfile(new_path) or data_file == 'OpportunityUCIDataset.zip':\n data_set = new_path\n\n # When dataset not found, try to download it from UCI repository\n if (not os.path.isfile(data_set)) and data_file == 'OpportunityUCIDataset.zip':\n print (\"... dataset path {0} not found\".format(data_set))\n import urllib.request\n origin = (\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/00226/OpportunityUCIDataset.zip'\n )\n if not os.path.exists(data_dir):\n print (\"... creating directory {0}\".format(data_dir))\n os.makedirs(data_dir)\n print (\"... downloading data from {0}\".format(origin))\n urllib.request.urlretrieve(origin, data_set)\n\n return data_dir",
"def get_data_path():\n return os.getcwd() + \"/data/\"",
"def test_get_denoiser_data_dir(self):\r\n\r\n obs = get_denoiser_data_dir()\r\n\r\n self.assertTrue(exists(obs))\r\n self.assertTrue(exists(obs + 'FLX_error_profile.dat'))",
"def test_local_data_path(i07_nexus: I07Nexus, path):\n assert i07_nexus.local_data_path == path",
"def dir_dataset():\n return TabularDataset.from_path('tests/data/dummy_tabular', sep=',')",
"def dataset_dir(self):\n return self._dataset_dir",
"def get_data_path():\n\treturn _paths[_DATA_DIRECTORY_KEY]",
"def test_load_dataset():\n\n # Given\n dataset_file_name = core.config.app_config.TESTING_DATA_FILE\n\n # When\n subject = utils.load_dataset(filename=dataset_file_name)\n\n # Then\n assert isinstance(subject, pd.DataFrame)\n assert subject.shape == (5940, 41)",
"def test_src_data_path(i07_nexus: I07Nexus, path):\n assert i07_nexus._src_data_path == path",
"def getDataPath():\n\treturn \"..\" + os.sep + \"data\" + os.sep",
"def test_get_dataset_path_unknown_domain(self) -> None:\n framework = \"tensorflow\"\n domain = \"domain\"\n with self.assertRaises(Exception):\n get_dataset_path(framework, domain)",
"def GetTestData():\n return os.path.join(GetSrc(), 'chrome', 'test', 'data')",
"def get_sample_data_dir():\n \n return resource_filename('cdat_lite.test.test_cdms', 'sample_data')",
"def test_get_report_path(self):\n self.assertEqual(self.downloader.directory, self.mock_data.directory)\n self.assertEqual(self.downloader.export_name, self.mock_data.export_name)\n\n self.assertEqual(self.downloader._get_report_path(self.mock_data.test_date), self.mock_data.report_path)",
"def data_dir():\n return os.path.join(os.path.dirname(_here), 'test', 'data')",
"def data_dir():\n return os.path.join(os.path.dirname(__file__), 'test', 'data')"
] | [
"0.75778013",
"0.75268173",
"0.7313395",
"0.70568323",
"0.6979461",
"0.69666904",
"0.69261616",
"0.6925592",
"0.6921823",
"0.69095963",
"0.679207",
"0.6753882",
"0.6716215",
"0.67095876",
"0.6703324",
"0.6695395",
"0.6667533",
"0.66508496",
"0.6650378",
"0.66204023",
"0.6606951",
"0.65893185",
"0.65534717",
"0.65413785",
"0.6538734",
"0.6536956",
"0.6521734",
"0.64917725",
"0.64865005",
"0.6415434"
] | 0.82967913 | 0 |
Test getting dataset path failure. | def test_get_dataset_path_unknown_domain(self) -> None:
framework = "tensorflow"
domain = "domain"
with self.assertRaises(Exception):
get_dataset_path(framework, domain) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_data_path(self):\n path = self._api.GetDatapath()\n self._api.End()\n self.assertRaises(\n RuntimeError, self._api.Init, path=(self._test_dir + os.path.sep)\n ) # no tessdata\n if _TESSERACT_VERSION >= 0x3999800:\n new_path = path\n else:\n new_path = os.path.abspath(os.path.join(path, os.path.pardir)) + os.path.sep\n self._api.End()\n self._api.Init(new_path)\n self.assertEqual(self._api.GetDatapath(), path)",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def test_make_dataset_happy_path(self):\n # User story: user runs src.make_dataset() on the current directory\n # and gets a fully functional dataset\n pass",
"def test_get_dataset_path_unknown_framework(self) -> None:\n framework = \"unknown\"\n domain = \"image_recognition\"\n with self.assertRaises(Exception):\n get_dataset_path(framework, domain)",
"def test_get_denoiser_data_dir(self):\r\n\r\n obs = get_denoiser_data_dir()\r\n\r\n self.assertTrue(exists(obs))\r\n self.assertTrue(exists(obs + 'FLX_error_profile.dat'))",
"def test_datasets_in_custom_path(tmpdir_factory):\n\n tmpdir1 = tmpdir_factory.mktemp('datasets1')\n tmpdir2 = tmpdir_factory.mktemp('datasets2')\n os.environ['CTAPIPE_SVC_PATH'] = \":\".join([str(tmpdir1),str(tmpdir2)])\n\n # create a dummy dataset to search for:\n\n dataset_name = \"test_dataset_1.txt\"\n dataset_path = str(tmpdir1.join(dataset_name))\n\n with open(dataset_path, \"w\") as fp:\n fp.write(\"test test test\")\n\n # try to find dummy dataset\n path = datasets.get_dataset(dataset_name)\n assert path == dataset_path\n\n with pytest.raises(FileNotFoundError):\n badpath = datasets.get_dataset(\"does_not_exist\")\n\n\n # try using find_all_matching_datasets:\n\n ds = datasets.find_all_matching_datasets(\"test.*\",\n searchpath=os.environ['CTAPIPE_SVC_PATH'])\n assert dataset_name in ds",
"def test_get_data_fail(self):\n self.assertIsNone(get_data('this_must_fail', 5, 0))",
"def test_pbtxt_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.pbtxt\"\n result = is_dataset_file(path)\n self.assertFalse(result)",
"def test_missing_data_sources(self):",
"def check_data(data_set):\n print (\"Checking dataset {0}\".format(data_set))\n data_dir, data_file = os.path.split(data_set)\n # When a directory is not provided, check if dataset is in the data directory\n if data_dir == \"\" and not os.path.isfile(data_set):\n new_path = os.path.join(os.path.split(__file__)[0], \"data\", data_set)\n if os.path.isfile(new_path) or data_file == 'OpportunityUCIDataset.zip':\n data_set = new_path\n\n # When dataset not found, try to download it from UCI repository\n if (not os.path.isfile(data_set)) and data_file == 'OpportunityUCIDataset.zip':\n print (\"... dataset path {0} not found\".format(data_set))\n import urllib.request\n origin = (\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/00226/OpportunityUCIDataset.zip'\n )\n if not os.path.exists(data_dir):\n print (\"... creating directory {0}\".format(data_dir))\n os.makedirs(data_dir)\n print (\"... downloading data from {0}\".format(origin))\n urllib.request.urlretrieve(origin, data_set)\n\n return data_dir",
"def test_record_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.record\"\n result = is_dataset_file(path)\n self.assertTrue(result)",
"def test_dfi_raises_if_folder_missing(self):\n with TemporaryDirectory() as tmpdirname:\n # start with empty project (no data/coefficients subdirectory)\n with raises(SmifDataNotFoundError):\n CSVDataStore(tmpdirname)",
"def test_BenchmarkSuite_invalid_path_access(benchmark_suite: typing.Callable):\n bs = benchmark_suite()\n with test.Raises(TypeError):\n _ = bs.path",
"def _is_dataset_path(ds_path: github_api.GithubPath) -> bool:\n return ds_path.is_dir() and (ds_path / f'{ds_path.name}.py').exists()",
"def _check_before_run(self):\r\n if not os.path.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))",
"def test_download_dataset_full_already_exists(tmp_path, force, expect_data):\n\n with open(tmp_path / \"dataset\", \"wb\") as f:\n f.write(b\"This is local data\")\n\n pennylane.data.data_manager._download_dataset(\n \"dataset/path\", tmp_path / \"dataset\", attributes=None, force=force\n )\n\n with open(tmp_path / \"dataset\", \"rb\") as f:\n assert f.read() == expect_data",
"def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))",
"def check_existing_dataset(path: str):\n x_path = os.path.join(path, IMG_DIR)\n y_path = os.path.join(path, MSK_DIR)\n\n if os.path.isdir(x_path) and os.path.isdir(y_path):\n _, _, x_files = next(os.walk(x_path))\n _, _, y_files = next(os.walk(y_path))\n x = len(x_files)\n y = len(y_files)\n\n if x != y:\n logger.warning(\n \"Found un-even numbers of x-y for dataset. x = %i, y = %i.\", x, y\n )\n\n return -1\n\n if x == 0:\n logger.info(\"Found 0 existing sets.\")\n\n return 0\n logger.info(\"Found %s sets in existing dataset.\", x)\n\n return x\n logger.error(\"Could not locate x and y folder.\")\n sys.exit()",
"def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start",
"def test_dataset_from_dir(dir_dataset):\n dummy = \"malesuada. Integer id magna et ipsum cursus vestibulum. Mauris magna.\"\n assert dir_dataset[0][0] == dummy\n assert dir_dataset[0][1] == '8'\n\n dummy = \"Sed molestie. Sed id risus quis diam luctus lobortis. Class\"\n assert dir_dataset[100][0] == dummy\n assert dir_dataset[100][1] == '6'",
"def test_load_dataset():\n\n # Given\n dataset_file_name = core.config.app_config.TESTING_DATA_FILE\n\n # When\n subject = utils.load_dataset(filename=dataset_file_name)\n\n # Then\n assert isinstance(subject, pd.DataFrame)\n assert subject.shape == (5940, 41)",
"def test_DL_export_empty_dataset(self):\n filepath = '4.txt'\n empty_dl = flow_processing_input.DetectorsLocation(2021)\n empty_dl.detector_flow_data = createDLDataset(0).dataset\n # Check if exception was raised for empty detector_flow_data\n with self.assertRaises(Exception):\n empty_dl.export_to_file(filepath)",
"def test_bad_dirrectory_path(self):\n\n portfolio = PortfolioPerformanceData(self.data_path+'NonExisting')\n for func in (portfolio.calculate_asset_performance,\n portfolio.calculate_currency_performance,\n portfolio.calculate_total_performance):\n self.assertIsNone(func(*self.boarder))",
"def _check_before_run(self):\r\n if not osp.exists(self.dataset_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\r\n if not osp.exists(self.train_dir):\r\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))",
"def test_local_data_path(i07_nexus: I07Nexus, path):\n assert i07_nexus.local_data_path == path",
"def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))\n if not osp.exists(self.train_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.train_dir))\n if not osp.exists(self.test_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.test_dir))",
"def test_downloading(self):\n month = '2013-01' # smallest of available datasets\n path = download_data(month)\n self.assertTrue(os.path.isfile(path), msg='File on returned location does not exist')\n os.remove(path)",
"def test_missing_shape(setup_teardown_file):\n f = setup_teardown_file[3]\n\n with pytest.raises(TypeError):\n f.create_dataset('foo')",
"def test_disk_store_alt_name_abspath():\n with TestConfig(DISK_TEST_CONFIG) as (directory, object_store):\n empty_dataset = MockDataset(1)\n directory.write(\"\", \"files1/000/dataset_1.dat\")\n absfoo = os.path.abspath(os.path.join(directory.temp_directory, \"foo.txt\"))\n with open(absfoo, 'w') as f:\n f.write(\"foo\")\n try:\n assert object_store.get_data(\n empty_dataset,\n extra_dir='dataset_1_files',\n alt_name=absfoo) != 'foo'\n except ObjectInvalid:\n pass",
"def check_dataset_exists(dataset):\n result = subprocess.call(['das_client.py', '--query', 'dataset dataset=%s' % dataset])\n return result == 0"
] | [
"0.74248797",
"0.71991664",
"0.71628946",
"0.7049641",
"0.68198264",
"0.6784641",
"0.66754675",
"0.6576848",
"0.6568233",
"0.65432364",
"0.6533707",
"0.6523117",
"0.65167046",
"0.6403536",
"0.63809836",
"0.6379454",
"0.6332501",
"0.6325068",
"0.63231117",
"0.6315364",
"0.6292673",
"0.62701964",
"0.62628585",
"0.62562937",
"0.6208104",
"0.6205129",
"0.6204884",
"0.6192729",
"0.61713296",
"0.6159868"
] | 0.7332925 | 1 |
Test getting framework name from path. | def test_get_onnx_framework_from_path(self) -> None:
path = "/home/user/model.onnx"
result = get_framework_from_path(path)
self.assertEqual(result, "onnxrt") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_unknown_framework_from_path(self) -> None:\n path = \"/home/user/model.some_extension\"\n result = get_framework_from_path(path)\n self.assertIsNone(result)",
"def infoForFramework(filename):\n global FRAMEWORK_RE\n if FRAMEWORK_RE is None:\n import re\n FRAMEWORK_RE = re.compile(FRAMEWORK_RE_STR)\n is_framework = FRAMEWORK_RE.findall(filename)\n if not is_framework:\n return None\n return is_framework[-1]",
"def extract_test_name(base_path):\n name = p.basename(base_path)\n if name == \"test.py\":\n name = \"\"\n elif name.startswith(\"test_\") and name.endswith(\".py\"):\n name = name[len(\"test_\") : (len(name) - len(\".py\"))]\n return name",
"def detect_framework(model_path):\n num_tensorflow_models = _count_num_files_in_path(\n model_path, TENSORFLOW_SPECIFIC_MODEL_FILE_NAMES)\n num_xgboost_models = _count_num_files_in_path(\n model_path, XGBOOST_SPECIFIC_MODEL_FILE_NAMES)\n num_sklearn_models = _count_num_files_in_path(model_path,\n SCIKIT_LEARN_MODEL_FILE_NAMES)\n\n num_matches = num_tensorflow_models + num_xgboost_models + num_sklearn_models\n if num_matches > 1:\n error_msg = \"Multiple model files are found in the model_path: {}\".format(\n model_path)\n logging.critical(error_msg)\n raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)\n\n if num_tensorflow_models == 1:\n return TENSORFLOW_FRAMEWORK_NAME\n elif num_xgboost_models == 1:\n return XGBOOST_FRAMEWORK_NAME\n elif num_sklearn_models == 1:\n model_obj = load_joblib_or_pickle_model(model_path)\n return detect_sk_xgb_framework_from_obj(model_obj)\n else:\n logging.warning((\"Model files are not found in the model_path.\"\n \"Assumed to be custom code.\"))\n return None",
"def test_name(self):\r\n parts = []\r\n if self.test.__module__ != '__main__':\r\n parts.append(self.test.__module__)\r\n if hasattr(self.test, 'im_class'):\r\n parts.append(self.test.im_class.__name__)\r\n parts.append(self.test.__name__)\r\n return '.'.join(parts)",
"def source_test_file_name():\n return 'feature'",
"def test_get_module_name():\n\n assert application_services.get_module_name() == 'tests.unit'",
"def getmodulename(path):\r\n info = getmoduleinfo(path)\r\n if info: return info[0]",
"def test_get_module_qualname_from_path_sys(self):\n\n name = b_utils.get_module_qualname_from_path(os.__file__)\n self.assertEqual(\"os\", name)\n\n # This will fail because of magic for os.path. Not sure how to fix.\n # name = b_utils.get_module_qualname_from_path(os.path.__file__)\n # self.assertEqual(name, 'os.path')",
"def named(path):\n return re.findall(r'.*(test\\d+)\\.out', path)[0]",
"def extract_suite_name(file_path, project_name):\n\n suite_name = str(project_name) + \".\"\n suite_name = suite_name + os.path.splitext(str(file_path).replace(os_sep, \".\"))[0]\n return suite_name",
"def test_path_basename():\n mock_path = \"E:\\\\Repos\\\\pc-setup\\\\powershell\\\\provision_python.ps1\"\n output = sh.path_basename(mock_path)\n assert output == \"provision_python.ps1\"",
"def getName(path):\n\tfrom os.path import split, splitext\n\tpath = normalizePath(path)\n\treturn splitext(split(path)[1])[0]",
"def getSlavename():",
"def get_test_fname(fname):\n path = get_test_path()\n full_path = os.path.join(path, fname)\n return full_path",
"def target_test_file_name():\n return 'test'",
"def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]",
"def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]",
"def get_base_name(path):\n return os.path.basename(path).split('.')[0]",
"def test_get_tensorflow_framework_from_path(self, mocked_get_model_type: MagicMock) -> None:\n mocked_get_model_type.return_value = \"frozen_pb\"\n path = \"/home/user/model.pb\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"tensorflow\")\n mocked_get_model_type.assert_called_with(path)",
"def get_framework(tjs_url, framework_uri):\n payload = {'service': 'TJS',\n 'version': '1.0.0',\n 'request': 'DescribeFrameworks',\n 'FrameworkURI': framework_uri}\n y = requests.get(tjs_url, params=payload, verify=False)\n xml = etree.fromstring(y.content)\n xml_temp = etree.tostring(xml[0])\n # Quick&dirty removal of namespace prefix\n root = xml_temp.replace(b'ns0:', b'')\n parser = etree.XMLParser(ns_clean=True, encoding='utf-8')\n framework = etree.fromstring(root, parser=parser)\n\n return framework",
"def test_get_module_qualname_from_path_invalid_path(self):\n\n name = b_utils.get_module_qualname_from_path(\"/a/b/c/d/e.py\")\n self.assertEqual(\"e\", name)",
"def testGetFrameworkUid(self):\n # Request dispatcher framework UID\n status, response = self._http_get(\"/framework\")\n\n # Check result\n self.assertEqual(status, 200)\n self.assertEqual(response, json.dumps(self.framework_uid))",
"def test_get_dataset_path_unknown_framework(self) -> None:\n framework = \"unknown\"\n domain = \"image_recognition\"\n with self.assertRaises(Exception):\n get_dataset_path(framework, domain)",
"def name_from_path(path):\n return path[0:-3]",
"def get_perf_tester_name(self):\n original_bot_name = self.bisect_config.get('original_bot_name', '')\n recipe_tester_name = self.bisect_config.get('recipe_tester_name')\n if recipe_tester_name:\n return recipe_tester_name\n elif 'win' in original_bot_name: # pragma: no cover\n return 'win64_nv_tester'\n else: # pragma: no cover\n # Reasonable fallback.\n return 'linux_perf_tester'",
"def name(self):\n return self._path or '__main__'",
"def get_app_foldername(path):\n\tif not is_file(path):\n\t\treturn \"\"\n\n\ts = path.split(\"/\")\n\tif not (len(s) >= 2):\n\t\treturn \"\"\n\n\tif s[0] == \"examples\":\n\t\tif s[-1].endswith(\".py\"):\n\t\t\treturn ((s[-1])[:-3])\n\t\telse:\n\t\t\treturn \"\"\n\telse:\n\t\treturn s[-2]",
"def module_name_from_filepath(path: str) -> str:\n name = osp.splitext(osp.basename(path))[0]\n if name == '__init__':\n name = osp.basename(osp.dirname(path))\n return name",
"def get_name(path):\n return path.rsplit('/',1)[1]"
] | [
"0.7062699",
"0.69441956",
"0.64345545",
"0.64053917",
"0.6366199",
"0.6274874",
"0.6201753",
"0.6191427",
"0.60973287",
"0.6045537",
"0.59635186",
"0.5961694",
"0.5953851",
"0.5943369",
"0.58846164",
"0.58798844",
"0.5822605",
"0.5822605",
"0.5822379",
"0.5800866",
"0.5770571",
"0.5767416",
"0.5757527",
"0.57310665",
"0.5731013",
"0.573011",
"0.5703697",
"0.56933635",
"0.5693354",
"0.569259"
] | 0.7172764 | 0 |
Test getting framework name from path. | def test_get_unknown_framework_from_path(self) -> None:
path = "/home/user/model.some_extension"
result = get_framework_from_path(path)
self.assertIsNone(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_onnx_framework_from_path(self) -> None:\n path = \"/home/user/model.onnx\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"onnxrt\")",
"def infoForFramework(filename):\n global FRAMEWORK_RE\n if FRAMEWORK_RE is None:\n import re\n FRAMEWORK_RE = re.compile(FRAMEWORK_RE_STR)\n is_framework = FRAMEWORK_RE.findall(filename)\n if not is_framework:\n return None\n return is_framework[-1]",
"def extract_test_name(base_path):\n name = p.basename(base_path)\n if name == \"test.py\":\n name = \"\"\n elif name.startswith(\"test_\") and name.endswith(\".py\"):\n name = name[len(\"test_\") : (len(name) - len(\".py\"))]\n return name",
"def detect_framework(model_path):\n num_tensorflow_models = _count_num_files_in_path(\n model_path, TENSORFLOW_SPECIFIC_MODEL_FILE_NAMES)\n num_xgboost_models = _count_num_files_in_path(\n model_path, XGBOOST_SPECIFIC_MODEL_FILE_NAMES)\n num_sklearn_models = _count_num_files_in_path(model_path,\n SCIKIT_LEARN_MODEL_FILE_NAMES)\n\n num_matches = num_tensorflow_models + num_xgboost_models + num_sklearn_models\n if num_matches > 1:\n error_msg = \"Multiple model files are found in the model_path: {}\".format(\n model_path)\n logging.critical(error_msg)\n raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL, error_msg)\n\n if num_tensorflow_models == 1:\n return TENSORFLOW_FRAMEWORK_NAME\n elif num_xgboost_models == 1:\n return XGBOOST_FRAMEWORK_NAME\n elif num_sklearn_models == 1:\n model_obj = load_joblib_or_pickle_model(model_path)\n return detect_sk_xgb_framework_from_obj(model_obj)\n else:\n logging.warning((\"Model files are not found in the model_path.\"\n \"Assumed to be custom code.\"))\n return None",
"def test_name(self):\r\n parts = []\r\n if self.test.__module__ != '__main__':\r\n parts.append(self.test.__module__)\r\n if hasattr(self.test, 'im_class'):\r\n parts.append(self.test.im_class.__name__)\r\n parts.append(self.test.__name__)\r\n return '.'.join(parts)",
"def source_test_file_name():\n return 'feature'",
"def test_get_module_name():\n\n assert application_services.get_module_name() == 'tests.unit'",
"def getmodulename(path):\r\n info = getmoduleinfo(path)\r\n if info: return info[0]",
"def test_get_module_qualname_from_path_sys(self):\n\n name = b_utils.get_module_qualname_from_path(os.__file__)\n self.assertEqual(\"os\", name)\n\n # This will fail because of magic for os.path. Not sure how to fix.\n # name = b_utils.get_module_qualname_from_path(os.path.__file__)\n # self.assertEqual(name, 'os.path')",
"def named(path):\n return re.findall(r'.*(test\\d+)\\.out', path)[0]",
"def extract_suite_name(file_path, project_name):\n\n suite_name = str(project_name) + \".\"\n suite_name = suite_name + os.path.splitext(str(file_path).replace(os_sep, \".\"))[0]\n return suite_name",
"def test_path_basename():\n mock_path = \"E:\\\\Repos\\\\pc-setup\\\\powershell\\\\provision_python.ps1\"\n output = sh.path_basename(mock_path)\n assert output == \"provision_python.ps1\"",
"def getName(path):\n\tfrom os.path import split, splitext\n\tpath = normalizePath(path)\n\treturn splitext(split(path)[1])[0]",
"def getSlavename():",
"def get_test_fname(fname):\n path = get_test_path()\n full_path = os.path.join(path, fname)\n return full_path",
"def target_test_file_name():\n return 'test'",
"def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]",
"def get_module_name(module_path):\n return ntpath.split(module_path)[1].split(\".\")[0]",
"def get_base_name(path):\n return os.path.basename(path).split('.')[0]",
"def test_get_tensorflow_framework_from_path(self, mocked_get_model_type: MagicMock) -> None:\n mocked_get_model_type.return_value = \"frozen_pb\"\n path = \"/home/user/model.pb\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"tensorflow\")\n mocked_get_model_type.assert_called_with(path)",
"def get_framework(tjs_url, framework_uri):\n payload = {'service': 'TJS',\n 'version': '1.0.0',\n 'request': 'DescribeFrameworks',\n 'FrameworkURI': framework_uri}\n y = requests.get(tjs_url, params=payload, verify=False)\n xml = etree.fromstring(y.content)\n xml_temp = etree.tostring(xml[0])\n # Quick&dirty removal of namespace prefix\n root = xml_temp.replace(b'ns0:', b'')\n parser = etree.XMLParser(ns_clean=True, encoding='utf-8')\n framework = etree.fromstring(root, parser=parser)\n\n return framework",
"def test_get_module_qualname_from_path_invalid_path(self):\n\n name = b_utils.get_module_qualname_from_path(\"/a/b/c/d/e.py\")\n self.assertEqual(\"e\", name)",
"def testGetFrameworkUid(self):\n # Request dispatcher framework UID\n status, response = self._http_get(\"/framework\")\n\n # Check result\n self.assertEqual(status, 200)\n self.assertEqual(response, json.dumps(self.framework_uid))",
"def test_get_dataset_path_unknown_framework(self) -> None:\n framework = \"unknown\"\n domain = \"image_recognition\"\n with self.assertRaises(Exception):\n get_dataset_path(framework, domain)",
"def name_from_path(path):\n return path[0:-3]",
"def get_perf_tester_name(self):\n original_bot_name = self.bisect_config.get('original_bot_name', '')\n recipe_tester_name = self.bisect_config.get('recipe_tester_name')\n if recipe_tester_name:\n return recipe_tester_name\n elif 'win' in original_bot_name: # pragma: no cover\n return 'win64_nv_tester'\n else: # pragma: no cover\n # Reasonable fallback.\n return 'linux_perf_tester'",
"def name(self):\n return self._path or '__main__'",
"def get_app_foldername(path):\n\tif not is_file(path):\n\t\treturn \"\"\n\n\ts = path.split(\"/\")\n\tif not (len(s) >= 2):\n\t\treturn \"\"\n\n\tif s[0] == \"examples\":\n\t\tif s[-1].endswith(\".py\"):\n\t\t\treturn ((s[-1])[:-3])\n\t\telse:\n\t\t\treturn \"\"\n\telse:\n\t\treturn s[-2]",
"def module_name_from_filepath(path: str) -> str:\n name = osp.splitext(osp.basename(path))[0]\n if name == '__init__':\n name = osp.basename(osp.dirname(path))\n return name",
"def get_name(path):\n return path.rsplit('/',1)[1]"
] | [
"0.7172764",
"0.69441956",
"0.64345545",
"0.64053917",
"0.6366199",
"0.6274874",
"0.6201753",
"0.6191427",
"0.60973287",
"0.6045537",
"0.59635186",
"0.5961694",
"0.5953851",
"0.5943369",
"0.58846164",
"0.58798844",
"0.5822605",
"0.5822605",
"0.5822379",
"0.5800866",
"0.5770571",
"0.5767416",
"0.5757527",
"0.57310665",
"0.5731013",
"0.573011",
"0.5703697",
"0.56933635",
"0.5693354",
"0.569259"
] | 0.7062699 | 1 |
Test getting file extension from path. | def test_get_file_extension(self) -> None:
path = "/home/user/file.ext"
result = get_file_extension(path)
self.assertEqual(result, "ext") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_file_with_dots_extension(self) -> None:\n path = \"/home/user/file.name.ext2\"\n result = get_file_extension(path)\n self.assertEqual(result, \"ext2\")",
"def extension_from_path(path):\n extension = path.split(\".\")[-1]\n return extension",
"def _get_ext(self, path):\n return os.path.splitext(path)[1][1:]",
"def getExtension(path):\n\tfrom os.path import splitext\n\treturn splitext(path)[1]",
"def test_get_file_without_extension(self) -> None:\n path = \"/home/user/file\"\n result = get_file_extension(path)\n self.assertEqual(result, \"\")",
"def get_file_ext(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[1]",
"def file_ext(path):\n result = os.path.splitext(path)[1]\n return result",
"def get_file_ext(path):\n # Test if the path exist and if it is a file (not a directory):\n if not os.path.isfile(path):\n # If it is a directory, check if it is a mff directory\n if path[-3:].lower() != \"mff\":\n raise ValueError(\"The provided path (\" + path + \") is not valid.\")\n # Find file extension :\n file, ext = os.path.splitext(path)\n # Be sure to be in lowercase :\n ext = ext.lower()\n return file, ext",
"def _get_extension_from_string(path):\n file_name_parts = os.path.basename(path).split('.')\n if len(file_name_parts) == 1: # no periods in file name\n return ''\n if len(file_name_parts) > 2: # two or more periods in file name\n return '.'.join(file_name_parts[-2:])\n return file_name_parts[-1] # one period in file name",
"def getExtension(path: unicode, extLevel: int) -> unicode:\n ...",
"def test_get_filename_extension(self):\r\n u = Uploader()\r\n filename = \"image.png\"\r\n err_msg = \"The extension should be PNG\"\r\n assert u.get_filename_extension(filename) == 'png', err_msg\r\n filename = \"image.jpg\"\r\n err_msg = \"The extension should be JPEG\"\r\n assert u.get_filename_extension(filename) == 'jpeg', err_msg\r\n filename = \"imagenoextension\"\r\n err_msg = \"The extension should be None\"\r\n assert u.get_filename_extension(filename) == None, err_msg",
"def getFilenameExtension(path):\n return os.path.splitext(os.path.normcase(path))[1]",
"def test_get_ext(self):\r\n filename_str = 'http://www.example.com/path/video.mp4'\r\n output = get_ext(filename_str)\r\n self.assertEqual(output, 'mp4')",
"def _has_extension(self, path):\r\n if re.match(r'.*\\\\.*\\..*$', path):\r\n return True",
"def get_file_name_with_ext(path: str) -> str:\n return os.path.basename(path)",
"def get_file_extension(fpath):\n return str(fpath).split(\".\")[-1]",
"def _getFileExtension( filepath ):\r\n file = os.path.splitext(filepath.lower())\r\n if len( file ):\r\n return file[1].replace( '.', '' )\r\n else:\r\n return filepath",
"def path_extension(self):\r\n return self.path.split('/')[-1].split('.')[-1]",
"def get_extension(filename: str) -> str:\n return Path(filename).suffix[1:]",
"def get_file_extension(file_path):\n _ext = os.path.splitext(file_path)[-1]\n if _ext:\n return _ext[1:] if _ext.startswith('.') else _ext\n else:\n return \"\"",
"def get_extension(filename: str) -> str:\n return filename.split(\".\")[-1]",
"def getExtension(filename):\n return filename[filename.rfind('.'):]",
"def file_check(file):\n # unpacking the tuple\n file_name, file_extension = os.path.splitext(file)\n return file_extension",
"def ext(self):\n return os.path.splitext(self.path)[1]",
"def getExtension(self, fileName):\r\n print 'getExtension():' \r\n extension = split(basename(fileName), '.')[1]\r\n return extension",
"def get_file_ext(filename):\n return filename.rsplit('.', 1)[1]",
"def filepath_ext(filepath: str) -> str:\n return pathlib.Path(filepath).suffix",
"def find_extension(file):\n\n index_ext = file.name.rfind('.')\n if index_ext != -1:\n return file.name[index_ext+1:]\n # else: we raise an exception because\n # we can't find any extension",
"def get_fileext(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[1]",
"def get_file_extension(fname):\n _, ext = os.path.splitext(fname)\n return ext"
] | [
"0.86800325",
"0.81086683",
"0.8072279",
"0.7949813",
"0.79368246",
"0.7920298",
"0.7789062",
"0.76943505",
"0.755353",
"0.75219536",
"0.749016",
"0.74370855",
"0.7421382",
"0.7406953",
"0.7355715",
"0.73351365",
"0.733208",
"0.7278737",
"0.72730386",
"0.71788836",
"0.7164469",
"0.71639735",
"0.7122828",
"0.7075083",
"0.70591754",
"0.7059164",
"0.70519197",
"0.7048407",
"0.7030765",
"0.7011309"
] | 0.8825947 | 0 |
Test getting file extension from path. | def test_get_file_with_dots_extension(self) -> None:
path = "/home/user/file.name.ext2"
result = get_file_extension(path)
self.assertEqual(result, "ext2") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_file_extension(self) -> None:\n path = \"/home/user/file.ext\"\n result = get_file_extension(path)\n self.assertEqual(result, \"ext\")",
"def extension_from_path(path):\n extension = path.split(\".\")[-1]\n return extension",
"def _get_ext(self, path):\n return os.path.splitext(path)[1][1:]",
"def getExtension(path):\n\tfrom os.path import splitext\n\treturn splitext(path)[1]",
"def test_get_file_without_extension(self) -> None:\n path = \"/home/user/file\"\n result = get_file_extension(path)\n self.assertEqual(result, \"\")",
"def get_file_ext(path: str) -> str:\n return os.path.splitext(os.path.basename(path))[1]",
"def file_ext(path):\n result = os.path.splitext(path)[1]\n return result",
"def get_file_ext(path):\n # Test if the path exist and if it is a file (not a directory):\n if not os.path.isfile(path):\n # If it is a directory, check if it is a mff directory\n if path[-3:].lower() != \"mff\":\n raise ValueError(\"The provided path (\" + path + \") is not valid.\")\n # Find file extension :\n file, ext = os.path.splitext(path)\n # Be sure to be in lowercase :\n ext = ext.lower()\n return file, ext",
"def _get_extension_from_string(path):\n file_name_parts = os.path.basename(path).split('.')\n if len(file_name_parts) == 1: # no periods in file name\n return ''\n if len(file_name_parts) > 2: # two or more periods in file name\n return '.'.join(file_name_parts[-2:])\n return file_name_parts[-1] # one period in file name",
"def getExtension(path: unicode, extLevel: int) -> unicode:\n ...",
"def test_get_filename_extension(self):\r\n u = Uploader()\r\n filename = \"image.png\"\r\n err_msg = \"The extension should be PNG\"\r\n assert u.get_filename_extension(filename) == 'png', err_msg\r\n filename = \"image.jpg\"\r\n err_msg = \"The extension should be JPEG\"\r\n assert u.get_filename_extension(filename) == 'jpeg', err_msg\r\n filename = \"imagenoextension\"\r\n err_msg = \"The extension should be None\"\r\n assert u.get_filename_extension(filename) == None, err_msg",
"def getFilenameExtension(path):\n return os.path.splitext(os.path.normcase(path))[1]",
"def test_get_ext(self):\r\n filename_str = 'http://www.example.com/path/video.mp4'\r\n output = get_ext(filename_str)\r\n self.assertEqual(output, 'mp4')",
"def _has_extension(self, path):\r\n if re.match(r'.*\\\\.*\\..*$', path):\r\n return True",
"def get_file_name_with_ext(path: str) -> str:\n return os.path.basename(path)",
"def get_file_extension(fpath):\n return str(fpath).split(\".\")[-1]",
"def _getFileExtension( filepath ):\r\n file = os.path.splitext(filepath.lower())\r\n if len( file ):\r\n return file[1].replace( '.', '' )\r\n else:\r\n return filepath",
"def path_extension(self):\r\n return self.path.split('/')[-1].split('.')[-1]",
"def get_extension(filename: str) -> str:\n return Path(filename).suffix[1:]",
"def get_file_extension(file_path):\n _ext = os.path.splitext(file_path)[-1]\n if _ext:\n return _ext[1:] if _ext.startswith('.') else _ext\n else:\n return \"\"",
"def get_extension(filename: str) -> str:\n return filename.split(\".\")[-1]",
"def getExtension(filename):\n return filename[filename.rfind('.'):]",
"def file_check(file):\n # unpacking the tuple\n file_name, file_extension = os.path.splitext(file)\n return file_extension",
"def ext(self):\n return os.path.splitext(self.path)[1]",
"def getExtension(self, fileName):\r\n print 'getExtension():' \r\n extension = split(basename(fileName), '.')[1]\r\n return extension",
"def get_file_ext(filename):\n return filename.rsplit('.', 1)[1]",
"def filepath_ext(filepath: str) -> str:\n return pathlib.Path(filepath).suffix",
"def find_extension(file):\n\n index_ext = file.name.rfind('.')\n if index_ext != -1:\n return file.name[index_ext+1:]\n # else: we raise an exception because\n # we can't find any extension",
"def get_fileext(file_path):\n\n # Get rid of directories and etc\n just_file = os.path.basename(file_path)\n\n # Now we return just the base name\n return os.path.splitext(just_file)[1]",
"def get_file_extension(fname):\n _, ext = os.path.splitext(fname)\n return ext"
] | [
"0.8825947",
"0.81086683",
"0.8072279",
"0.7949813",
"0.79368246",
"0.7920298",
"0.7789062",
"0.76943505",
"0.755353",
"0.75219536",
"0.749016",
"0.74370855",
"0.7421382",
"0.7406953",
"0.7355715",
"0.73351365",
"0.733208",
"0.7278737",
"0.72730386",
"0.71788836",
"0.7164469",
"0.71639735",
"0.7122828",
"0.7075083",
"0.70591754",
"0.7059164",
"0.70519197",
"0.7048407",
"0.7030765",
"0.7011309"
] | 0.86800325 | 1 |
Test if record is a valid dataset file. | def test_record_is_dataset_file(self) -> None:
path = "/home/user/dataset.record"
result = is_dataset_file(path)
self.assertTrue(result) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_valid(self, dataset):\n pass",
"def validate_dataset(self, path: str = None) -> bool:\n checked = {} # type: Dict[str, Any]\n # Check path exists\n path = path if path is not None else self.path\n if not os.path.exists(path):\n raise ValueError('The path to the dataset does not exists! ({:s})'.format(path))\n # Check readme\n checked['readme'] = os.path.exists(os.path.join(path, 'Readme.md'))\n # Check the different file formats\n file_formats = {\n 'simple-zip': 'single_oscillation_{:04d}.zip',\n 'advanced-gz': 'single_oscillation_0-2_{:04d}.gz',\n 'big2-gz': 'single_oscillation_01-2_{:04d}.gz',\n }\n for file_key, file_format in file_formats.items():\n file_count = 0\n while os.path.exists(os.path.join(path, file_format.format(file_count))):\n file_count += 1\n checked[file_key] = file_count\n # Test for valid dataset\n if checked['readme'] and checked['simple-zip'] == 64:\n self.dataset = 'simple'\n self._file_format = 'single_oscillation_{:04d}.zip'\n self._max_file_count = 64\n return True\n elif checked['advanced-gz'] == 1024:\n self.dataset = 'big'\n self._file_format = 'single_oscillation_0-2_{:04d}.gz'\n self._max_file_count = 1024\n return True\n elif checked['big2-gz'] == 1024:\n self.dataset = 'big2'\n self._file_format = 'single_oscillation_01-2_{:04d}.gz'\n self._max_file_count = 1024\n return True\n return False",
"def _isvalid_file(filename):\r\n thisisavalidfile = True\r\n if (filename[0] == \".\") or (filename[0] == \"_\") or not ((filename.split(\".\")[-1] == \"txt\") or (filename.split(\".\")[-1] == \"csv\")):\r\n thisisavalidfile = False\r\n\r\n return thisisavalidfile",
"def test_pbtxt_is_dataset_file(self) -> None:\n path = \"/home/user/dataset.pbtxt\"\n result = is_dataset_file(path)\n self.assertFalse(result)",
"def validate_dataset(self):\n pass",
"def is_valid_file(ext, argument):\n formats = {\n 'input_dataset_path': ['csv', 'txt'],\n 'output_dataset_path': ['csv'],\n 'output_plot_path': ['png'],\n 'input_model_path': ['pkl']\n }\n return ext in formats[argument]",
"def valid_for(obj):\n\n if not obj.filedata:\n return False\n\n #hexstring = \"cffaedfe07000001030000800200\"\n return True",
"def is_data_by_filename(fname):\n return \"Run2017\" in fname",
"def test_read_file_invalid():\n\tfrom ..skySurvey import SkySurvey\n\tfile_list = 0\n\ttry:\n\t\tSkySurvey(file_list = file_list)\n\texcept TypeError:\n\t\tassert True\n\telse:\n\t\tassert False",
"def _is_valid(self, *args, **kwargs):\n fn = args[0]\n if not fn.endswith(self._suffix):\n return False\n try:\n with h5py.File(fn, \"r\") as f:\n if \"arbor_type\" not in f.attrs:\n return False\n if f.attrs[\"arbor_type\"].astype(str) != \"YTreeArbor\":\n return False\n except BaseException:\n return False\n return True",
"def isRecordNameValid(record):\n # Split the string with the record separator ':'\n splitted = record.split(':')\n # There must be 5 values - year:month:day:hour:minute\n if len(splitted) != 5:\n # Not valid - more or less than 5 values\n return False\n # There are 5 values - check each one if is a number\n for x in splitted:\n # If one of the values is not a number - record is not valid\n if not isNumber(x):\n return False\n # The record is valid\n return True",
"def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data",
"def is_dataset_exported(filename):\n try:\n with open(filename):\n return True\n except IOError:\n return False",
"def test_with_valid_input(self):\n for dataset_type in ['regular', 'raw', 'REGULAR', 'RAW']:\n try:\n check_dataset_type(dataset_type)\n except ValueError:\n self.fail(\"Dataset {0} should be valid\".format(dataset_type))",
"def verify_file(file_path):\n print(\"Verifying dataset file {}\".format(file_path))\n\n with open(file_path, mode='rb') as f:\n data = pickle.load(f, encoding='latin1')\n print(\"Number of records: {}\".format(len(data)))\n\n for record in data:\n try:\n check_record(record)\n except AssertionError:\n print(\"File with error: {}\".format(record[\"image_name\"]))\n print(\"Content of record: {}\".format(record))\n raise",
"def is_record_with_cover(record):\n if '$schema' in record:\n schema = record[\"$schema\"]\n if schema.endswith(\"document-v1.0.0.json\") or schema.endswith(\n \"series-v1.0.0.json\"\n ):\n return True\n return False",
"def isValid(self):\n return self.file_name != \"\" and self.line_number != 0",
"def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)",
"def _is_valid(self, *args, **kwargs):\n fn = args[0]\n if not fn.endswith(\".h5\"):\n return False\n try:\n with h5py.File(fn, \"r\") as f:\n if \"arbor_type\" not in f.attrs:\n return False\n if f.attrs[\"arbor_type\"].astype(str) != \"ArborArbor\":\n return False\n except BaseException:\n return False\n return True",
"def is_valid_file(self, file_path):\n return True",
"def task_validate_rawdata_file(pk):\n\n # init and checks\n valid = False\n logger = Logger.get_logger(StringIO())\n try:\n df = Data.objects.get(id=pk)\n assert df.kind == 'rd_file'\n tr = df.content_object\n except:\n logger.log('ERROR')\n return valid\n\n try:\n logger.log('looking at raw data file with pk: %s' % pk)\n rd, sr = read_hdf5_arc(df.file.path)\n logger.log('found rd_file: %s' % df.name)\n len_rd_sec = rd.shape[0] / sr\n logger.log('found data in %d channels, for %d sec' % (\n rd.shape[1], len_rd_sec))\n\n # TODO: more checks?\n\n logger.log('rd_file passed all checks')\n valid = True\n except Exception, ex:\n logger.log('ERROR: rawdata file check: %s' % str(ex))\n finally:\n df.save()\n tr.valid_rd_log = logger.get_content()\n tr.save()\n return valid",
"def check_record(self, mask, fullpath):\r\n if self.is_carved_gzip:\r\n decode_error = False\r\n # Flag conflicts\r\n # These flag combinations can not exist together\r\n type_err = \"FolderEvent\" in mask[0] and \"FileEvent\" in mask[0]\r\n fol_cr_err = \"FolderEvent\" in mask[0] and \"Created\" in mask[1] and \\\r\n \"FolderCreated\" not in mask[1]\r\n fil_cr_err = \"FileEvent\" in mask[0] and \"FolderCreated\" in mask[1]\r\n lnk_err = \"SymbolicLink\" in mask[0] and \"HardLink\" in mask[0]\r\n h_lnk_err = \"HardLink\" not in mask[0] and \"LastHardLink\" in mask[1]\r\n h_lnk_err_2 = \"LastHardLink\" in mask[1] and \";Removed\" not in mask[1]\r\n n_used_err = \"NOT_USED-0x0\" in mask[1]\r\n ver_error = \"ItemCloned\" in mask[1] and self.dls_version == 1\r\n\r\n # If any error exists return false to caller\r\n if type_err or \\\r\n fol_cr_err or \\\r\n fil_cr_err or \\\r\n lnk_err or \\\r\n h_lnk_err or \\\r\n h_lnk_err_2 or \\\r\n n_used_err or \\\r\n decode_error or \\\r\n ver_error:\r\n return False\r\n else:\r\n # Record passed tests and may be valid\r\n # return true so that record is included in output reports\r\n return True\r\n else:\r\n # Return true. fsevent file was not identified as being carved\r\n return True",
"def check_dataset_format(ds_format):\n if ds_format.lower() not in DATASET_FORMATS.keys():\n raise ValueError(\"dataset_format is expected to be one of %s. '%s' is not valid\" % (\n ', '.join(DATASET_FORMATS.keys()), ds_format,))",
"def __datatype_check(self, record_attribute, attribute_schema):\n if 'INT' in attribute_schema[TYPE_KEY].upper():\n if record_attribute.isdigit():\n return True\n elif attribute_schema[TYPE_KEY].upper() in DECIMAL_TYPES:\n if record_attribute.isdecimal():\n return True\n elif 'CHAR' in attribute_schema[TYPE_KEY].upper() \\\n or 'TEXT' in attribute_schema[TYPE_KEY].upper():\n if type(record_attribute) is str:\n return True\n else:\n IS_VALID_FILE = False\n return False",
"def process(self, record):\n is_data = True\n if self.file_path.split('.')[-1] == 'csv':\n if self.header_skip:\n logging.info('Skipping header data... {}'.format(record))\n self.header_skip = False\n is_data = False\n return [(record, None, None, is_data)]\n record_attributes = list(csv.reader([record]))[0]\n if len(record_attributes) != len(self.schema[FIELDS_KEY]):\n if len(record_attributes) > 1 or not record_attributes[0].strip().isdigit():\n IS_VALID_FILE = False\n is_data = None\n return [(record, None, None, is_data)]\n for record_attribute, attribute_schema in zip(\n record_attributes, self.schema[FIELDS_KEY]):\n is_valid_datatype_check = self.__datatype_check(record_attribute, attribute_schema)\n is_valid_null_check = self.__null_check(record_attribute, attribute_schema)\n return [(record, is_valid_datatype_check, is_valid_null_check, is_data)]",
"def isValidPDSFile(filename):\n\t# Filesize check\n\timport os\n\tfileBytes = os.path.getsize(filename)\n\tfrom parser import Parser\n\tparser = Parser()\n\tlabels = parser.parse(open_pds(filename))\n\texpectedFileBytes = int(labels[\"FILE_RECORDS\"]) * int(labels[\"RECORD_BYTES\"])\n\t\n\tvalidityChecks = (# lambda : True, # lambda : False,\n\t\tlambda : fileBytes == expectedFileBytes and True or False,)\n\tcheckVals = (check() for check in validityChecks)\n\t\n\treturn False not in checkVals",
"def valid(self):\r\n if self.file_exists and len(self.missing_columns) == 0 and len(self.veg_columns) > 0 and \\\r\n len(self.lat_errors) == 0 and len(self.lon_errors) == 0 and len(self.time_errors) == 0 and len(self.date_errors) == 0:\r\n return True\r\n else:\r\n return False",
"def __null_check(self, record_attribute, attribute_schema):\n if attribute_schema[NULLABLE_KEY]:\n return True\n elif record_attribute is not None:\n return True\n else:\n IS_VALID_FILE = False\n return False",
"def test_is_check_filename(self):\n self.assertTrue(check_filename('sample.csv'))",
"def is_data_valid(self):\n errors = []\n for state, filename in self.symbols.items():\n if state is None or state == \"\":\n errors.append(\"Image {} has no state\".format(filename))\n error, file_type = self.check_image(filename)\n if error:\n errors.append(error)\n\n if len(errors) > 0:\n error_msg = os.linesep.join(errors)\n return False, error_msg\n\n return True, \"\""
] | [
"0.71040785",
"0.6863867",
"0.68257785",
"0.67931664",
"0.67237216",
"0.6699486",
"0.6648243",
"0.65024203",
"0.64735746",
"0.6458235",
"0.6451198",
"0.64415586",
"0.64020693",
"0.6372555",
"0.6347536",
"0.6343386",
"0.6315279",
"0.629199",
"0.6281639",
"0.625653",
"0.62462723",
"0.621189",
"0.6207334",
"0.6171962",
"0.61604536",
"0.6144805",
"0.61311555",
"0.60929525",
"0.60851324",
"0.6074183"
] | 0.80596375 | 0 |
Test getting predefined config path for TF image recognition models. | def test_get_predefined_tf_image_recognition_config_path(self) -> None:
self._assert_predefined_config_path(
framework="tensorflow",
domain="image_recognition",
domain_flavour="",
expected_filename="image_recognition.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_onnx_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_tf_recommendation_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"recommendation\",\n domain_flavour=\"\",\n expected_filename=\"recommendation.yaml\",\n )",
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"foo\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_tf_object_detection_ssd_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"ssd\",\n expected_filename=\"object_detection_ssd.yaml\",\n )",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def get_run_config():\n run_config = tf.contrib.learn.RunConfig()\n run_config = run_config.replace(model_dir=FLAGS.model_dir)\n return run_config",
"def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"",
"def get_configs_from_multiple_files(model_config_path=\"\",\n train_config_path=\"\",\n train_input_config_path=\"\",\n eval_config_path=\"\",\n eval_input_config_path=\"\",\n graph_rewriter_config_path=\"\"):\n configs = {}\n if model_config_path:\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile(model_config_path, \"r\") as f:\n text_format.Merge(f.read(), model_config)\n configs[\"model\"] = model_config\n\n if train_config_path:\n train_config = train_pb2.TrainConfig()\n with tf.gfile.GFile(train_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_config)\n configs[\"train_config\"] = train_config\n\n if train_input_config_path:\n train_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(train_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_input_config)\n configs[\"train_input_config\"] = train_input_config\n\n if eval_config_path:\n eval_config = eval_pb2.EvalConfig()\n with tf.gfile.GFile(eval_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_config)\n configs[\"eval_config\"] = eval_config\n\n if eval_input_config_path:\n eval_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(eval_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_input_config)\n configs[\"eval_input_configs\"] = [eval_input_config]\n\n if graph_rewriter_config_path:\n configs[\"graph_rewriter_config\"] = get_graph_rewriter_config_from_file(\n graph_rewriter_config_path)\n\n return configs",
"def fetch_predictor_config():\n try:\n f = open(os.path.join(CONFIG['results_path'], CONFIG['predictor_model_dir'], 'config.csv'), 'r')\n f.readline()\n values = f.readline().split(',')\n f.close()\n return int(values[0]), str(values[2]).strip()\n except:\n # If this happens, check the path.\n print('ERROR in fetching predictor config. Default values used. These may fail.')\n return CONFIG['training_img_size'], CONFIG['training_set_image_type']",
"def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]",
"def get_tf_config(config_path):\n return modeling.BertConfig.from_json_file(config_path).__dict__",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def get_model_dir(self, context=None):\n if context == 'eval' and \\\n 'eval' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['eval'].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config['eval']['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n elif context == 'train' and \\\n 'train' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['train'].keys() and \\\n 'model_type' in self.pipeline_config['train'].keys():\n return self.pipeline_config['train']['model_path']\n elif context == 'postprocess' and \\\n 'postprocess' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config[context].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config[context]['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n else:\n # FIXME: now having a class method and also a function named get_output_dir, confusing!\n output_dir = self.get_output_dir()\n print(f'Getting model from {output_dir}')\n return output_dir",
"def get_config():\n config = ml_collections.ConfigDict()\n\n # Which model to use -- see ./models.py\n config.model_name = 'ViT-B_32'\n # Where to store training logs.\n config.log_dir = '.'\n\n # Number of steps to measure.\n config.steps = 30\n # Number of steps before measuring.\n config.initial_steps = 10\n\n # Batch size\n config.batch = 0\n # Number of output classes.\n config.num_classes = 0\n # Image size (width=height).\n config.image_size = 0\n\n config.train = 'inference_time'\n\n return config",
"def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.netModel')\n configNum = 1\n i = 0\n configNumString = '%(c)04d' % {'c':configNum}\n while i < len(filenames):\n configNumString = '%(c)04d' % {'c':configNum}\n if (filenames[i][:4]==configNumString):\n configNum += 1\n i = 0\n else:\n i += 1\n return os.path.realpath('.')+'/.netModel/'+configNumString",
"def test_text_classifier_create_from_path(self):\n pass",
"def get_log_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, _DEFAULT_FILENAME_LOG)",
"def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))",
"def test_get_onnx_framework_from_path(self) -> None:\n path = \"/home/user/model.onnx\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"onnxrt\")",
"def predict_image_path(self) -> str:\n return join(self.directory_path, 'predict-images')"
] | [
"0.75592035",
"0.7412118",
"0.72849274",
"0.717529",
"0.6809559",
"0.6795905",
"0.6554628",
"0.64774716",
"0.64007187",
"0.6367952",
"0.63568515",
"0.62823087",
"0.6041288",
"0.59696573",
"0.59272754",
"0.5859404",
"0.5838563",
"0.58267045",
"0.5783078",
"0.575788",
"0.5698276",
"0.5604088",
"0.5582831",
"0.55570525",
"0.55116534",
"0.5493324",
"0.5481892",
"0.5473402",
"0.5466473",
"0.5462457"
] | 0.83963805 | 0 |
Test getting predefined config path for TF object detection models. | def test_get_predefined_tf_object_detection_config_path(self) -> None:
self._assert_predefined_config_path(
framework="tensorflow",
domain="object_detection",
domain_flavour="",
expected_filename="object_detection.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"foo\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_tf_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_tf_object_detection_ssd_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"ssd\",\n expected_filename=\"object_detection_ssd.yaml\",\n )",
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_tf_recommendation_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"recommendation\",\n domain_flavour=\"\",\n expected_filename=\"recommendation.yaml\",\n )",
"def test_get_predefined_onnx_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def get_tf_config(config_path):\n return modeling.BertConfig.from_json_file(config_path).__dict__",
"def get_run_config():\n run_config = tf.contrib.learn.RunConfig()\n run_config = run_config.replace(model_dir=FLAGS.model_dir)\n return run_config",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def get_configs_from_multiple_files(model_config_path=\"\",\n train_config_path=\"\",\n train_input_config_path=\"\",\n eval_config_path=\"\",\n eval_input_config_path=\"\",\n graph_rewriter_config_path=\"\"):\n configs = {}\n if model_config_path:\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile(model_config_path, \"r\") as f:\n text_format.Merge(f.read(), model_config)\n configs[\"model\"] = model_config\n\n if train_config_path:\n train_config = train_pb2.TrainConfig()\n with tf.gfile.GFile(train_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_config)\n configs[\"train_config\"] = train_config\n\n if train_input_config_path:\n train_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(train_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_input_config)\n configs[\"train_input_config\"] = train_input_config\n\n if eval_config_path:\n eval_config = eval_pb2.EvalConfig()\n with tf.gfile.GFile(eval_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_config)\n configs[\"eval_config\"] = eval_config\n\n if eval_input_config_path:\n eval_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(eval_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_input_config)\n configs[\"eval_input_configs\"] = [eval_input_config]\n\n if graph_rewriter_config_path:\n configs[\"graph_rewriter_config\"] = get_graph_rewriter_config_from_file(\n graph_rewriter_config_path)\n\n return configs",
"def test_model_checkpoint_path(tmpdir, logger_version, expected):\n tutils.reset_seed()\n model = EvalModelTemplate()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n overfit_pct=0.2,\n max_epochs=5,\n logger=logger\n )\n trainer.fit(model)\n\n ckpt_version = Path(trainer.ckpt_path).parent.name\n assert ckpt_version == expected",
"def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title = \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)",
"def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]",
"def YumGetPathToConfig(vm):\n raise NotImplementedError",
"def get_model_dir(self, context=None):\n if context == 'eval' and \\\n 'eval' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['eval'].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config['eval']['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n elif context == 'train' and \\\n 'train' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['train'].keys() and \\\n 'model_type' in self.pipeline_config['train'].keys():\n return self.pipeline_config['train']['model_path']\n elif context == 'postprocess' and \\\n 'postprocess' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config[context].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config[context]['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n else:\n # FIXME: now having a class method and also a function named get_output_dir, confusing!\n output_dir = self.get_output_dir()\n print(f'Getting model from {output_dir}')\n return output_dir",
"def test_load_model_config(self) -> None:\n result = load_model_config()\n self.assertIs(type(result), dict)\n self.assertIsNot(result, {})",
"def check_config( config: 'bittensor.Config' ):\n assert config.neuron.batch_size_train > 0, \"batch_size_train must be a positive value\"\n assert config.neuron.learning_rate > 0, \"learning_rate must be a positive value.\"\n bittensor.logging.check_config( config )\n bittensor.wallet.check_config( config )\n bittensor.subtensor.check_config( config )\n bittensor.metagraph.check_config( config )\n bittensor.dataloader.check_config( config )\n bittensor.dendrite.check_config( config )\n bittensor.axon.check_config( config )\n GPT2Nucleus.check_config( config )\n SGMOERouter.check_config( config )\n full_path = os.path.expanduser('{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name + \"-\" + config.wallet.hotkey, config.neuron.name ))\n config.neuron.full_path = os.path.expanduser(full_path)\n config.neuron.tensorboard_dir = config.neuron.full_path + '/tensorboard-' + '-'.join(str(datetime.now()).split())\n if not os.path.exists(config.neuron.full_path):\n os.makedirs(config.neuron.full_path)",
"def get_config(seed, shot):\n if args.coco:\n # COCO\n assert args.two_stage, 'Only supports novel weights for COCO now'\n\n if args.novel_finetune:\n # Fine-tune novel classifier\n ITERS = {\n 1: (10000, 500),\n 2: (10000, 1500),\n 3: (10000, 1500),\n 5: (10000, 1500),\n 10: (10000, 2000),\n 30: (10000, 6000),\n }\n mode = 'novel'\n\n assert not args.fc and not args.unfreeze\n else:\n # Fine-tune entire classifier\n ITERS = {\n 1: (14400, 16000),\n 2: (28800, 32000),\n 3: (43200, 48000),\n 5: (72000, 80000),\n 10: (144000, 160000),\n 30: (216000, 240000),\n }\n mode = 'all'\n split = temp_split = ''\n temp_mode = mode\n\n config_dir = 'configs/COCO-detection'\n ckpt_dir = 'checkpoints/coco/faster_rcnn'\n base_cfg = '../../Base-RCNN-FPN.yaml'\n else:\n # PASCAL VOC\n assert not args.two_stage, 'Only supports random weights for PASCAL now'\n\n ITERS = {\n 1: (3500, 4000),\n 2: (7000, 8000),\n 3: (10500, 12000),\n 5: (17500, 20000),\n 10: (35000, 40000),\n }\n split = 'split{}'.format(args.split)\n mode = 'all{}'.format(args.split)\n # temp_split = 'split1'\n # temp_mode = 'all1'\n temp_split=split\n temp_mode = mode\n\n config_dir = 'configs/PascalVOC-detection'\n ckpt_dir = 'checkpoints/voc/faster_rcnn'\n base_cfg = '../../../Base-RCNN-FPN.yaml'\n\n seed_str = 'seed{}'.format(seed) if seed != 0 else ''\n fc = '_fc' if args.fc else ''\n unfreeze = '_unfreeze' if args.unfreeze else ''\n # Read an example config file for the config parameters\n temp = os.path.join(\n temp_split, 'faster_rcnn_R_101_FPN_ft{}_{}_1shot{}'.format(\n fc, temp_mode, unfreeze)\n )\n print('temp_file:', temp)\n config = os.path.join(args.root, config_dir, temp + '.yaml')\n print('config_file:', config)\n\n prefix = 'faster_rcnn_R_101_FPN_ft{}_{}_{}shot{}{}'.format(\n fc, mode, shot, unfreeze, args.suffix)\n print('prefix_file:', prefix)\n\n output_dir = os.path.join(args.root, ckpt_dir, seed_str)\n print('output_dir',output_dir)\n os.makedirs(output_dir, exist_ok=True)\n \n save_dir = os.path.join(\n args.root, config_dir, split, seed_str,\n )\n print('save_dir',save_dir)\n os.makedirs(save_dir, exist_ok=True)\n save_file = os.path.join(save_dir, prefix + '.yaml')\n print('save_file' , save_file)\n\n configs = load_yaml_file(config)\n print('reading from this config file ',config)\n configs['_BASE_'] = base_cfg\n configs['DATASETS']['TRAIN'] = make_tuple(configs['DATASETS']['TRAIN'])\n configs['DATASETS']['TEST'] = make_tuple(configs['DATASETS']['TEST'])\n if args.coco and not args.novel_finetune:\n ckpt_path = os.path.join(output_dir, prefix, 'model_reset_combine.pth')\n if not os.path.exists(ckpt_path):\n src2 = os.path.join(\n output_dir, 'faster_rcnn_R_101_FPN_ft_novel_{}shot{}'.format(\n shot, args.suffix),\n 'model_final.pth',\n )\n if not os.path.exists(src2):\n print('Novel weights do not exist. 
Please run with the ' + \\\n '--novel-finetune flag first.')\n assert False\n combine_cmd = 'python tools/ckpt_surgery.py --coco --method ' + \\\n 'combine --src1 checkpoints/coco/faster_rcnn/faster_rcnn' + \\\n '_R_101_FPN_base/model_final.pth --src2 {}'.format(src2) + \\\n ' --save-dir {}'.format(os.path.join(output_dir, prefix))\n run_cmd(combine_cmd)\n assert os.path.exists(ckpt_path)\n configs['MODEL']['WEIGHTS'] = ckpt_path\n elif not args.coco:\n configs['MODEL']['WEIGHTS'] = configs['MODEL']['WEIGHTS'].replace(\n 'base1', 'base' + str(args.split))\n for dset in ['TRAIN', 'TEST']:\n configs['DATASETS'][dset] = (\n configs['DATASETS'][dset][0].replace(\n temp_mode, 'all' + str(args.split)),\n )\n configs['DATASETS']['TRAIN'] = (\n configs['DATASETS']['TRAIN'][0].replace(\n '1shot', str(shot) + 'shot'\n ) + ('_{}'.format(seed_str) if seed_str != '' else ''),\n )\n configs['SOLVER']['BASE_LR'] = args.lr\n configs['SOLVER']['MAX_ITER'] = ITERS[shot][1]\n configs['SOLVER']['STEPS'] = (ITERS[shot][0],)\n configs['SOLVER']['CHECKPOINT_PERIOD'] = ITERS[shot][1] // args.ckpt_freq\n configs['OUTPUT_DIR'] = os.path.join(output_dir, prefix)\n\n if seed != 0:\n with open(save_file, 'w') as fp:\n yaml.dump(configs, fp)\n\n return save_file, configs",
"def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def _assert_predefined_config_path(\n self,\n framework: str,\n domain: str,\n domain_flavour: str,\n expected_filename: str,\n ) -> None:\n result = get_predefined_config_path(framework, domain, domain_flavour)\n expected = os.path.join(\n os.path.abspath(\n os.path.dirname(\n inspect.getfile(get_predefined_config_path),\n ),\n ),\n \"configs\",\n \"predefined_configs\",\n f\"{framework}\",\n expected_filename,\n )\n self.assertEqual(result, expected)\n self.assertEqual(os.path.isfile(result), True)",
"def test_validate_nagl_model_path(model_name):\n model_path = validate_nagl_model_path(model_name)\n assert os.path.exists(model_path)"
] | [
"0.7654645",
"0.73946184",
"0.72947437",
"0.70547116",
"0.70378405",
"0.67137825",
"0.6437595",
"0.6437529",
"0.6192349",
"0.61044425",
"0.6100769",
"0.6069273",
"0.60661966",
"0.59318006",
"0.59008956",
"0.5850615",
"0.5848458",
"0.5828326",
"0.56665957",
"0.5662301",
"0.5632772",
"0.56302494",
"0.5584547",
"0.55248654",
"0.55212563",
"0.550086",
"0.5481983",
"0.548047",
"0.5472406",
"0.5464651"
] | 0.8449237 | 0 |
Test getting predefined config path for TF object detection models. | def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:
self._assert_predefined_config_path(
framework="tensorflow",
domain="object_detection",
domain_flavour="foo",
expected_filename="object_detection.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_tf_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_tf_object_detection_ssd_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"ssd\",\n expected_filename=\"object_detection_ssd.yaml\",\n )",
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_tf_recommendation_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"recommendation\",\n domain_flavour=\"\",\n expected_filename=\"recommendation.yaml\",\n )",
"def test_get_predefined_onnx_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def get_tf_config(config_path):\n return modeling.BertConfig.from_json_file(config_path).__dict__",
"def get_run_config():\n run_config = tf.contrib.learn.RunConfig()\n run_config = run_config.replace(model_dir=FLAGS.model_dir)\n return run_config",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def get_configs_from_multiple_files(model_config_path=\"\",\n train_config_path=\"\",\n train_input_config_path=\"\",\n eval_config_path=\"\",\n eval_input_config_path=\"\",\n graph_rewriter_config_path=\"\"):\n configs = {}\n if model_config_path:\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile(model_config_path, \"r\") as f:\n text_format.Merge(f.read(), model_config)\n configs[\"model\"] = model_config\n\n if train_config_path:\n train_config = train_pb2.TrainConfig()\n with tf.gfile.GFile(train_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_config)\n configs[\"train_config\"] = train_config\n\n if train_input_config_path:\n train_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(train_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_input_config)\n configs[\"train_input_config\"] = train_input_config\n\n if eval_config_path:\n eval_config = eval_pb2.EvalConfig()\n with tf.gfile.GFile(eval_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_config)\n configs[\"eval_config\"] = eval_config\n\n if eval_input_config_path:\n eval_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(eval_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_input_config)\n configs[\"eval_input_configs\"] = [eval_input_config]\n\n if graph_rewriter_config_path:\n configs[\"graph_rewriter_config\"] = get_graph_rewriter_config_from_file(\n graph_rewriter_config_path)\n\n return configs",
"def test_model_checkpoint_path(tmpdir, logger_version, expected):\n tutils.reset_seed()\n model = EvalModelTemplate()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n overfit_pct=0.2,\n max_epochs=5,\n logger=logger\n )\n trainer.fit(model)\n\n ckpt_version = Path(trainer.ckpt_path).parent.name\n assert ckpt_version == expected",
"def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title = \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)",
"def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]",
"def YumGetPathToConfig(vm):\n raise NotImplementedError",
"def get_model_dir(self, context=None):\n if context == 'eval' and \\\n 'eval' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['eval'].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config['eval']['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n elif context == 'train' and \\\n 'train' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['train'].keys() and \\\n 'model_type' in self.pipeline_config['train'].keys():\n return self.pipeline_config['train']['model_path']\n elif context == 'postprocess' and \\\n 'postprocess' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config[context].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config[context]['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n else:\n # FIXME: now having a class method and also a function named get_output_dir, confusing!\n output_dir = self.get_output_dir()\n print(f'Getting model from {output_dir}')\n return output_dir",
"def test_load_model_config(self) -> None:\n result = load_model_config()\n self.assertIs(type(result), dict)\n self.assertIsNot(result, {})",
"def check_config( config: 'bittensor.Config' ):\n assert config.neuron.batch_size_train > 0, \"batch_size_train must be a positive value\"\n assert config.neuron.learning_rate > 0, \"learning_rate must be a positive value.\"\n bittensor.logging.check_config( config )\n bittensor.wallet.check_config( config )\n bittensor.subtensor.check_config( config )\n bittensor.metagraph.check_config( config )\n bittensor.dataloader.check_config( config )\n bittensor.dendrite.check_config( config )\n bittensor.axon.check_config( config )\n GPT2Nucleus.check_config( config )\n SGMOERouter.check_config( config )\n full_path = os.path.expanduser('{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name + \"-\" + config.wallet.hotkey, config.neuron.name ))\n config.neuron.full_path = os.path.expanduser(full_path)\n config.neuron.tensorboard_dir = config.neuron.full_path + '/tensorboard-' + '-'.join(str(datetime.now()).split())\n if not os.path.exists(config.neuron.full_path):\n os.makedirs(config.neuron.full_path)",
"def get_config(seed, shot):\n if args.coco:\n # COCO\n assert args.two_stage, 'Only supports novel weights for COCO now'\n\n if args.novel_finetune:\n # Fine-tune novel classifier\n ITERS = {\n 1: (10000, 500),\n 2: (10000, 1500),\n 3: (10000, 1500),\n 5: (10000, 1500),\n 10: (10000, 2000),\n 30: (10000, 6000),\n }\n mode = 'novel'\n\n assert not args.fc and not args.unfreeze\n else:\n # Fine-tune entire classifier\n ITERS = {\n 1: (14400, 16000),\n 2: (28800, 32000),\n 3: (43200, 48000),\n 5: (72000, 80000),\n 10: (144000, 160000),\n 30: (216000, 240000),\n }\n mode = 'all'\n split = temp_split = ''\n temp_mode = mode\n\n config_dir = 'configs/COCO-detection'\n ckpt_dir = 'checkpoints/coco/faster_rcnn'\n base_cfg = '../../Base-RCNN-FPN.yaml'\n else:\n # PASCAL VOC\n assert not args.two_stage, 'Only supports random weights for PASCAL now'\n\n ITERS = {\n 1: (3500, 4000),\n 2: (7000, 8000),\n 3: (10500, 12000),\n 5: (17500, 20000),\n 10: (35000, 40000),\n }\n split = 'split{}'.format(args.split)\n mode = 'all{}'.format(args.split)\n # temp_split = 'split1'\n # temp_mode = 'all1'\n temp_split=split\n temp_mode = mode\n\n config_dir = 'configs/PascalVOC-detection'\n ckpt_dir = 'checkpoints/voc/faster_rcnn'\n base_cfg = '../../../Base-RCNN-FPN.yaml'\n\n seed_str = 'seed{}'.format(seed) if seed != 0 else ''\n fc = '_fc' if args.fc else ''\n unfreeze = '_unfreeze' if args.unfreeze else ''\n # Read an example config file for the config parameters\n temp = os.path.join(\n temp_split, 'faster_rcnn_R_101_FPN_ft{}_{}_1shot{}'.format(\n fc, temp_mode, unfreeze)\n )\n print('temp_file:', temp)\n config = os.path.join(args.root, config_dir, temp + '.yaml')\n print('config_file:', config)\n\n prefix = 'faster_rcnn_R_101_FPN_ft{}_{}_{}shot{}{}'.format(\n fc, mode, shot, unfreeze, args.suffix)\n print('prefix_file:', prefix)\n\n output_dir = os.path.join(args.root, ckpt_dir, seed_str)\n print('output_dir',output_dir)\n os.makedirs(output_dir, exist_ok=True)\n \n save_dir = os.path.join(\n args.root, config_dir, split, seed_str,\n )\n print('save_dir',save_dir)\n os.makedirs(save_dir, exist_ok=True)\n save_file = os.path.join(save_dir, prefix + '.yaml')\n print('save_file' , save_file)\n\n configs = load_yaml_file(config)\n print('reading from this config file ',config)\n configs['_BASE_'] = base_cfg\n configs['DATASETS']['TRAIN'] = make_tuple(configs['DATASETS']['TRAIN'])\n configs['DATASETS']['TEST'] = make_tuple(configs['DATASETS']['TEST'])\n if args.coco and not args.novel_finetune:\n ckpt_path = os.path.join(output_dir, prefix, 'model_reset_combine.pth')\n if not os.path.exists(ckpt_path):\n src2 = os.path.join(\n output_dir, 'faster_rcnn_R_101_FPN_ft_novel_{}shot{}'.format(\n shot, args.suffix),\n 'model_final.pth',\n )\n if not os.path.exists(src2):\n print('Novel weights do not exist. 
Please run with the ' + \\\n '--novel-finetune flag first.')\n assert False\n combine_cmd = 'python tools/ckpt_surgery.py --coco --method ' + \\\n 'combine --src1 checkpoints/coco/faster_rcnn/faster_rcnn' + \\\n '_R_101_FPN_base/model_final.pth --src2 {}'.format(src2) + \\\n ' --save-dir {}'.format(os.path.join(output_dir, prefix))\n run_cmd(combine_cmd)\n assert os.path.exists(ckpt_path)\n configs['MODEL']['WEIGHTS'] = ckpt_path\n elif not args.coco:\n configs['MODEL']['WEIGHTS'] = configs['MODEL']['WEIGHTS'].replace(\n 'base1', 'base' + str(args.split))\n for dset in ['TRAIN', 'TEST']:\n configs['DATASETS'][dset] = (\n configs['DATASETS'][dset][0].replace(\n temp_mode, 'all' + str(args.split)),\n )\n configs['DATASETS']['TRAIN'] = (\n configs['DATASETS']['TRAIN'][0].replace(\n '1shot', str(shot) + 'shot'\n ) + ('_{}'.format(seed_str) if seed_str != '' else ''),\n )\n configs['SOLVER']['BASE_LR'] = args.lr\n configs['SOLVER']['MAX_ITER'] = ITERS[shot][1]\n configs['SOLVER']['STEPS'] = (ITERS[shot][0],)\n configs['SOLVER']['CHECKPOINT_PERIOD'] = ITERS[shot][1] // args.ckpt_freq\n configs['OUTPUT_DIR'] = os.path.join(output_dir, prefix)\n\n if seed != 0:\n with open(save_file, 'w') as fp:\n yaml.dump(configs, fp)\n\n return save_file, configs",
"def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def _assert_predefined_config_path(\n self,\n framework: str,\n domain: str,\n domain_flavour: str,\n expected_filename: str,\n ) -> None:\n result = get_predefined_config_path(framework, domain, domain_flavour)\n expected = os.path.join(\n os.path.abspath(\n os.path.dirname(\n inspect.getfile(get_predefined_config_path),\n ),\n ),\n \"configs\",\n \"predefined_configs\",\n f\"{framework}\",\n expected_filename,\n )\n self.assertEqual(result, expected)\n self.assertEqual(os.path.isfile(result), True)",
"def test_validate_nagl_model_path(model_name):\n model_path = validate_nagl_model_path(model_name)\n assert os.path.exists(model_path)"
] | [
"0.84492755",
"0.7392796",
"0.7295014",
"0.70541734",
"0.7037769",
"0.67110646",
"0.64360476",
"0.64352703",
"0.61914104",
"0.61029863",
"0.61006165",
"0.6068131",
"0.6064642",
"0.59318507",
"0.59008795",
"0.58501637",
"0.5847948",
"0.5827509",
"0.5666658",
"0.5661751",
"0.56328124",
"0.5628314",
"0.55852515",
"0.55244017",
"0.55199033",
"0.55006236",
"0.5482449",
"0.5479369",
"0.54707646",
"0.546365"
] | 0.76546496 | 1 |
Test getting predefined config path for TF NLP models. | def test_get_predefined_tf_nlp_config_path(self) -> None:
self._assert_predefined_config_path(
framework="tensorflow",
domain="nlp",
domain_flavour="",
expected_filename="nlp.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_tf_recommendation_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"recommendation\",\n domain_flavour=\"\",\n expected_filename=\"recommendation.yaml\",\n )",
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_tf_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"foo\",\n expected_filename=\"object_detection.yaml\",\n )",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def test_get_predefined_onnx_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def get_run_config():\n run_config = tf.contrib.learn.RunConfig()\n run_config = run_config.replace(model_dir=FLAGS.model_dir)\n return run_config",
"def test_get_predefined_tf_object_detection_ssd_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"ssd\",\n expected_filename=\"object_detection_ssd.yaml\",\n )",
"def get_tf_config(config_path):\n return modeling.BertConfig.from_json_file(config_path).__dict__",
"def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))",
"def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]",
"def get_model_filename(config):\n base = os.path.splitext(config['corpus'])[0]\n return '%s--%dT.model' % (base, config['T'])",
"def test_validate_nagl_model_path(model_name):\n model_path = validate_nagl_model_path(model_name)\n assert os.path.exists(model_path)",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def load_env(data_dir, model_dir):\n model_params = sketch_rnn_model.get_default_hparams()\n with tf.gfile.Open(os.path.join(model_dir, 'model_config.json'), 'r') as f:\n model_params.parse_json(f.read())\n return load_dataset(data_dir, model_params, inference_mode=True)",
"def load_config():\n model_type, run_name, run_comment, epoch, verbose = get_args()\n name = run_name + '-' + run_comment\n if model_type == \"s2s\": \n run_title = \"seq2seq\"\n else:\n run_title = \"def2vec\"\n path = \"outputs/{}/logs/{}/config.json\".format(run_title, name)\n config = None\n with open(path) as f:\n config = dict(json.load(f))\n config = load_config(eval=True)\n return (config, name, model_type)",
"def test_get_onnx_framework_from_path(self) -> None:\n path = \"/home/user/model.onnx\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"onnxrt\")",
"def setUp(self):\n cltk_data_dir = '~/cltk_data/sanskrit/model/sanskrit_models_cltk'\n INDIC_RESOURCES_PATH = os.path.expanduser(cltk_data_dir)\n\n resources_present = os.path.isdir(INDIC_RESOURCES_PATH)\n if not resources_present:\n corpus_importer = CorpusImporter('sanskrit')\n corpus_importer.import_corpus('sanskrit_models_cltk')",
"def test_validate_nagl_model_path_failed():\n with pytest.raises(FileNotFoundError):\n validate_nagl_model_path(\"does-not-exist.pt\")",
"def base_dir():\n return os.path.join(TrainFile.base_dir(), 'model')",
"def test_text_classifier_create_from_path(self):\n pass"
] | [
"0.75173897",
"0.73409075",
"0.69700027",
"0.69696283",
"0.6721619",
"0.63614434",
"0.62693655",
"0.6259654",
"0.62449867",
"0.61654174",
"0.6153191",
"0.612614",
"0.60378885",
"0.6006346",
"0.59780526",
"0.587454",
"0.5872058",
"0.5816616",
"0.5811135",
"0.5782591",
"0.5757114",
"0.57555044",
"0.572756",
"0.5725812",
"0.57054627",
"0.56645995",
"0.5631199",
"0.5613994",
"0.55838215",
"0.55784214"
] | 0.8228239 | 0 |
Test getting predefined config path for TF recommendation models. | def test_get_predefined_tf_recommendation_config_path(self) -> None:
self._assert_predefined_config_path(
framework="tensorflow",
domain="recommendation",
domain_flavour="",
expected_filename="recommendation.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_tf_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"foo\",\n expected_filename=\"object_detection.yaml\",\n )",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def get_run_config():\n run_config = tf.contrib.learn.RunConfig()\n run_config = run_config.replace(model_dir=FLAGS.model_dir)\n return run_config",
"def test_get_predefined_tf_object_detection_ssd_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"ssd\",\n expected_filename=\"object_detection_ssd.yaml\",\n )",
"def test_get_predefined_onnx_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def get_configs_from_multiple_files(model_config_path=\"\",\n train_config_path=\"\",\n train_input_config_path=\"\",\n eval_config_path=\"\",\n eval_input_config_path=\"\",\n graph_rewriter_config_path=\"\"):\n configs = {}\n if model_config_path:\n model_config = model_pb2.DetectionModel()\n with tf.gfile.GFile(model_config_path, \"r\") as f:\n text_format.Merge(f.read(), model_config)\n configs[\"model\"] = model_config\n\n if train_config_path:\n train_config = train_pb2.TrainConfig()\n with tf.gfile.GFile(train_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_config)\n configs[\"train_config\"] = train_config\n\n if train_input_config_path:\n train_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(train_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), train_input_config)\n configs[\"train_input_config\"] = train_input_config\n\n if eval_config_path:\n eval_config = eval_pb2.EvalConfig()\n with tf.gfile.GFile(eval_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_config)\n configs[\"eval_config\"] = eval_config\n\n if eval_input_config_path:\n eval_input_config = input_reader_pb2.InputReader()\n with tf.gfile.GFile(eval_input_config_path, \"r\") as f:\n text_format.Merge(f.read(), eval_input_config)\n configs[\"eval_input_configs\"] = [eval_input_config]\n\n if graph_rewriter_config_path:\n configs[\"graph_rewriter_config\"] = get_graph_rewriter_config_from_file(\n graph_rewriter_config_path)\n\n return configs",
"def _load_model_conf(path, run_id=None):\n if run_id:\n path = tracking.utils._get_model_log_dir(path, run_id)\n conf_path = os.path.join(path, \"MLmodel\")\n model = Model.load(conf_path)\n if FLAVOR_NAME not in model.flavors:\n raise Exception(\"Format '{format}' not found not in {path}.\".format(format=FLAVOR_NAME,\n path=conf_path))\n return model.flavors[FLAVOR_NAME]",
"def get_tf_config(config_path):\n return modeling.BertConfig.from_json_file(config_path).__dict__",
"def test_model_checkpoint_path(tmpdir, logger_version, expected):\n tutils.reset_seed()\n model = EvalModelTemplate()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n overfit_pct=0.2,\n max_epochs=5,\n logger=logger\n )\n trainer.fit(model)\n\n ckpt_version = Path(trainer.ckpt_path).parent.name\n assert ckpt_version == expected",
"def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def get_model_dir(self, context=None):\n if context == 'eval' and \\\n 'eval' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['eval'].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config['eval']['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n elif context == 'train' and \\\n 'train' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config['train'].keys() and \\\n 'model_type' in self.pipeline_config['train'].keys():\n return self.pipeline_config['train']['model_path']\n elif context == 'postprocess' and \\\n 'postprocess' in self.pipeline_config.keys() and \\\n 'model_path' in self.pipeline_config[context].keys():\n model_path = self.pipeline_config['output_dir'] + '/' + self.pipeline_config[context]['model_path']\n print(f'Using model path: {model_path}')\n return model_path\n else:\n # FIXME: now having a class method and also a function named get_output_dir, confusing!\n output_dir = self.get_output_dir()\n print(f'Getting model from {output_dir}')\n return output_dir",
"def get_model_path():\n misc_path = pkg_resources.resource_filename('sst', 'misc/')\n return os.path.abspath(os.path.join(misc_path, 'model.pickle'))",
"def get_config():\n config = ml_collections.ConfigDict()\n\n # Which model to use -- see ./models.py\n config.model_name = 'ViT-B_32'\n # Where to store training logs.\n config.log_dir = '.'\n\n # Number of steps to measure.\n config.steps = 30\n # Number of steps before measuring.\n config.initial_steps = 10\n\n # Batch size\n config.batch = 0\n # Number of output classes.\n config.num_classes = 0\n # Image size (width=height).\n config.image_size = 0\n\n config.train = 'inference_time'\n\n return config",
"def get_model_filename(config):\n base = os.path.splitext(config['corpus'])[0]\n return '%s--%dT.model' % (base, config['T'])",
"def fetch_predictor_config():\n try:\n f = open(os.path.join(CONFIG['results_path'], CONFIG['predictor_model_dir'], 'config.csv'), 'r')\n f.readline()\n values = f.readline().split(',')\n f.close()\n return int(values[0]), str(values[2]).strip()\n except:\n # If this happens, check the path.\n print('ERROR in fetching predictor config. Default values used. These may fail.')\n return CONFIG['training_img_size'], CONFIG['training_set_image_type']",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def _assert_predefined_config_path(\n self,\n framework: str,\n domain: str,\n domain_flavour: str,\n expected_filename: str,\n ) -> None:\n result = get_predefined_config_path(framework, domain, domain_flavour)\n expected = os.path.join(\n os.path.abspath(\n os.path.dirname(\n inspect.getfile(get_predefined_config_path),\n ),\n ),\n \"configs\",\n \"predefined_configs\",\n f\"{framework}\",\n expected_filename,\n )\n self.assertEqual(result, expected)\n self.assertEqual(os.path.isfile(result), True)",
"def get_default_dataset_config():\n code_path = utils.get_code_path()\n default_config = {\n 'GT_FOLDER': os.path.join(code_path, 'data/gt/kitti/kitti_mots_val'), # Location of GT data\n 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/kitti/kitti_mots_val'), # Trackers location\n 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)\n 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)\n 'CLASSES_TO_EVAL': ['car', 'pedestrian'], # Valid: ['car', 'pedestrian']\n 'SPLIT_TO_EVAL': 'val', # Valid: 'training', 'val'\n 'INPUT_AS_ZIP': False, # Whether tracker input files are zipped\n 'PRINT_CONFIG': True, # Whether to print current config\n 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER\n 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER\n 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL\n 'SEQMAP_FOLDER': None, # Where seqmaps are found (if None, GT_FOLDER)\n 'SEQMAP_FILE': None, # Directly specify seqmap file (if none use seqmap_folder/split_to_eval.seqmap)\n 'SEQ_INFO': None, # If not None, directly specify sequences to eval and their number of timesteps\n 'GT_LOC_FORMAT': '{gt_folder}/label_02/{seq}.txt', # format of gt localization\n }\n return default_config",
"def get_config(ctx):\n global HISTORY_LOGS, EXPERIMENT_ID #Ugly hack, make it better at some point, may be ;)\n id = ctx.job.id\n EXPERIMENT_ID = hash(id)\n\n import montezuma_env\n\n ctx.job.register_action(\"Set starting point procssor:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.STARTING_POINT_SELECTOR))\n ctx.job.register_action(\"Set rewards:\",\n lambda str: set_motezuma_env_options(str, montezuma_env.REWARDS_FILE))\n\n logger.auto_set_dir(suffix=id)\n\n # (self, parameters, number_of_actions, input_shape)\n\n M = EXPERIMENT_MODEL\n\n name_base = str(uuid.uuid1())[:6]\n PIPE_DIR = os.environ.get('TENSORPACK_PIPEDIR_{}'.format(id), '.').rstrip('/')\n namec2s = 'ipc://{}/sim-c2s-{}-{}'.format(PIPE_DIR, name_base, id)\n names2c = 'ipc://{}/sim-s2c-{}-{}'.format(PIPE_DIR, name_base, id)\n procs = [MySimulatorWorker(k, namec2s, names2c) for k in range(SIMULATOR_PROC)]\n ensure_proc_terminate(procs)\n start_proc_mask_signal(procs)\n\n master = MySimulatorMaster(namec2s, names2c, M)\n dataflow = BatchData(DataFromQueue(master.queue), BATCH_SIZE)\n\n # My stuff - PM\n neptuneLogger = NeptuneLogger.get_instance()\n lr = tf.Variable(0.001, trainable=False, name='learning_rate')\n tf.scalar_summary('learning_rate', lr)\n num_epochs = get_atribute(ctx, \"num_epochs\", 100)\n\n rewards_str = get_atribute(ctx, \"rewards\", \"5 1 -200\")\n with open(montezuma_env.REWARDS_FILE, \"w\") as file:\n file.write(rewards_str)\n\n\n if hasattr(ctx.params, \"learning_rate_schedule\"):\n schedule_str = str(ctx.params.learning_rate_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting learing rate schedule:{}\".format(schedule_str))\n learning_rate_scheduler = ScheduledHyperParamSetter('learning_rate', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"entropy_beta_schedule\"):\n schedule_str = str(ctx.params.entropy_beta_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 0.0003], [120, 0.0001]]\"\n logger.info(\"Setting entropy beta schedule:{}\".format(schedule_str))\n entropy_beta_scheduler = ScheduledHyperParamSetter('entropy_beta', json.loads(schedule_str))\n\n if hasattr(ctx.params, \"explore_factor_schedule\"):\n schedule_str = str(ctx.params.explore_factor_schedule)\n else: #Default value inhereted from tensorpack\n schedule_str = \"[[80, 2], [100, 3], [120, 4], [140, 5]]\"\n logger.info(\"Setting explore factor schedule:{}\".format(schedule_str))\n explore_factor_scheduler = ScheduledHyperParamSetter('explore_factor', json.loads(schedule_str))\n\n\n\n return TrainConfig(\n dataset=dataflow,\n optimizer=tf.train.AdamOptimizer(lr, epsilon=1e-3),\n callbacks=Callbacks([\n StatPrinter(), ModelSaver(),\n learning_rate_scheduler, entropy_beta_scheduler, explore_factor_scheduler,\n HumanHyperParamSetter('learning_rate'),\n HumanHyperParamSetter('entropy_beta'),\n HumanHyperParamSetter('explore_factor'),\n NeputneHyperParamSetter('learning_rate', ctx),\n NeputneHyperParamSetter('entropy_beta', ctx),\n NeputneHyperParamSetter('explore_factor', ctx),\n master,\n StartProcOrThread(master),\n PeriodicCallback(Evaluator(EVAL_EPISODE, ['state'], ['logits'], neptuneLogger, HISTORY_LOGS), 1),\n neptuneLogger,\n ]),\n session_config=get_default_sess_config(0.5),\n model=M,\n step_per_epoch=STEP_PER_EPOCH,\n max_epoch=num_epochs,\n )"
] | [
"0.7441416",
"0.7052898",
"0.69839627",
"0.66950226",
"0.6658153",
"0.64765024",
"0.6476497",
"0.6430937",
"0.6410763",
"0.6309978",
"0.6242159",
"0.6237229",
"0.61450243",
"0.60556775",
"0.5982015",
"0.59642893",
"0.58971345",
"0.5868531",
"0.5844909",
"0.5821499",
"0.57609016",
"0.57583207",
"0.5738903",
"0.5699538",
"0.56952184",
"0.5690783",
"0.5670543",
"0.56535923",
"0.56480986",
"0.5595236"
] | 0.84726024 | 0 |
Test getting predefined config path for onnx image recognition models. | def test_get_predefined_onnx_image_recognition_config_path(self) -> None:
self._assert_predefined_config_path(
framework="onnxrt",
domain="image_recognition",
domain_flavour="",
expected_filename="image_recognition.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_tf_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"foo\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_get_predefined_tf_recommendation_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"recommendation\",\n domain_flavour=\"\",\n expected_filename=\"recommendation.yaml\",\n )",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def check_config( config: 'bittensor.Config' ):\n assert config.neuron.batch_size_train > 0, \"batch_size_train must be a positive value\"\n assert config.neuron.learning_rate > 0, \"learning_rate must be a positive value.\"\n bittensor.logging.check_config( config )\n bittensor.wallet.check_config( config )\n bittensor.subtensor.check_config( config )\n bittensor.metagraph.check_config( config )\n bittensor.dataloader.check_config( config )\n bittensor.dendrite.check_config( config )\n bittensor.axon.check_config( config )\n GPT2Nucleus.check_config( config )\n SGMOERouter.check_config( config )\n full_path = os.path.expanduser('{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name + \"-\" + config.wallet.hotkey, config.neuron.name ))\n config.neuron.full_path = os.path.expanduser(full_path)\n config.neuron.tensorboard_dir = config.neuron.full_path + '/tensorboard-' + '-'.join(str(datetime.now()).split())\n if not os.path.exists(config.neuron.full_path):\n os.makedirs(config.neuron.full_path)",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.netModel')\n configNum = 1\n i = 0\n configNumString = '%(c)04d' % {'c':configNum}\n while i < len(filenames):\n configNumString = '%(c)04d' % {'c':configNum}\n if (filenames[i][:4]==configNumString):\n configNum += 1\n i = 0\n else:\n i += 1\n return os.path.realpath('.')+'/.netModel/'+configNumString",
"def test_get_dataset_path(self) -> None:\n framework = \"tensorflow\"\n domain = \"image_recognition\"\n result = get_dataset_path(framework, domain)\n self.assertEqual(result, \"examples/test/dataset/imagenet\")",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def get_config(runner,\n raw_uri: str,\n processed_uri: str,\n root_uri: str,\n nochip: bool = True,\n multiband: bool = False,\n external_model: bool = False,\n test: bool = False) -> ObjectDetectionConfig:\n train_ids = TRAIN_IDS\n val_ids = VAL_IDS\n\n if test:\n train_ids = train_ids[:2]\n val_ids = val_ids[:2]\n\n if multiband:\n channel_order = [0, 1, 2, 3]\n channel_display_groups = {'RGB': [0, 1, 2], 'IR': [3]}\n else:\n channel_order = [0, 1, 2]\n channel_display_groups = None\n\n def make_scene(id: str) -> SceneConfig:\n raster_uri = join(raw_uri, f'4_Ortho_RGBIR/top_potsdam_{id}_RGBIR.tif')\n label_uri = join(processed_uri, 'labels', 'all',\n f'top_potsdam_{id}_RGBIR.json')\n\n if test:\n crop_uri = join(processed_uri, 'crops',\n os.path.basename(raster_uri))\n save_image_crop(\n raster_uri,\n crop_uri,\n label_uri=label_uri,\n vector_labels=True,\n size=2000,\n min_features=5,\n default_class_id=0)\n raster_uri = crop_uri\n\n raster_source = RasterioSourceConfig(\n uris=[raster_uri], channel_order=channel_order)\n\n vector_source = GeoJSONVectorSourceConfig(\n uris=label_uri,\n ignore_crs_field=True,\n transformers=[ClassInferenceTransformerConfig(default_class_id=0)])\n label_source = ObjectDetectionLabelSourceConfig(\n vector_source=vector_source)\n\n return SceneConfig(\n id=id, raster_source=raster_source, label_source=label_source)\n\n class_config = ClassConfig(names=['vehicle'], colors=['red'])\n scene_dataset = DatasetConfig(\n class_config=class_config,\n train_scenes=[make_scene(id) for id in train_ids],\n validation_scenes=[make_scene(id) for id in val_ids])\n\n chip_sz = 300\n img_sz = chip_sz\n\n chip_options = ObjectDetectionChipOptions(neg_ratio=5.0, ioa_thresh=0.9)\n if nochip:\n window_opts = ObjectDetectionGeoDataWindowConfig(\n method=GeoDataWindowMethod.random,\n size=chip_sz,\n size_lims=(chip_sz, chip_sz + 1),\n max_windows=500,\n max_sample_attempts=100,\n clip=True,\n neg_ratio=chip_options.neg_ratio,\n ioa_thresh=chip_options.ioa_thresh,\n neg_ioa_thresh=0.2)\n\n data = ObjectDetectionGeoDataConfig(\n scene_dataset=scene_dataset,\n window_opts=window_opts,\n img_sz=img_sz,\n num_workers=4,\n plot_options=PlotOptions(\n channel_display_groups=channel_display_groups))\n else:\n data = ObjectDetectionImageDataConfig(\n img_sz=img_sz,\n num_workers=4,\n plot_options=PlotOptions(\n channel_display_groups=channel_display_groups))\n\n if external_model:\n \"\"\"This demonstrates how to use an external model for object detection,\n but to successfully use this functionality with different settings, the\n following things should be kept in mind:\n\n (1) Torchvision does not expose its object detection models via\n torch hub (https://github.com/pytorch/vision/issues/1945). So, to\n use those, you will need to fork the torchvision repo and manually\n add those models or corresponding factory functions to hubconf.py.\n Example: github.com/AdeelH/vision/blob/det_hubconf_0.10/hubconf.py.\n Further, you should ensure that the branch of the fork is the same\n version as the version in Raster Vision's Docker image; or, if\n using outside Docker, it should match the version of the local\n torchvision installation.\n (2) The external model should behave exactly like torchvision\n object detection models. 
This includes, but might not be limited\n to:\n - Accepting targets as dicts with keys: 'boxes' and 'labels'.\n - Accepting 1-indexed class labels.\n - Computing losses internally and returning them in a dict\n during training.\n - Returning predictions as dicts with keys: 'boxes', 'labels',\n and 'scores'.\n \"\"\"\n\n model = ObjectDetectionModelConfig(\n external_def=ExternalModuleConfig(\n github_repo='AdeelH/vision:det_hubconf_0.12',\n name='ssd',\n entrypoint='ssd300_vgg16',\n force_reload=True,\n entrypoint_kwargs={\n # torchvision OD models need add an additional null class,\n # so +1 is needed here\n 'num_classes': len(class_config.names) + 1,\n 'pretrained': False,\n 'pretrained_backbone': True\n }))\n else:\n model = ObjectDetectionModelConfig(backbone=Backbone.resnet18)\n\n backend = PyTorchObjectDetectionConfig(\n data=data,\n model=model,\n solver=SolverConfig(\n lr=1e-4,\n num_epochs=10,\n test_num_epochs=2,\n batch_sz=16,\n one_cycle=True),\n log_tensorboard=False,\n run_tensorboard=False,\n test_mode=test)\n\n predict_options = ObjectDetectionPredictOptions(\n merge_thresh=0.5, score_thresh=0.9)\n\n pipeline = ObjectDetectionConfig(\n root_uri=root_uri,\n dataset=scene_dataset,\n backend=backend,\n train_chip_sz=chip_sz,\n predict_chip_sz=chip_sz,\n chip_options=chip_options,\n predict_options=predict_options)\n\n return pipeline",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def test_get_onnx_framework_from_path(self) -> None:\n path = \"/home/user/model.onnx\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"onnxrt\")",
"def fetch_predictor_config():\n try:\n f = open(os.path.join(CONFIG['results_path'], CONFIG['predictor_model_dir'], 'config.csv'), 'r')\n f.readline()\n values = f.readline().split(',')\n f.close()\n return int(values[0]), str(values[2]).strip()\n except:\n # If this happens, check the path.\n print('ERROR in fetching predictor config. Default values used. These may fail.')\n return CONFIG['training_img_size'], CONFIG['training_set_image_type']",
"def ignor_test_load_default_config(self):\n config = AnnotatorConfig()\n assert config[\"config\"] == \"config.json\"",
"def test_get_predefined_tf_object_detection_ssd_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"ssd\",\n expected_filename=\"object_detection_ssd.yaml\",\n )",
"def load_config(test = False, config_file = \"config.json\"):\n\n global config\n with open(config_file, \"r\") as f:\n config = json.load(f)\n print(\"config:\", config)\n\n for key in config:\n # Check if every configuration is set\n if config[key]==\"\":\n print(\"Please complete the config.json first!\")\n sys.exit(1)\n else:\n config[\"default-k\"] = int(config[\"default-k\"])\n if test:\n config[\"default-suffix\"] = config[\"test-suffix\"]\n config[\"default-table\"] = \"knn_\"+config['test-suffix']\n config[\"data-width\"] = 3\n config[\"data-dir\"] += \"_test\"\n else:\n config[\"default-suffix\"] = config[\"suffix\"]\n config[\"default-table\"] = \"knn_\" + config[\"suffix\"]\n config[\"data-width\"] = int(config[\"data-width\"])\n\n print(\"Configuration Check success\")",
"def test_image_path(self):\n self.assertEqual(\n self.mineral.image_path,\n 'minerals/images/some_filename.jpg')",
"def testConfigD(self):\n assert type(self.config['icon_img_path']) == str, \"Not parsing string to correct type\"",
"def mip_config_path():\n return \"tests/fixtures/global_config.yaml\"",
"def check_nn_config(config):\n if config['model']['model_type'] in ['fm']:\n required_parameters = ['train_file', 'eval_file', 'FEATURE_COUNT', 'dim', 'loss', 'data_format', 'method']\n elif config['model']['model_type'] in ['lr']:\n required_parameters = ['train_file', 'eval_file', 'FEATURE_COUNT', 'loss', 'data_format', 'method']\n elif config['model']['model_type'] in ['din']:\n required_parameters = ['train_file', 'eval_file', 'PAIR_NUM', 'DNN_FIELD_NUM', 'FEATURE_COUNT', 'dim', \\\n 'layer_sizes', 'activation', 'attention_layer_sizes', 'attention_activation', 'loss', \\\n 'data_format', 'dropout', 'method']\n elif config['model']['model_type'] in ['cccfnet']:\n required_parameters = ['train_file', 'eval_file', 'dim', 'layer_sizes', 'n_user', 'n_item', 'n_user_attr',\n 'n_item_attr',\n 'activation', 'loss', 'data_format', 'dropout', 'mu', 'method']\n elif config['model']['model_type'] in ['exDeepFM']:\n required_parameters = ['train_file', 'eval_file', 'FIELD_COUNT', 'FEATURE_COUNT', 'method',\n 'dim', 'layer_sizes', 'cross_layer_sizes', 'activation', 'loss', 'data_format', 'dropout']\n elif config['model']['model_type'] in ['deepcross']:\n required_parameters = ['train_file', 'eval_file', 'FIELD_COUNT', 'FEATURE_COUNT', 'method',\n 'dim', 'layer_sizes', 'cross_layers', 'activation', 'loss', 'data_format',\n 'dropout']\n else:\n required_parameters = ['train_file', 'eval_file', 'FIELD_COUNT', 'FEATURE_COUNT', 'method',\n 'dim', 'layer_sizes', 'activation', 'loss', 'data_format', 'dropout']\n f_config = flat_config(config)\n # check required parameters\n for param in required_parameters:\n if param not in f_config:\n raise ValueError(\"parameters {0} must be set\".format(param))\n if f_config['model_type'] == 'din':\n if f_config['data_format'] != 'din':\n raise ValueError(\n \"for din model, data format must be din, but your set is {0}\".format(f_config['data_format']))\n elif f_config['model_type'] == 'cccfnet':\n if f_config['data_format'] != 'cccfnet':\n raise ValueError(\n \"for cccfnet model, data format must be cccfnet, but your set is {0}\".format(f_config['data_format']))\n else:\n if f_config['data_format'] != 'ffm':\n raise ValueError(\"data format must be ffm, but your set is {0}\".format(f_config['data_format']))\n check_type(f_config)",
"def load_onnx(model_name):\n onnx_path = '%s.onnx' % model_name\n if not os.path.isfile(onnx_path):\n print('ERROR: file (%s) not found! You might want to run yolo_to_onnx.py first to generate it.' % onnx_path)\n return None\n else:\n with open(onnx_path, 'rb') as f:\n return f.read()",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS",
"def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\""
] | [
"0.7796619",
"0.6986146",
"0.68707967",
"0.67667747",
"0.65804195",
"0.6415202",
"0.63782686",
"0.6302182",
"0.6213862",
"0.60774255",
"0.60295063",
"0.59473515",
"0.5907198",
"0.58103526",
"0.5796034",
"0.5795795",
"0.5795542",
"0.5785171",
"0.57451344",
"0.5743044",
"0.5689858",
"0.5653211",
"0.56407267",
"0.56276405",
"0.5625295",
"0.56119126",
"0.5587043",
"0.55626225",
"0.55550414",
"0.5533661"
] | 0.84311223 | 0 |
Test getting predefined config path for onnx NLP models. | def test_get_predefined_onnx_nlp_config_path(self) -> None:
self._assert_predefined_config_path(
framework="onnxrt",
domain="nlp",
domain_flavour="",
expected_filename="nlp.yaml",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_get_predefined_onnx_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def test_get_predefined_tf_image_recognition_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"image_recognition\",\n domain_flavour=\"\",\n expected_filename=\"image_recognition.yaml\",\n )",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def test_get_predefined_tf_recommendation_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"recommendation\",\n domain_flavour=\"\",\n expected_filename=\"recommendation.yaml\",\n )",
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def find_config():\n print(\"in find_config()\")\n print(os.getcwd())\n print(os.listdir(os.getcwd()))\n print(os.path.expanduser(\"~/.pylcmodel\"))\n if os.path.isfile(os.path.join(os.getcwd(), \".pylcmodel\")):\n return os.path.join(os.getcwd(), \".pylcmodel\")\n elif os.path.isfile(os.path.expanduser(\"~/.pylcmodel\")):\n return os.path.expanduser(\"~/.pylcmodel\")\n else:\n raise FileNotFoundError(\"No .pylcmodel config file found.\")",
"def test_validate_nagl_model_path(model_name):\n model_path = validate_nagl_model_path(model_name)\n assert os.path.exists(model_path)",
"def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True",
"def test_get_onnx_framework_from_path(self) -> None:\n path = \"/home/user/model.onnx\"\n result = get_framework_from_path(path)\n self.assertEqual(result, \"onnxrt\")",
"def get_config():\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n\n if conf[\"use_dev_config\"]:\n print(\"Dev Setup: dev_config.json will be used\")\n config_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dev_config.json\")\n with open(config_file, \"r\") as read_file:\n conf = json.load(read_file)\n else:\n print(\"Server Setup: config.json will be used\")\n\n pretrained_root = conf[\"pretrained_root\"]\n gloves_dir = os.path.join(pretrained_root, \"gloves\")\n if not os.path.exists(gloves_dir):\n os.makedirs(gloves_dir)\n print(\"Created directory:\", gloves_dir)\n\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"captioning\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"attention\"][\"pretrained_dir\"])\n\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"] = os.path.join(pretrained_root,\n conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\n \"pretrained_dir\"])\n\n captioning_pretrained_dir = conf[\"captioning\"][\"attention\"][\"pretrained_dir\"]\n vqa_pretrained_dir = conf[\"vqa\"][\"attention\"][\"pretrained_dir\"]\n lxmert_pretrained_dir = conf[\"vqa\"][\"lxmert\"][\"fine_tuning\"][\"pretrained_dir\"]\n\n vqa_features = os.path.join(conf[\"ade20k_vqa_dir\"], \"precomputed_features/training\")\n\n gqa_dir = os.path.join(lxmert_pretrained_dir,\n \"gqa\")\n vqa_dir = os.path.join(lxmert_pretrained_dir,\n \"vqa\")\n\n if not os.path.exists(captioning_pretrained_dir):\n os.makedirs(captioning_pretrained_dir)\n print(\"Created directory:\", captioning_pretrained_dir)\n if not os.path.exists(vqa_pretrained_dir):\n os.makedirs(vqa_pretrained_dir)\n print(\"Created directory:\", vqa_pretrained_dir)\n\n if not os.path.exists(lxmert_pretrained_dir):\n os.makedirs(lxmert_pretrained_dir)\n print(\"Created directory:\", lxmert_pretrained_dir)\n\n if not os.path.exists(gqa_dir):\n os.makedirs(gqa_dir)\n print(\"Created directory:\", gqa_dir)\n\n if not os.path.exists(vqa_dir):\n os.makedirs(vqa_dir)\n print(\"Created directory:\", vqa_dir)\n\n if not os.path.exists(vqa_features):\n os.makedirs(vqa_features)\n print(\"Created directory:\", vqa_features)\n\n checkpoints_dir = os.path.join(captioning_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"checkpoints\")\n create_directory_structure(checkpoints_dir)\n\n checkpoints_dir = os.path.join(vqa_pretrained_dir, \"logs\")\n create_directory_structure(checkpoints_dir)\n\n if not os.path.exists(conf[\"game_logs_dir\"]):\n os.makedirs(conf[\"game_logs_dir\"])\n print(\"Created directory:\", conf[\"game_logs_dir\"])\n\n ade20k_localized_narratives_train_file = os.path.join(conf[\"ade20k_dir\"], conf[\"ade20k_localized_narratives_train_file\"])\n\n if not os.path.exists(ade20k_localized_narratives_train_file):\n print(f\"Warning, config for ade20k_localized_narratives_train_file is missing: {ade20k_localized_narratives_train_file}\")\n conf[\"ade20k_localized_narratives_train_file\"] = ade20k_localized_narratives_train_file\n\n return conf",
"def get_config_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, configs.DEFAULT_FILENAME_CONFIG)",
"def test_get_predefined_tf_object_detection_unknown_flavour_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"foo\",\n expected_filename=\"object_detection.yaml\",\n )",
"def _setupFilename(self):\n try:\n os.mkdir('./.netModel')\n except:\n pass # hope it's already there...\n filenames = os.listdir('./.netModel')\n configNum = 1\n i = 0\n configNumString = '%(c)04d' % {'c':configNum}\n while i < len(filenames):\n configNumString = '%(c)04d' % {'c':configNum}\n if (filenames[i][:4]==configNumString):\n configNum += 1\n i = 0\n else:\n i += 1\n return os.path.realpath('.')+'/.netModel/'+configNumString",
"def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS",
"def load_onnx(model_name):\n onnx_path = '%s.onnx' % model_name\n if not os.path.isfile(onnx_path):\n print('ERROR: file (%s) not found! You might want to run yolo_to_onnx.py first to generate it.' % onnx_path)\n return None\n else:\n with open(onnx_path, 'rb') as f:\n return f.read()",
"def get_model_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, models.DEFAULT_FILENAME_MODEL)",
"def mip_config_path():\n return \"tests/fixtures/global_config.yaml\"",
"def test_validate_nagl_model_path_failed():\n with pytest.raises(FileNotFoundError):\n validate_nagl_model_path(\"does-not-exist.pt\")",
"def ignor_test_load_default_config(self):\n config = AnnotatorConfig()\n assert config[\"config\"] == \"config.json\"",
"def test_read_namespaced_build_config(self):\n pass",
"def check_config( config: 'bittensor.Config' ):\n assert config.neuron.batch_size_train > 0, \"batch_size_train must be a positive value\"\n assert config.neuron.learning_rate > 0, \"learning_rate must be a positive value.\"\n bittensor.logging.check_config( config )\n bittensor.wallet.check_config( config )\n bittensor.subtensor.check_config( config )\n bittensor.metagraph.check_config( config )\n bittensor.dataloader.check_config( config )\n bittensor.dendrite.check_config( config )\n bittensor.axon.check_config( config )\n GPT2Nucleus.check_config( config )\n SGMOERouter.check_config( config )\n full_path = os.path.expanduser('{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name + \"-\" + config.wallet.hotkey, config.neuron.name ))\n config.neuron.full_path = os.path.expanduser(full_path)\n config.neuron.tensorboard_dir = config.neuron.full_path + '/tensorboard-' + '-'.join(str(datetime.now()).split())\n if not os.path.exists(config.neuron.full_path):\n os.makedirs(config.neuron.full_path)",
"def setUp(self):\n cltk_data_dir = '~/cltk_data/sanskrit/model/sanskrit_models_cltk'\n INDIC_RESOURCES_PATH = os.path.expanduser(cltk_data_dir)\n\n resources_present = os.path.isdir(INDIC_RESOURCES_PATH)\n if not resources_present:\n corpus_importer = CorpusImporter('sanskrit')\n corpus_importer.import_corpus('sanskrit_models_cltk')",
"def get_model_filename(config):\n base = os.path.splitext(config['corpus'])[0]\n return '%s--%dT.model' % (base, config['T'])",
"def get_config():\n\n parser = argparse.ArgumentParser(\n description='ZoomingSloMo or only Slo-Mo training argument parser')\n parser.add_argument('--cfg', default=\"./config.yaml\")\n args, _ = parser.parse_known_args()\n conf = read_yaml(args.cfg)\n\n parser.add_argument('--lmdb-data-gt', type=str, default=\"datasets/\",\n help='Path to HR frames lmdb for training')\n\n parser.add_argument('--lmdb-data-lq', type=str, default=\"datasets/\",\n help='Path to LR frames lmdb for training')\n\n parser.add_argument('--output-dir', type=str, default=\"models/\",\n help='Path to store trained models')\n\n parser.add_argument('--batch-size', type=int, default=\"12\",\n help='Maximum number of iterations for training')\n\n parser.add_argument('--gt-size', type=int, default=128,\n help='Ground truth frame size')\n\n parser.add_argument('--only-slomo', action='store_true', default=False,\n help='If True, network will train for Slo-Mo only (No Zooming)')\n\n args = parser.parse_args()\n\n # Refine config file variables\n conf.data.lmdb_data_gt = args.lmdb_data_gt\n conf.data.lmdb_data_lq = args.lmdb_data_lq\n conf.data.output_dir = args.output_dir\n conf.train.batch_size = args.batch_size\n conf.train.only_slomo = args.only_slomo\n conf.data.gt_size = args.gt_size if not args.only_slomo else args.gt_size // 4\n conf.data.lr_size = args.gt_size // 4\n\n return conf",
"def test_model_uris(self):\n for filename in [\n LOCAL_RDF_FILE_NAME,\n LOCAL_SHEXJ_FILE_NAME,\n LOCAL_SHEXC_FILE_NAME,\n LOCAL_TYPES_LDCONTEXT_FILE,\n LOCAL_SHEXC_FILE_NAME,\n LOCAL_SHEXJ_FILE_NAME,\n LOCAL_RDF_FILE_NAME,\n LOCAL_TYPES_LDCONTEXT_FILE,\n LOCAL_MAPPINGS_YAML_FILE,\n LOCAL_MAPPINGS_LDCONTEXT_FILE,\n LOCAL_MAPPINGS_JSONLD_FILE,\n LOCAL_TYPES_LDCONTEXT_FILE,\n LOCAL_TYPES_JSONLD_FILE,\n LOCAL_TYPES_YAML_FILE,\n LOCAL_MAPPINGS_JSONLD_FILE,\n LOCAL_METAMODEL_JSONLD_FILE,\n LOCAL_METAMODEL_LDCONTEXT_FILE,\n ]:\n self.assertTrue(os.path.exists(filename), msg=f\"{filename} does not exist\")\n self.validate_yaml_content(\n METAMODEL_URI,\n METAMODEL_NAME,\n METAMODEL_NAMESPACE_NAME,\n METAMODEL_NAMESPACE,\n LOCAL_METAMODEL_YAML_FILE,\n )\n self.validate_yaml_content(\n METATYPE_URI,\n METATYPE_NAME,\n METATYPE_NAMESPACE_NAME,\n METATYPE_NAMESPACE,\n LOCAL_TYPES_YAML_FILE,\n )\n self.validate_yaml_content(\n METAMAPPING_URI,\n METAMAPPING_NAME,\n METAMAPPING_NAMESPACE_NAME,\n METAMAPPING_NAMESPACE,\n LOCAL_MAPPINGS_YAML_FILE,\n )",
"def test_create_nontar_model():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model-nonexistent.bla\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged",
"def load_config(test = False, config_file = \"config.json\"):\n\n global config\n with open(config_file, \"r\") as f:\n config = json.load(f)\n print(\"config:\", config)\n\n for key in config:\n # Check if every configuration is set\n if config[key]==\"\":\n print(\"Please complete the config.json first!\")\n sys.exit(1)\n else:\n config[\"default-k\"] = int(config[\"default-k\"])\n if test:\n config[\"default-suffix\"] = config[\"test-suffix\"]\n config[\"default-table\"] = \"knn_\"+config['test-suffix']\n config[\"data-width\"] = 3\n config[\"data-dir\"] += \"_test\"\n else:\n config[\"default-suffix\"] = config[\"suffix\"]\n config[\"default-table\"] = \"knn_\" + config[\"suffix\"]\n config[\"data-width\"] = int(config[\"data-width\"])\n\n print(\"Configuration Check success\")",
"def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\""
] | [
"0.7163459",
"0.7056394",
"0.6645824",
"0.64085156",
"0.6328148",
"0.62867266",
"0.61514914",
"0.61138886",
"0.61060506",
"0.6087492",
"0.6009389",
"0.59438986",
"0.5923466",
"0.5904732",
"0.58298385",
"0.5808922",
"0.58066463",
"0.5793833",
"0.5764205",
"0.5678265",
"0.56763303",
"0.5644252",
"0.56379855",
"0.5616942",
"0.55997294",
"0.55941737",
"0.5536885",
"0.5536101",
"0.55200434",
"0.5493519"
] | 0.7960524 | 0 |
Test checking existing module. | def test_check_module(self) -> None:
check_module("os") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_module(self):\n pass",
"def test_module_exists(self):\n project_path = os.getcwd()\n rango_app_path = os.path.join(project_path, 'rango')\n forms_module_path = os.path.join(rango_app_path, 'forms.py')\n\n self.assertTrue(os.path.exists(forms_module_path), f\"{FAILURE_HEADER}Couldn't find forms.py module.{FAILURE_FOOTER}\")",
"def test_check_non_existing_module(self) -> None:\n with self.assertRaises(ClientErrorException):\n check_module(\"non_existing_module\")",
"def test_ifPythonModuleIsInstalled():\n for name in config.toTest:\n testConfig = dynamicallyLoadModule(name)\n if \"pyModule\" in testConfig.config:\n print \"pyModule: \"+ testConfig.config[\"name\"]\n yield assertionFunctions.checkIfPythonModuleIsInstalled, testConfig.config",
"def test_module_imports(self):\n apps = [\n 'customers',\n 'customers.migrations',\n 'customers.management',\n 'customers.management.commands',\n 'customers.management.commands.load_customers_to_redis',\n 'customers.forms',\n 'customers.admin',\n 'customers.models',\n 'customers.urls',\n 'customers.views',\n ]\n for a in apps:\n self.assertTrue(module_exists(a))",
"def test_molecool_imported():\n assert \"molecool\" in sys.modules",
"def testCheckPythonModule(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n dependency = dependencies.DependencyDefinition('os')\n result, _ = dependency_helper._CheckPythonModule(dependency)\n self.assertTrue(result)\n\n dependency = dependencies.DependencyDefinition('bogus')\n result, _ = dependency_helper._CheckPythonModule(dependency)\n self.assertFalse(result)",
"def test_IMOD_pass(self):\n self.assertTrue(self.mod.isset)",
"def test_qm_project_python_testing_imported():\n assert \"qm_project_python_testing\" in sys.modules",
"def testNoSuchModule(self):\n self.assertRaises(messages.DefinitionNotFoundError,\n messages.find_definition,\n 'does.not.exist',\n importer=self.Importer)",
"def test_create_module_successful(self):\n payload = {'name': 'Test Module'}\n self.client.post(MODULES_URL, payload)\n\n exists = Module.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)",
"def test_exists(self):\n self.assertTrue(os.path.exists(__file__) == self._system.exists(__file__))",
"def test_imports():\n assert False",
"def test_molssi_project_imported():\n assert \"molssi_project\" in sys.modules",
"def module_exists(module_name):\r\n\r\n try:\r\n __import__(module_name)\r\n except ImportError:\r\n return False\r\n else:\r\n return True",
"def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise",
"def check_module(name):\n return importlib.util.find_spec(name) is not None",
"def checkIfPythonModuleIsInstalled(testConfig):\n try:\n exec(\"import \"+testConfig[\"pyModule\"])\n assert True\n except Exception as e:\n assert False, testConfig[\"name\"]+\": \"+testConfig[\"pyModule\"]+\" could not successfully be loaded in Python.\"",
"def test_rlmm_imported():\n assert \"rlmm\" in sys.modules",
"def testRefersToModule(self):\n self.DefineModule('i.am.a.module')\n self.assertRaises(messages.DefinitionNotFoundError,\n messages.find_definition,\n 'i.am.a.module',\n importer=self.Importer)",
"def testImportPythonModule(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n module_object = dependency_helper._ImportPythonModule('os')\n self.assertIsNotNone(module_object)\n\n # TODO: add test with submodule.",
"def test_parrot_imported():\n assert \"parrot\" in sys.modules",
"def testPynocleImportsPynocle(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(THISDIR, '__init__')\r\n self.assertEqual(expected, modulefinder.get_module_filename('pynocle', __file__))",
"def test_require():",
"def check_import():\n print('[GenHub] Checking Python modules.')\n\n basemod = [('yaml', 'pyyaml'), ('pycurl', 'pycurl')]\n devmod = ['pep8', 'pytest', 'pytest-cov', 'coverage']\n\n packages = dict()\n for importname, packagename in basemod:\n try:\n importlib.import_module(importname)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n for packagename in devmod:\n try:\n importlib.import_module(packagename)\n packages[packagename] = True\n except ImportError:\n packages[packagename] = False\n\n rundep = False\n for pkg in packages:\n char = '+'\n msg = 'Installed.'\n if packages[pkg] is False:\n char = '-'\n msg = 'Not installed!'\n rundep = True\n print('%c package %-12s: %s' % (char, pkg, msg))\n if rundep is True:\n print('Please install these dependencies before proceding')\n print('')",
"def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules",
"def test_module():\n query = {\n 'operator': 'all',\n 'children': [\n {\n 'field': 'sample.malware',\n 'operator': 'is',\n 'value': 1\n }\n ]\n }\n\n do_search('samples', query=query, scope='Public', err_operation='Test module failed')\n return",
"def test_get_module_name():\n\n assert application_services.get_module_name() == 'tests.unit'",
"def module_present(module, load=True):\n with open('/proc/modules', 'r') as modules_file:\n if module.replace('-','_') in modules_file.read():\n return True\n cmd = '/sbin/modprobe {}{}'.format('' if load else '-n ',\n module)\n if os.system(cmd) != 0:\n return False\n else:\n return True",
"def testCheckPythonModuleVersion(self):\n dependencies_file = self._GetTestFilePath(['dependencies.ini'])\n self._SkipIfPathNotExists(dependencies_file)\n\n dependency_helper = dependencies.DependencyHelper(\n dependencies_file=dependencies_file)\n\n module_object = dependency_helper._ImportPythonModule('os')\n\n result, _ = dependency_helper._CheckPythonModuleVersion(\n 'os', module_object, '__version__', '1.0', '2.0')\n self.assertFalse(result)\n\n # TODO: add test with version with suffix 17.0.0b1"
] | [
"0.7775068",
"0.7622405",
"0.75304526",
"0.71911955",
"0.71885073",
"0.7102597",
"0.70338994",
"0.70180136",
"0.6914303",
"0.68790066",
"0.68734944",
"0.68504345",
"0.6809024",
"0.6796673",
"0.6772522",
"0.67269987",
"0.6687166",
"0.667671",
"0.6617383",
"0.6616602",
"0.6591627",
"0.6575203",
"0.65442413",
"0.6520232",
"0.64947605",
"0.6484368",
"0.64518446",
"0.6448528",
"0.64394253",
"0.64233"
] | 0.78205836 | 0 |
Test checking non existing module. | def test_check_non_existing_module(self) -> None:
with self.assertRaises(ClientErrorException):
check_module("non_existing_module") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def testNoSuchModule(self):\n self.assertRaises(messages.DefinitionNotFoundError,\n messages.find_definition,\n 'does.not.exist',\n importer=self.Importer)",
"def test_check_module(self) -> None:\n check_module(\"os\")",
"def test_ensureWhenNotImportedDontPrevent(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\")\n self.assertEqual(modules, {})",
"def test_ensureWhenNotImported(self):\n modules = {}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\", \"m3\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None, \"m3\": None})",
"def test_module(self):\n pass",
"def test_import_allows_multiple_modules_failure(self):\n # Deliberately using modules that will already be imported to avoid side effects.\n feature = LazyImportTester([\"site\", \"sys\", \"_qiskit_module_does_not_exist_\"])\n with mock_availability_test(feature) as check:\n check.assert_not_called()\n self.assertFalse(feature)\n check.assert_called_once()",
"def test_ensureWhenFailedToImport(self):\n modules = {\"m2\": None}\n self.patch(sys, \"modules\", modules)\n ensureNotImported([\"m1\", \"m2\"], \"A message.\", preventImports=[\"m1\", \"m2\"])\n self.assertEqual(modules, {\"m1\": None, \"m2\": None})",
"def test_module_exists(self):\n project_path = os.getcwd()\n rango_app_path = os.path.join(project_path, 'rango')\n forms_module_path = os.path.join(rango_app_path, 'forms.py')\n\n self.assertTrue(os.path.exists(forms_module_path), f\"{FAILURE_HEADER}Couldn't find forms.py module.{FAILURE_FOOTER}\")",
"def check_module(name):\n return importlib.util.find_spec(name) is not None",
"def test_imports():\n assert False",
"def test_absent_imports():\n module, HABEMUS_MODULE = optional_import(\"not_real_module\")\n\n assert not HABEMUS_MODULE\n assert module.__name__ == \"not_real_module\"\n with pytest.raises(ModuleNotFoundError):\n _ = module.layers",
"def test_missing_file():\n passed = False\n try:\n x = XPIManager('foo.bar')\n except:\n passed = True\n assert passed",
"def test_import_fails_with_no_modules(self):\n with self.assertRaises(ValueError):\n LazyImportTester([])",
"def test_molecool_imported():\n assert \"molecool\" in sys.modules",
"def test_modules(self):\n for mod in self.expected_modules:\n try:\n __import__(mod)\n except ImportError:\n raise",
"def test_instantiate_non_existent_module(self):\n # create test configs\n test_configs = [\n {\"_target_\": \"non_existent_module.some_class\"},\n {\"_target_\": \"another_non_existent_module.some_class\", \"a\": 1, \"b\": 2}\n ]\n\n # check that instantiate raises ModuleNotFoundError for each test config\n for test_conf in test_configs:\n self.assertRaises(ModuleNotFoundError, instantiate, test_conf)",
"def _should_ignore_module(cls, module_name):\n # exclude test modules for now to avoid spurious failures\n # TODO(jelle): enable for test modules too\n return module_name.split(\".\")[-1].startswith(\"test\")",
"def test_broken_module(self):\r\n module = self.descriptor._xmodule\r\n self.assertIsInstance(module, ErrorModule)",
"def test_IMOD_pass(self):\n self.assertTrue(self.mod.isset)",
"def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule",
"def unavailable_importer(**kwargs):\n return LazyImportTester(\"_qiskit_this_module_does_not_exist_\", **kwargs)",
"def test_import_string_missing_module(self):\n invalid_module = 'ttgn.nonexistent_module.foobar'\n with pytest.raises(ImportError):\n utils.import_string(invalid_module)",
"def testNoDefinition(self):\n self.DefineModule('i.am.a.module')\n self.assertRaises(messages.DefinitionNotFoundError,\n messages.find_definition,\n 'i.am.a.module.MyMessage',\n importer=self.Importer)",
"def test_ensureFailsWhenImported(self):\n module = object()\n modules = {\"m2\": module}\n self.patch(sys, \"modules\", modules)\n e = self.assertRaises(\n ImportError,\n ensureNotImported,\n [\"m1\", \"m2\"],\n \"A message.\",\n preventImports=[\"m1\", \"m2\"],\n )\n self.assertEqual(modules, {\"m2\": module})\n self.assertEqual(e.args, (\"A message.\",))",
"def test_deprecated_modules(self):\n\n deprecated_modules_present = False\n\n deprecated_modules = [\n \"game_assets\",\n \"models\",\n \"world\",\n \"modular_assets\",\n ]\n\n for path in self.application_files:\n for module in deprecated_modules:\n module_text = open(path).read()\n found_reference = False\n if \"import %s\" % module in module_text:\n found_reference = True\n if \"from %s\" % module in module_text:\n found_reference = True\n\n if found_reference:\n print(\"Found '%s' reference in %s\" % (module, path))\n deprecated_modules_present = True\n\n self.assertFalse(deprecated_modules_present)",
"def test_module_imports(self):\n apps = [\n 'customers',\n 'customers.migrations',\n 'customers.management',\n 'customers.management.commands',\n 'customers.management.commands.load_customers_to_redis',\n 'customers.forms',\n 'customers.admin',\n 'customers.models',\n 'customers.urls',\n 'customers.views',\n ]\n for a in apps:\n self.assertTrue(module_exists(a))",
"def test_qm_project_python_testing_imported():\n assert \"qm_project_python_testing\" in sys.modules",
"def module_exists(module_name):\r\n\r\n try:\r\n __import__(module_name)\r\n except ImportError:\r\n return False\r\n else:\r\n return True",
"def test_create_module_invalid(self):\n payload = {'name': ''}\n res = self.client.post(MODULES_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)",
"def test_taskmod_no_taskfile(modpath):\n sys.meta_path.append(TaskImporter())\n task = import_module(modpath)\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == []"
] | [
"0.76044667",
"0.7416591",
"0.7399717",
"0.72337514",
"0.72061455",
"0.7133136",
"0.7049381",
"0.7046301",
"0.69785535",
"0.6971145",
"0.69622946",
"0.6960073",
"0.6942464",
"0.69311404",
"0.68618923",
"0.68603796",
"0.6849098",
"0.6828724",
"0.6819381",
"0.6799914",
"0.6779855",
"0.676375",
"0.6755521",
"0.66932726",
"0.6684907",
"0.6639443",
"0.65951866",
"0.6593653",
"0.65508777",
"0.6538248"
] | 0.8172532 | 0 |
Test getting models config. | def test_load_model_config(self) -> None:
result = load_model_config()
self.assertIs(type(result), dict)
self.assertIsNot(result, {}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_coupledmodels_get(self):\n pass",
"def test_get_model(self) -> None:\n get_model()",
"def testGetModelsData(self):\n models = models_logic._getModelsData()\n self.assertTrue(models)",
"def get_model_config(self, model_num=0):\n return [], resources.get_file(\n \"config/tests/methods/unsupervised/train_test.gin\")",
"def setup_models(self):\n pass",
"def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())",
"def test_get_models(self):\n models = get_models()\n self.assertTrue(len(models) > 6)\n self.assertIn((\"csvimport.Item\", \"csvimport.Item\"), models)",
"def test_settingmodel_init():\n SettingsModel()",
"def test_listModel(self):\n\t\tmodelOptions = {}\n\t\twith open(os.path.join(os.path.dirname(cancerscope.get_models.__file__), 'resources/scope_files.txt'), 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif line.strip()!= '':\n\t\t\t\t\tmodelname, url, expectedFile, expectedmd5 = line.strip().split('\\t')\n\t\t\t\t\tmodelOptions[modelname] = (url, expectedFile, expectedmd5)\n\t\n\t\tassert len(modelOptions.keys()) == 5",
"def test_test_config(self):\r\n\r\n app = create_app('movieapp.settings.TestConfig')\r\n\r\n assert app.config['DEBUG'] is True\r\n assert app.config['SQLALCHEMY_ECHO'] is True\r\n assert app.config['CACHE_TYPE'] == 'null'",
"def testGetConfig():\n configs = GetConfig()\n # print(configs.host_ip)\n # print(configs.proxy_local)\n \n # print(configs.proxy_online)\n # print(configs.user_img_url)\n # print(configs.user_login_url)\n print(configs.user_start_id)\n\n # assert isinstance(configs.proxy_getter_functions, list)\n # print(configs.proxy_getter_functions)",
"def test_get_configs():\n\n configs = application_services.get_configs()\n\n assert isinstance(configs, dict)\n assert len(configs) > 0",
"def test_config(app):\n assert app.testing",
"def setUpConfig(self):\n pass",
"def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing",
"def test_config():\n\n # assert create_app().testing\n assert create_app(\"testing\", settings={\n \"TESTING\": True,\n \"SQLALCHEMY_TRACK_MODIFICATIONS\": False\n }).testing",
"def fake_model():\n app.config['FAKE_MODEL'] = True",
"def test_get(self):\n self.assertEqual(self.config.get('basic','greeting'),'hello')",
"def test_all_configs_values():\n\n app_configs = application_services.get_configs()\n\n assert app_configs['TITLE'] == 'pyrin_unit_tests'\n assert app_configs['ENCODING'] == 'utf-8'\n assert app_configs['FLASK_LOG_LEVEL'] == 'DEBUG'\n assert app_configs['SERVER_NAME'] is None\n assert app_configs['SERVER_HOST'] == '127.0.0.1'\n assert app_configs['SERVER_PORT'] == 5001\n assert app_configs['ENV'] == 'testing'\n assert app_configs['DEBUG'] is False\n assert app_configs['TESTING'] is True\n assert app_configs['UNIT_TESTING'] is True",
"def test_get_mt_settings(self):\n pass",
"def test_models(self) -> None:\n full_name = \"owenstranathan/kupy-test\"\n self.assertEqual(full_name, self.project.full_name)\n self.assertEqual(sha1(full_name), self.project.id)\n self.assertEqual('12345', self.project.secrets['SECRET_TOKEN'])\n self.assertIn(self.build, list(self.project.builds))\n self.assertIn(self.deploy, list(self.project.deploys))\n\n build_id = sha1(f\"{self.project.id}/{self.commit_id}\")\n self.assertEqual(self.build.id, build_id)\n self.assertEqual(self.build.branch, \"develop\")\n self.assertEqual(self.build.commit_id, self.commit_id)\n self.assertEqual(self.build.project, self.project)\n self.assertIn(self.deploy, list(self.build.deploys))\n\n deploy_id = sha1(f\"{self.project.id}/{self.build.id}\")\n self.assertEqual(deploy_id, self.deploy.id)\n self.assertEqual(self.project, self.deploy.project)\n self.assertEqual(self.build, self.deploy.build)",
"def test_config():\n assert not create_app().testing\n assert create_app(TestConfig).testing",
"def test_get_model_names():\n\n names = Instafilter.get_models()\n assert isinstance(names, list)\n assert len(names) > 1",
"def test_config_class():\n assert config is not None",
"def test_getJsonConfig(self) -> None:\n jsonConf = getJsonConfig()\n self.assertTrue(\"idSrvDiscoUrl\" in jsonConf)\n self.assertTrue(\"accessTokenFetchAudience\" in jsonConf)",
"def test_model_section_config(tardis_config_verysimple):\n conf = Configuration.from_config_dict(\n tardis_config_verysimple, validate=True, config_dirname=\"test\"\n )\n\n assert conf.model.structure.density.type == \"branch85_w7\"\n\n tardis_config_verysimple[\"model\"][\"structure\"][\"velocity\"][\n \"start\"\n ] = Quantity(\"2.0e4 km/s\")\n tardis_config_verysimple[\"model\"][\"structure\"][\"velocity\"][\n \"stop\"\n ] = Quantity(\"1.1e4 km/s\")\n\n with pytest.raises(ValueError):\n conf = Configuration.from_config_dict(\n tardis_config_verysimple, validate=True, config_dirname=\"test\"\n )",
"def test_get_model_list():\n with app.test_client() as c:\n response = c.get('/REST/api/v1.0/model_list') \n assert response.status_code == 201",
"def test_config(self):\n self.assertIs(self.view.model, TempLanguage)\n self.assertEqual(self.view.template_name, \"resources/templanguage_list.html\")",
"def test_model_found(arguments):\n ...",
"def test_list_models():\n model_names = find_model_files()\n listed_model_names = list_available_nagl_models()\n assert listed_model_names == model_names"
] | [
"0.7344181",
"0.6897746",
"0.6867049",
"0.65969276",
"0.6582802",
"0.655109",
"0.6541584",
"0.65313196",
"0.6443056",
"0.6425103",
"0.642088",
"0.63794756",
"0.6372846",
"0.62939525",
"0.6282946",
"0.6266822",
"0.6264599",
"0.62148494",
"0.61819917",
"0.6163742",
"0.6146844",
"0.6141398",
"0.61266816",
"0.61202556",
"0.61192644",
"0.61135066",
"0.6095669",
"0.6093659",
"0.60917556",
"0.6086014"
] | 0.71640885 | 1 |
Test getting dataloaders config. | def test_load_dataloader_config(self) -> None:
result = load_dataloader_config()
self.assertIs(type(result), list)
self.assertIsNot(result, []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_dataloader(self) -> DataLoader:\n return self._custom_data_loader()",
"def test_dataloader(self) -> torch.utils.data.DataLoader: \n return torch.utils.data.DataLoader(self.dataset_test, **self.dl_kwargs)",
"def test_dataloader(self) -> data.DataLoader:\n return data.DataLoader(dataset=self.datasets['test'], batch_size=self.batch_size,\n num_workers=self.num_workers, shuffle=False, pin_memory=False)",
"def test_get_data_loader():\n\n # single paired data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, PairedDataLoader)\n\n config = load_yaml(\"config/test/paired_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, PairedDataLoader)\n\n # single unpaired data loader\n config = load_yaml(\"config/test/unpaired_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n config = load_yaml(\"config/test/unpaired_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n # single grouped data loader\n config = load_yaml(\"config/test/grouped_nifti.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, GroupedDataLoader)\n\n config = load_yaml(\"config/test/grouped_h5.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, GroupedDataLoader)\n\n # empty data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = \"\"\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert got is None\n\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = None\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert got is None\n\n # unpaired data loader with multiple dirs\n config = load_yaml(\"config/test/unpaired_nifti_multi_dirs.yaml\")\n got = load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert isinstance(got, UnpairedDataLoader)\n\n # check not a directory error\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] += \".h5\"\n with pytest.raises(ValueError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert \"is not a directory or does not exist\" in str(err_info.value)\n\n # check directory not existed error\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n config[\"dataset\"][\"dir\"][\"train\"] = \"/this_should_not_existed\"\n with pytest.raises(ValueError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"train\")\n assert \"is not a directory or does not exist\" in str(err_info.value)\n\n # check mode\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n with pytest.raises(AssertionError) as err_info:\n load.get_data_loader(data_config=config[\"dataset\"], mode=\"example\")\n assert \"mode must be one of train/valid/test\" in str(err_info.value)",
"def testGetConfig():\n configs = GetConfig()\n # print(configs.host_ip)\n # print(configs.proxy_local)\n \n # print(configs.proxy_online)\n # print(configs.user_img_url)\n # print(configs.user_login_url)\n print(configs.user_start_id)\n\n # assert isinstance(configs.proxy_getter_functions, list)\n # print(configs.proxy_getter_functions)",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def test_dataloader(self) -> DataLoader:\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=False,\n num_workers=multiprocessing.cpu_count(),\n )",
"def create_test_dataloader(configs):\n\n test_dataset = KittiDataset(configs, mode='test', lidar_aug=None, hflip_prob=0., num_samples=configs.num_samples)\n test_sampler = None\n if configs.distributed:\n test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset)\n test_dataloader = DataLoader(test_dataset, batch_size=configs.batch_size, shuffle=False,\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=test_sampler)\n\n return test_dataloader",
"def testLoadConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n self.assertEqual(\n os.path.basename(loader.taskHolders()[0].var('contextConfig')),\n 'config.hjson'\n )",
"def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader",
"def get_dataloaders(args):\n if args.dataset == 'heat':\n dataset_class = heat.HeatDiffusionDataset\n else:\n raise ValueError(f'Unknown dataset {args.dataset}')\n train_dataset = dataset_class(\n dataset_class.get_train_path(args.data_path), args, train=True)\n if args.dist:\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_dataset)\n else:\n train_sampler = torch.utils.data.RandomSampler(train_dataset)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=args.batch_size, num_workers=args.workers,\n sampler=train_sampler, pin_memory=True, drop_last=args.drop_last)\n if not args.no_eval:\n validation_dataset = dataset_class(\n dataset_class.get_validation_path(args.data_path), args, train=False)\n if args.dist:\n validation_sampler = torch.utils.data.distributed.DistributedSampler(\n validation_dataset, shuffle=False)\n else:\n validation_sampler = torch.utils.data.SequentialSampler(\n validation_dataset)\n validation_loader = torch.utils.data.DataLoader(\n validation_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=validation_sampler,\n pin_memory=True, drop_last=args.drop_last)\n\n test_dataset = dataset_class(\n dataset_class.get_test_path(args.data_path), args, train=False)\n if args.dist:\n test_sampler = torch.utils.data.distributed.DistributedSampler(\n test_dataset, shuffle=False)\n else:\n test_sampler = torch.utils.data.SequentialSampler(\n test_dataset)\n test_loader = torch.utils.data.DataLoader(\n test_dataset, batch_size=args.batch_size,\n num_workers=args.workers, sampler=test_sampler,\n pin_memory=True, drop_last=args.drop_last)\n else:\n validation_loader = None\n test_loader = None\n\n # Update the data shape if needed.\n if args.data_shape is None:\n args.data_shape = train_dataset.get_shape()\n if args.data_target_shape is None:\n args.data_target_shape = train_dataset.get_target_shape()\n\n return train_loader, validation_loader, test_loader",
"def testLoadConfigs(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(config_path, lab_config.IsYaml))\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('cluster1'))\n self.assertIsNotNone(pool.GetHostConfigs('cluster2'))",
"def test_basic():\n client = TestClient()\n client.run(\"config get\")\n assert \"default_profile = default\" in client.out\n assert \"path = ./data\" in client.out",
"def test_get_learners(self):\n pass",
"def test_get_single_data_loader():\n common_args = dict(\n file_loader=NiftiFileLoader, labeled=True, sample_label=\"sample\", seed=0\n )\n\n # single paired data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n got = load.get_single_data_loader(\n data_type=config[\"dataset\"][\"type\"],\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert isinstance(got, PairedDataLoader)\n\n # single unpaired data loader\n config = load_yaml(\"config/test/unpaired_nifti.yaml\")\n got = load.get_single_data_loader(\n data_type=config[\"dataset\"][\"type\"],\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert isinstance(got, UnpairedDataLoader)\n\n # single grouped data loader\n config = load_yaml(\"config/test/grouped_nifti.yaml\")\n got = load.get_single_data_loader(\n data_type=config[\"dataset\"][\"type\"],\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert isinstance(got, GroupedDataLoader)\n\n # not supported data loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n with pytest.raises(ValueError) as err_info:\n load.get_single_data_loader(\n data_type=\"NotSupported\",\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert \"Unknown data format\" in str(err_info.value)\n\n # wrong keys for paired loader\n config = load_yaml(\"config/test/paired_nifti.yaml\")\n # delete correct keys and add wrong one\n config[\"dataset\"].pop(\"moving_image_shape\", None)\n config[\"dataset\"].pop(\"fixed_image_shape\", None)\n with pytest.raises(ValueError) as err_info:\n load.get_single_data_loader(\n data_type=\"paired\",\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert (\n \"Paired Loader requires 'moving_image_shape' \"\n \"and 'fixed_image_shape'\" in str(err_info.value)\n )\n\n # wrong keys for unpaired loader\n config = load_yaml(\"config/test/unpaired_nifti.yaml\")\n # delete correct keys and add wrong one\n config[\"dataset\"].pop(\"image_shape\", None)\n with pytest.raises(ValueError) as err_info:\n load.get_single_data_loader(\n data_type=\"unpaired\",\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert \"Unpaired Loader requires 'image_shape'\" in str(err_info.value)\n\n # wrong keys for grouped loader\n config = load_yaml(\"config/test/unpaired_nifti.yaml\")\n # delete correct keys and add wrong one\n config[\"dataset\"].pop(\"intra_group_prob\", None)\n with pytest.raises(ValueError) as err_info:\n load.get_single_data_loader(\n data_type=\"grouped\",\n data_config=config[\"dataset\"],\n common_args=common_args,\n data_dir_paths=[config[\"dataset\"][\"dir\"][\"train\"]],\n )\n assert \"Grouped Loader requires 'image_shape'\" in str(err_info.value)",
"def get_dataloaders(logging, batch_size):\n # Load Data\n logging.info(\"Reading Train and Test data...\")\n train_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-tr.txt\", header=None)\n test_df = pd.read_csv(\"C:/tmp/avila_classification/data/avila-ts.txt\", header=None)\n\n # Fix column names\n col_names = ['col_' + str(j + 1) for j in range(train_df.shape[1] - 1)]\n indep_cols = col_names.copy()\n col_names.append('y')\n\n logging.debug(\"Assigning columns\")\n train_df.columns = col_names\n test_df.columns = col_names\n\n # Encode dependent variable column\n le = LabelEncoder()\n le.fit(train_df['y'])\n logging.debug(f\"Classes: {le.classes_}\")\n logging.debug(f\"Transformed Classes: {le.transform(le.classes_)}\")\n\n train_df['y_enc'] = le.transform(train_df['y'])\n test_df['y_enc'] = le.transform(test_df['y'])\n\n # train_df.head()\n logging.debug(f\"Shape of train data: {train_df.shape}\")\n logging.debug(f\"Shape of test data: {test_df.shape}\")\n\n # Create train and validation dataloaders\n train_ds = AvilaDataset(data_frame=train_df, indep_cols=indep_cols, dep_col='y_enc')\n valid_ds = AvilaDataset(data_frame=test_df, indep_cols=indep_cols, dep_col='y_enc')\n\n # Should be some exponent of 2 (128, 256)\n # batch_size = 256\n train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n valid_dl = DataLoader(valid_ds, batch_size=batch_size, shuffle=False)\n\n return train_dl, valid_dl, le",
"def __init__(self, config_path):\n cfg = Config.fromfile(config_path)\n self.cfg = cfg\n\n # Now make the dataloader\n self.dataset = build_dataset(cfg.data.test)\n\n self.loader = build_dataloader(\n self.dataset,\n imgs_per_gpu=1,\n workers_per_gpu=0,\n dist=False,\n shuffle=False\n )",
"def test_config_is_loaded(config):\n assert config[\"DEBUG\"] is False",
"def get_online_dataloaders(params):\n # Pick environment\n env = EnvFactory.get_environment(**params[\"environment\"])\n\n # Train\n trainDS = EnvironmentSampler(\n environment=env,\n dataset_len=params[\"dataset\"][\"num_train_samples\"],\n number_of_frames=params[\"dataset\"][\"rollout\"][\"seq_length\"],\n delta_time=params[\"dataset\"][\"rollout\"][\"delta_time\"],\n number_of_rollouts=params[\"optimization\"][\"batch_size\"],\n img_size=params[\"dataset\"][\"img_size\"],\n color=params[\"dataset\"][\"rollout\"][\"n_channels\"] == 3,\n radius_bound=params[\"dataset\"][\"radius_bound\"],\n noise_level=params[\"dataset\"][\"rollout\"][\"noise_level\"],\n seed=None)\n train_data_loader = torch.utils.data.DataLoader(trainDS,\n shuffle=False,\n batch_size=None)\n # Test\n testDS = EnvironmentSampler(\n environment=env,\n dataset_len=params[\"dataset\"][\"num_test_samples\"],\n number_of_frames=params[\"dataset\"][\"rollout\"][\"seq_length\"],\n delta_time=params[\"dataset\"][\"rollout\"][\"delta_time\"],\n number_of_rollouts=params[\"optimization\"][\"batch_size\"],\n img_size=params[\"dataset\"][\"img_size\"],\n color=params[\"dataset\"][\"rollout\"][\"n_channels\"] == 3,\n radius_bound=params[\"dataset\"][\"radius_bound\"],\n noise_level=params[\"dataset\"][\"rollout\"][\"noise_level\"],\n seed=None)\n test_data_loader = torch.utils.data.DataLoader(testDS,\n shuffle=False,\n batch_size=None)\n return train_data_loader, test_data_loader",
"def get_loader(config):\n train_transform = [T.Resize((256, 128)), T.RandomHorizontalFlip(), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n train_transform = T.Compose(train_transform)\n\n test_transform = [T.Resize((256, 128)), T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]\n test_transform = T.Compose(test_transform)\n\n # Datasets.\n if config.source_dataset in ['duke'] and config.target_dataset in ['market']:\n source_image_dir = config.duke_image_dir\n target_image_dir = config.market_image_dir\n elif config.source_dataset in ['market'] and config.target_dataset in ['duke']:\n source_image_dir = config.market_image_dir\n target_image_dir = config.duke_image_dir\n else:\n assert 'Dataset not support!'\n source_set = ReidDataset(source_image_dir, train_transform)\n target_set = ReidDataset(target_image_dir, train_transform, config.expanding_cam)\n test_set = ReidDataset(source_image_dir, test_transform)\n\n # Dataloader.\n source_loader = data.DataLoader(dataset=source_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n target_loader = data.DataLoader(dataset=target_set, batch_size=config.batch_size,\n num_workers=config.num_workers, shuffle=True, pin_memory=True, drop_last=True)\n\n test_loader = data.DataLoader(dataset=test_set, batch_size=config.batch_size, num_workers=config.num_workers,\n shuffle=False, pin_memory=True, drop_last=False)\n\n return {'source_loader': source_loader, 'target_loader': target_loader, 'test_loader': test_loader}",
"def testLoadConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('postsubmit'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball-power'))",
"def get_offline_dataloaders(params):\n # Train\n trainDS = EnvironmentLoader(params[\"dataset\"][\"train_data\"])\n train_data_loader = torch.utils.data.DataLoader(\n trainDS, shuffle=True, batch_size=params[\"optimization\"][\"batch_size\"])\n\n # Test\n test_DS = EnvironmentLoader(params[\"dataset\"][\"test_data\"])\n test_data_loader = torch.utils.data.DataLoader(\n test_DS, shuffle=True, batch_size=params[\"optimization\"][\"batch_size\"])\n\n return train_data_loader, test_data_loader",
"def test_dataloader(self, batch_size: Optional[int] = None) -> DataLoader:\n if self.test is not None:\n dataset = TabularDataset(\n task=self.config.task,\n data=self.test,\n categorical_cols=self.config.categorical_cols,\n continuous_cols=self.config.continuous_cols,\n embed_categorical=(not self.do_leave_one_out_encoder()),\n target=self.target,\n )\n return DataLoader(\n dataset,\n batch_size if batch_size is not None else self.batch_size,\n shuffle=False,\n num_workers=self.config.num_workers,\n pin_memory=self.config.pin_memory,\n )",
"def _video_test_loader_from_config(cfg, dataset_name, mapper=None):\n dataset = get_video_detection_dataset_dicts(\n [dataset_name],\n filter_empty=False,\n proposal_files=[\n cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]\n ]\n if cfg.MODEL.LOAD_PROPOSALS\n else None,\n )\n if mapper is None:\n mapper = VideoDatasetMapper(cfg, False)\n return {\n \"dataset\": dataset,\n \"mapper\": mapper,\n \"num_workers\": cfg.DATALOADER.NUM_WORKERS,\n \"first_frame_indices\": MetadataCatalog.get(dataset_name).first_frame_indices,\n # \"sampler_pair_offset\": cfg.DATALOADER.SAMPLER_PAIR_OFFSET_TEST,\n }",
"def create_train_dataloader(configs):\n train_lidar_aug = OneOf([\n Random_Rotation(limit_angle=np.pi / 4, p=1.0),\n Random_Scaling(scaling_range=(0.95, 1.05), p=1.0),\n ], p=0.66)\n train_dataset = KittiDataset(configs, mode='train', lidar_aug=train_lidar_aug, hflip_prob=configs.hflip_prob,\n num_samples=configs.num_samples)\n train_sampler = None\n if configs.distributed:\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, batch_size=configs.batch_size, shuffle=(train_sampler is None),\n pin_memory=configs.pin_memory, num_workers=configs.num_workers, sampler=train_sampler)\n\n return train_dataloader, train_sampler",
"def test_get_config(self):\r\n config = self.profile.get_config('testing.conf', TestConfig, storage_args=['this_section'])\r\n self.assertIsInstance(config, TestConfig)\r\n self.assertIsNone(config.save())",
"def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = {\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders",
"def test_error_on_dataloader_passed_to_fit(tmpdir):\n\n # only train passed to fit\n model = EvalModelTemplate()\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n limit_val_batches=0.1,\n limit_train_batches=0.2,\n auto_scale_batch_size='power',\n )\n fit_options = dict(train_dataloader=model.dataloader(train=True))\n\n with pytest.raises(MisconfigurationException):\n trainer.tune(model, **fit_options)",
"def test_loader_metric():\n clb = pt_clb.LoaderMetrics(TEST_METRIC)\n runner = Runner(model=TEST_MODEL, optimizer=TEST_OPTIMIZER, criterion=TEST_CRITERION, callbacks=clb)\n runner.fit(TEST_LOADER, epochs=2)\n assert clb.target[0].grad_fn is None\n assert clb.output[0].grad_fn is None\n assert clb.target[0].device == torch.device(\"cpu\")\n assert clb.output[0].device == torch.device(\"cpu\")",
"def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape"
] | [
"0.6893552",
"0.6832417",
"0.6727525",
"0.67140794",
"0.6579673",
"0.6515249",
"0.642833",
"0.63334775",
"0.6258725",
"0.6231027",
"0.6213907",
"0.6207928",
"0.6139557",
"0.60178506",
"0.5996533",
"0.599519",
"0.5989954",
"0.59818614",
"0.59683573",
"0.59597766",
"0.5954509",
"0.59106696",
"0.5889543",
"0.5862169",
"0.5853096",
"0.58508426",
"0.5849653",
"0.58436763",
"0.58112586",
"0.58100706"
] | 0.7478321 | 0 |
Test getting transforms config. | def test_load_transforms_config(self) -> None:
result = load_transforms_config()
self.assertIs(type(result), list)
self.assertIsNot(result, []) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_load_full_transform(self):\n self.add_transform(cond_artist=True, cond_album=True, cond_title=True,\n cond_ensemble=True, cond_composer=True, cond_conductor=True,\n change_artist=True, change_album=True, change_title=True,\n change_ensemble=True, change_composer=True, change_conductor=True,\n pattern_artist='Artist', pattern_album='Album', pattern_title='Title',\n pattern_ensemble='Ensemble', pattern_composer='Composer', pattern_conductor='Conductor',\n to_artist='Artist 2', to_album='Album 2', to_title='Title 2',\n to_ensemble='Ensemble 2', to_composer='Composer 2', to_conductor='Conductor 2')\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)\n transform = self.app.transforms.transforms[1]\n self.assertEqual(transform.cond_artist, True)\n self.assertEqual(transform.cond_album, True)\n self.assertEqual(transform.cond_title, True)\n self.assertEqual(transform.cond_ensemble, True)\n self.assertEqual(transform.cond_composer, True)\n self.assertEqual(transform.cond_conductor, True)\n self.assertEqual(transform.change_artist, True)\n self.assertEqual(transform.change_album, True)\n self.assertEqual(transform.change_title, True)\n self.assertEqual(transform.change_ensemble, True)\n self.assertEqual(transform.change_composer, True)\n self.assertEqual(transform.change_conductor, True)\n self.assertEqual(transform.pattern_artist, 'Artist')\n self.assertEqual(transform.pattern_album, 'Album')\n self.assertEqual(transform.pattern_title, 'Title')\n self.assertEqual(transform.pattern_ensemble, 'Ensemble')\n self.assertEqual(transform.pattern_composer, 'Composer')\n self.assertEqual(transform.pattern_conductor, 'Conductor')\n self.assertEqual(transform.to_artist, 'Artist 2')\n self.assertEqual(transform.to_album, 'Album 2')\n self.assertEqual(transform.to_title, 'Title 2')\n self.assertEqual(transform.to_ensemble, 'Ensemble 2')\n self.assertEqual(transform.to_composer, 'Composer 2')\n self.assertEqual(transform.to_conductor, 'Conductor 2')",
"def test_transforms(self):\n\n rank_zero_deprecation(\n \"DataModule property `test_transforms` was deprecated in v1.5 and will be removed in v1.7.\"\n )\n return self._test_transforms",
"def test_config(self):\n\n # Get simple config file from UnitTestUtils.py\n cfg = OCIO.Config().CreateFromStream(SIMPLE_CONFIG)\n\n # Test ColorSpace class object getters from config\n cs = cfg.getColorSpace('vd8')\n self.assertEqual(cs.getName(), 'vd8')\n self.assertEqual(cs.getDescription(), 'how many transforms can we use?')\n self.assertEqual(cs.getFamily(), 'vd8')\n self.assertEqual(cs.getAllocation(), OCIO.ALLOCATION_UNIFORM)\n self.assertEqual(cs.getAllocationVars(), [])\n self.assertEqual(cs.getEqualityGroup(), '')\n self.assertEqual(cs.getBitDepth(), OCIO.BIT_DEPTH_UINT8)\n self.assertFalse(cs.isData())\n\n to_ref = cs.getTransform(OCIO.COLORSPACE_DIR_TO_REFERENCE)\n self.assertIsInstance(to_ref, OCIO.GroupTransform)\n self.assertEqual(len(to_ref), 3)",
"def config_transform(self):\r\n std_transform_list = []\r\n if self.split == 'train' or self.split == 'val':\r\n # To tensor and normalize\r\n std_transform_list += [\r\n transforms.ToTensorVideo(),\r\n ]\r\n # Add color aug\r\n if self.cfg.AUGMENTATION.COLOR_AUG:\r\n std_transform_list.append(\r\n ColorJitter(\r\n brightness=self.cfg.AUGMENTATION.BRIGHTNESS,\r\n contrast=self.cfg.AUGMENTATION.CONTRAST,\r\n saturation=self.cfg.AUGMENTATION.SATURATION,\r\n hue=self.cfg.AUGMENTATION.HUE,\r\n grayscale=self.cfg.AUGMENTATION.GRAYSCALE,\r\n consistent=self.cfg.AUGMENTATION.CONSISTENT,\r\n shuffle=self.cfg.AUGMENTATION.SHUFFLE,\r\n gray_first=self.cfg.AUGMENTATION.GRAY_FIRST,\r\n ),\r\n )\r\n std_transform_list += [\r\n transforms.NormalizeVideo(\r\n mean=self.cfg.DATA.MEAN,\r\n std=self.cfg.DATA.STD,\r\n inplace=True\r\n ),\r\n transforms.RandomHorizontalFlipVideo(),\r\n ]\r\n self.transform = Compose(std_transform_list)\r\n elif self.split == 'test':\r\n std_transform_list += [\r\n transforms.ToTensorVideo(),\r\n transforms.NormalizeVideo(\r\n mean=self.cfg.DATA.MEAN,\r\n std=self.cfg.DATA.STD,\r\n inplace=True\r\n )\r\n ]\r\n self.transform = Compose(std_transform_list)",
"def test_load_empty_transform(self):\n self.add_transform()\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)\n transform = self.app.transforms.transforms[1]\n self.assertEqual(transform.cond_artist, False)\n self.assertEqual(transform.cond_album, False)\n self.assertEqual(transform.cond_title, False)\n self.assertEqual(transform.cond_ensemble, False)\n self.assertEqual(transform.cond_conductor, False)\n self.assertEqual(transform.cond_composer, False)\n self.assertEqual(transform.change_artist, False)\n self.assertEqual(transform.change_album, False)\n self.assertEqual(transform.change_title, False)\n self.assertEqual(transform.change_ensemble, False)\n self.assertEqual(transform.change_conductor, False)\n self.assertEqual(transform.change_composer, False)\n self.assertEqual(transform.pattern_artist, '')\n self.assertEqual(transform.pattern_album, '')\n self.assertEqual(transform.pattern_title, '')\n self.assertEqual(transform.pattern_ensemble, '')\n self.assertEqual(transform.pattern_conductor, '')\n self.assertEqual(transform.pattern_composer, '')\n self.assertEqual(transform.to_artist, '')\n self.assertEqual(transform.to_album, '')\n self.assertEqual(transform.to_title, '')\n self.assertEqual(transform.to_ensemble, '')\n self.assertEqual(transform.to_conductor, '')\n self.assertEqual(transform.to_composer, '')",
"def testGetConfig():\n configs = GetConfig()\n # print(configs.host_ip)\n # print(configs.proxy_local)\n \n # print(configs.proxy_online)\n # print(configs.user_img_url)\n # print(configs.user_login_url)\n print(configs.user_start_id)\n\n # assert isinstance(configs.proxy_getter_functions, list)\n # print(configs.proxy_getter_functions)",
"def test_get_machine_translate_settings_for_project_template(self):\n pass",
"def test_get_mt_settings(self):\n pass",
"def test_init(self):\n assert self.route.route[\"transform\"] == \"transform\"\n assert self.route.route[\"output\"] == \"output\"\n assert \"api\" not in self.route.route",
"def test_load_one_transform(self):\n self.add_transform(cond_artist=True, pattern_artist='Foo',\n change_artist=True, to_artist='Bar')\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 1)",
"def test_get_config_th(self):\n self.assertTrue(settings.TH_TRELLO)\n self.assertIn('consumer_key', settings.TH_TRELLO)\n self.assertIn('consumer_secret', settings.TH_TRELLO)",
"def test_get_is_callable():\n assert callable(config.get)",
"def test_read_env_config4(config, environment_vars_set):\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"yesss\"",
"def build_transforms(transforms_config: List[Dict[str, Any]]) -> Callable:\n transform_list = [build_transform(config) for config in transforms_config]\n return transforms.Compose(transform_list)",
"def test_composed_transforms(config, dummy_input):\n cfg = config\n transforms = compose(cfg.dataset.transforms)\n\n # H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n _image, _label = transforms(image, label, dtypes=[torch.float, torch.long], elastic_deformation_orders=[3, 0])\n assert _image.dtype == torch.float\n assert _image.size() == (256, 256, image.shape[2])\n assert _label.dtype == torch.long\n assert _label.size() == (256, 256, label.shape[2])\n\n # Test feeding only image\n _image = transforms(image, dtypes=[torch.float])\n assert _image.dtype == torch.float\n assert _image.size() == (256, 256, image.shape[2])",
"def transform(self, cfg_pipeline):\n return",
"def test_config_is_loaded(config):\n assert config[\"DEBUG\"] is False",
"def get_transforms(self):\n return self.transforms",
"def test_transform(self):\n new_route = self.route.transform(\"transformed\")\n assert new_route != self.route\n assert new_route.route[\"transform\"] == \"transformed\"",
"def test_read_env_config2(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"wowww\"",
"def test_get_tosca_template(self):\n pass",
"def __init__(self, transforms):\n self.transforms = transforms",
"def ParseTransformerConfigFile(ref, args, req):\n del ref\n messages = apis.GetMessagesModule('mediaasset', 'v1alpha')\n message_class = messages.Transformer\n if args.create_transformer_configs_file:\n transformer_configs = json.loads(args.create_transformer_configs_file)\n transformer = encoding.DictToMessage(transformer_configs, message_class)\n utils.ValidateMediaAssetMessage(transformer)\n req.transformer = transformer\n if args.IsKnownAndSpecified('labels'):\n req.transformer.labels = encoding.DictToMessage(\n args.labels, messages.Transformer.LabelsValue)\n return req",
"def test_read_env_config3(config, environment_vars_set):\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS()\n assert sms._provider == \"goood\"",
"def test_load_zero_transforms(self):\n self.app.load_data()\n self.assertEqual(len(self.app.transforms), 0)",
"def test_perspective_transform():\n # TODO: write this\n assert(True)",
"def test_transform():\n args = get_layer('transform', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)",
"def test_read_namespaced_build_config(self):\n pass",
"def testGetConfigLambda(self):\n self.ports.getconfig_lambda(file_name = 'get_lambda.xml', port_ids = portsDict['port_ids'], lambdas = portsDict['lambda'])",
"def test_get(self):\n self.assertEqual(self.config.get('basic','greeting'),'hello')"
] | [
"0.6457925",
"0.6132974",
"0.6045049",
"0.591944",
"0.5891703",
"0.5852024",
"0.581294",
"0.57519954",
"0.5733683",
"0.5728028",
"0.56447434",
"0.56063056",
"0.5597566",
"0.55973953",
"0.5580112",
"0.55531615",
"0.5521915",
"0.54951215",
"0.54724187",
"0.5457946",
"0.54475075",
"0.54148096",
"0.5400221",
"0.53971714",
"0.5392666",
"0.539031",
"0.5368037",
"0.53472906",
"0.5346098",
"0.53323954"
] | 0.77776486 | 0 |
Assert predefined config path. | def _assert_predefined_config_path(
self,
framework: str,
domain: str,
domain_flavour: str,
expected_filename: str,
) -> None:
result = get_predefined_config_path(framework, domain, domain_flavour)
expected = os.path.join(
os.path.abspath(
os.path.dirname(
inspect.getfile(get_predefined_config_path),
),
),
"configs",
"predefined_configs",
f"{framework}",
expected_filename,
)
self.assertEqual(result, expected)
self.assertEqual(os.path.isfile(result), True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_default_config_file_paths(\n config,\n):\n assert \"~/.config/yessssms.conf\" in CONFIG_FILE_PATHS\n assert \"/etc/yessssms.conf\" in CONFIG_FILE_PATHS",
"def test_expected_config(expectedconfig):\n expected = expectedconfig.read_text()\n config = CONFIGSDIR / expectedconfig.name\n\n assert dumpconfig(config) == expected",
"def test_config_imports_file(config_file_location):\n\n assert Config(str(config_file_location)).file_location == \"E:\\\\A\\\\iMaterialistFiles\\\\\"",
"def test_config_file():\n relevant_path = 'config/config.yaml'\n abs_path = os.path.realpath(relevant_path)\n\n # Check if file exists.\n assert os.path.exists(abs_path)\n # Check if file is empty.\n assert os.stat(abs_path).st_size",
"def test_config():\n if not os.path.exists(CONFIG_DIR):\n raise mupub.BadConfiguration('Configuration folder not found.')\n if not os.path.exists(_CONFIG_FNM):\n raise mupub.BadConfiguration('Configuration file not found.')\n if not os.path.exists(getDBPath()):\n raise mupub.BadConfiguration('Local database not found.')\n if len(CONFIG_DICT) == 0:\n raise mupub.BadConfiguration('Configuration was not loaded.')",
"def test_get_predefined_config_path_domain_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"object_detection\",\n )",
"def test_loads_a_config_file(self):\n from test.resources import config\n self.assertIsInstance(config, type(sys))\n self.assertIsNotNone(config.example)\n self.assertEqual(config.example.config_option, 'config-value')",
"def test_get_config_default_value(configs):\n assert get_config('SOURCE_FOLDER') == configs['SOURCE_FOLDER']",
"def test_pkgutil(self):\n print(utilities.CONFIG_FILE)\n assert utilities.get_config('ROBINHOOD', 'oauth_endpoint')",
"def test_global_config_file_creation():\n GlobalConfig()\n\n # Raw config should now exist\n assert fileio.file_exists(os.path.join(APP_DIR, GLOBAL_CONFIG_NAME))",
"def test_get_predefined_onnx_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"onnxrt\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_config(self):\n if self.config.get('base_url')[-1] is '/':\n raise exceptions.ScidashClientWrongConfigException('Remove last '\n 'slash '\n 'from base_url')",
"def test_path_override(self):\n path_example = os.path.join(here, 'path-example.ini')\n manifest = ManifestParser(manifests=(path_example,))\n self.assertEqual(manifest.tests[0]['path'],\n os.path.join(here, 'fleem'))",
"def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )",
"def test_load_configs_testing(self):\n global locator, config_paths\n locator.load_config(config_paths[0])\n\n self.assertEqual(locator.config['routines'], ['debug'])\n self.assertEqual(locator.config['driver'],\n {\n 'type': 'TestDriver',\n 'kwargs': {\n 'verbose': False\n }\n })",
"def test_get_predefined_tf_object_detection_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"object_detection\",\n domain_flavour=\"\",\n expected_filename=\"object_detection.yaml\",\n )",
"def test_config_is_loaded(config):\n assert config[\"DEBUG\"] is False",
"def test_config(app):\n assert app.testing",
"def test_config_overwrites():\n basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\", '..'))\n temppath = '/tmp/'\n\n conf = core.Config(datapath=temppath)\n\n assert conf.basepath.lower() == basepath.lower()\n assert conf.datapath.lower() == temppath.lower()",
"def test_path_settings_safety(self):\r\n settings = {'STATIC_PATHS': 'foo/bar',\r\n 'THEME_STATIC_PATHS': 'bar/baz',\r\n # These 4 settings are required to run configure_settings\r\n 'PATH': '.',\r\n 'THEME': DEFAULT_THEME,\r\n 'SITEURL': 'http://blog.notmyidea.org/',\r\n 'LOCALE': '',\r\n }\r\n configure_settings(settings)\r\n self.assertEqual(settings['STATIC_PATHS'],\r\n DEFAULT_CONFIG['STATIC_PATHS'])\r\n self.assertEqual(settings['THEME_STATIC_PATHS'],\r\n DEFAULT_CONFIG['THEME_STATIC_PATHS'])",
"def test_load(yaml_config_file):\n config = Config()\n config.load(PATH_FILE_CONFIG)\n assert config.backup_root_directory == yaml_config_file.backup\n assert config.docker_compose_wordpress_project_directory == yaml_config_file.docker_compose_wordpress_project",
"def test_missing_paths():\n with pytest.raises(InputError):\n make_config([])",
"def assert_plugin_path(config: Config) -> Config:\n if (\n os.path.isfile(config.path) and\n config.path.endswith(\".py\")\n ):\n return config\n\n raise Exception(\n \"{} is not a valid python file\".format(config.path)\n )",
"def _check(self, config: Dict):\n if 'path' not in config:\n raise FileNotFoundError(\"File not found.\")",
"def test_volume_service_config_path(self):\n self.assertEqual(\n ChangeStateScript()._deployer._volume_service._config_path,\n DEFAULT_CONFIG_PATH)",
"def test_get_predefined_tf_nlp_config_path(self) -> None:\n self._assert_predefined_config_path(\n framework=\"tensorflow\",\n domain=\"nlp\",\n domain_flavour=\"\",\n expected_filename=\"nlp.yaml\",\n )",
"def test_environment_path_subdir_leadingslash(self):\n self.assertRaises(\n RuntimeError,\n self.secrets_env.environment_path,\n subdir=\"/keys\"\n )",
"def test_config_must_exist(cls, values):\n configs = [c.config for c in values.get('configs')]\n for test in values.get('tests'):\n if test.config not in configs:\n raise ValueError(\n f\"Test '{test.test}' gave the config '{test.config}', but \"\n \"this config does not exist in the file \"\n f\"'{values.get('yaml')}'. Configs detected : {configs} \\n\")\n return values",
"def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG",
"def _expected_config(self) -> Dict[str, Optional[str]]:\n return EXPECTED_CONFIG"
] | [
"0.72531724",
"0.6966318",
"0.687894",
"0.68690413",
"0.68319833",
"0.6828153",
"0.68121266",
"0.68025386",
"0.6744204",
"0.66994476",
"0.66757023",
"0.66216826",
"0.65864456",
"0.6554743",
"0.6550599",
"0.6525278",
"0.64727885",
"0.64717287",
"0.6456509",
"0.6455779",
"0.6452166",
"0.6438719",
"0.64349395",
"0.6397896",
"0.6376077",
"0.63393545",
"0.63388425",
"0.63354015",
"0.6324315",
"0.6324315"
] | 0.78012013 | 0 |
Check if development env is activated. | def test_is_development_env(self) -> None:
os.environ.update({"NC_MODE": "development"})
is_develop = is_development_env()
self.assertTrue(is_develop) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_development():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')",
"def get_site_env(self):\n return self.config['SITE_ENVIRONMENT'] == 'DEV'",
"def _global_development_mode() -> bool:\n return (\n not env_util.is_pex()\n and \"site-packages\" not in __file__\n and \"dist-packages\" not in __file__\n and \"__pypackages__\" not in __file__\n )",
"def test_app_is_development(self):\n self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')\n self.assertTrue(app.config['DEBUG'])\n self.assertFalse(current_app is None)",
"def test_app_is_development(self):\n\n self.assertFalse(self.app.config['SECRET_KEY'] is 'my_precious')\n self.assertTrue(self.app.config['DEBUG'] is True)\n self.assertFalse(current_app is None)",
"def is_development_mode(registry):\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False",
"def test_is_production_env(self) -> None:\n os.environ.update({\"NC_MODE\": \"production\"})\n is_develop = is_development_env()\n self.assertFalse(is_develop)",
"def is_activated(self):\n\n return 'VIRTUAL_ENV' in os.environ and \\\n os.environ['VIRTUAL_ENV'] == self.path",
"def is_developer_mode() -> bool:\n return False if os.getenv(\"SQLTASK_DEVELOPER_MODE\") is None else True",
"def is_debugging() -> bool:\n if os.getenv(\"DEBUGGING\") == \"1\":\n return True\n return False",
"def is_devel(self):\r\n\r\n return self.is_debug()",
"def prod(environment):\n return environment == 'live' or environment == 'debug' or environment == 'prod'",
"def is_production_environment(self):\n return self.get_current_environment() == Environment.PRODUCTION",
"def is_env_active():\n\n if sys.prefix == sys.base_prefix:\n print(\"Virtual environment is not active, exiting...\\n\")\n sys.exit(1)\n\n print(\"Virtual environment is active, proceeding...\\n\")",
"def is_debug_environment():\n return find_loader('cli') is None",
"def in_runtime(self):\n\n return self.is_valid_platform() and self['ENVIRONMENT']",
"def test_is_local_dev(self):\n\n expected = True\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)",
"def development():\n env.branch = 'development'",
"def is_production_mode(self):\n return getattr(self.env, 'mode', None) == 'production'",
"def is_dev(self):\n\n return self.dev",
"def debug():\n return bool(_environ.get(\"ACCELPY_DEBUG\", False))",
"def load_dev():\n from django.conf import settings as st\n\n return getattr(st, 'AUTOMATED_LOGGING_DEV', False)",
"def _should_profile_development_default():\n return True",
"def is_debugger_active() -> bool:\n global _is_debugger_active\n return _is_debugger_active",
"def test_is_not_local_dev(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)",
"def test_app_is_production(self):\n self.assertFalse(app.config['DEBUG'])\n self.assertFalse(app.config['TESTING'])",
"def on_appengine():\n runtime = os.environ.get('SERVER_SOFTWARE', '')\n return (runtime.startswith('Development/') or\n runtime.startswith('Google App Engine/'))",
"def on_file(self) -> bool:\n\n return (\n self.env_var_helper.set_name(\"PYFUNCEBLE_DEBUG\").exists()\n or self.env_var_helper.set_name(\"DEBUG_PYFUNCEBLE\").exists()\n )",
"def in_build(self):\n\n return self.is_valid_platform() and not self['ENVIRONMENT']",
"def is_staging(version=None):\n return is_host_google() and not is_default_version(version)"
] | [
"0.8337604",
"0.7692659",
"0.7504923",
"0.74659204",
"0.7465296",
"0.7377704",
"0.7323249",
"0.72394073",
"0.72125643",
"0.71630734",
"0.7148706",
"0.70545775",
"0.70386916",
"0.7021576",
"0.69120103",
"0.68301886",
"0.68271375",
"0.6818852",
"0.67625374",
"0.6734069",
"0.6722818",
"0.66559803",
"0.6644303",
"0.66393965",
"0.6632056",
"0.65792704",
"0.6538745",
"0.6533906",
"0.649458",
"0.64942825"
] | 0.81877184 | 1 |
Test release tag building. | def test_release_tag(self) -> None:
self.assertEqual("v3.14.15", release_tag()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_release_deployment_run(self):\n pass",
"def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())",
"def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"",
"def release(context):\n print(f\"Starting a release of v{IMAGE_VER} on GitHub!\")\n run_cmd(context, exec_cmd=\"git checkout main\", pty=False, error_message=\"Failed to checkout main!\")\n\n run_cmd(context, exec_cmd=\"git pull origin main\", pty=False, error_message=\"Failed to pull from origin/main\")\n\n run_cmd(\n context, exec_cmd=f\"git tag v{IMAGE_VER}\", pty=False, error_message=f\"Failed to create the tag 'v{IMAGE_VER}'!\"\n )\n\n run_cmd(context, exec_cmd=\"git push --tags\", pty=False, error_message=f\"Failed to push the tag 'v{IMAGE_VER}'!\")",
"def tag_release():\n # We're assuming that setup.py has already been updated\n # manually or using scripts/release/bump-version so the\n # current version in setup.py is the version number we should tag.\n version_number = get_current_version_number()\n click.echo(\"Tagging %s release\" % version_number)\n subprocess.check_call(\n ['git', 'tag', '-a', version_number,\n '-m', 'Tagging %s release' % version_number],\n )",
"def test_create_release(self):\n releases_before = self.hello_world_project.get_releases()\n latest_release = releases_before[0].tag_name\n count_before = len(releases_before)\n increased_release = \".\".join(\n [\n latest_release.rsplit(\".\", 1)[0],\n str(int(latest_release.rsplit(\".\", 1)[1]) + 1),\n ]\n )\n release = self.hello_world_project.create_release(\n tag=increased_release, name=\"test\", message=\"testing release\"\n )\n count_after = len(self.hello_world_project.get_releases())\n assert release.tag_name == increased_release\n assert release.title == \"test\"\n assert release.body == \"testing release\"\n assert count_before + 1 == count_after",
"def test(tag_name: str):\n\n image_full_name = f\"{GITLAB_IMAGE_URL}:{tag_name}-test\"\n _build(tag_name=tag_name, image_full_name=image_full_name)\n _test(image_full_name=image_full_name)",
"def _doReleaseBuild(self, farbconfig):\n print \"Building all releases ...\"\n try:\n rbr = runner.ReleaseBuildRunner(farbconfig)\n rbr.run()\n print \"Release build completed.\"\n except runner.ReleaseBuildRunnerError, e:\n print >>sys.stderr, e\n sys.exit(1)",
"def test_release_tag_for_empty(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version \"):\n release_tag()",
"def create_release(config, args):\n yield config.repo.create_release(args.tag_name, name=args.name,\n target_commitish=args.get(\"target_commitish\"), body=args.get(\"body\"),\n draft=args.get_bool(\"draft\"), prerelease=args.get_bool(\"prerelease\"))",
"def test_release(self):\n runCommand(\n [\"git\", \"checkout\", \"-b\", \"release-16.11111-9001\"], cwd=self.repo.path\n )\n\n somefile = self.repo.child(\"somefile\")\n somefile.setContent(b\"change\")\n\n runCommand([\"git\", \"add\", somefile.path, somefile.path], cwd=self.repo.path)\n runCommand([\"git\", \"commit\", \"-m\", \"some file\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(logs[-1], \"Release branch with no newsfragments, all good.\")",
"def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)",
"def make_release():\n parser = OptionParser()\n parser.add_option(\"-d\", \"--destination\", action=\"store\", type=\"string\", \n dest=\"destdir\",\n help=\"directory where distributions and docs will be placed\")\n parser.add_option(\"-v\", \"--version\", action=\"store\", type=\"string\", \n dest=\"version\",\n help=\"version string applied to all openmdao distributions\")\n parser.add_option(\"-m\", action=\"store\", type=\"string\", dest=\"comment\",\n help=\"optional comment for version tag\")\n parser.add_option(\"-b\", \"--basebranch\", action=\"store\", type=\"string\", \n dest=\"base\", default='master', \n help=\"base branch for release. defaults to master\")\n parser.add_option(\"-t\", \"--test\", action=\"store_true\", dest=\"test\",\n help=\"used for testing. A release branch will not be created\")\n parser.add_option(\"-n\", \"--nodocbuild\", action=\"store_true\", \n dest=\"nodocbuild\",\n help=\"used for testing. The docs will not be rebuilt if they already exist\")\n parser.add_option(\"--host\", action='append', dest='hosts', metavar='HOST',\n default=[],\n help=\"host from config file to build bdist_eggs on. \"\n \"Multiple --host args are allowed.\")\n parser.add_option(\"-c\", \"--config\", action='store', dest='cfg', \n metavar='CONFIG', default='~/.openmdao/testhosts.cfg',\n help=\"path of config file where info for hosts is located\")\n (options, args) = parser.parse_args(sys.argv[1:])\n \n if not options.version or not options.destdir:\n parser.print_help()\n sys.exit(-1)\n \n _check_version(options.version)\n\n options.cfg = os.path.expanduser(options.cfg)\n \n config = ConfigParser.ConfigParser()\n config.readfp(open(options.cfg))\n \n haswin = False\n for host in options.hosts:\n if host == 'localhost':\n if sys.platform.startswith('win'):\n haswin = True\n elif config.has_section(host):\n platform = config.get(host, 'platform')\n if platform == 'windows':\n haswin = True\n if not haswin:\n print \"no windows host was specified, so can't build binary eggs for windows\"\n sys.exit(-1)\n \n orig_branch = get_git_branch()\n if not orig_branch:\n print \"You must run mkrelease from within a git repository. aborting\"\n sys.exit(-1)\n\n if not options.test:\n if orig_branch != options.base:\n print \"Your current branch '%s', is not the specified base branch '%s'\" % (orig_branch, options.base)\n sys.exit(-1)\n \n if _has_checkouts():\n print \"There are uncommitted changes. You must run mkrelease.py from a clean branch\"\n sys.exit(-1)\n \n if orig_branch == 'master':\n print \"pulling master\"\n os.system(\"git pull origin master\")\n if _has_checkouts():\n print \"something went wrong during pull. aborting\"\n sys.exit(-1)\n else:\n print \"WARNING: base branch is not 'master' so it has not been\"\n print \"automatically brought up-to-date.\"\n answer = raw_input(\"Proceed? 
(Y/N) \")\n if answer.lower() not in [\"y\", \"yes\"]:\n sys.exit(-1)\n \n relbranch = \"release_%s\" % options.version\n if relbranch in get_git_branches():\n print \"release branch %s already exists in this repo\" % relbranch\n sys.exit(-1)\n\n print \"creating release branch '%s' from base branch '%s'\" % (relbranch, orig_branch)\n check_call(['git', 'branch', relbranch])\n print \"checking out branch '%s'\" % relbranch\n check_call(['git', 'checkout', relbranch])\n \n destdir = os.path.abspath(options.destdir)\n if not os.path.exists(destdir):\n os.makedirs(destdir)\n\n startdir = os.getcwd()\n topdir = repo_top()\n \n cfgpath = os.path.expanduser(options.cfg)\n \n try:\n _update_releaseinfo_files(options.version)\n \n # build the docs\n docdir = os.path.join(topdir, 'docs')\n idxpath = os.path.join(docdir, '_build', 'html', 'index.html')\n \n if not os.path.isfile(idxpath) or not options.nodocbuild:\n build_docs(argv=['-v', options.version])\n shutil.copytree(os.path.join(topdir,'docs','_build', 'html'), \n os.path.join(destdir,'docs'))\n\n if not options.test:\n # commit the changes to the release branch\n print \"committing all changes to branch '%s'\" % relbranch\n check_call(['git', 'commit', '-a', '-m', \n '\"updating releaseinfo files for release %s\"' % \n options.version])\n\n # build openmdao package distributions\n proj_dirs = []\n for project_name, pdir, pkgtype in openmdao_packages:\n pdir = os.path.join(topdir, pdir, project_name)\n if 'src' in os.listdir(pdir):\n os.chdir(os.path.join(pdir, 'src'))\n else:\n os.chdir(pdir)\n print 'building %s' % project_name\n _build_sdist(pdir, destdir, options.version)\n if pkgtype == 'bdist_egg':\n proj_dirs.append(pdir)\n \n os.chdir(startdir)\n _build_bdist_eggs(proj_dirs, destdir, options.hosts, cfgpath)\n \n print 'creating bootstrapping installer script go-openmdao.py'\n installer = os.path.join(os.path.dirname(__file__),\n 'mkinstaller.py')\n \n check_call([sys.executable, installer, '--dest=%s'%destdir])\n\n if options.comment:\n comment = options.comment\n else:\n comment = 'creating release %s' % options.version\n \n if options.test:\n _rollback_releaseinfo_files()\n else:\n # tag the current revision with the release version id\n print \"tagging release with '%s'\" % options.version\n check_call(['git', 'tag', '-f', '-a', options.version, '-m', comment])\n \n check_call(['git', 'checkout', orig_branch])\n print \"\\n*REMEMBER* to push '%s' up to the master branch if this release is official\" % relbranch\n \n print \"new release files have been placed in %s\" % destdir\n \n finally:\n os.chdir(startdir)",
"def test_release_version_found(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n set_version_from_git_tag(self.project, self.logger)\n self.assertEqual(self.logger.info.call_count, 2)\n self.assertEqual(self.project.version, '1.2.3')",
"def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)",
"def test_quick_build(self):\n pass",
"def test_quick_build1(self):\n pass",
"def cmake_release(session):\n _cmake(session, BUILD_TYPE_RELEASE)",
"def test_os_release(self):\n self.assertEqual(self.settings.OS_RELEASE, platform.release())",
"def create_tag(path, name, version, notes, test=False):\n\n tag_name = \"{}-{}\".format(name, version)\n tag_contents = \"Release %s for %s\\n\\n%s\" % (version, name, notes)\n\n if test:\n tag_name = \"test@\" + tag_name\n tag_contents = \"Test \" + tag_contents\n\n print(\"Creating annotated release tag: %s\" % tag_name)\n run_in_component(path, ['git', 'tag', '-a', '-F', '-', tag_name], stdin=tag_contents)",
"def release(c, dry_run=False):\n tox_args = \"--skip-pkg-install -e py37\" if not dry_run else \"\"\n c.run(f\"tox {tox_args}\")\n dry = \"--dry-run\" if dry_run else \"\"\n c.run(f\"bump2version {dry} --verbose patch\")\n\n if not dry_run:\n c.run(\"git push --tags\")",
"def generateReleaseRunBB(self, job):\n pass",
"def main(argv=sys.argv):\n prog = os.path.basename(argv[0])\n args = parse_command_line(argv)\n\n # Configure logging\n logging.basicConfig(\n level=min(args.logging_level, logging.INFO),\n filename='bv_tag_release.log',\n filemode='a',\n )\n root_logger = logging.getLogger()\n formatter = logging.Formatter(f'{prog}: %(levelname)s: %(message)s')\n try:\n from logutils.colorize import ColorizingStreamHandler\n handler = ColorizingStreamHandler(sys.stderr)\n handler.setFormatter(formatter)\n handler.setLevel(args.logging_level)\n root_logger.addHandler(handler)\n except ModuleNotFoundError:\n handler = logging.StreamHandler(sys.stderr)\n handler.setFormatter(formatter)\n handler.setLevel(args.logging_level)\n root_logger.addHandler(handler)\n logger.info('You can activate colorful logging by installing '\n 'the \"logutils\" Python package (e.g. with '\n 'python3 -m pip install logutils)')\n\n return bv_tag_release(\n args.source_root,\n args.version_to_tag,\n distro=args.distro,\n dry_run=args.dry_run,\n fix_source_version_numbers=args.fix_source_version_numbers,\n ) or 0",
"def build_binary(release_message, release_tag, source):\n subprocess.call([\n 'bash',\n source + '/build_binary.sh',\n release_message,\n release_tag,\n ])",
"def do_release(self, version):\n build_dir = self.options.buildroot\n patch_dir = self.options.patch_dir\n\n # variables related to the version\n prev_version = version.prev_version\n\n # If we're operating in the same repo as this script, kindly make it\n # in a subdirectory to avoid polluting things\n if build_dir == os.path.dirname(os.path.abspath(__file__)):\n build_dir = os.path.join(build_dir, 'build')\n\n if not os.path.exists(build_dir):\n logging.debug('Creating build dir: %s', build_dir)\n os.mkdir(build_dir)\n\n os.chdir(build_dir)\n\n package = 'mediawiki-' + version.raw\n package_dir = os.path.join(build_dir, package)\n\n # Export the target. If we're going to patch later, use the branch\n if patch_dir:\n get_git(package_dir, version.branch)\n else:\n get_git(package_dir, version.tag)\n\n if patch_dir:\n maybe_apply_patches(\n package,\n get_patches_for_repo(patch_dir, 'core', version.branch))\n maybe_apply_patches(\n os.path.join(package, 'vendor'),\n get_patches_for_repo(patch_dir, 'vendor', version.branch))\n\n ext_exclude = []\n for ext in get_skins_and_extensions(package_dir):\n if patch_dir:\n maybe_apply_patches(\n os.path.join(package, ext),\n get_patches_for_repo(patch_dir, ext, version.branch))\n ext_exclude.append(\"--exclude\")\n ext_exclude.append(ext)\n\n # Generate the .tar.gz files\n out_files = [\n self.make_tar(\n package=package,\n input_dir=package,\n build_dir=build_dir),\n self.make_tar(\n package='mediawiki-core-' + version.raw,\n input_dir=package,\n build_dir=build_dir,\n add_args=ext_exclude)\n ]\n\n # Patch\n if not self.options.no_previous and prev_version is not None:\n prev_dir = 'mediawiki-' + prev_version\n get_git(os.path.join(build_dir, prev_dir),\n MwVersion(prev_version).tag)\n\n self.make_patch(\n build_dir, package + '.patch.gz', prev_dir, package, 'normal')\n out_files.append(package + '.patch.gz')\n logging.debug('%s.patch.gz written', package)\n if os.path.exists(os.path.join(package, 'languages', 'messages')):\n i18n_patch = 'mediawiki-i18n-' + version.raw + '.patch.gz'\n if (self.make_patch(\n build_dir, i18n_patch, prev_dir, package, 'i18n')):\n out_files.append(i18n_patch)\n logging.info('%s written', i18n_patch)\n else:\n i18n_patch = None\n\n # Sign\n for file_name in out_files:\n if self.options.sign:\n try:\n proc = subprocess.Popen([\n 'gpg', '--detach-sign',\n os.path.join(build_dir, file_name)])\n except OSError as ose:\n logging.error(\"gpg failed, does it exist? Skip with \" +\n \"--dont-sign.\")\n logging.error(\"Error %s: %s\", ose.errno, ose.strerror)\n sys.exit(1)\n if proc.wait() != 0:\n logging.error(\"gpg failed, exiting\")\n sys.exit(1)\n output(version, out_files)\n return 0",
"def test_release_simple(client_mock):\n store = Store()\n store.release('testname', 123, ['somechannel'])\n\n expected_body = [{'revision': 123, 'channel': 'somechannel'}]\n assert client_mock.mock_calls == [\n call.post('/v1/charm/testname/releases', expected_body),\n ]",
"def release_command(project_path=None, noop=None):\n\n if not sys.version_info.major == 3:\n noop or abort(colors.bold(\n 'Releases are only compatible with both Python2 and Python3 if done via Python3. Aborting since this is Python2.'\n ))\n\n auto_version = version_or_exit(project_path)\n\n if auto_version == '0':\n echo.bold('Tag-Version check failed:', colors.cyan(auto_version))\n abort('It looks like no (initial) version tag(s) exist(s).')\n\n released = '.dev' not in auto_version\n if released:\n echo.bold('Tag-Version check failed:', colors.cyan(auto_version))\n abort('Are you trying to re-release the current version tag?')\n\n dirty = 'dirty' in auto_version\n if dirty:\n echo.bold('Tag-Version check failed:', colors.red(auto_version))\n abort('You have to commit all changes before releasing.')\n\n #XXX: Check more? like branch... might move it to gitflow then\n\n echo.bold('Tag-Version check passed:', colors.green(auto_version))\n echo.bold('Bumping version... ', nl=False)\n\n if noop: return\n\n bump_result = run_command(join(project_path, 'setup.py bump'))\n if bump_result.returncode:\n echo.red(bump_result.stdout)\n echo.bold(colors.red(bump_result.stderr))\n sys.exit(bump_result.returncode)\n\n auto_version = version_or_exit(project_path)\n echo.bold('version is now:', colors.green(auto_version))\n\n tag = noop or bump_result.stdout.split('\\n')[-2].split()[-1]\n message = colors.bold('Do the release? (tag: %s)' % tag)\n if noop or click.confirm(message):\n do_publish(tag)\n else:\n noop or rollback(tag)",
"def tag_build(buildinfo, tag, session):\n nvr = '%(name)s-%(version)s-%(release)s' % buildinfo\n log.info('tagging %s into %s' % (nvr, tag))\n task_id = session.tagBuild(tag, nvr)\n task_result = watch_tasks(session, [task_id], poll_interval=15)\n if task_result != 0:\n raise RuntimeError('failed to tag builds')",
"def releaser_middle(data):\n\n import os\n import sys\n\n from zest.releaser.git import Git\n from zest.releaser.release import Releaser\n\n # Copied verbatim from zest.releaser, but with the cmd string modified to\n # use the -s option to create a signed tag\n def _my_create_tag(self, version):\n msg = \"Tagging %s\" % (version,)\n cmd = 'git tag -s %s -m \"%s\"' % (version, msg)\n if os.path.isdir('.git/svn'):\n print_(\"\\nEXPERIMENTAL support for git-svn tagging!\\n\")\n cur_branch = open('.git/HEAD').read().strip().split('/')[-1]\n print_(\"You are on branch %s.\" % (cur_branch,))\n if cur_branch != 'master':\n print_(\"Only the master branch is supported for git-svn \"\n \"tagging.\")\n print_(\"Please tag yourself.\")\n print_(\"'git tag' needs to list tag named %s.\" % (version,))\n sys.exit()\n cmd = [cmd]\n local_head = open('.git/refs/heads/master').read()\n trunk = open('.git/refs/remotes/trunk').read()\n if local_head != trunk:\n print_(\"Your local master diverges from trunk.\\n\")\n # dcommit before local tagging\n cmd.insert(0, 'git svn dcommit')\n # create tag in svn\n cmd.append('git svn tag -m \"%s\" %s' % (msg, version))\n return cmd\n\n # Similarly copied from zer.releaser to support use of 'v' in front\n # of the version number\n def _my_make_tag(self):\n from zest.releaser import utils\n from os import system\n\n if self.data['tag_already_exists']:\n return\n cmds = self.vcs.cmd_create_tag(self.data['version'])\n if not isinstance(cmds, list):\n cmds = [cmds]\n if len(cmds) == 1:\n print_(\"Tag needed to proceed, you can use the following command:\")\n for cmd in cmds:\n print_(cmd)\n if utils.ask(\"Run this command\"):\n print_(system(cmd))\n else:\n # all commands are needed in order to proceed normally\n print_(\"Please create a tag for %s yourself and rerun.\" % \\\n (self.data['version'],))\n sys.exit()\n if not self.vcs.tag_exists('v' + self.data['version']):\n print_(\"\\nFailed to create tag %s!\" % (self.data['version'],))\n sys.exit()\n\n # Normally all this does is to return '--formats=zip', which is currently\n # hard-coded as an option to always add to the sdist command; they ought to\n # make this actually optional\n def _my_sdist_options(self):\n return ''\n\n Git.cmd_create_tag = _my_create_tag\n Releaser._make_tag = _my_make_tag\n Releaser._sdist_options = _my_sdist_options",
"def test_build(self):\r\n self.mkbundle('file1', 'file2', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\""
] | [
"0.7284068",
"0.70147276",
"0.6728345",
"0.6709383",
"0.66933805",
"0.66797566",
"0.6608459",
"0.64857477",
"0.64158696",
"0.6389684",
"0.6376367",
"0.6313048",
"0.6291171",
"0.6285542",
"0.6277399",
"0.627431",
"0.6255137",
"0.6237298",
"0.62290806",
"0.6162094",
"0.6159651",
"0.6149859",
"0.61243474",
"0.6116308",
"0.6115068",
"0.61104447",
"0.60831505",
"0.60167074",
"0.59887034",
"0.59585816"
] | 0.7737768 | 0 |
Turn URL into PIL image. Can throw a timeout error. | def url2img(url : str, timeout = 1) -> Image:
response = requests.get(url, timeout = timeout)
return Image.open(BytesIO(response.content)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def urlToImage(url):\n\n response = requests.get(url)\n image = Image.open(BytesIO(response.content))\n return image",
"def _url_to_image(url: str) -> Image.Image:\n assert url.lower().startswith(\"http\"), \"invalid url, must start with http\"\n content = requests.get(url).content\n image = Image.open(BytesIO(content))\n return image",
"def download_pil_image(self, url):\r\n return Image.open(urlopen(url))",
"def joblib_read_img_url(url):\n\n from matplotlib.image import imread\n fd = urlopen(url, timeout=10)\n return imread(io.BytesIO(fd.read()))",
"def download_image(url):\n buffer = BytesIO()\n download_from_url(url, buffer, pbar=False)\n buffer.seek(0)\n return Image.open(buffer)",
"def get_image_by_url(url):\n retry_count = 0\n while True:\n try:\n req_headers = {\"User-Agent\": DEFAULT_REQUEST_UA}\n r = requests.get(\n url, headers=req_headers, stream=True, timeout=DEFAULT_REQUEST_TIMEOUT\n )\n image_data = r.content\n if isinstance(image_data, bytes):\n image_data = BytesIO(image_data)\n else:\n image_data = StringIO(image_data)\n\n im = Image.open(image_data)\n return im\n except Timeout as e:\n if retry_count <= DEFAULT_REQUEST_RETRY:\n continue\n else:\n raise e\n except Exception as e:\n logging.exception(e)\n raise RequestException(e)",
"def getImage(url):\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img",
"def read_image(url):\n f = urllib2.urlopen(url)\n img = StringIO(f.read())\n return Image.open(img)",
"def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img",
"def fetch_image(url: str) -> Image.Image:\n r = httpx.get(url)\n if not r.status_code == httpx.codes.OK:\n raise HTTPException(r.status_code, detail=r.reason_phrase)\n f = BytesIO(r.content)\n im = handle_image_file(f)\n return im",
"def get_image(self, url):\n\n log(\"Getting image {}\".format(url))\n response = requests.get(url)\n if response.status_code == 200:\n image = self._pilimg.open(io.BytesIO(response.content))\n return image.convert('RGBA')\n return None",
"def load(url):\n response = requests.get(url)\n pil_image = Image.open(BytesIO(response.content)).convert(\"RGB\")\n # convert to BGR format\n image = np.array(pil_image)[:, :, [2, 1, 0]]\n return image",
"def downloadImage(self, url):\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n data = response.read()\n io = cStringIO.StringIO(data)\n return PIL.Image.open(io)",
"def get_img_from_url(index, url):\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass",
"def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image",
"def _import_image_by_url(self, url, session, field, line_number):\n maxsize = int(config.get(\"import_image_maxbytes\", DEFAULT_IMAGE_MAXBYTES))\n try:\n response = session.get(url, timeout=int(config.get(\"import_image_timeout\", DEFAULT_IMAGE_TIMEOUT)))\n response.raise_for_status()\n\n if response.headers.get('Content-Length') and int(response.headers['Content-Length']) > maxsize:\n raise ValueError(_(\"File size exceeds configured maximum (%s bytes)\") % maxsize)\n\n content = bytearray()\n for chunk in response.iter_content(DEFAULT_IMAGE_CHUNK_SIZE):\n content += chunk\n if len(content) > maxsize:\n raise ValueError(_(\"File size exceeds configured maximum (%s bytes)\") % maxsize)\n\n image = Image.open(io.BytesIO(content))\n w, h = image.size\n if w * h > 42e6: # Nokia Lumia 1020 photo resolution\n raise ValueError(\n u\"Image size excessive, imported images must be smaller \"\n u\"than 42 million pixel\")\n\n return base64.b64encode(content)\n except Exception as e:\n raise ValueError(_(\"Could not retrieve URL: %(url)s [%(field_name)s: L%(line_number)d]: %(error)s\") % {\n 'url': url,\n 'field_name': field,\n 'line_number': line_number + 1,\n 'error': e\n })",
"def download(self, url):\n req = self.request(url)\n inputfile, outputfile = BytesIO(urlopen(req).read()), BytesIO()\n\n img = Image.open(inputfile)\n img = img.convert(\"RGB\") if img.mode != \"RGB\" else img\n img.thumbnail((192, 192), Image.ANTIALIAS)\n img.save(outputfile, \"JPEG\")\n\n self.image.save(os.path.basename(\n self._clean_url(url)),\n ContentFile(outputfile.getvalue()),\n save=False,\n )",
"def getResponseFromHttpRequest(url):\n try:\n response = HTTP.Request(url, headers = {'User-agent': USER_AGENT, 'Accept': 'image/jpeg'})\n return response\n except:\n Log.Debug('Error fetching URL: \"%s\".' % url)\n return None",
"def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)",
"async def get_image(session, url):\n async with session.get(url) as resp:\n if resp.status != 200:\n logging.error(f'response status != 200 for image {url}')\n return None\n return await resp.read()",
"def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()",
"def set_image_from_url(self, url: str):\n response = httpx.get(url)\n if response.status_code == 200:\n file = ContentFile(response.content)\n file.name = \"url-\" + shortuuid.uuid()\n self.image = file\n self.save()",
"def get_tile(url):\n hash_name = hashlib.md5(url.encode(\"utf-16\")).hexdigest()\n fname = hash_name + \".jpeg\"\n print(\"Checking tile\" + fname)\n #if image is already downloaded, return it\n if os.path.isfile(fname):\n print(\"Downloaded!\")\n try:\n # image was fully downloaded, good to return\n return Image.open(fname) \n except Exception:\n print(\"Tile is corrupt :(\")\n # file is corrupted for some reason, so try to download it\n pass\n print(\"Downloading \" + fname)\n req.urlretrieve(url, fname) \n return Image.open(fname)",
"def download_image_from(link, directory, name):\n try:\n img_content = requests.get(link).content\n image_file = io.BytesIO(img_content)\n image = Image.open(image_file).convert('RGB')\n image.save(f'./{directory}/{name}.png', 'PNG', quality=100, subsampling=0)\n except:\n pass",
"def download(self):\n data = urllib.urlopen(self.remoteurl).read()\n s = StringIO.StringIO(data)\n return Image.open(s)",
"def requesturl(url):\n r = requests.get(url)\n text = r.text.strip()\n try:\n image = Image.open(io.BytesIO(r.content))\n return {\n 'source_url': url,\n 'url': r.url,\n 'md5': getmd5(image),\n 'img_grey': image_to_byte_array(convertgrey(image)),\n 'height': image.height,\n 'width': image.width,\n 'datetime_created': datetime.datetime.now()\n }\n except:\n if 'Error' in text:\n text = find_between(text)\n\n return {\n 'error': text,\n 'source_url': url,\n 'url': r.url,\n 'datetime_created': datetime.datetime.now()\n }",
"def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img",
"def get_image_from_uri(cache, url_fetcher, options, url, forced_mime_type=None,\n context=None, orientation='from-image'):\n if url in cache:\n return cache[url]\n\n try:\n with fetch(url_fetcher, url) as result:\n parsed_url = urlparse(result.get('redirected_url'))\n if parsed_url.scheme == 'file':\n filename = url2pathname(parsed_url.path)\n else:\n filename = None\n if 'string' in result:\n string = result['string']\n else:\n string = result['file_obj'].read()\n mime_type = forced_mime_type or result['mime_type']\n\n image = None\n svg_exceptions = []\n # Try to rely on given mimetype for SVG\n if mime_type == 'image/svg+xml':\n try:\n tree = ElementTree.fromstring(string)\n image = SVGImage(tree, url, url_fetcher, context)\n except Exception as svg_exception:\n svg_exceptions.append(svg_exception)\n # Try pillow for raster images, or for failing SVG\n if image is None:\n try:\n pillow_image = Image.open(BytesIO(string))\n except Exception as raster_exception:\n if mime_type == 'image/svg+xml':\n # Tried SVGImage then Pillow for a SVG, abort\n raise ImageLoadingError.from_exception(svg_exceptions[0])\n try:\n # Last chance, try SVG\n tree = ElementTree.fromstring(string)\n image = SVGImage(tree, url, url_fetcher, context)\n except Exception:\n # Tried Pillow then SVGImage for a raster, abort\n raise ImageLoadingError.from_exception(raster_exception)\n else:\n # Store image id to enable cache in Stream.add_image\n image_id = md5(url.encode()).hexdigest()\n image = RasterImage(\n pillow_image, image_id, string, filename, cache,\n orientation, options)\n\n except (URLFetchingError, ImageLoadingError) as exception:\n LOGGER.error('Failed to load image at %r: %s', url, exception)\n image = None\n\n cache[url] = image\n return image",
"def input_processing(url):\n try:\n response = requests.get(url)\n img_array = (Image.open(BytesIO(response.content)).convert('L')).resize((400, 400))\n img_array = np.array(img_array)\n except Exception as exception_type:\n print(exception_type)\n empty_img = Image.new('L', (400, 400))\n img_array = empty_img.resize((400, 400))\n img_array = np.array(img_array)\n\n return img_array",
"def get_image(\n url: str\n) -> Union[Dict[str, Union[int, str, BytesIO, None]], None]:\n try:\n logger.info('downloading image: %s', url)\n r = requests.get(url)\n\n if r.status_code == 200:\n\n # loading binary data to mem\n img = BytesIO(r.content)\n\n # loading image to PIL\n pil_img = Image.open(img)\n\n # seek to 0\n img.seek(0)\n\n return {\n 'content-type': r.headers.get('Content-Type'),\n 'image': img,\n 'width': pil_img.width,\n 'height': pil_img.height,\n }\n\n raise Exception('wrong status code %s', r.status_code)\n\n except BaseException as e:\n logger.error('could not download and analyze img: %s', str(e))\n\n return None"
] | [
"0.7684103",
"0.7520165",
"0.74877226",
"0.7116487",
"0.7027549",
"0.7010515",
"0.6980545",
"0.6932684",
"0.68663347",
"0.680157",
"0.6707649",
"0.649833",
"0.64743775",
"0.64194036",
"0.62737274",
"0.62376577",
"0.62025154",
"0.61823267",
"0.61713475",
"0.61658984",
"0.61528593",
"0.6128737",
"0.61211485",
"0.61093485",
"0.6108314",
"0.60999876",
"0.60925263",
"0.60721517",
"0.60599375",
"0.6023975"
] | 0.8368085 | 0 |
Goes through and removes all values in frames_rec and frames_proc that are outside of the window | def clean_window(self) -> None:
prune_before = time.time() - self.window_size
while self.frames_rec:
left = self.frames_rec.popleft()
if left[1] >= prune_before:
self.frames_rec.appendleft(left)
break
self.sum_frames_rec -= left[0]
while self.frames_proc:
left = self.frames_proc.popleft()
if left[1] >= prune_before:
self.frames_proc.appendleft(left)
break
self.sum_frames_proc -= left[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear_restriction_details(self):\n\t\n\t\tif getattr(self,'new_seq_win_objs',None):\n\t\t\tfor obj in self.new_seq_win_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\t\tself.new_seq_win_objs={}\n\t\t\tself.donepos={}\n\t\t\t#\n\t\t\tfor obj in self.temp_objs.keys():\n\t\t\t\tself.seqframe.delete(obj)\n\t\t\tself.temp_objs={}\n\t\t\tself.temp_sites={}\n\t\t\tself.seqframe.delete('labelrect')\n\t\t\tself.seqframe.delete('line')\n\t\t\tself.seqframe.delete('templabelrect')\n\t\t\tself.seqframe.delete('templine')\n\t\t\t#also clear the sites list - this is used in tidying and rendering lines/rects\n\n\t\treturn",
"def popFrameVariables(self):\n del self.frame_variables_stack[-1]\n del self.frame_type_descriptions[-1]",
"def clear_points(self):\n print \"clearing each frame of selected points\"\n self.point_3d = None\n self.allUVs = []\n for iFrame in self.frames:\n iFrame.lastClick = None; \n self.displayImage(iFrame)",
"def clean(self, ref):\n # NOTE: This currently only works on the top-most frame\n f1 = self.frames[0]\n f2 = ref.frames[0]\n f1.subtract(f2)",
"def clean_frames(self):\n for fn in os.listdir(self.frame_directory):\n if fn.endswith(\".png\") and fn in self.frame_fns:\n os.remove(fn)",
"def remove_old_graphs(self):\r\n widgets = self.winfo_children()\r\n graph_frames = []\r\n\r\n for widget in widgets:\r\n if type(widget) == tk.Frame:\r\n graph_frames.append(widget)\r\n\r\n for frame in range(len(graph_frames) - 1):\r\n graph_frames[frame].destroy()",
"def cleanup_data(df, bodypart, thresh=3, movementframes=None):\n if movementframes == None:\n xdata, ydata = np.array(bodypart_array(dataframe_per_bodypart(df, bodypart), pos='x')), \\\n np.array(bodypart_array(dataframe_per_bodypart(df, bodypart), pos='y'))\n else:\n xdata, ydata = np.array(bodypart_array(dataframe_per_bodypart(df, bodypart), pos='x'))[\n movementframes[0]:movementframes[1]], \\\n np.array(bodypart_array(dataframe_per_bodypart(df, bodypart), pos='y'))[\n movementframes[0]:movementframes[1]] # reject values thresh*SIGMA standarddev\n outlier_x = (np.abs(zscore(xdata - running_mean(x=xdata, N=25), nan_policy='omit')) > thresh)\n outlier_y = (np.abs(zscore(ydata - running_mean(x=ydata, N=25), nan_policy='omit')) > thresh)\n outlier_indexes = np.any([outlier_x, outlier_y], axis=0) # OR operator\n xdata[outlier_indexes], ydata[outlier_indexes] = np.nan, np.nan\n return fill_nan(xdata), fill_nan(ydata)",
"def clean_recording_gaps(self, pos_xy: np.ndarray, pos_times: np.ndarray):\n (\n position_gap_inds_above_threshold\n ) = self.check_for_position_gaps_above_threshold(pos_times)\n cleaned_pos_xy = pos_xy[:]\n for ind in position_gap_inds_above_threshold:\n cleaned_pos_xy[ind - 5 : ind + 5] = np.nan\n return (cleaned_pos_xy, position_gap_inds_above_threshold)",
"def _clear_window(self):\n self.buf[:] = []",
"def remove_off_pace_frames(frame_times_ms, output_fps=24):\n # Frame indices to keep\n to_keep = []\n n = len(frame_times_ms)\n\n last_claimed_frame = 0\n prev_frame_delta = output_fps * 1000\n\n for f_idx in range(n):\n frame_target = frame_times_ms[f_idx] * output_fps / 1000.0\n nearest_whole_frame = int(round(frame_target))\n dist_from_whole_frame = math.fabs(frame_target - nearest_whole_frame)\n\n if last_claimed_frame != nearest_whole_frame:\n to_keep.append(f_idx)\n last_claimed_frame = nearest_whole_frame\n elif dist_from_whole_frame < prev_frame_delta:\n # replace the last frame with the current frame\n if not not to_keep: # only remove if list is not empty\n to_keep.pop()\n to_keep.append(f_idx)\n\n prev_frame_delta = dist_from_whole_frame\n return to_keep",
"def deleteDouble(self):\n prev=None\n for x in reversed(self.activeFrames):\n if(prev):\n if(isinstance(x ,prev.__class__)):\n del self.activeFrames[self.activeFrames.index(prev)]\n prev=x",
"def clear_frame(self, table):\n for widget in table.winfo_children():\n widget.destroy()",
"def clearFrame(self, event=None):\n for widget in self.winfo_children():\n widget.destroy()\n del self.tiles[:]",
"def clear_overlays(self):\n while self._axes_overlays:\n overlay = self._axes_overlays.pop()\n overlay.remove()",
"def eye_cleanup(input_frame):\n f=input_frame\n f=f[f['facekeypressed']!='.']\n f=f[f['practice'].astype(int)>2]\n return f",
"def _wipe_currently_displayed_params(self):\n for rep in self.currently_displayed_params.values():\n for w in rep:\n try:\n rep[w].destroy()\n except: # e.g. buttons have None for label ('notNonelabel')\n pass",
"def remove_all(self):\n self.initial = None\n self.contour = None\n self.control_points = []",
"def clear_annotation(self):\n\n self.xValues = []\n self.yValues = []\n self.colors = []\n\n self.stop_video()",
"def remove_noise(self):\n kernel = np.ones((5, 5), np.uint8)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_CLOSE, kernel)\n self.frame = cv.morphologyEx(self.frame, cv.MORPH_OPEN, kernel)",
"def unloadAllFrames(self, event=None):\n for idx, frame in enumerate(self.frameList):\n frame.clearFrame()\n self.frameBtnList[idx].config(state=\"disabled\")",
"def process_non_menu_frame(w_indx: int) -> None:\n for nmi in list(non_menu_frame_widgets.keys()):\n if nmi == w_indx:\n v = non_menu_frame_widgets[nmi]\n for v_wid in v:\n print(c.BRIGHT_WHITE + '· ' + '│ ' * v_wid.get_frame_depth()\n + c.ENDC + widget_terminal_title(v_wid))\n del non_menu_frame_widgets[nmi]",
"def clear_overlays(self):\n while self._annotations:\n self.figure.renderers.remove(self._annotations.pop())\n\n while self._labels:\n self.figure.center.remove(self._labels.pop())",
"def stop_procrastinating(self):\n for layer, l in sorted(self.postponed.items()):\n for fun, args, rgb in l:\n self.set_source_rgb(*rgb)\n fun(*args, procrastinate=0)\n self.postponed = {}",
"def remove_to_destroy(total_buffer,to_destroy):\n totbuf=np.copy(total_buffer)\n for val,begInd,endInd in to_destroy:\n for j in range(endInd-begInd):\n index_beg = begInd+j\n totbuf[ total_buffer[:,:,index_beg]==val,index_beg]=0\n return totbuf",
"def frame_off_lvars(*args):\n return _ida_frame.frame_off_lvars(*args)",
"def removeFluxSurfaces(self):\n if self._fluxOverlayHandles is not None:\n for h in self._fluxOverlayHandles:\n h.remove()\n\n self._fluxOverlayHandles = []\n self.overlayFluxSurfaces = False",
"def check_off_screen(self):\n for bullet in self.bullets:\n if bullet.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\n self.bullets.remove(bullet)\n\n for target in self.targets:\n if target.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\n self.targets.remove(target)\n\n for cloud in self.clouds:\n if cloud.is_off_screen(SCREEN_WIDTH, SCREEN_HEIGHT):\n self.clouds.remove(cloud)",
"def clear_events():\n while len(events) > 0:\n canvas.delete(events.pop())",
"def process_frame(self, downsize):\n # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):\n # downsize = np.array(downsize)\n\n if type(downsize) != np.ndarray:\n raise TypeError\n\n if not downsize.any():\n raise ValueError\n\n if self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.frame_history.append(downsize)\n\n # Remove no longer needed frames from memory\n self.frame_history = self.frame_history[-(self.LMC_rec_depth):]\n downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]\n\n # Center surround antagonism kernel applied.\n\n downsize = cv2.filter2D(downsize, -1, self.CSKernel)\n\n # RTC filter.\n u_pos = deepcopy(downsize)\n u_neg = deepcopy(downsize)\n u_pos[u_pos < 0] = 0\n u_neg[u_neg > 0] = 0\n u_neg = -u_neg\n\n # On first step, instead of computing just save the images.\n if self.t == self.T0:\n self.v_pos_prev = deepcopy(u_pos)\n self.v_neg_prev = deepcopy(u_neg)\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Do everything for pos == ON.\n tau_pos = u_pos - self.u_pos_prev\n tau_pos[tau_pos >= 0] = 0.001\n tau_pos[tau_pos < 0] = 0.1\n mult_pos = self.rtc_exp(self.dt, tau_pos)\n v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev\n self.v_pos_prev = deepcopy(v_pos)\n\n # Do everything for neg == OFF.\n tau_neg = u_neg - self.u_neg_prev\n tau_neg[tau_neg >= 0] = 0.001\n tau_neg[tau_neg < 0] = 0.1\n mult_neg = self.rtc_exp(self.dt, tau_neg)\n v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev\n self.v_neg_prev = deepcopy(v_neg)\n\n # keep track of previous u.\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Subtract v from u to give the output of each channel.\n out_pos = u_pos - v_pos\n out_neg = u_neg - v_neg\n\n # Now apply yet another filter to both parts.\n out_pos = cv2.filter2D(out_pos, -1, self.H_filter)\n out_neg = cv2.filter2D(out_neg, -1, self.H_filter)\n out_pos[out_pos < 0] = 0\n out_neg[out_neg < 0] = 0\n\n if self.t == self.T0:\n self.out_neg_prev = deepcopy(out_neg)\n\n # Delay off channel.\n out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]\n self.out_neg_prev = out_neg\n downsize = out_neg * out_pos\n\n # Show image.\n downsize *= self.gain\n downsize = np.tanh(downsize)\n\n # Threshold.\n downsize[downsize < self.threshold] = 0\n\n if not self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.t += self.dt\n\n return downsize",
"def clear_press(self):\n\n for win in self.window.additional_windows:\n win.del_win()\n\n pos = self.window.physics_canvas.physics_objects\n self.window.physics_canvas.physics_objects = []\n\n for obj in pos:\n self.window.physics_canvas.canvas.delete(obj.canvas_id)\n\n for force in self.window.physics_canvas.interacting_forces:\n force.remove()\n\n for particle in self.window.physics_canvas.particles:\n self.window.physics_canvas.canvas.delete(particle.canvas_id)"
] | [
"0.6719497",
"0.6220488",
"0.6218328",
"0.59831357",
"0.5953131",
"0.58991516",
"0.58938974",
"0.58484584",
"0.58360916",
"0.57718647",
"0.5765755",
"0.5718492",
"0.56914794",
"0.563131",
"0.56236684",
"0.56055874",
"0.55914086",
"0.5591337",
"0.55780524",
"0.5515344",
"0.54631007",
"0.54572755",
"0.54170895",
"0.53953743",
"0.5394278",
"0.53900766",
"0.5369669",
"0.5365013",
"0.53647834",
"0.53630346"
] | 0.7736172 | 0 |
Returns True if we have a full window of information available, False otherwise | def have_window(self) -> bool:
return (
self._first_enc_at is not None
and (time.time() > self._first_enc_at + self.window_size)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exists(self):\n if self.attributes[AT.GARBAGE]:\n return False\n if get_ticks() < self.attributes[AT.TIME_TO_BE_SHOWN]:\n return False\n return True",
"def is_full(self) -> bool:",
"def _can_render_now(self):\n # First check that no update events are pending.\n window = self._window\n if window._transition:\n return 0\n rgn = Qd.NewRgn()\n window._onscreen_wid.GetWindowUpdateRgn(rgn)\n ok = Qd.EmptyRgn(rgn)\n # Next check that we're topmost\n if ok:\n ok = window._is_on_top()\n Qd.DisposeRgn(rgn)\n return ok",
"def is_full_screen(self) -> bool:\n return self._full_screen_windows is not None",
"def is_full(self):\n return False",
"def full(self) -> bool:\n return self.current_offset == self.max_offset",
"def get_win_condition(self, info: Dict[str, Any]) -> bool:\n if not self.possible_to_win:\n return False\n return not info[\"lost_live\"] and info[\"terminal\"]",
"def contains_window(self, xstart, ystart, nx, ny, xbin, ybin):\n if self.number_windows() > 0:\n if int(xstart) >= int(self['x1_start']) and int(xstart)+int(nx) <= int(self['x1_start'])+int(self['x1_size']) and \\\n int(ystart) >= int(self['y1_start']) and int(ystart)+int(ny) <= int(self['y1_start'])+int(self['y1_size']) and \\\n int(xbin) % int(self['x_bin']) == 0 and int(ybin) % int(self['y_bin']) == 0 and \\\n (int(xstart) - int(self['x1_start'])) % int(xbin) == 0 and (int(ystart) - int(self['y1_start'])) % int(ybin) == 0:\n return True\n\n if self.number_windows() > 1:\n if int(xstart) >= int(self['x2_start']) and int(xstart)+int(nx) <= int(self['x2_start'])+int(self['x2_size']) and \\\n int(ystart) >= int(self['y2_start']) and int(ystart)+int(ny) <= int(self['y2_start'])+int(self['y2_size']) and \\\n int(xbin) % int(self['x_bin']) == 0 and int(ybin) % int(self['y_bin']) == 0 and \\\n (int(xstart) - int(self['x2_start'])) % int(xbin) == 0 and (int(ystart) - int(self['y2_start'])) % int(ybin) == 0:\n return True\n\n return False",
"def is_full(self) -> bool:\n pass",
"def is_idle(self) -> bool:",
"def available(self) -> bool:\n return self.thermostat[\"runtime\"][\"connected\"]",
"def is_alive(self):\r\n return self.visible",
"def is_screen_on(self):\n out = self.adb.get_window_policy_info()\n pattern = re.compile('mScreenOnFully=(true|false)')\n return pattern.search(str(out)).group(1)",
"def is_full_frame(self):\n return self['application'] == 'ap3_250_fullframe' or self['application'] == 'ap9_250_fullframe_mindead'",
"def isWin(self):\n\n return self.tiles == self.winCdt",
"def is_opened(self):\n return self.hcam is not None",
"def is_visible(self):\n return self.window.active_panel() == self.full_name",
"def available(self) -> bool:\n return True",
"def available(self) -> bool:\n return True",
"def check_win(self):\n return UNEXPOSED not in self.get_game() and self.get_game().count(FLAG) == len(self.get_pokemon_location)",
"def available(self):\n return (\n self._connector.station_id is not None\n and self._connector.latest_update is not None\n )",
"def is_full(self):\n return set(self._parent.letters()) == set(self.winners())",
"def available(self):\n #return not self.anonymous and len(self._data())\n return True",
"def multipleWindows(self):\n\t\treturn False if (len(self.driver.window_handles) == 1) else True",
"def available(self):\n return True",
"def available(self):\n return True",
"def IsOk(self):\r\n \r\n return self.window != None",
"def full_height_visible(self) -> bool:\n return (\n self.vertical_scroll == 0\n and self.last_visible_line() == self.content_height\n )",
"def available(self):\n\t\t\treturn True",
"def available(self):\n\t\t\treturn True"
] | [
"0.6700973",
"0.66572267",
"0.65650487",
"0.65073746",
"0.644696",
"0.6444967",
"0.6371296",
"0.63529104",
"0.632829",
"0.6322513",
"0.629695",
"0.6292452",
"0.6283709",
"0.6269726",
"0.62444526",
"0.62417144",
"0.6239692",
"0.6218333",
"0.6218333",
"0.62145",
"0.6200358",
"0.6190092",
"0.6181147",
"0.617762",
"0.6176852",
"0.6176852",
"0.61746144",
"0.61667234",
"0.6156046",
"0.6156046"
] | 0.7614796 | 0 |
Spawns the ffmpeg process | def _spawn_ffmpeg(self) -> None:
if self.ffmpeg_proc is not None:
raise RuntimeError('_spawn_ffmpeg called when ffmpeg_proc is '
+ f'{self.ffmpeg_proc} (not None)')
args = ['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo',
'-s', f'{self.frame_size[0]}x{self.frame_size[1]}',
'-pix_fmt', 'rgba', '-r', str(self.fps),
'-loglevel', 'quiet',
'-i', 'pipe:0',
'-vcodec', 'h264', '-pix_fmt', 'yuv420p',
'-movflags', '+faststart']
if self.bitrate > 0:
args.extend(['-b', f'{self.bitrate}k'])
args.extend(['-y', self.outfile])
create_flags = sp.CREATE_NO_WINDOW if 'nt' in os.name else 0
self.ffmpeg_proc = sp.Popen(args, shell=False, stdout=None, stderr=None,
stdin=sp.PIPE, creationflags=create_flags) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self) -> None:\r\n self._spawn_ffmpeg()",
"def ffmpeg(*options):\n\tffmpeg_command = [\"ffmpeg\"] + list(options)\n\tprint(\"Calling FFMPEG:\", \" \".join(ffmpeg_command))\n\n\tprocess = subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"Calling FFmpeg failed with exit code {exit_code}. CERR: {cerr} . COUT: {cout}\".format(exit_code=exit_code, cerr=str(cerr), cout=str(cout)))",
"def async_start_ffmpeg(self):\n raise NotImplementedError()",
"def start_ffmpeg_record(stream, stream_url, formatted_date):\n filename = stream + '_' + formatted_date\n save_video_dir = 'rover_stream/' + stream\n subprocess.Popen(['mkdir rover_stream'], shell=True)\n subprocess.Popen(['mkdir ' + save_video_dir], shell=True)\n proc_video[stream] = subprocess.Popen(['ffmpeg -i ' + stream_url + ' -acodec copy -vcodec copy ' + save_video_dir + '/' + filename + '.mp4'], stdin=PIPE, shell=True)",
"def run(self):\n\n # Start the video stream process\n self._process.start()",
"def start(self, print_ffplay_proc_stderr=False, print_read_proc_stderr=False):\n # Set the image controls\n self.set_controls()\n \n # Create a process to read from the webcam\n # stdin should be pipe so it doesn't suck up keypresses (??)\n # stderr should be null, so pipe doesn't fill up and block\n # stdout will go to downstream process\n if print_read_proc_stderr:\n read_proc_stderr = None\n else:\n read_proc_stderr = open(os.devnull, 'w')\n read_proc_cmd_l = ['ffmpeg',\n '-f', 'video4linux2',\n '-i', self.device,\n '-vcodec', 'libx264',\n '-qp', '0',\n '-vf', 'format=gray',\n '-preset', 'ultrafast',\n '-f', 'rawvideo', '-',\n ] \n self.read_proc = subprocess.Popen(read_proc_cmd_l, stdin=subprocess.PIPE, \n stdout=subprocess.PIPE, stderr=read_proc_stderr)\n \n # Sometimes the read_proc fails because the device is busy or \"Input/ouput error\"\n # but the returncode isn't set or anything so I don't know how to\n # detect this.\n\n # Tee the compressed output to a file\n self.tee_proc = subprocess.Popen(['tee', self.output_filename], \n stdin=self.read_proc.stdout,\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n # Play the output\n if print_ffplay_proc_stderr:\n ffplay_proc_stderr = None\n else:\n ffplay_proc_stderr = open(os.devnull, 'w') \n self.ffplay_proc = subprocess.Popen([\n 'ffplay', \n #~ '-fflags', 'nobuffer', # not compatible with analyzeduration or probesize?\n '-analyzeduration', '500000', # 500 ms delay in starting\n '-window_title', self.window_title,\n '-',\n ], \n stdin=self.tee_proc.stdout,\n stdout=subprocess.PIPE, stderr=ffplay_proc_stderr)\n\n # This is supposed to allow SIGPIPE\n # https://docs.python.org/2/library/subprocess.html#replacing-shell-pipeline\n self.read_proc.stdout.close()\n self.tee_proc.stdout.close()",
"def _have_ffmpeg(self):\n from sage.misc.sage_ostools import have_program\n return have_program('ffmpeg')",
"def spawn(self):\n self._proc = subprocess.Popen(\n self._args, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )",
"def makeVideo():\n os.system(\"cd video && ffmpeg -r 10 -i img%05d.jpg -vcodec mpeg4 -y caronthehill_clip.mp4\")",
"def async_restart_ffmpeg(self):\n yield from self.async_stop_ffmpeg()\n yield from self.async_start_ffmpeg()",
"def Spawn(proc):\n proc.start()\n return proc",
"def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. \")",
"def _cleanup_ffmpeg(self) -> None:\r\n self.ffmpeg_proc.communicate()\r\n self.ffmpeg_proc = None",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)",
"def test_ffmpeg_in_path(self) -> None:\n self.assertIsNotNone(which('ffmpeg'))",
"def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None",
"def start(self):\n if self.running:\n warnings.warn(\"ExifTool already running; doing nothing.\")\n return\n with open(os.devnull, \"w\") as devnull:\n procargs = [self.executable, \"-stay_open\", \"True\", \"-@\", \"-\",\n \"-common_args\", \"-G\", \"-n\"];\n procargs.extend(self.addedargs)\n logging.debug(procargs) \n self._process = subprocess.Popen(\n procargs,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=devnull)\n self.running = True",
"def take_one_shot(path_to_images, name_image, video_source=\"/dev/video0\"):\n subprocess_cmd(\"ffmpeg -f video4linux2 -s 1280x720 -i {} -frames 1 ./{}/{} -loglevel error -nostats\".format(video_source, path_to_images, name_image))",
"def start():\n global running\n # os.system('python3 /Users/bowenwaugh/Documents/GA/GA_Puzzles/simple.py')\n global process\n process = Popen(['python3', '/Users/bowenwaugh/Documents/GA/GA_Puzzles/simple.py'])\n running = True",
"def start_process():\n global command, process\n\n def on_data(data):\n data = data.decode().strip()\n print('{}'.format(data))\n\n cmd = command.split(' ')\n\n if process:\n process.terminate()\n\n process = MySubprocess(cmd, -1, functools.partial(on_data), None, None)",
"def run(self):\n self.process.start()",
"def runmecall():\n os.system('sudo /home/pi/flask-video-streaming-v1.5/RunMe.sh')",
"def start_streamer(params) -> None:\n print(\"Starting streamer...\")\n time.sleep(5)\n cmd = \"./streamer/fakewebcam.py -c \" + params[\"config_file\"] + \" >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")",
"def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)",
"def start_bot(self):\n self.proc = subprocess.Popen(\"./start\", stdin=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t\t\t cwd=os.path.abspath(self.path))",
"def __init__(\n self, executable=\"ffmpeg\", global_options=None, inputs=None, outputs=None\n ):\n self.executable = executable\n self._cmd = [executable]\n\n global_options = global_options or []\n if _is_sequence(global_options):\n normalized_global_options = []\n for opt in global_options:\n normalized_global_options += shlex.split(opt)\n else:\n normalized_global_options = shlex.split(global_options)\n\n self._cmd += normalized_global_options\n self._cmd += _merge_args_opts(inputs, add_input_option=True)\n self._cmd += _merge_args_opts(outputs)\n\n self.cmd = subprocess.list2cmdline(self._cmd)\n self.process = None",
"def main():\n destination = Path(argv[1])\n source_files = destination.glob(\"**/*.wma\")\n for file in source_files:\n new_name = file.name.rsplit(\".\", maxsplit=1)[0] + \".flac\"\n dest = str(file.parent / new_name)\n cmd = list(map(str, [\"avconv\", \"-i\", file, dest]))\n if platform == \"win32\":\n print(\"Running on windows... on Unix I'd run the following command:\")\n print(cmd)\n else:\n that = Popen(cmd)\n that.wait()",
"def play(filename):\n if sys.platform == \"win32\":\n os.startfile(filename)\n else:\n opener =\"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, filename])",
"def convert_to_mp3(filename: str, title: str, start: int=None, end: int=None) -> list:\n\t# setup args for ffmpeg\n\tfile_a = f\"{path_to_wrk_dir}{filename}.mp4\" # input file\n\tfile_b = f\"{path_to_wrk_dir}{title}.mp3\" # output file\n\tfiles_b = [] # this list need if file more than 30 mb\n\targs = [\n\t\t\"/usr/bin/ffmpeg\", # path to ffmpeg\n\t\t\"-i\", # flag for input file\n\t\tfile_a, # input file\n\t\t\"-acodec\", # setup codec\n\t\t\"libmp3lame\", # codec name\n\t\t]\n\n\t# now need setup timings for target encode\n\tif start is not None and start != 0:\n\t\targs = args + [\"-ss\", str(start)]\n\tif end is not None and end != 0:\n\t\targs = args + [\"-t\", str(end - start)]\n\n\t# and last part for args to ffmpeg\n\targs = args + [\n\t\t\"-metadata\", # setup metadata for file\n\t\tf\"title={title}\", # title\n\t\t\"-metadata\",\n\t\tf\"artist={title}\", # and artist\n\t\t\"-b:a\", # setup bitrate\n\t\t\"320k\", # setup max bitrate\n\t\tfile_b,\n\t\t]\n\tprint(f\"{args}\")\n\t# start subprocess for encoding\n\tpopen = subprocess.Popen(args)\n\tpopen.wait()\n\n\t# check size file. if he more than 30 mb, bot need split him to chunks.\n\tsize = getsize(file_b) / 1024 / 1024\n\tif size > 30 and ( start or end is None ):\n\t\t# setup args for split to chunks\n\t\targs = [\n\t\t\t\"ffprobe\",\n\t\t\t\"-show_entries\",\n\t\t\t\"format=duration\",\n\t\t\t\"-i\",\n\t\t\tfile_b,\n\t\t\t]\n\n\t\t# get duration video.\n\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\tpopen.wait()\n\t\toutput = popen.stdout.read()\n\t\t# now we know how long this audio file\n\t\t# split to 10 min chunks\n\t\tdur = re.findall(r\"\\d{1,10}\", str(output))\n\t\t# get chunks count for loop\n\t\tcount_chunks = (int(dur[0]) // 600) + 1\n\t\tfor chunk_start_time in range(0, count_chunks):\n\t\t\t# setup args for split\n\t\t\t# big parts of args the same for encode\n\t\t\targs = [\n\t\t\t\t\"/usr/bin/ffmpeg\",\n\t\t\t\t\"-i\",\n\t\t\t\tfile_b,\n\t\t\t\t\"-ss\",\n\t\t\t\tf\"{chunk_start_time * 600}\", # when start chunk\n\t\t\t\t\"-t\",\n\t\t\t\t\"600\", # 10 mints duration\n\t\t\t\t\"-acodec\",\n\t\t\t\t\"copy\", # copy\n\t\t\t\t\"-b:a\",\n\t\t\t\t\"320k\",\n\t\t\t\tf\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\", # now we have path to video with chunk number.\n\t\t\t]\n\t\t\ttry:\n\t\t\t\t# start process for cut chunk\n\t\t\t\tpopen = subprocess.Popen(args, stdout=subprocess.PIPE)\n\t\t\t\tpopen.wait()\n\t\t\t# handle except.\n\t\t\texcept Exception as e:\n\t\t\t\tprint(f\"Exception - {e}\")\n\t\t\tfiles_b.append(f\"{path_to_wrk_dir}{title}_{chunk_start_time}.mp3\") # append name of file in list\n\t\tremove(file_b)\n\ttry:\n\t\t# remove tmp file\n\t\tremove(file_a)\n\t# handle except\n\texcept FileNotFoundError:\n\t\tfiles = get_file_list(path_to_wrk_dir)\n\t\tfor i in files:\n\t\t\tif -1 != f\"{path_to_wrk_dir}{i}\".find(f\"{filename}\") and f\"{i}\".find(f\".mp3\") == -1:\n\t\t\t\ttry:\n\t\t\t\t\tremove(f\"{path_to_wrk_dir}{i}\")\n\t\t\t\texcept FileNotFoundError:\n\t\t\t\t\tprint(f\"can't remove file {path_to_wrk_dir}{i}\")\n\tif len(files_b) == 0:\n\t\treturn [file_b]\n\telse:\n\t\treturn files_b",
"def create_movie(name, folder):\n cmd = [\"ffmpeg\", \"-framerate\", \"1\", \"-i\", folder + \"/pic%04d.png\", \"-c:v\",\n \"libx264\", \"-r\", \"30\", \"-pix_fmt\", \"yuv420p\", name]\n return subprocess.call(cmd)"
] | [
"0.80590403",
"0.70434517",
"0.6731381",
"0.6610697",
"0.65816295",
"0.638432",
"0.62684786",
"0.61771494",
"0.61691076",
"0.61160266",
"0.60946435",
"0.6022187",
"0.60099894",
"0.5973808",
"0.59619623",
"0.5928561",
"0.59079623",
"0.5861136",
"0.5848352",
"0.582267",
"0.5821935",
"0.58112",
"0.57992834",
"0.57962465",
"0.5789407",
"0.5782944",
"0.57498956",
"0.5729546",
"0.5675533",
"0.5668853"
] | 0.81389457 | 0 |
Cleans up the ffmpeg process. This will wait for it to terminate | def _cleanup_ffmpeg(self) -> None:
self.ffmpeg_proc.communicate()
self.ffmpeg_proc = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def async_stop_ffmpeg(self):\n return self.ffmpeg.close()",
"def _cleanup_proc(self):\n logger.debug(\"{}: Cleaning up and waiting for process to exit\".format(\n self))\n try:\n self._loop.remove_reader(self._proc.stdout)\n self._proc.stdout.close()\n self._proc.stdin.close()\n except Exception:\n # Log errors, but otherwise ignore.\n logger.error(\"{}: Failed cleaning up process\".format(self),\n exc_info=True)\n finally:\n # If the wait fails, the sub-process will appear in the process\n # tree (labelled defunct). This is mostly harmless so just log a\n # warning.\n try:\n self._proc.wait(0)\n except subprocess.TimeoutExpired:\n logger.warning(\"{}: Wait failed\".format(self),\n exc_info=True)",
"def stop(self):\n if not self.ffmpeg:\n raise RuntimeError(\"ffmpeg is not running\")\n self.ffmpeg.send_signal(signal.SIGINT)",
"def timer_ffmpeg_process_timeout():\n try:\n if not self.ffmpeg_process_ps.is_alive():\n timer_ffmpeg_process.stop()\n self.w.hide()\n del (self.w)\n self.ffmpeg_process_ps = None\n except:\n pass",
"def cleanup():\n dist.destroy_process_group()",
"def cleanup(self):\n self.sock.close()\n cv2.destroyAllWindows()\n if self.video_writer is not None:\n self.video_writer.release()",
"def close(self):\n if self.proc:\n self.proc.terminate()\n self.proc.wait()\n self.proc = None\n del os.environ['DISPLAY']\n if self.fbdir:\n os.rmdir(self.fbdir)\n self.fbdir = None",
"def clean_up(self):\n dist.destroy_process_group()",
"def cleanup(self):\n\t\tself.pb.cleanup()\n\t\tsys.exit()",
"def finalize(self):\n self.busy = False\n self.pipe_start.send((\"FINISH\",None))\n self.process.join()\n if self.process.is_alive():\n self.process.terminate()",
"def cleanup():\n cv2.release()\n cv2.destroyAllWindows()",
"def clean_up(self):\n cv2.destroyAllWindows()\n # self.vs.release()",
"def close(self):\r\n try:\r\n self.proc.terminate()\r\n except (OSError, AttributeError): # pragma: no cover\r\n pass\r\n self.proc = None",
"def stop(self):\n try:\n self.logger.debug('Halting VideoSave thread')\n cv2.destroyAllWindows()\n except:\n self.logger.exception('Exception while halting VideoSave')\n super().stop()",
"def kill(self):\n self.proc.kill()\n self.proc.wait()\n self.thread.join()",
"def kill_video(self):\n self.cap.truncate(0)\n cv2.destroyAllWindows()",
"def terminate(self):\n if self.proc:\n self.proc.kill()\n self.proc = None",
"def close_exit(self):\n self.close_video()\n sys.exit()",
"def destroy(self):\n #print(\"vdec destroy****************\")\n if self._channel_desc is not None:\n ret = acl.media.vdec_destroy_channel(self._channel_desc)\n self._channel_desc = None\n\n self._thread_join()\n\n if self._frame_config is not None:\n acl.media.vdec_destroy_frame_config(self._frame_config)\n self._frame_config = None",
"def stop(self):\n if self._proc_is_alive():\n\n if os.name == 'nt':\n # os.killpg is not available on Windows\n # See: https://bugs.python.org/issue5115\n self._proc.kill()\n\n # When we kill the child process on Windows the return code\n # gets set to 1, so we want to reset the return code back to 0\n # in order to avoid creating logging output in the download(...)\n # method\n self._proc.returncode = 0\n else:\n os.killpg(self._proc.pid, signal.SIGKILL)\n\n self._set_returncode(self.STOPPED)",
"def stop(self):\n\t\tif self.__logging: self.__logger.debug('Terminating processes.')\n\t\t#terminate Threaded queue mode seperately\n\t\tif self.__threaded_queue_mode and not(self.__queue is None):\n\t\t\tif len(self.__queue)>0: self.__queue.clear()\n\t\t\tself.__threaded_queue_mode = False\n\t\t\tself.frame = None\n\n\t\t# indicate that the thread should be terminate\n\t\tself.__terminate = True\n\n\t\t# wait until stream resources are released (producer thread might be still grabbing frame)\n\t\tif self.__thread is not None:\n\t\t\tself.__thread.join()\n\t\t\t#properly handle thread exit\n\t\t\tif self.__youtube_mode:\n\t\t\t\t# kill thread-lock in youtube mode\n\t\t\t\tself.__thread = None",
"def terminate(self):\n logger.debug(\"terminating\")\n self._upload = None\n self._buf.close()",
"def cleanup(self):\r\n self.stop()\r\n self.PWM.stop() # stop the PWM output\r",
"def cleanup():\n logger.critical(\"Program termination cleanup routine executing.\")\n # Using os._exit() to fix a bug in subprocess.popen that causes the\n # interpreter to hang after on regular sys.exit, exit, or quit call.\n os._exit(0)",
"def close(self) -> None:\n if self._process:\n self._process.terminate()\n self._process.wait()\n self._process = None",
"def cleanup(self):\n self.all_wav_to_mp3()\n self.past_songs_db.close()\n self.move_tracks_to_music_folder( )\n self.delete_leftovers()\n print \"Cleanup finished\"",
"def xclip_cleanup():\n run_subprocess('pkill xclip')",
"def cleanup(self):\n log(\"[%s] Cleaning up\" % (self.__class__.__name__))\n self.manager.close()\n self.capture.release()\n self.barrier.abort()",
"def dist_cleanup():\n dist.destroy_process_group()",
"def cleanup(self):\n process_handler.terminate_root_and_child_processes(self._proc.pid)\n self._read_thread.join()\n if self._data_dir:\n shutil.rmtree(self._data_dir, ignore_errors=True)"
] | [
"0.731078",
"0.6837441",
"0.6784892",
"0.6526165",
"0.6473467",
"0.6470982",
"0.632042",
"0.63029844",
"0.6262408",
"0.6260875",
"0.61485505",
"0.61473036",
"0.60400724",
"0.60322404",
"0.6029492",
"0.6027344",
"0.6026851",
"0.6022948",
"0.5987162",
"0.59848726",
"0.59642464",
"0.5948816",
"0.59381",
"0.5932239",
"0.5912601",
"0.5906988",
"0.5889688",
"0.58830476",
"0.5874472",
"0.58554304"
] | 0.86087924 | 0 |
Registers the specified queue-like object as something frames can be received from. Must have get_nowait and empty members. | def register_queue(self, queue) -> None:
if queue is None:
raise ValueError('queue is None')
if not hasattr(queue, 'empty'):
raise ValueError(f'queue {queue} is missing empty member')
if not hasattr(queue, 'get_nowait'):
raise ValueError(f'queue {queue} is missing get_nowait member')
self.receive_queues.append(queue) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_queue_declared(frame):\n start_consuming(frame)",
"def example(example_object, queue):\n queue.put(example_object)",
"def example(example_object, queue_object):\n queue_object.put(example_object)",
"def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')",
"def register(self, queue, project=None):\n\n # TODO(kgriffs): SHARDING - Implement this!\n pass",
"def put_nowait(self, obj) -> None:\n self.put(obj, False)",
"def dispatch_frame(self, frame):\n if frame.command == 'RECEIPT':\n self.receipt_queue.put(frame)\n elif frame.command == 'MESSAGE':\n with self.subscription_lock:\n if frame.destination in self.subscribed_destinations:\n enqueue = True\n else:\n enqueue = False\n if self.debug:\n self.log.debug(\"Ignoring frame for unsubscribed destination: %s\" % frame)\n if enqueue:\n self.message_queue.put(frame)\n elif frame.command == 'ERROR':\n self.error_queue.put(frame)\n elif frame.command == 'CONNECTED':\n self.connected_queue.put(frame)\n else:\n self.log.info(\"Ignoring frame from server: %s\" % frame)",
"def queue_append(self, obj, value):\n self.queue.append((obj, value))\n if len(self.queue) > self.queue_size:\n self.dump_queue()",
"def vendedorBehavior(queue):\n gr = register_message()",
"def test_enqueue(self):\n dest = '/queue/foo'\n frame = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='some data')\n self.store.enqueue(dest, frame)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 1",
"def on_queue_declared(self, frame):\n\t\tself.channel.basic_qos(prefetch_count=1)\n\t\tself.channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\t\tself.consumer_tag = self.channel.basic_consume(\n\t\t\tself.handle_delivery, \n\t\t\tframe.method.queue\n\t\t)",
"def add_queue(self, queue):\n with self.mutex:\n self.queues.append(queue)",
"def encode_queue(self, queue):\n raise NotImplementedError()",
"def on_init(self, queue=None, **kwargs):\n self.queue = queue if queue else Queue()",
"def on_bindok(self, unused_frame):\n logger.info('Queue bound')\n self.setup_error_queue()",
"def _put(self, item, queue):",
"def setup_queue(self):\n self.logger.info('declaring queue %s', self.queue)\n if self.otq:\n self._channel.queue_declare(self.on_queue_declareok, self.queue, auto_delete=True)\n else:\n self._channel.queue_declare(self.on_queue_declareok, self.queue)",
"def on_queue_declareok(self, method_frame):\n self.logger.info('binding %s and %s together with %s', self.exchange, self.queue, self.routing_key)\n self._channel.queue_bind(self.on_bindok, self.queue, self.exchange, self.routing_key)",
"def setup_queue(self, method_frame):\n logger.info('Declaring queue %s', self.queue_name)\n # self._channel.queue_declare(self.on_queue_declareok, queue_name)\n\n self._channel.queue_declare(self.on_queue_declareok, exclusive=False, durable=True, queue=self.queue_name)",
"def dispatch_frame(self, frame):\n if frame.command == 'RECEIPT':\n self.receipt_queue.put(frame)\n elif frame.command == 'MESSAGE':\n with self.subscription_lock:\n if frame.destination in self.subscribed_destinations:\n handler = self.subscribed_destinations[frame.destination]\n else:\n handler = lambda f: None\n if self.debug:\n self.log.debug(\"Ignoring frame for unsubscribed destination: %s\" % frame)\n handler(frame)\n elif frame.command == 'ERROR':\n self.error_queue.put(frame)\n elif frame.command == 'CONNECTED':\n self.connected_queue.put(frame)\n else:\n self.log.info(\"Ignoring frame from server: %s\" % frame)",
"def add_queue(self, queue):\n\n queue_id = queue[\"ovsdb:queues\"][0][\"queue-id\"]\n self.queue_dict[queue_id] = queue",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2",
"def __init__(self):\n self.queue = Queue()",
"def enqueue(self, element):\n self.the_queue.append(element)",
"def enqueue(self, record):\r\n self.queue.put_nowait(record)",
"def __init__(self, queue: Queue):\n super().__init__()\n self._cursor = 0\n self._queue = queue\n self._all_transferred = threading.Event()",
"def use_queue():\n q = queue.Queue()\n for i in range(10):\n q.put_nowait(i)\n while q.qsize() > 0:\n element = q.get_nowait()\n sys.stdout.write(\"poping out from queue: {0}\\n\".format(element))",
"def runQueueEnqueue(self):\n raise NotImplementedError",
"def test_can_instantiate_empty_queue(empty_queue):\n assert isinstance(empty_queue, Queue)"
] | [
"0.66028094",
"0.629318",
"0.61060864",
"0.597517",
"0.580639",
"0.5723007",
"0.5670818",
"0.565421",
"0.55929124",
"0.5555419",
"0.55552125",
"0.5430858",
"0.5422504",
"0.54204226",
"0.5383923",
"0.5375583",
"0.5371588",
"0.5342425",
"0.5279118",
"0.5275986",
"0.5268184",
"0.52674735",
"0.52382565",
"0.5234758",
"0.52258265",
"0.52248764",
"0.521307",
"0.5201684",
"0.520157",
"0.51898754"
] | 0.69833153 | 0 |
Checks for items from each of the receive queues and pushes them onto the local memory dict. Returns the number of frames received | def check_queues(self) -> int:
nframes = 0
for queue in self.receive_queues:
if not queue.empty():
nframes += 1
frame, img_bytes = queue.get_nowait()
if frame < self.next_frame:
raise ValueError('received frame we already processed! '
+ f'got {frame}, at {self.next_frame}')
if frame in self.ooo_frames:
raise ValueError(f'received duplicate frame: {frame}')
self.ooo_frames[frame] = img_bytes
if len(self.ooo_frames) > self.max_ooo_frames:
raise ValueError('exceeded maximum frame cache (now have '
+ f'{len(self.ooo_frames)} frames waiting)')
return nframes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_queues(self):\n\t\t#queues = [get_queue(lane) for lane in self.Vissim_Lanes]\n\t\t\n\t\tqueues = [0. if queue.AttValue('QLen(Current, Last)') is None else queue.AttValue('QLen(Current, Last)') for queue in self.queues_counters]\n\t\treturn queues",
"def queue_lengths(self):\r\n out = []\r\n for probe in self.__probes.values():\r\n if probe.complete():\r\n out.append(probe.queue_length)\r\n return out",
"def current_queues(petrol_stations):\n current_queues = {}\n for number_of_station in petrol_stations:\n info = {}\n info['cars in the queue'] = 0\n info['max of queue'] = petrol_stations[number_of_station]['queue']\n current_queues[number_of_station] = info\n return current_queues",
"def check_packet_queue(self, queue, out):\n time.sleep(2)\n if queue == \"all\":\n self.verify(\"Queue= 0\" in out and \"Queue= 1\" in out and \"Queue= 2\" in out and \"Queue= 3\" in out,\n \"There is some queues doesn't work.\")\n elif queue == \"0\":\n self.verify(\"Queue= 0\" in out and \"Queue= 1\" not in out and \"Queue= 2\" not in out and \"Queue= 3\" not in out,\n \"RSS is enabled.\")\n lines = out.split(\"\\r\\n\")\n reta_line = {}\n queue_flag = 0\n packet_sumnum = 0\n # collect the hash result and the queue id\n for line in lines:\n line = line.strip()\n if queue_flag == 1:\n result_scanner = r\"RX-packets:\\s?([0-9]+)\"\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(line)\n packet_num = m.group(1)\n packet_sumnum = packet_sumnum + int(packet_num)\n queue_flag = 0\n elif line.strip().startswith(\"------- Forward\"):\n queue_flag = 1\n elif line.strip().startswith(\"RX-packets\"):\n result_scanner = r\"RX-packets:\\s?([0-9]+)\"\n scanner = re.compile(result_scanner, re.DOTALL)\n m = scanner.search(line)\n packet_rec = m.group(1)\n\n self.verify(packet_sumnum == int(packet_rec) == 128, \"There are some packets lost.\")",
"def test_dequeue_specific(self):\n dest = '/queue/foo'\n notdest = '/queue/other'\n \n frame1 = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='message-1') \n self.store.enqueue(dest, frame1)\n \n frame2 = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='message-2') \n self.store.enqueue(notdest, frame2)\n \n frame3 = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='message-3') \n self.store.enqueue(dest, frame3)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 2\n \n rframe1 = self.store.dequeue(dest)\n assert frame1 == rframe1\n \n rframe2 = self.store.dequeue(dest)\n assert frame3 == rframe2\n \n assert self.store.has_frames(dest) == False\n assert self.store.size(dest) == 0",
"def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())",
"def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize",
"def test_dequeue_order(self):\n dest = '/queue/foo'\n \n frame1 = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='message-1') \n self.store.enqueue(dest, frame1)\n \n frame2 = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='message-2') \n self.store.enqueue(dest, frame2)\n \n frame3 = Frame('MESSAGE', headers={'message-id': str(uuid.uuid4())}, body='message-3') \n self.store.enqueue(dest, frame3)\n \n assert self.store.has_frames(dest) == True\n assert self.store.size(dest) == 3\n \n rframe1 = self.store.dequeue(dest)\n assert frame1 == rframe1\n \n rframe2 = self.store.dequeue(dest)\n assert frame2 == rframe2\n \n rframe3 = self.store.dequeue(dest)\n assert frame3 == rframe3\n \n assert self.store.has_frames(dest) == False\n assert self.store.size(dest) == 0",
"def testQueueMultiMsg(self):\n for i in range(10):\n self.mgr.queueMsg(i)\n\n self.assertEqual( self.mgr.msgQueue.qsize(), 9)",
"def queue_count(self):\n with self.mutex:\n return len(self.queues)",
"def countSimulationEventQueues(self):\r\n raise NotImplementedError()",
"def incoming_buffer_loop(self):\n logger = logging.getLogger('lyse.FileBox.incoming')\n # HDF5 prints lots of errors by default, for things that aren't\n # actually errors. These are silenced on a per thread basis,\n # and automatically silenced in the main thread when h5py is\n # imported. So we'll silence them in this thread too:\n h5py._errors.silence_errors()\n n_shots_added = 0\n while True:\n try:\n filepaths = []\n filepath = self.incoming_queue.get()\n filepaths.append(filepath)\n if self.incoming_queue.qsize() == 0:\n # Wait momentarily in case more arrive so we can batch process them:\n time.sleep(0.1)\n while True:\n try:\n filepath = self.incoming_queue.get(False)\n except Queue.Empty:\n break\n else:\n filepaths.append(filepath)\n if len(filepaths) >= 5:\n break\n logger.info('adding:\\n%s' % '\\n'.join(filepaths))\n if n_shots_added == 0:\n total_shots = self.incoming_queue.qsize() + len(filepaths)\n self.set_add_shots_progress(1, total_shots)\n\n # Remove duplicates from the list (preserving order) in case the\n # client sent the same filepath multiple times:\n filepaths = sorted(set(filepaths), key=filepaths.index) # Inefficient but readable\n # We open the HDF5 files here outside the GUI thread so as not to hang the GUI:\n dataframes = []\n for i, filepath in enumerate(filepaths):\n dataframe = get_dataframe_from_shot(filepath)\n dataframes.append(dataframe)\n n_shots_added += 1\n shots_remaining = self.incoming_queue.qsize()\n total_shots = n_shots_added + shots_remaining + len(filepaths) - (i + 1)\n if i != len(filepaths) - 1:\n # Leave the last update until after dataframe concatenation.\n # Looks more responsive that way:\n self.set_add_shots_progress(n_shots_added, total_shots)\n new_row_data = concat_with_padding(*dataframes)\n self.set_add_shots_progress(n_shots_added, total_shots)\n self.shots_model.add_files(filepaths, new_row_data)\n if shots_remaining == 0:\n n_shots_added = 0 # reset our counter for the next batch\n # Let the analysis loop know to look for new shots:\n self.analysis_pending.set()\n except Exception:\n # Keep this incoming loop running at all costs, but make the\n # otherwise uncaught exception visible to the user:\n zprocess.raise_exception_in_thread(sys.exc_info())",
"def clear_queue(self):\n enqueued_pkt_count = len(self.packets)\n with self.packets_cv:\n self.packets = []\n return enqueued_pkt_count",
"def test_ipcrm_queues():\n IPCComm.ipcrm_queues()\n nt.assert_equal(len(IPCComm.ipc_queues()), 0)\n mq = IPCComm.get_queue()\n nt.assert_equal(len(IPCComm.ipc_queues()), 1)\n IPCComm.ipcrm_queues(str(mq.key))\n nt.assert_equal(len(IPCComm.ipc_queues()), 0)",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def _get_queues(self):\n return self.__queues",
"def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop",
"def listen_for_messages(self, callback):\n # generate get requests for all input queues\n requests = [port.in_queue.get() for port in self.ports]\n while requests:\n # helper variable for the asserts\n queues_with_pending_requests = [req.resource for req in requests]\n # There is a request for each input queue.\n assert set(self.input_queues) == set(queues_with_pending_requests)\n # For each input queue there's exactly one request.\n assert (\n len(queues_with_pending_requests) ==\n len(set(queues_with_pending_requests)))\n\n log.debug(\"{} waiting for next reception\".format(self))\n completed_requests = (yield self.env.any_of(requests))\n received_messages = list(completed_requests.values())\n log.debug(\"{} received {}\".format(\n self, received_messages))\n\n callback(received_messages)\n\n # Only leave the requests which have not been completed yet\n remaining_requests = [\n req for req in requests if req not in completed_requests]\n # Input queues that have been emptied since the last wake up.\n emptied_queues = [req.resource for req in completed_requests]\n # Add new get requests for the input queues that have been emptied.\n new_requests = []\n for input_queue in emptied_queues:\n new_requests.append(input_queue.get())\n requests = remaining_requests + new_requests",
"def get_queues_info() -> List[QueueInfo]:\n from src.server.oasisapi.analyses.models import AnalysisTaskStatus\n\n # setup an entry for every element in the broker (this will include\n # queues with no workers yet)\n res = [\n {\n 'name': q,\n 'pending_count': 0,\n 'queued_count': 0,\n 'running_count': 0,\n 'worker_count': 0,\n } for q in _get_broker_queue_names()\n ]\n\n # increment the number of workers available for each queue\n queues = _get_active_queues()\n if queues:\n for worker in queues.values():\n for queue in worker:\n try:\n next(r for r in res if r['name'] == queue['routing_key'])['worker_count'] += 1\n except StopIteration:\n # in case there are workers around still for inactive queues add it here\n res.append({\n 'name': queue['routing_key'],\n 'queued_count': 0,\n 'running_count': 0,\n 'worker_count': 1,\n })\n\n # get the stats of the running and queued tasks\n pending = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.PENDING,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n running = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.STARTED,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n queued = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.QUEUED,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n for entry in res:\n entry['pending_count'] = pending.get(entry['name'], 0)\n entry['queued_count'] = queued.get(entry['name'], 0)\n entry['running_count'] = running.get(entry['name'], 0)\n\n return res",
"def processEvents(self):\n self.framelist = sorted(self.framelist, key=lambda event: event.timestamp, reverse=True)\n self.framequeue = sorted(self.framequeue, key=lambda event: event.timestamp, reverse=True)\n self.packetqueue = sorted(self.packetqueue, key=lambda event: event.timestamp, reverse=True)\n \n print len(self.framequeue)\n print len(self.packetqueue)\n \n while len(self.framequeue) > 0 or len(self.packetqueue) > 0:\n self.getNextEvent().processEvent(self, self.decisionAlg)",
"def assert_queue_size(sizes):\n for queue in sizes:\n assert_that(count_messages(queue), is_(sizes[queue]))",
"def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass",
"def get_all_rcv(self) -> \"list[tuple[float, bytes]]\":\n\n return self._rcv_queue"
] | [
"0.58479637",
"0.58138037",
"0.5686759",
"0.55891025",
"0.5548044",
"0.55331",
"0.5531125",
"0.55247325",
"0.5522078",
"0.55212706",
"0.55021536",
"0.5491394",
"0.54898417",
"0.5464607",
"0.54142845",
"0.54142845",
"0.54142845",
"0.54142845",
"0.54142845",
"0.54142845",
"0.54142845",
"0.54142845",
"0.54142845",
"0.5400458",
"0.5392468",
"0.5385447",
"0.53679544",
"0.5361116",
"0.53518367",
"0.53030795"
] | 0.7559541 | 0 |
Writes the next frame to the ffmpeg process if it is available. Returns True if we processed a frame, False if we did not. | def process_frame(self) -> bool:
if self.next_frame not in self.ooo_frames:
return False
img_bytes = self.ooo_frames.pop(self.next_frame)
for kb_start in range(0, len(img_bytes), self.block_size):
self.ffmpeg_proc.stdin.write(
img_bytes[kb_start:kb_start + self.block_size])
self.next_frame += 1
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process(self, frame, metadata):\n self.log.debug(f\"In process() method...\")\n\n # Publishing metadata & frames\n self.publisher.publish((metadata, frame.tobytes(),))\n metadata, frame = self.subscriber.recv()\n\n if \"jpnb_frame_drop\" in metadata:\n del metadata[\"jpnb_frame_drop\"]\n return True, None, None\n elif \"jpnb_frame_updated\" in metadata and \"jpnb_metadata_updated\" in metadata:\n del metadata[\"jpnb_frame_updated\"]\n del metadata[\"jpnb_metadata_updated\"]\n return False, frame, metadata\n elif \"jpnb_frame_updated\" in metadata or \"jpnb_metadata_updated\" in metadata:\n if \"jpnb_frame_updated\" in metadata:\n # Decode frame to retain text\n frame = self.decode_frame(metadata, frame)\n del metadata[\"jpnb_frame_updated\"]\n return False, frame, None\n if \"jpnb_metadata_updated\" in metadata:\n del metadata[\"jpnb_metadata_updated\"]\n return False, None, metadata\n else:\n return False, None, None",
"def next_frame(self):\n while True:\n if self.grabbed:\n buffer = self.__get_next_yuv_frame()\n if len(buffer) != self.__frame_raw_data_size:\n self.frame = False, False\n self.stopped = True\n break\n\n y, u, v = self.__extract_yuv_planes(buffer)\n\n # Save YUV planes now because they will be reshaped from (height, width) to (height, width, 1)\n\n converted_frame = self.__concatenate_planes_to_444yuv_frame(y, u, v)\n\n self.frame = True, converted_frame\n self.grabbed = False\n\n if self.stopped:\n break\n\n time.sleep(1/1000)",
"def process_image():\n global last_frame, is_streaming\n i=0\n\n imgproc = ImgProc()\n while(True):\n if last_frame is not None and is_streaming:\n time.sleep(0.1)\n\n print(\"Processing frame \", i)\n imgproc.detect_object(last_frame, i)\n print(\"Processing complete \", i)\n i+=1",
"def _next_frame(self):\n ret, self.frame = self.capture.read()\n if not ret:\n self.logger.warning('Failed to read frame')\n if self.show_video:\n cv2.imshow('frame', self.frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n exit(0)\n return ret",
"def process_frame():\n return \"OK\"",
"def __is_decoding_finished(self, next_time, outputs):\n if self._sequence_length is None:\n # Do not stop generating frames.\n finished = tf.tile([False], [self._batch_size])\n else:\n # Stop if the desired sequence length was reached.\n finished = (next_time >= self._sequence_length)\n\n return finished",
"def frame_forward(self):\n if self.playMode == FFMPEG:\n self.ffmpegTimerOut()",
"def process(self):\n frame_count = 0\n size = self.frame.size\n while True:\n try:\n for i in range(parallel.BUFFER_LENGTH):\n offset = i * size;\n self.manager.image[offset : offset + size] = self.frame.ravel()\n self.ret, self.frame = self.capture.read()\n if not self.ret:\n self.clear_buffer(offset=offset + size + 1)\n raise StopIteration\n if DEBUG_LEVEL > 2:\n cv.imshow(self.name, self.frame)\n frame_count += 1\n key = cv.waitKey(self.toggle)\n if key is 27:\n raise StopIteration\n return\n elif key is 32:\n self.toggle ^= 1\n self.manager.detect()\n self.barrier.wait()\n except StopIteration:\n # Handle dangling frames in buffer and return gracefully\n self.manager.detect()\n self.barrier.wait()\n self.cleanup()\n try:\n # Handle rangequits in Phase 1\n for rv in self.variables:\n for event in rv['events']:\n if event['event_subtype'] == \"Finish\":\n return self.variables\n return None\n except:\n # Phase 0 -- no handling\n return self.variables\n except:\n # Any other exception is bad!\n return None",
"def more(self):\n # return True if there are still frames in the queue. If stream is not stopped, try to wait a moment\n tries = 0\n while self.Q.qsize() == 0 and not self.stopped and tries < 5:\n time.sleep(0.1)\n tries += 1\n\n return self.Q.qsize() > 0",
"def has_next(self):\n # type: () -> bool\n return len(self.buffer) > 0",
"def run(self):\n # get frame of mic samples\n if self.__stream is None:\n # callin code needs to open stream 1st\n return False\n else:\n # wait for sufficient sample, else pitch plot crashes\n if self.__recorded >= FRAMES_PER_BUFFER:\n # new file if time exceeded\n self.__openSampleFile()\n return True\n else:\n # not enough samples\n return False",
"def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame",
"def has_next(self):\n return not self.finished_function(self.peek)",
"def parse_frames(self):\r\n done = False\r\n self._ip = 13 + self.ct_len\r\n while not done:\r\n code = self.next_byte()\r\n if not code:\r\n raise ValueError(\"Unexcepted end of file\")\r\n if code == b\"\\x2C\":\r\n self.parse_frame()\r\n elif code == b\"\\x21\":\r\n code = self.next_byte()\r\n if code == b\"\\xF9\":\r\n self.g_ext.append(self.parse_gce())\r\n elif code == b\"\\xFF\":\r\n self.next_byte()\r\n app = self.next_bytes(11)\r\n if app == b\"NETSCAPE2.0\":\r\n self.parse_ne()\r\n else:\r\n self.skip()\r\n elif code == b\"\\xFE\":\r\n self.comments.append(self.parse_ce())\r\n else:\r\n self.next_bytes(13)\r\n self.skip()\r\n elif code == b\"\\x3B\":\r\n done = True",
"def process(self, frame, cur_count):\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n _, gray = cv.threshold(gray, 30, 255, cv.THRESH_BINARY)\n black_count = float(np.sum(gray)) / float(gray.size)\n # If at least 80% of the frame is true black, race has stopped\n if black_count <= 0.2:\n self.handle(frame, cur_count)",
"def start(self):\n if hasattr(self, 'process'):\n err = \"video '{}' Frames Extraction has already \" \\\n \"started.\".format(self.video_file)\n print err\n raise Exception(err)\n\n process_number = subprocess.Popen(self.start_frames, stdout=subprocess.PIPE)\n process_number.wait()\n return process_number",
"def process_message(self):\n while True:\n if not self.next_message:\n return False\n\n # check if next message is in the past, and drop it\n if (self.next_message.round, self.next_message.phase) < (self.round, self.phase):\n (self.logger.debug if self.is_leader else self.logger.warning)(\n \"dropping past message from round %d / phase %s\",\n self.next_message.round, self.next_message.phase.name\n )\n self.drop_message()\n else:\n break\n\n # check if next message is in the future, and process it at a later point in time\n if (self.next_message.round, self.next_message.phase) > (self.round, self.phase):\n return False\n\n msg_item = self.dequeue_message()\n msg_type = get_message_type(msg_item.content)\n msg_sender = get_message_sender(msg_item.content)\n\n if msg_sender == self.leader and msg_type != MessageType.Propose:\n self.logger.warning(f\"FLAGGING NODE {msg_sender} AS ADVERSARY, LEADER SENT DIFFERENT MESSAGE\")\n self.flag_adversary(msg_sender)\n self.recover()\n return True\n\n signed_msg: SignedMessage = SignedMessage.deserialize(msg_item.content)\n msg = signed_msg.message # signature was already verified prior to insertion into the message buffer\n assert msg.round_idx == self.round\n assert msg.type.to_phase() == self.phase\n\n # TODO: add try/except for deserialization, and flag leader as adversial upon failure\n\n self.logger.debug(\"processing %s message\", msg_type.name)\n if msg_type == MessageType.Propose:\n self.process_propose(msg)\n elif msg_type == MessageType.Acknowledge:\n self.process_acknowledge(msg)\n elif msg_type == MessageType.Confirm:\n self.process_confirm(signed_msg)\n elif msg_type == MessageType.Recover:\n self.process_recover(msg)\n else:\n assert False, \"message type not considered\"\n\n return True",
"def captureNextFrame(self):\n ret, readFrame=self.capture.read()\n if(ret==True):\n self.currentFrame=cv2.cvtColor(readFrame,cv2.COLOR_BGR2RGB)",
"def available(self):\n return self.ffmpeg.is_running",
"def hasCurrentFrame(self):\n if self.currentFrame == []:\n return False\n return True",
"def has_next(self):\n try:\n self.next()\n return True\n except (ParseException, struct.error):\n return False",
"def frame_available(self):\n return type(self._frame) != type(None)",
"def next(self):\n\n fn_frame = os.path.join(self.sequence_root, 'image_2/%06d.png' % (self.index))\n fn_velo = os.path.join(self.sequence_root, 'velodyne/%06d.bin' %(self.index))\n fn_label = os.path.join(self.sequence_root, 'labels/%06d.label' %(self.index))\n\n if not os.path.exists(fn_frame) or not os.path.exists(fn_velo):\n print('End of sequence')\n return False\n \n if not os.path.exists(fn_label):\n print('Semantic KITTI label file not found')\n return False\n\n self.frame = cv2.imread(fn_frame)\n if self.frame is None:\n print('File could not be read',fn_frame)\n \n self.points = np.fromfile(fn_velo, dtype=np.float32).reshape(-1, 4)[:,:3]\n self.n_pts = self.points.shape[0]\n label = np.fromfile(fn_label, dtype=np.uint32).reshape((-1))\n\n if label.shape[0] == self.points.shape[0]:\n self.sem_label = label & 0xFFFF # semantic label in lower half\n self.inst_label = label >> 16 # instance id in upper half\n assert((self.sem_label + (self.inst_label << 16) == label).all()) # sanity check\n else:\n print(\"Points shape: \", self.points.shape)\n print(\"Label shape: \", label.shape)\n raise ValueError(\"Scan and Label don't contain same number of points\")\n\n self.index += 1\n return True",
"def process_frame(self, frame):\n\t\treturn frame",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def process_frames(self, data):\n pass",
"def update_frame(self):\r\n while not self.stopped:\r\n if not self.grabbed or not self.cap.isOpened():\r\n self.stop()\r\n else:\r\n self.grabbed, self.frame = self.cap.read()\r\n try:\r\n if self.grabbed:\r\n #self.New_Frame_Time = time.time()\r\n #self.FPS = 1/(self.New_Frame_Time-self.Old_Frame_Time)\r\n #self.Old_Frame_Time = self.New_Frame_Time\r\n self.FrameCount += 1\r\n else:\r\n print(f'Grabbed status is: {self.grabbed}')\r\n #self.Old_Frame_Time = time.time()\r\n except ZeroDivisionError:\r\n print(\"Division by zero error when finding video feed fps\")\r\n self.FPS = 0\r\n self.Old_Frame_Time = time.time()"
] | [
"0.681551",
"0.62653166",
"0.62197095",
"0.62151057",
"0.6016715",
"0.5888307",
"0.585658",
"0.5855879",
"0.58241194",
"0.57367325",
"0.5663488",
"0.565733",
"0.561935",
"0.5575768",
"0.55190694",
"0.54909426",
"0.5479278",
"0.547769",
"0.54572225",
"0.54553086",
"0.54294866",
"0.53999394",
"0.5386982",
"0.53562534",
"0.535431",
"0.535431",
"0.535431",
"0.535431",
"0.535431",
"0.53363377"
] | 0.8323678 | 0 |
Helper function to get the color of the dock. | def _getDockColor(self, plane):
color = (0,0,0)
if plane.zAxis != -1:
color = self.globalAxis[plane.zAxis].color[0:3]
return color | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def color(self):\n return self.container['color']",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def _get_color(self):\n return self.__color",
"def get_color(self):\n self.view.present(\n \"sheet\",\n orientations=ORIENTATIONS,\n )\n self.view.wait_modal()\n return self.rgb",
"def getColor(self):\r\n return self.color",
"def get_color(self):\n return self.color",
"def _get_red(self):\n return self.__red",
"def _get_red(self):\n return self.__red",
"def _get_red(self):\n return self.__red",
"def test_get_color(self):\n assert dockerprettyps.get_color(1) == \"\\033[94m\"\n assert dockerprettyps.get_color(200) == \"\\033[92m\"",
"def color(self):\n return self.COLOR",
"def get_color(self):\n return \"yellow\"",
"def get_color(self) -> str:\r\n return self.color",
"def get_color(self):\n return self._io.last_state['color']['front-center']",
"def color(self):\n return self['color']",
"def GetColor(self, id):\r\n\r\n if id == AUI_DOCKART_BACKGROUND_COLOUR:\r\n return self._background_brush.GetColour()\r\n elif id == AUI_DOCKART_BACKGROUND_GRADIENT_COLOUR:\r\n return self._background_gradient_colour\r\n elif id == AUI_DOCKART_SASH_COLOUR:\r\n return self._sash_brush.GetColour()\r\n elif id == AUI_DOCKART_INACTIVE_CAPTION_COLOUR:\r\n return self._inactive_caption_colour\r\n elif id == AUI_DOCKART_INACTIVE_CAPTION_GRADIENT_COLOUR:\r\n return self._inactive_caption_gradient_colour\r\n elif id == AUI_DOCKART_INACTIVE_CAPTION_TEXT_COLOUR:\r\n return self._inactive_caption_text_colour\r\n elif id == AUI_DOCKART_ACTIVE_CAPTION_COLOUR:\r\n return self._active_caption_colour\r\n elif id == AUI_DOCKART_ACTIVE_CAPTION_GRADIENT_COLOUR:\r\n return self._active_caption_gradient_colour\r\n elif id == AUI_DOCKART_ACTIVE_CAPTION_TEXT_COLOUR:\r\n return self._active_caption_text_colour \r\n elif id == AUI_DOCKART_BORDER_COLOUR:\r\n return self._border_pen.GetColour()\r\n elif id == AUI_DOCKART_GRIPPER_COLOUR:\r\n return self._gripper_brush.GetColour()\r\n else:\r\n raise Exception(\"Invalid Colour Ordinal.\")",
"def get_color(self):\r\n return self.__color",
"def getColor(self):\n return self.color",
"def color(self):\n return self.settings['color']",
"def get_colour(self):\n return self.colour",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def color(self):\n return self._zoom.color",
"def get_color(self):\r\n return self._color",
"def get_color(self) -> str:\n return self.color",
"def computeUIPalette(self):\n\n\t\t# self.col['group-bg'] = QtGui.QColor(128, 128, 128)\n\t\tself.col['line'] = self.col['window'].darker(110)\n\t\tself.col['tooltip'] = QtGui.QColor(255, 255, 221)\n\t\tself.col['mandatory'] = QtGui.QColor(252, 152, 103)\n\t\tself.col['warning'] = QtGui.QColor(255, 216, 106)\n\t\tself.col['inherited'] = QtGui.QColor(161, 239, 228)\n\n\t\tif self.col['window'].lightness() < 128: # Dark UI\n\t\t\tself.imgtheme = \"light\"\n\t\t\tself.col['text'] = QtGui.QColor(204, 204, 204)\n\t\t\tself.col['group-bg'] = QtGui.QColor(0, 0, 0)\n\t\t\tself.col['disabled'] = QtGui.QColor(102, 102, 102)\n\t\t\t# self.col['disabled'] = self.offsetColor(self.col['window'], +51)\n\t\t\t# self.col['base'] = self.offsetColor(self.col['window'], -34, 34)\n\t\t\t# self.col['alternate'] = self.offsetColor(self.col['base'], +6)\n\t\t\t# self.col['button'] = self.offsetColor(self.col['window'], +34, 102)\n\t\t\t# self.col['button-border'] = self.offsetColor(self.col['button'], +8)\n\t\t\t# self.col['menu-bg'] = self.offsetColor(self.col['window'], -17, 68)\n\t\t\t# self.col['menu-border'] = self.offsetColor(self.col['menu-bg'], +17)\n\t\t\t# self.col['group-header'] = self.offsetColor(self.col['window'], +17)\n\t\t\tself.col['base'] = self.col['window'].darker(150)\n\t\t\tself.col['alternate'] = self.col['base'].lighter(106)\n\t\t\tself.col['button'] = self.col['window'].lighter(150)\n\t\t\tself.col['button-border'] = self.col['button']\n\t\t\tself.col['menu-bg'] = self.col['window'].darker(125)\n\t\t\tself.col['menu-border'] = self.col['menu-bg']\n\t\t\tself.col['group-header'] = self.col['window'].lighter(150)\n\t\telse: # Light UI\n\t\t\tself.imgtheme = \"dark\"\n\t\t\tself.col['text'] = QtGui.QColor(51, 51, 51)\n\t\t\tself.col['group-bg'] = QtGui.QColor(255, 255, 255)\n\t\t\tself.col['disabled'] = QtGui.QColor(102, 102, 102)\n\t\t\t# self.col['disabled'] = self.offsetColor(self.col['window'], -51)\n\t\t\t# self.col['base'] = self.offsetColor(self.col['window'], +34, 221)\n\t\t\t# self.col['alternate'] = self.offsetColor(self.col['base'], -6)\n\t\t\t# self.col['button'] = self.offsetColor(self.col['window'], -17, 204)\n\t\t\t# self.col['button-border'] = self.offsetColor(self.col['button'], -8)\n\t\t\t# self.col['menu-bg'] = self.offsetColor(self.col['window'], +17, 187)\n\t\t\t# self.col['menu-border'] = self.offsetColor(self.col['menu-bg'], -17)\n\t\t\t# self.col['group-header'] = self.offsetColor(self.col['window'], -17)\n\t\t\tself.col['base'] = self.col['window'].lighter(150)\n\t\t\tself.col['alternate'] = self.col['base'].darker(106)\n\t\t\tself.col['button'] = self.col['window'].darker(150)\n\t\t\tself.col['button-border'] = self.col['button']\n\t\t\tself.col['menu-bg'] = self.col['window'].lighter(125)\n\t\t\tself.col['menu-border'] = self.col['menu-bg']\n\t\t\tself.col['group-header'] = self.col['window'].darker(150)\n\n\t\t# self.col['hover'] = self.offsetColor(self.col['button'], +17)\n\t\t# self.col['checked'] = self.offsetColor(self.col['button'], -17)\n\t\tself.col['hover'] = self.col['button'].lighter(110)\n\t\tself.col['checked'] = self.col['button'].darker(110)\n\t\tself.col['pressed'] = self.col['checked'] #self.col['highlight']\n\n\t\tif self.col['highlight'].lightness() < 136:\n\t\t\tself.col['highlighted-text'] = QtGui.QColor(255, 255, 255)\n\t\telse:\n\t\t\tself.col['highlighted-text'] = QtGui.QColor(0, 0, 0)\n\n\t\tif self.col['tooltip'].lightness() < 136:\n\t\t\tself.col['tooltip-text'] = QtGui.QColor(255, 255, 255)\n\t\telse:\n\t\t\tself.col['tooltip-text'] = 
QtGui.QColor(0, 0, 0)\n\n\t\t# if self.col['button'].lightness() < 170:\n\t\t# \tself.col['button-text'] = self.offsetColor(self.col['button'], +68, 204)\n\t\t# else:\n\t\t# \tself.col['button-text'] = self.offsetColor(self.col['button'], -68, 51)\n\t\tself.col['button-text'] = self.col['text']\n\n\t\tself.col['mandatory-bg'] = self.col['mandatory']\n\t\tif self.col['mandatory-bg'].lightness() < 128:\n\t\t\tself.col['mandatory-text'] = self.offsetColor(self.col['mandatory-bg'], +68, 204)\n\t\telse:\n\t\t\tself.col['mandatory-text'] = self.offsetColor(self.col['mandatory-bg'], -68, 51)\n\n\t\tself.col['warning-bg'] = self.col['warning']\n\t\tif self.col['warning-bg'].lightness() < 128:\n\t\t\tself.col['warning-text'] = self.offsetColor(self.col['warning-bg'], +68, 204)\n\t\telse:\n\t\t\tself.col['warning-text'] = self.offsetColor(self.col['warning-bg'], -68, 51)\n\n\t\tself.col['inherited-bg'] = self.col['inherited']\n\t\tif self.col['inherited-bg'].lightness() < 128:\n\t\t\tself.col['inherited-text'] = self.offsetColor(self.col['inherited-bg'], +68, 204)\n\t\telse:\n\t\t\tself.col['inherited-text'] = self.offsetColor(self.col['inherited-bg'], -68, 51)",
"def get_color(self):\n\n return self.color",
"def color(self):\n return 0x2f3136",
"def color(self):\n if \"color\" in self._prop_dict:\n return self._prop_dict[\"color\"]\n else:\n return None"
] | [
"0.7055499",
"0.6641315",
"0.6641315",
"0.6641315",
"0.6641315",
"0.662588",
"0.6576148",
"0.6518216",
"0.65073246",
"0.65073246",
"0.65073246",
"0.650731",
"0.64849645",
"0.6471284",
"0.64579517",
"0.6433993",
"0.6423269",
"0.64182955",
"0.639525",
"0.6385216",
"0.63695",
"0.6367997",
"0.6345155",
"0.6336475",
"0.63328195",
"0.62927157",
"0.62845",
"0.62771684",
"0.6244185",
"0.6218426"
] | 0.7151222 | 0 |
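A minimal, runnable sketch of the fallback behaviour in `_getDockColor` above: the dock colour defaults to black unless the plane has an axis assigned. The `Axis`, `Plane` and `Dock` stand-in classes are hypothetical; only the method body is taken from the snippet.

```python
# Hypothetical stand-ins that exercise both branches of _getDockColor.
class Axis:
    def __init__(self, color):
        self.color = color  # RGBA tuple; the helper keeps only the RGB part

class Plane:
    def __init__(self, zAxis):
        self.zAxis = zAxis  # -1 means "no axis assigned"

class Dock:
    def __init__(self, globalAxis):
        self.globalAxis = globalAxis

    def _getDockColor(self, plane):  # method body as in the record above
        color = (0, 0, 0)
        if plane.zAxis != -1:
            color = self.globalAxis[plane.zAxis].color[0:3]
        return color

dock = Dock([Axis((255, 0, 0, 255)), Axis((0, 255, 0, 255))])
print(dock._getDockColor(Plane(1)))   # (0, 255, 0) -> colour of axis 1
print(dock._getDockColor(Plane(-1)))  # (0, 0, 0)   -> default when no axis is set
```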
Helper method to create default request meta. All 'SERP' and 'PRODUCT' requests need to implement this. It will propagate the meta information from the original/parent request to the child requests. | def create_default_request_meta(
response: Union[ScrapyTextResponse, ScrapyHttpResponse], original_url: Optional[str] = None
) -> Dict:
return {
"original_URL": original_url if original_url else response.url,
"category": response.meta.get("category"),
"gender": response.meta.get("gender"),
"consumer_lifestage": response.meta.get("consumer_lifestage"),
"meta_data": response.meta.get("meta_data"),
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def request_info(self, request):\n\n\t\t# We have to re-resolve the request path here, because the information\n\t\t# is not stored on the request.\n\t\tview, args, kwargs = resolve(request.path)\n\t\tfor i, arg in enumerate(args):\n\t\t\tkwargs[i] = arg\n\n\t\tparameters = {}\n\t\tparameters.update(kwargs)\n\t\tparameters.update(request.POST.items())\n\n\t\treturn {\n\t\t\t\t\"request\": {\n\t\t\t\t\t\"session\": dict(request.session),\n\t\t\t\t\t\"remote_ip\": request.META[\"REMOTE_ADDR\"],\n\t\t\t\t\t\"parameters\": parameters,\n\t\t\t\t\t\"action\": view.__name__,\n\t\t\t\t\t\"application\": view.__module__,\n\t\t\t\t\t\"request_method\": request.method,\n\t\t\t\t\t}\n\t\t\t\t}",
"def meta(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'meta')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def DefaultRequestSet(self) -> _n_6_t_0:",
"def meta(self):\n raise NotImplementedError",
"def init_metadata(self, parent):\n parent_metadata = parent.get('metadata', {})\n return {\n 'started': utcnow(),\n 'dependencies_met': True,\n 'engine': self.ident,\n 'is_broadcast': parent_metadata.get('is_broadcast', False),\n 'is_coalescing': parent_metadata.get('is_coalescing', False),\n 'original_msg_id': parent_metadata.get('original_msg_id', ''),\n }",
"def process_metadata(self):\n\n self._responses = self._get_responses()\n\n (\n self._request_body_parameter,\n self._request_body_class,\n self._request_body_content_types,\n ) = self._get_request_body_parameter()\n\n if self._request_body_content_types is None:\n self._request_body_content_types = [\"application/json\"]\n\n self._request_body_file_type = self._get_request_body_file_type()\n if self._request_body_parameter is not None and self._request_body_file_type is not None:\n raise TypeError(\"An endpoint cannot accept both a file and a model\")\n\n self._query_parameters = dict(self._get_query_string_parameters())\n self._path_parameters = dict(self._get_path_parameters())\n\n self._security = [*self._get_security_requirements()]\n self._tags = [*self._get_tags()]",
"def meta(self):\n refresh = False\n if '_meta' not in self.__dict__:\n refresh = True\n # elif self.parameters != self.__dict__['_meta']['parameters']:\n # refresh = True\n\n if refresh:\n # parameters = self.parameters\n # if parameters is None:\n # parameters = dict()\n self._meta = {\n 'connector_keys' : self.connector_keys,\n 'metric_key' : self.metric_key,\n 'location_key' : self.location_key,\n # 'parameters' : parameters,\n 'instance' : self.instance_keys,\n }\n return self._meta",
"def _get_meta(self, request):\n for key, value in self._get_metadata(request):\n replacement = self._meta_replacement(key, value)\n if replacement:\n yield replacement",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='OBJECT')\n self.meta['decker'] = dict(ext=0, card=None, default='default')\n self.meta['dichroic'] = dict(ext=0, card=None, default='default')\n self.meta['binning'] = dict(ext=0, card=None, default='1,1')\n\n self.meta['mjd'] = dict(ext=0, card='ACQTIME')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['idname'] = dict(ext=0, card='OBSTYPE')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True)\n self.meta['detector'] = dict(ext=0, card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['datasec'] = dict(ext=0, card='DETSIZE')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')\n self.meta['slitwid'] = dict(card=None, compound=True)",
"def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True) # Uses CCDSUM\n self.meta['detector']=dict(ext=0,card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['datasec'] = dict(ext=1, card='DATASEC')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')",
"def init_meta(self):\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=0, card='OBJRA')\n meta['dec'] = dict(ext=0, card='OBJDEC')\n meta['target'] = dict(ext=0, card='OBJECT')\n meta['decker'] = dict(ext=0, card='ALAPRTNM')\n meta['binning'] = dict(card=None, compound=True)\n\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=0, card='EXPTIME')\n meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=0, card='ALGRNM')\n meta['idname'] = dict(ext=0, card='IMAGETYP')\n # Lamps\n # Use Keck/LRIS approach\n\n # Ingest\n self.meta = meta",
"def get_metadata(self, req):\n try:\n new_meta = {}\n metadata = {}\n # get metadata from request headers\n metadata.update(\n (key.lower(), value)\n for key, value in req.headers.iteritems()\n if key.lower() in HEADERS or\n is_sys_or_user_meta('container', key))\n for key, value in metadata.iteritems():\n if key == 'x-container-read':\n new_meta.update({'r-' : value})\n elif key == 'x-container-write':\n new_meta.update({'w-' : value})\n else:\n ser_key = key.split('-')[2]\n if ser_key == 'meta':\n\n #Supported a single word key till first '-' \n #in the entire metadata header as X-Container-Meta-A\n #new_key = '%s-%s' % ('m', key.split('-')[3])\n \n #SANCHIT: This supports multi-part key for metadata \n #such as X-Container-Meta-A-B-C\n new_key = '%s-%s' % ('m', key.split('-', 3)[-1])\n new_meta.update({new_key : value})\n elif ser_key == 'sysmeta':\n #new_key = '%s-%s' % ('sm', key.split('-')[3])\n new_key = '%s-%s' % ('sm', key.split('-', 3)[-1])\n new_meta.update({new_key : value})\n else:\n self.logger.debug('Expected metadata not found')\n return new_meta\n except Exception as err:\n self.logger.error(('get_metadata failed ',\n 'close failure: %(exc)s : %(stack)s'),\n {'exc': err, 'stack': ''.join(traceback.format_stack())})\n raise err",
"def _build_request_data(request):\n\n # webob (pyramid)\n if WebobBaseRequest and isinstance(request, WebobBaseRequest):\n return _build_webob_request_data(request)\n\n # django\n if DjangoHttpRequest and isinstance(request, DjangoHttpRequest):\n return _build_django_request_data(request)\n\n # django rest framework\n if RestFrameworkRequest and isinstance(request, RestFrameworkRequest):\n return _build_django_request_data(request)\n\n # werkzeug (flask)\n if WerkzeugRequest and isinstance(request, WerkzeugRequest):\n return _build_werkzeug_request_data(request)\n\n # tornado\n if TornadoRequest and isinstance(request, TornadoRequest):\n return _build_tornado_request_data(request)\n\n # bottle\n if BottleRequest and isinstance(request, BottleRequest):\n return _build_bottle_request_data(request)\n\n # Sanic\n if SanicRequest and isinstance(request, SanicRequest):\n return _build_sanic_request_data(request)\n\n # falcon\n if FalconRequest and isinstance(request, FalconRequest):\n return _build_falcon_request_data(request)\n\n # Plain wsgi (should be last)\n if isinstance(request, dict) and 'wsgi.version' in request:\n return _build_wsgi_request_data(request)\n\n # FastAPI (built on top of Starlette, so keep the order)\n if FastAPIRequest and isinstance(request, FastAPIRequest):\n return _build_fastapi_request_data(request)\n\n # Starlette (should be the last one for Starlette based frameworks)\n if StarletteRequest and isinstance(request, StarletteRequest):\n return _build_starlette_request_data(request)\n\n return None",
"def _initiate_meta(kwargs, activity, ignores=()):\n meta = {AssociatedObjectId.ACTIVITY_ID: str(_retrieve_object_id(activity))}\n # also add the keys' in their snake case appearance so noPadding and no_padding, customHeight and custom_height\n keys_in_kwargs = KECARD_COMMON_KEYS + [snakecase(k) for k in KECARD_COMMON_KEYS]\n\n # initiate the meta based on known kwarg arguments\n for key in list(set(keys_in_kwargs)):\n if key in kwargs:\n meta[camelcase(key)] = kwargs.pop(key)\n\n # we check for custom_height specifically and deal with it.\n if snakecase(MetaWidget.CUSTOM_HEIGHT) in kwargs:\n meta[MetaWidget.CUSTOM_HEIGHT] = kwargs.pop(snakecase(MetaWidget.CUSTOM_HEIGHT))\n\n # remove the 'ignores' from the meta\n for key in ignores:\n if key in meta:\n del meta[key]\n\n return meta",
"def init_meta(self):\n meta = {}\n # Required (core)\n meta['ra'] = dict(ext=1, card='RA')\n meta['dec'] = dict(ext=1, card='DEC')\n meta['target'] = dict(ext=1, card='OBJECT')\n meta['decker'] = dict(ext=1, card='APERTURE')\n meta['dichroic'] = dict(ext=1, card='FILTER')\n meta['binning'] = dict(ext=1, card=None, default='1,1')\n\n meta['mjd'] = dict(ext=0, card=None, compound=True)\n meta['exptime'] = dict(ext=1, card='EXPTIME')\n meta['airmass'] = dict(ext=1, card='AIRMASS')\n # Extras for config and frametyping\n meta['dispname'] = dict(ext=1, card='DISPERSE')\n meta['idname'] = dict(ext=1, card='IMAGETYP')\n\n # Ingest\n self.meta = meta",
"def getDefaultMeta(self):\n\n meta = MetaDict()\n units = UnitDict()\n\n # meta[self.getStandardIdentifier('tsSamplingRate')] = 80000\n #\n # units[self.getStandardIdentifier('tsSamplingRate')] = 'Hz'\n\n return meta, units",
"def _meta_dict(self, node):\n meta = {n: self._text(node, n) for n in ('source', 'date', 'key')}\n meta.update(self.infon_dict(node))\n return meta",
"def _metadata(self):\n meta = super()._metadata\n meta.update({\n \"name\": self.name,\n \"lead_in_time\": self.lead_in_time,\n \"amplification\": self.amplification,\n \"amplifier_clipping\": self.amplifier_clipping,\n \"power_threshold\": self.power_threshold,\n })\n return meta",
"def meta_params(request):\n return request.param",
"def default_nested(self, data, many, **kwargs):\n if not data.get(\"metadata\"):\n data[\"metadata\"] = {}\n if not data.get(\"pids\"):\n data[\"pids\"] = {}\n\n return data",
"def get_meta():\n meta = {\n 'pages': _get_pages()\n }\n return meta",
"def prepare(self, request):\n pass",
"def meta_data(self) -> Dict:\n pass",
"def init_meta():\n meta = {}\n meta[\"title\"] = None\n meta[\"authors\"] = []\n meta[\"date\"] = None\n meta[\"abstract\"] = None\n meta[\"notes\"] = [] \n return meta",
"def get_parser_context(self, http_request):\n res = super().get_parser_context(http_request)\n res['json_schema'] = self.create_payload_schema\n return res",
"def get_save_meta(self):\n return {}",
"def reset_request_data(context):\n for name, default in default_request_data():\n setattr(context, name, default)",
"def __init__(self, **kwargs):\n super(Request, self).__init__(**kwargs)",
"def _identifying_params(self) -> Mapping[str, Any]:\n return {**{\"model_name\": self.model_name}, **self._default_params}"
] | [
"0.5762792",
"0.56348234",
"0.56074893",
"0.55480593",
"0.55469114",
"0.5497104",
"0.5468277",
"0.54309773",
"0.54052013",
"0.5381905",
"0.537283",
"0.53606224",
"0.5316467",
"0.5299309",
"0.5298441",
"0.5293145",
"0.52903724",
"0.5282184",
"0.52806866",
"0.5275312",
"0.5270135",
"0.52638257",
"0.5249074",
"0.5247199",
"0.5242565",
"0.52339214",
"0.52097845",
"0.52074003",
"0.5182293",
"0.51822245"
] | 0.7019134 | 0 |
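A hedged sketch of how a Scrapy spider might use the helper above when following links from a SERP page, so that category, gender and meta_data carry over to the child product requests. The spider name, CSS selector and callbacks are assumptions, and the sketch assumes `create_default_request_meta` is importable in the spider's module.

```python
import scrapy

# Hypothetical spider showing meta propagation from parent to child requests.
class ExampleSpider(scrapy.Spider):
    name = "example"  # hypothetical

    def parse_serp(self, response):
        meta = create_default_request_meta(response)  # helper from the record above
        for href in response.css("a.product::attr(href)").getall():  # selector is illustrative
            yield response.follow(href, callback=self.parse_product, meta=meta)

    def parse_product(self, response):
        # The child request sees the parent's propagated fields.
        yield {
            "original_URL": response.meta.get("original_URL"),
            "category": response.meta.get("category"),
        }
```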
Check if a given bbUser object is stored in the database. Currently only checks if a user with the same ID is stored in the database, not if the objects are the same. | def userObjExists(self, user : bbUser.bbUser) -> bool:
return self.userIDExists(user.id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exists_in_db(self) -> bool:\n query = \"\"\"SELECT * \n FROM Users \n WHERE Username=?;\"\"\"\n return len(self.db.fetchall(query, values=(self.username,))) > 0",
"def isOwner(id, userId):\n db = core.connect()\n return db[id][\"createdBy\"] == userId",
"def is_userAS(self, obj):\n # Some other places simply check for owner=None.\n return UserAS.objects.filter(as_ptr=obj).exists()",
"def check_user(user):\n result_user = search_column_with_constraint(choose_database(\"auth\"), \"users\", \"id\", \"id\", user)\n # result_user = search_single_entry(choose_database(\"auth\"), \"users\", \"id\", user)\n\n if len(result_user) == 0:\n return 0\n else:\n return 1",
"def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )",
"def user_exists(self,unique_ID):\n\t\ttry:\n\t\t\tself.data[unique_ID]\n\t\texcept KeyError:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True",
"def get_in_users(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user in obj.users.all():\n return True\n else:\n return False",
"def user_exists(mail_or_id) -> bool:\n conn = sqlite3.connect(\"db.sqlite3\")\n c = conn.cursor()\n\n if type(mail_or_id) is int:\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE id=?\n \"\"\", (mail_or_id,))\n else: #mail\n c.execute(\"\"\"\n SELECT 1 FROM Users\n WHERE mail=?\n \"\"\", (mail_or_id,))\n \n conn.commit()\n \n exists = bool(len(list(c)))\n \n conn.close()\n\n return exists",
"def has_history(self, user):\n\n header = connect(self.__path)\n curs = header.cursor()\n encrypted_id = md5((str(user.id) + \"typicaluser\").encode()).hexdigest()\n curs.execute(\"SELECT * FROM users WHERE id = (?)\", (encrypted_id,))\n data = curs.fetchall()\n return len(data) >= 1",
"def user_exists(cls, *args, **kwargs):\r\n user_model = cls.user_model()\r\n query = get_query_by_dict_param(user_model, kwargs)\r\n return user_model.select().where(query).count() > 0",
"def userIDExists(self, id : int) -> bool:\n return id in self.users.keys()",
"def __contains__(self, user_name):\n tuples = self._execute(\n \"SELECT name FROM users WHERE name == ?\",\n (user_name,)\n )\n return len(tuples) == 1",
"def user_exist(cls,user_name):\n for user in cls.user_list:\n if user.user_name == user_name:\n return True\n return False",
"def user_in_db(user_field, users_list, user_key):\n if any(user.get(user_key) == user_field for user in users_list):\n return True\n return False",
"def has_object_permission(self, request, view, obj):\n return request.user.id == obj.user_id",
"def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_club_privileges():\n return True\n return False",
"def hasUser(self, id):\n try:\n self.getUser(id)\n return True\n except KeyError:\n return False",
"def check_user_from_db(username: str, email: str) -> bool:\n if User.objects.filter(Q(username=username) | Q(email=email)).first():\n raise UniqueUser(\"Пользователь уже существует\")\n else:\n return True",
"def has_object_permission(self, request, view, obj):\n if request.method in permissions.SAFE_METHODS:\n return True\n # When the user make a request It will check that is on Safe methods, so it return true if the user is \n # trying to update is own profile or return false. And also it will return the obj.id == request.user.id\n return obj.id == request.user.id",
"def has_user(self, user): # pylint: disable=unused-argument\r\n return False",
"def _has_data(cls):\n return User.objects.count() > 0",
"def get_is_por_holder(self, obj):\n user = self.context['request'].user\n if not user.is_authenticated:\n return False\n # pylint: disable=no-member\n profile = UserProfile.objects.get(user=user)\n if obj in profile.get_council_privileges():\n return True\n return False",
"def has_object_permission(self, request, view, obj):\n return request.user == obj",
"def exists(cls, user_id):\n user_id = int(user_id)\n user = DB_USER_TABLE.get(doc_id=user_id)\n if not user:\n raise ValueError(f\"unknown user '{user_id}'\")\n return user_id",
"def exists_in_db(self) -> bool:\n query = '''SELECT * \n FROM ESLReceipts \n WHERE Transaction_Number=? AND Date=? AND Description=? \n AND Memo=? AND Amount_Debit=? \n AND Amount_Credit=? AND Balance=? \n AND Check_Number=? AND Fees=? \n AND Card_Type=? AND Is_Payment=? \n AND Is_Transaction=? AND User_id=?;'''\n return len(self.db.fetchall(query, values=self.to_tuple())) > 0",
"def __contains__(self, obj):\n if isinstance(obj, self):\n query = self.where(**obj.data).select()\n result = query.execute()\n if result.count:\n return True\n return False",
"def exists(self, obj):\n return False",
"def check_user_exists(self):\n is_exists = False\n if auth.UserInfo.objects.filter(\n user_id__username=self.username,\n is_active=True).exists():\n is_exists = True\n return is_exists",
"def check_user(self, username, password):\n user = [user for user in self.db if user['username'] == username]\n if user:\n if check_password_hash(user[0][\"password\"], password):\n return True\n return False\n return False",
"def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False"
] | [
"0.66454977",
"0.6629096",
"0.6502776",
"0.64873165",
"0.6384",
"0.63549405",
"0.634741",
"0.6321379",
"0.63175",
"0.63037634",
"0.62980324",
"0.6297806",
"0.6245325",
"0.62262326",
"0.62138486",
"0.62071496",
"0.6190167",
"0.6176963",
"0.616143",
"0.615577",
"0.6141846",
"0.6132627",
"0.61052483",
"0.60852957",
"0.60821223",
"0.6078981",
"0.60757846",
"0.6066675",
"0.6065301",
"0.6063468"
] | 0.7575999 | 0 |
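A small runnable sketch of the ID-only semantics noted in the docstring above: two distinct user objects that merely share an ID are treated as the same stored user. `FakeUser` and `FakeUserDB` are stand-ins for the real `bbUser`/`bbUserDB` types; only the lookup logic mirrors the record.

```python
# Stand-in types; only the ID-based membership check mirrors the record above.
class FakeUser:
    def __init__(self, id):
        self.id = id

class FakeUserDB:
    def __init__(self):
        self.users = {}

    def userIDExists(self, id):
        return id in self.users

    def userObjExists(self, user):
        return self.userIDExists(user.id)

db = FakeUserDB()
db.users[123] = FakeUser(123)

other_object_same_id = FakeUser(123)            # a different object entirely
print(db.userObjExists(other_object_same_id))   # True -- only the ID is compared
```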
Reset the stats for the user with the specified ID. | def reinitUser(self, id : int):
id = self.validateID(id)
# ensure the ID exists in the database
if not self.userIDExists(id):
raise KeyError("user not found: " + str(id))
# Reset the user
self.users[id].resetUser() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stats_reset(self):\n self.stats.reset()",
"def stats_reset(self):\n self.stats.reset()",
"def reset_stats(self):\n print(\"Reseting stats\")\n self.player_lives = self.ai_stts.player_lives\n self.score = 0\n self.level = 1",
"def resetUser(self):\n\t\turl = \"https://habitica.com/api/v4/user/reset\"\n\t\treturn(postUrl(url, self.credentials))",
"def reset_metric_stats(self):\n\n self.__stats_table = {}",
"def reset(self):\n self.stats = {}",
"def user_reset(user_id):\n\n con = connect()\n cursor = con.cursor()\n cursor.execute(f\"\"\"\n START TRANSACTION;\n \n DELETE FROM ratings\n WHERE user_id = {user_id};\n \n DELETE FROM counts_by_rating_type\n WHERE user_id = {user_id};\n \n {_count_reset_query(user_id, 1)}\n {_count_reset_query(user_id, 2)}\n {_count_reset_query(user_id, 3)}\n \n COMMIT;\n \"\"\")\n cursor.close()\n return jsonify({'message': 'ok'}), 200",
"def reset_stats(self):\n self.ships_left = self.sett.ship_limit\n self.score = 0\n self.level = 1",
"def reset_metric_stats(self):\n self.__stats_table = {}",
"def reset_stats(self):\n self.ships_left= self.settings.ship_limit\n self.score = 0\n self.level = 1",
"def reset_stats(self):\n self.lives_left = self.game_settings.lives\n self.score = 0\n self.level = 1",
"def resetUsers():\n global pollResults\n pollResults = dict()\n emitResults()",
"def reset_stats(self):\n self.ships_left = self.ai_settings.ship_limit\n self.score = 0\n # Never reset the high score!\n self.high_score = self.read_score()\n self.level = 1",
"def reset_stats(self):\r\n self.ships_left = self.ai_settings.ship_limit\r\n self.score = 0\r\n self.level = 1",
"def reset_stats(self):\r\n self.ship_left = self.settings.ship_limit\r\n self.score = 0\r\n self.level = 1",
"def delUser(self, id):\n del self.users[id]\n if id in self._nameCache:\n del self._nameCache[self._nameCache[id]]\n del self._nameCache[id]\n if id in self._hostmaskCache:\n for hostmask in self._hostmaskCache[id]:\n del self._hostmaskCache[hostmask]\n del self._hostmaskCache[id]\n self.flush()",
"def clear_stats(self):\n self._stats = None",
"async def red_delete_data_for_user(self, *, requester, user_id):\n\t\tawait self.config.user_from_id(user_id).clear()",
"def reset_turn_stats(self):\n\n # Set the attribute value to 0\n self._current_score = 0",
"def force_reset(user_id):\n if CURRENT_USER_KEY not in session or session[CURRENT_USER_KEY] != user_id:\n do_logout()\n return redirect('/')\n\n user = User.query.get_or_404(user_id)\n form = ForcedResetForm()\n\n if form.validate_on_submit():\n User.change_info(\n user_id=user_id, username=form.username.data, password=form.password.data)\n db.session.commit()\n flash('User information successfully changed!', 'success')\n return redirect('/')\n return render_template('/forced_reset.html', form=form, user=user)",
"def reset(cls):\n GrandChallenge.objects.all().delete()\n GrandChallengeUser.objects.update(lost=0, last_round=0)\n cls.set_current_round(0)",
"def reset_stats(self):\r\n self.pepes_left = self.ai_settings.pepe_limit\r\n self.score = 0\r\n self.level = 1",
"def update_user(id):\n pass",
"def update_user_metrics(self,user_id:int)->None:\n with connection.cursor() as cursor:\n cursor.execute(f\"SELECT update_user_metrics({user_id})\")\n ##TODO: this should return something ",
"async def reset(self, ctx, user : str=None):\n cyphon = discord.utils.get(ctx.message.server.members, id=\"186835826699665409\")\n\n if self.check_channel(ctx):\n if self.check_permission(ctx) or ctx.message.author == cyphon:\n userFound = False\n if (user == \"bot\"):\n self.intro_message = None\n else:\n for stream in self.twitch_streams:\n if (user):\n if (stream[\"NAME\"] == user):\n stream[\"MESSAGE\"] = None\n stream[\"ALREADY_ONLINE\"] = False\n stream[\"CHANNEL\"] = self.stream_channel\n userFound = True\n else:\n stream[\"MESSAGE\"] = None\n stream[\"ALREADY_ONLINE\"] = False\n stream[\"CHANNEL\"] = self.stream_channel\n\n if (user):\n if (userFound):\n await self.bot.say(\"Reset complete.\")\n else:\n await self.bot.say(\"User does not exist!\")\n else:\n await self.bot.say(\"Reset complete.\")\n else:\n await self.bot.send_message(ctx.message.author, \"You don't have permission to execute that command.\")",
"def reset_hl_stats(self):\n\n self.ships_left = self.settings.ship_limit\n self.score = 0\n self.level = 1",
"def reset(self):\n self.sum_metric = 0.\n self.num_inst = 0.\n self.metrics.reset_stats()",
"def reset(self) -> None:\n self.statistics = defaultdict(int)",
"def set_game_player_stats(self, user_id, stats):\n user = User.objects.get(id=user_id)\n\n try:\n player = GamePlayer.objects.get(game=self, player=user)\n if stats != {}:\n player.status = stats[\"status\"]\n player.life = stats[\"life\"]\n player.acorn = stats[\"acorn\"]\n player.energy = stats[\"energy\"]\n player.poison = stats[\"poison\"]\n player.experience = stats[\"exp\"]\n player.storm = stats[\"storm\"]\n player.clues = stats[\"clues\"]\n player.food = stats[\"food\"]\n player.treasure = stats[\"treasure\"]\n player.commander_cost = stats[\"commander_cost\"]\n player.commander_damage = stats[\"commander_damage\"]\n player.monarch = stats[\"monarch\"]\n player.citys_blessing = stats[\"citys_blessing\"]\n player.save()\n except GamePlayer.DoesNotExist:\n user_profile = UserProfile.objects.get(user=user)\n new_player = GamePlayer(\n game=self,\n player=user,\n life=self.game_type.starting_life,\n avatar_img=user_profile.avatar_img\n )\n new_player.save()",
"def reset_attempts(self, user=None):\r\n if user:\r\n self.q(css='input[id^=sd_fu_]').first.fill(user)\r\n self.q(css='section.staff-modal a#staff-debug-reset').click()"
] | [
"0.6416",
"0.6416",
"0.63771105",
"0.6241124",
"0.6069007",
"0.6018874",
"0.6008657",
"0.5996576",
"0.59836346",
"0.59318155",
"0.5928619",
"0.5910059",
"0.5876897",
"0.58712983",
"0.58664316",
"0.58137023",
"0.5787853",
"0.5773324",
"0.5766223",
"0.5684285",
"0.5558619",
"0.55504537",
"0.5522887",
"0.5496287",
"0.5492176",
"0.545307",
"0.54521644",
"0.5434743",
"0.5406982",
"0.5389609"
] | 0.694401 | 0 |
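Because the method above raises `KeyError` for IDs that are not stored, callers usually guard the call or catch the error. A hedged usage sketch, assuming an existing `bbUserDB` instance named `usersDB` and an illustrative integer ID (both are assumptions, not part of the snippet):

```python
# usersDB is assumed to be an existing bbUserDB instance; the ID is illustrative.
member_id = 123456789012345678

try:
    usersDB.reinitUser(member_id)   # wipes the stored user's stats via resetUser()
except KeyError:
    print("no stored user with ID", member_id, "- nothing to reset")
```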
Create a new bbUser object with the specified ID and add it to the database. | def addUser(self, id : int) -> bbUser.bbUser:
id = self.validateID(id)
# Ensure no user exists with the specified ID in the database
if self.userIDExists(id):
raise KeyError("Attempted to add a user that is already in this bbUserDB")
# Create and return a new user
newUser = bbUser.bbUser.fromDict(bbUser.defaultUserDict, id=id)
self.users[id] = newUser
return newUser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_user_by_id(cls, m_id):",
"def saveNewUser(self, userID):\n self.db.insert_new_user(userID)",
"def fusion_api_add_user(self, body, api=None, headers=None):\n return self.user.create(body, api, headers)",
"def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id",
"async def add_user(self, user_id) -> None:\n # await self.conn.execute(\n # \"INSERT INTO tg_users(userid) VALUES $1 ON CONFLICT DO NOTHING\",\n # user_id,\n # )\n return",
"def create_user(user_id):\n try:\n # connect to the database (normally, the cipherwallet sdk will connect to the same database)\n # we use a sqlite database here as an example\n db_engine = sqlalchemy.create_engine('sqlite:///your.db', echo=True)\n db = db_engine.connect()\n except:\n bottle.abort(503, \"Service Unavailable\")\n\n # make sure we have valid data\n firstname = bottle.request.POST.get('firstname', \"\").strip()\n password1 = bottle.request.POST.get('password1', \"\").strip()\n if (\n user_id is None or len(user_id) < 5 or len(user_id) > 64 or\n len(firstname) < 1 or len(firstname) > 64 or\n len(password1) < 5 or len(password1) > 64 \n ):\n bottle.abort(400, \"Bad Request\")\n \n # encrypt the password (you DO store the passwords in encrypted form, dont you)\n password = bcrypt.hashpw(password1, bcrypt.gensalt())\n\n # if the user already exists, delete it\n # (obviously, you wouldn't do that on your real website)\n db.execute(\n sql_statement(\"DELETE FROM users WHERE email = :user_id;\"),\n user_id=user_id\n )\n # now add the user\n ret = db.execute(\n sql_statement(\n \"INSERT INTO users(firstname, email, password, created_on) \" +\n \"VALUES (:firstname, :email, :password, :now);\"\n ),\n firstname=firstname,\n email=user_id,\n password=password,\n now=time.time()\n )\n if ret.rowcount:\n return {\n 'firstname': firstname,\n 'email': user_id,\n }\n else:\n bottle.abort(503, \"Service Unavailable\")",
"def create_user(self):\n if not self.is_valid():\n return None\n # generate a username \n ids = User.objects.values_list('id', flat=True).order_by('-id')[:1]\n if len(ids) > 0:\n # ids[0] will be the maximum value (due to order_by: '-id')\n idnum = ids[0] + 1\n else:\n idnum = 1\n # create User object \n username = \"user%s\" % idnum\n # NOTE: store email in lower case\n email = self.clean_email().lower()\n password = self.clean_password2()\n user = User(username=username, email=email, password='tmp')\n user.save()\n # set the real password\n user.set_password(password)\n # make user inactive (until user has confirmed account)\n user.is_active = False\n # update\n user.save()\n return user",
"def create_user(self, user_id):\n data = {\n 'email': self._email_for_user_id(user_id),\n 'username': user_id,\n 'password': str(uuid.uuid4()),\n 'name': user_id,\n }\n\n # create user and return it to caller\n return self._post('/users', data=data)",
"def add_new_user(self, user):\n # print(\"Saving new user\")\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))",
"def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")",
"async def create_user(new_user: InUser, db_handler: DBHandler = Depends(database_dependency)):\n try:\n inserted_record = await db_handler.insert_user(new_user=new_user)\n inserted_record = init_BaseUser(inserted_record)\n except DBHandlerException as e:\n return JSONResponse(status_code=400)\n\n return inserted_record",
"def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)",
"async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}",
"def new_user(user_session):\n user = User(\n name=user_session['username'],\n email=user_session['email'])\n session.add(user)\n session.flush()\n session.commit()\n return user.id",
"def post(self):\n data = UserRegister.parser.parse_args()\n\n if UserModel.find_by_id(data['username']):\n print(\"Failed\", file=sys.stderr)\n return {\n 'message':\n \"A user with name '{}' already exists.\"\n .format(data['username'])\n }, 400\n\n\n user = UserModel(**data) # data['username'], data['details'].......\n user.save_to_db()\n\n return {\"message\": \"User created successfully.\"}, 201",
"def add_user(self):\n user = models.User(email=self.test_user,\n password=generate_password_hash(self.test_user_password))\n user.add()",
"def add_new_user_to_db():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n img_url = request.form['img_url']\n\n new_user = User(first_name=first_name,last_name=last_name, img_url=img_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/users')",
"def add_user(self, session, user_data: Dict) -> User:\n chat_id = user_data[\"chat_id\"]\n username = user_data[\"username\"]\n first_name = user_data[\"first_name\"]\n last_name = user_data[\"last_name\"]\n time_registered = user_data[\"time_registered\"]\n is_admin = False\n reminder_time = datetime.time(hour=21, tzinfo=TIME_ZONE)\n\n user = session.query(User).get(chat_id)\n if user:\n if user.username != username:\n user.username = username\n session.commit()\n if user.is_banned is True:\n user.is_banned = False\n session.commit()\n return user\n\n new_user = User(\n chat_id=chat_id,\n is_banned=False,\n username=username,\n first_name=first_name,\n last_name = last_name,\n time_registered = time_registered,\n is_admin = is_admin,\n reminder_time = reminder_time,\n )\n session.add(new_user)\n session.commit()\n return new_user",
"def create_user():\n record = request.get_json()\n if record is None:\n return {\"Error\": \"No data Supplied.\"}, 400\n\n schema = user_schema.load(record)\n\n if UserModel.objects(email=schema['email']):\n return {\"Error\": \"User Data already exists.\"}, 400\n user = UserModel(**schema)\n user.hash_password()\n user.save()\n ser_data = user_schema.dump(user)\n token = Auth.generate_token(ser_data[\"_id\"])\n return {\"message\": \"User Created Successfully\", \"Token\": token, \"id\": str(user.id)}, 200",
"def add_user(self, user_id, group_id='', user_level=1, user_name='', name='', method_id=1):\n stmt = \"\"\"INSERT INTO users (_user_id, group_id, user_level, _user_name, _name, method_id) \n SELECT ?, ?, ?, ?, ?, ? \n WHERE NOT EXISTS(SELECT 1 FROM users WHERE (?) = _user_id)\"\"\"\n args = (user_id, group_id, user_level, user_name, name, method_id, user_id)\n self.conn.execute(stmt, args)\n self.conn.commit()",
"def save_user(self):\n db.session.add(self)\n db.session.commit()",
"def post(self):\n data = flask.request.json\n user_dao.create_user(data)\n return None, 201",
"def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user",
"def create_user():\n try:\n\n user = User(username=request.json.get(\"username\"), score=0,)\n\n user.insert()\n\n response = jsonify({\"success\": True, \"created_user_id\": user.id})\n\n except AttributeError:\n abort(400)\n\n return response",
"def create_user(absolute_uid):\n\n try:\n user = User(absolute_uid=absolute_uid)\n with current_app.session_scope() as session:\n session.add(user)\n session.commit()\n\n except IntegrityError as error:\n current_app.logger.error('IntegrityError. User: {0:d} was not'\n 'added. Full traceback: {1}'\n .format(absolute_uid, error))\n raise",
"def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()",
"async def create(self):\n assert self.backend.username_field in self\n assert \"password\" in self\n self.setdefault(\"date_joined\", now_utc())\n self.setdefault(\"is_superuser\", False)\n self.setdefault(\"is_staff\", False)\n self.setdefault(\"is_active\", True)\n self[\"id\"] = await self.backend.insert(**self)",
"def add_user(self, user_id):\n user_doc = {\n 'type': 'user',\n 'name': user_id\n }\n return self.add_doc_if_not_exists(user_doc, 'name')",
"def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))",
"def insert_user(user):\n\n try:\n session.add(user)\n session.commit()\n except Exception as e:\n logger.error(e)"
] | [
"0.71949327",
"0.7119717",
"0.6967095",
"0.6960353",
"0.69133216",
"0.6906858",
"0.6867741",
"0.68506837",
"0.684258",
"0.682456",
"0.6823309",
"0.6796022",
"0.67648846",
"0.67618465",
"0.6743965",
"0.6695233",
"0.6691359",
"0.6680211",
"0.6674689",
"0.66608787",
"0.6653089",
"0.66282296",
"0.66210085",
"0.6602975",
"0.6596241",
"0.6595094",
"0.6592473",
"0.65922904",
"0.6592169",
"0.65775794"
] | 0.78019196 | 0 |
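Since `addUser` raises `KeyError` when the ID is already stored, a guarded add looks like the sketch below. `usersDB` is again an assumed existing `bbUserDB` instance; the fact that the new user is built from `bbUser.defaultUserDict` comes from the snippet itself.

```python
# usersDB is assumed to be an existing bbUserDB instance.
member_id = 123456789012345678

if not usersDB.userIDExists(member_id):
    new_user = usersDB.addUser(member_id)   # fresh user built from bbUser.defaultUserDict
else:
    new_user = usersDB.getUser(member_id)   # already stored; fetch the existing record
```

This guard-then-add pattern is what `getOrAddID` collapses into a single call.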
If a bbUser exists in the database with the requested ID, return it. If not, create and store a new bbUser and return it. | def getOrAddID(self, id : int) -> bbUser.bbUser:
return self.getUser(id) if self.userIDExists(id) else self.addUser(id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n return self.users[id]",
"def addUser(self, id : int) -> bbUser.bbUser:\n id = self.validateID(id)\n # Ensure no user exists with the specified ID in the database\n if self.userIDExists(id):\n raise KeyError(\"Attempted to add a user that is already in this bbUserDB\")\n # Create and return a new user\n newUser = bbUser.bbUser.fromDict(bbUser.defaultUserDict, id=id)\n self.users[id] = newUser\n return newUser",
"def _get_or_create_user(self, user):\n self.cursor.execute('SELECT id FROM user WHERE fb_id=%s', (user['id'],))\n user_ids = self.cursor.fetchall()\n assert len(user_ids) <= 1, 'Too many users: ' + user_ids\n if len(user_ids) == 1:\n return user_ids[0][0]\n else:\n self.cursor.execute('INSERT INTO user (fb_id, name) VALUES (%s,%s)', (user['id'], user['name']))\n return self.cursor.lastrowid",
"def get_or_create_user(user_id, sqlite_db):\n\n if user_id is None:\n return None\n\n cursor = sqlite_db.cursor()\n query = \"SELECT * FROM users WHERE id='{}'\".format(user_id)\n cursor.execute(query)\n\n users = cursor.fetchall()\n\n if len(users) > 1:\n raise Exception('Multiple users found for single node ID')\n\n if users:\n user_obj = {}\n for key in users[0].keys():\n user_obj[key] = users[0][key]\n user_obj['institutions'] = json.loads(user_obj['institutions'])\n return user_obj\n\n user = User.load(user_id)\n if user is None:\n return None\n\n institutions = [\n {'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path}\n for inst in user.affiliated_institutions\n ] if user else []\n\n query = \"INSERT INTO users (id, entry_point, locale, timezone, institutions) VALUES ('{id}', '{entry_point}', '{locale}', '{timezone}', '{institutions}');\".format(\n id=user_id,\n entry_point=None if user is None else get_entry_point(user),\n locale=getattr(user, 'locale', ''),\n timezone=getattr(user, 'timezone', ''),\n institutions=json.dumps(institutions),\n )\n cursor.execute(query)\n sqlite_db.commit()\n return get_or_create_user(user_id, sqlite_db)",
"def find_by_id(cls, _id):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE id=?\".format(table=cls.TABLE_NAME)\n result = cursor.execute(query, (_id,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = None\n\n ## Close Connection\n connection.close()\n\n return user",
"def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None",
"def _get_user_by_id(self, _id):\n user_resp = self._db.Users(database_pb2.UsersRequest(\n request_type=database_pb2.UsersRequest.FIND,\n match=database_pb2.UsersEntry(global_id=_id)))\n if user_resp.result_type != database_pb2.UsersResponse.OK:\n self._logger.warning(\n 'Could not find user: {}'.format(user_resp.error))\n return None\n if not len(user_resp.results):\n self._logger.warning('Could not find user.')\n return None\n return user_resp.results[0]",
"def find_user_by_id(id: str) -> User:\n\n # Find the id user in the database, else return None\n return User.query.get(int(id))",
"def load_user(id):\n return Users.query.get(id)",
"def user_by_id(user_id):\n user = User.query.filter(User.id == user_id).one_or_none()\n return user",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def get_user_by_id(self, id):\n\t\treturn self.users.get(id)",
"def find_user_by_id(id):\n try:\n cursor.execute(\"select * from users where id = %s\", (id,))\n user = cursor.fetchone()\n user = User(id=user[0], firstname=user[1], lastname=user[2], othername=user[3], email=user[4],phonenumber=user[5], passporturl=user[6], roles=user[7], nationalid=user[8], county=user[9],password=user[10], date_created=user[11], date_modified=user[12])\n return user.json_dump()\n except Exception:\n return False",
"def find_by_id(cls, _id):\n user = cls.query.filter_by(id=_id).first()\n return user",
"async def get_user_by_id(self, roblox_id: int) -> User:\n r = await self.request.request(url=f'https://api.roblox.com/users/{roblox_id}', method=\"GET\", noerror=True)\n json = r.json()\n if r.status_code != 200 or not json.get('Id') or not json.get('Username'):\n return None\n return User(self.request, json['Id'], json['Username'])",
"def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user",
"def load_user(id):\n\treturn User.query.get(int(id))",
"def get_user(id):\n pass",
"def checkIfUserExists(self, userID):\n return self.db.select_user(userID)",
"def load_user(id):\n\n return User.query.get(int(id))",
"def load_user(id):\r\n\r\n\tuser = User.query.get(int(id))\r\n\tif user is not None:\r\n\t\tuser.id = session['user_id']\r\n\t\treturn user\r\n\telse:\r\n\t\treturn None",
"def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user",
"def load_user(id):\n return User.get_by_id(int(id))",
"def _find_existing_user(self, username):\n users = User.objects.filter(username=username)\n if users.count() <= 0:\n return None\n else:\n return users[0]",
"def load_user(id):\n user = db.session.query(User).filter(User.id == id).first()\n return user",
"def get_by_id(self):\n user_node = graph.find_one(\"User\",\n property_key=\"id\",\n property_value=self.id)\n return user_node",
"def lookupUser_byID(self, user_id):\n sql = \"SELECT * FROM Users WHERE id='%s'\"\\\n % (user_id)\n res = self.execute(sql)\n reslist = res.fetchall()\n if reslist == []:\n return None\n else:\n return reslist[0]"
] | [
"0.71380436",
"0.7094037",
"0.70649517",
"0.6900218",
"0.68044686",
"0.67403007",
"0.6731143",
"0.67079204",
"0.6696172",
"0.6682262",
"0.6681719",
"0.6681719",
"0.6681719",
"0.6681719",
"0.6681203",
"0.666396",
"0.6660379",
"0.66603625",
"0.6650168",
"0.6632992",
"0.6603484",
"0.65684944",
"0.65654874",
"0.6544115",
"0.6509221",
"0.64940447",
"0.64664865",
"0.64660335",
"0.6461965",
"0.6453135"
] | 0.797189 | 0 |
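A sketch of the get-or-create idiom the one-liner above provides: a caller can ask for a user without first checking whether one is stored. The Discord-style `message` object, the handler and `usersDB` are hypothetical framing, not part of the snippet.

```python
# usersDB is assumed to be an existing bbUserDB instance; the handler is hypothetical.
def handle_command(message):
    # Unknown IDs get a fresh default bbUser; known IDs return the stored one,
    # so the handler needs no separate userIDExists() check.
    requester = usersDB.getOrAddID(message.author.id)
    ...  # operate on requester
```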
Fetch the bbUser from the database with the given ID. | def getUser(self, id : int) -> bbUser.bbUser:
id = self.validateID(id)
return self.users[id] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def fetch_user(self, id: utils.Intable) -> User | None:\n id64 = make_id64(id=id, type=Type.Individual)\n return await self._connection.fetch_user(id64)",
"def get_user_by_id(self, id):\n\t\treturn self.users.get(id)",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n return User.query.get(int(id))",
"def load_user(id):\n\treturn User.query.get(int(id))",
"def load_user(id):\n return Users.query.get(id)",
"def load_user(id):\n\n return User.query.get(int(id))",
"async def fetch_user(self, id: str):\n user = await self.http.get_user(id)\n return User(state=self.http, data=user)",
"async def get_user_by_id(self, roblox_id: int) -> User:\n r = await self.request.request(url=f'https://api.roblox.com/users/{roblox_id}', method=\"GET\", noerror=True)\n json = r.json()\n if r.status_code != 200 or not json.get('Id') or not json.get('Username'):\n return None\n return User(self.request, json['Id'], json['Username'])",
"def get(cls,id):\n result = execute_query(\"\"\"SELECT * FROM Users Where username = ?\n \"\"\",\n [id])\n try:\n user = User(id,result[0][1])\n except Exception as e:\n return None\n \n return user",
"def _get_user_by_id(self, _id):\n user_resp = self._db.Users(database_pb2.UsersRequest(\n request_type=database_pb2.UsersRequest.FIND,\n match=database_pb2.UsersEntry(global_id=_id)))\n if user_resp.result_type != database_pb2.UsersResponse.OK:\n self._logger.warning(\n 'Could not find user: {}'.format(user_resp.error))\n return None\n if not len(user_resp.results):\n self._logger.warning('Could not find user.')\n return None\n return user_resp.results[0]",
"def get_user_by_id(self, user_id):\n query = \"SELECT * FROM users WHERE user_id = %s\"\n self.cursor.execute(query,[user_id])\n result = self.cursor.fetchone()\n return result",
"def load_user(id):\n user = db.session.query(User).filter(User.id == id).first()\n return user",
"def load_user(id):\n return User.get_by_id(int(id))",
"def get(id):\n return User.query.filter_by(id=id).first()",
"def find_by_id(cls, _id):\n ## Setup Connection & Cursor\n connection, cursor = Database.connect_to_db()\n\n ## Find the user\n query = \"SELECT * FROM {table} WHERE id=?\".format(table=cls.TABLE_NAME)\n result = cursor.execute(query, (_id,)) ## Parameter must always be a tuple\n row = result.fetchone() ## Returns None if no results\n\n ## Create User object if we get data back\n if row:\n user = cls(*row)\n else:\n user = None\n\n ## Close Connection\n connection.close()\n\n return user",
"def get_user(self, id: utils.Intable) -> User | None:\n id64 = make_id64(id=id, type=Type.Individual)\n return self._connection.get_user(id64)",
"async def getch_user(self, id: str):\n return self.get_user(id) or await self.fetch_user(id)",
"def find_user_by_id(id: str) -> User:\n\n # Find the id user in the database, else return None\n return User.query.get(int(id))",
"def get(self, id):\n\t\ttry:\n\t\t\tflask_app.logger.debug('We are getting the user: %d', id)\n\t\t\treturn user_service.get(id)\n\t\texcept AssertionError as e:\n\t\t\tuser_space.abort(400, e.args[0], status = \"Could not get user\", statusCode = \"400\")\n\t\texcept Exception as e:\n\t\t\tuser_space.abort(500, e.args[0], status = \"Could not get user\", statusCode = \"500\")",
"def get_user_by_id(id):\n user = session.query(User).get(id)\n return user",
"def find_by_id(cls, _id):\n user = cls.query.filter_by(id=_id).first()\n return user",
"def load_user():\n\n return User.query.get(int(id))",
"def find_user_by_id(id):\n try:\n cursor.execute(\"select * from users where id = %s\", (id,))\n user = cursor.fetchone()\n user = User(id=user[0], firstname=user[1], lastname=user[2], othername=user[3], email=user[4],phonenumber=user[5], passporturl=user[6], roles=user[7], nationalid=user[8], county=user[9],password=user[10], date_created=user[11], date_modified=user[12])\n return user.json_dump()\n except Exception:\n return False",
"def get_by_id(self, id):\n return self.session.query(User).filter_by(id=id).first()",
"def user_by_id(self, user_id):\n\n cur = self.db.cursor()\n cur.execute(\n \"\"\"SELECT user_id, username, password, phone, email, role\n FROM users WHERE user_id = %s\"\"\", (user_id, ))\n \n user_from_db = cur.fetchone()\n if cur.rowcount == 1: \n user_id, username, password, phone, email, role = user_from_db\n resp = dict(user_id=user_id, username=username, password=password, phone=phone, email=email, role=role)\n \n return resp\n return None",
"def load_user(user_id):\r\n return User.query.get(int(user_id))",
"def get_user(id):\n pass"
] | [
"0.8142412",
"0.7948193",
"0.79404026",
"0.79404026",
"0.79404026",
"0.79404026",
"0.7924561",
"0.7899446",
"0.7896432",
"0.7849403",
"0.78431547",
"0.78220767",
"0.78131187",
"0.7770938",
"0.7769073",
"0.77454466",
"0.77377176",
"0.7650907",
"0.7642752",
"0.7618635",
"0.7615345",
"0.75778913",
"0.757539",
"0.75479686",
"0.7542696",
"0.75379884",
"0.7522319",
"0.7491537",
"0.7472527",
"0.74632627"
] | 0.8525421 | 0 |
Serialise this bbUserDB into dictionary format. | def toDict(self, **kwargs) -> dict:
data = {}
# Iterate over all user IDs in the database
for id in self.getIds():
# Serialise each bbUser in the database and save it, along with its ID to dict
# JSON stores properties as strings, so ids must be converted to str first.
try:
data[str(id)] = self.users[id].toDict(**kwargs)
except Exception as e:
bbLogger.log("UserDB", "toDict", "Error serialising bbUser: " + e.__class__.__name__, trace=traceback.format_exc(), eventType="USERERR")
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def user2dict(self):\n d = {}\n d['username'] = self.username\n d['level'] = self.level\n d['name'] = self.name\n d['email'] = self.email\n d['creation'] = self.creation\n d['update'] = self.update\n d['nsentences'] = self.nsentences\n d['nsessions'] = self.nsessions\n d['score'] = self.score\n d['pw_hash'] = self.pw_hash\n return d",
"def to_dict(self):\n return self._user_data",
"def to_dictionary(self) -> dict:\n return {\n 'id': self.id,\n 'username': self.username,\n 'password': self.password,\n 'firstname': self.firstname,\n 'surname': self.surname,\n 'currency_id': self.currency_id,\n 'has_first_sign_in': self.has_first_sign_in,\n 'account_created': self.account_created,\n 'last_sign_in': self.last_sign_in\n }",
"def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }",
"def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'userID': self.userID,\n }",
"def serialize(self):\n return {\n 'did' : self.did,\n 'name' : self.name,\n 'passwd' : self.passwd,\n 'email' : self.email,\n 'phone' : self.phone,\n 'addr_1' : self.addr_1,\n 'addr_2' : self.addr_2,\n 'city' : self.city,\n 'state' : self.state,\n 'zip' : self.zip,\n 'grade' : self.grade,\n }",
"def serialize(self):\n\t\treturn {\n\t\t\t'id': self.id,\n\t\t\t'name': self.name,\n\t\t\t'user': self.user_id\n\t\t}",
"def to_dict(self):\n return {\n \"id\":self.id,\n \"username\":self.email,\n \"email\":self.email,\n \"firstname\":self.firstname,\n \"lastname\":self.lastname\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'username' : self.username,\n 'email' : self.email\n }",
"def to_dict(self):\n user_idt = self.user_idt_format.format(user=self.user_id)\n\n return {'user': self.user_id,\n 'is_admin': self.is_admin,\n 'read_only': self.read_only,\n 'show_deleted': self.show_deleted,\n 'auth_token': self.auth_token,\n 'request_id': self.request_id,\n 'roles': self.roles,\n 'user_identity': user_idt,\n 'user_name': self.user_name}",
"def serialize(self):\n return {\n 'sid' : self.sid,\n 'name' : self.name,\n 'passwd' : self.passwd,\n 'email' : self.email,\n 'phone' : self.phone,\n 'addr_1' : self.addr_1,\n 'addr_2' : self.addr_2,\n 'city' : self.city,\n 'state' : self.state,\n 'zip' : self.zip,\n }",
"def serialize(self):\n return {\n 'user_id' : self.user_id,\n 'session_id' : self.session_id,\n }",
"def serialize(self):\n return {\n 'name' : self.name,\n 'email' : self.email,\n 'rfidno' : self.rfidno,\n 'pin' : self.pin,\n 'rollno' : self.rollno,\n 'userLevel' : self.userLevel,\n \n }",
"def to_dict(self):\n data = {\n \"id\": self.id,\n \"username\": self.username,\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"email\": self.email,\n \"city\": self.city,\n \"state\": self.state,\n \"active_plan\": self.active_plan\n }\n return data",
"def __to_dict(self):\n our_dict = {'username': self.username, 'email': self.email,\n 'name': self.name, 'enable': self.enable}\n return our_dict",
"def _to_dict(self):\n\t\treturn {'id': self.id,\n\t\t\t\t'name': self.name,\n\t\t\t\t'surname': self.surname}",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n }",
"def serialize(self):\n return {'id': self.id,\n 'rowId': self.id,\n 'name': self.name,\n 'owner': self.user.username,\n 'isOwner': current_user.id == self.owner_id,\n }",
"def serialize(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'joinedDate': self.joinedDate\n }",
"def user_data(self):\n return {\n 'username': self.username,\n 'email': self.email,\n 'password': self.password,\n '_id' : self._id\n }",
"def serialize(self):\n return {\n \"id\": self.id,\n \"user_id\": self.user_id,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time\n }",
"def serialize(self):\n return {\n 'id' : self.id,\n 'name' : self.name,\n 'owner' : self.user.name,\n 'room' : self.room.name,\n 'description' : self.description,\n 'price' : self.price,\n }",
"def serialize(self):\n\n try:\n owner = User.query.get(self.owner_id).username\n except Exception:\n owner = None\n\n return {'id': self.id,\n 'rowId': self.id,\n 'name': self.name,\n 'owner': owner,\n 'key': self.key,\n 'group': self.group.name,\n 'organization': self.group.organization.name,\n 'timeAdded': datetime_to_str(self.time_added),\n }",
"def serialize(self):\n return {\n \"first_name\" : self.first_name.capitalize(),\n \"last_name\" : self.last_name.capitalize(),\n \"name\" : self.first_name.capitalize() + ' ' + self.last_name.capitalize(),\n \"user_id\" : self.id,\n }",
"def serialize(self):\r\n return {\r\n \"user_id\": self.id,\r\n \"username\": self.username,\r\n \"image\": self.user_img,\r\n }",
"def serialize(self):\n return {\n 'user_id' : self.user_id,\n 'conference_id' : self.conference_id,\n }",
"def serialize(self):\n return {\n 'name' : self.name,\n 'id' : self.id,\n 'user_id' : self.user_id,\n 'last_edit' : self.time_str, \n }",
"def to_dict(self):\n return {\n 'user_id': self.id,\n 'name': self.name,\n 'email': self.email,\n 'registered': self.registered,\n 'approved': self.approved,\n 'signup_date': self.signup_date,\n 'api_key': self.api_key,\n 'calls': self.calls,\n 'messages': [message.text for message in self.messages]\n }",
"def to_dict(self, include_pwd=False):\n\n user_dict = {\n 'code': self.code,\n 'username': self.username,\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n 'registered_on': self.registered_on.isoformat()\n }\n\n if include_pwd:\n user_dict['password'] = self.password\n\n return user_dict",
"def serialize(self):\n return {\n 'id' :self.id,\n 'name' : self.name,\n 'email' : self.email\n }"
] | [
"0.73725003",
"0.72896534",
"0.71920073",
"0.71753246",
"0.716274",
"0.7151962",
"0.71234363",
"0.7105887",
"0.7040066",
"0.69651514",
"0.6952894",
"0.6938799",
"0.6933408",
"0.6907724",
"0.68980014",
"0.68346447",
"0.6777401",
"0.67636013",
"0.6733848",
"0.67159617",
"0.6711696",
"0.6681233",
"0.667652",
"0.6647735",
"0.6612952",
"0.6600742",
"0.6594389",
"0.6592579",
"0.6569775",
"0.6569107"
] | 0.7482245 | 0 |
Get summarising information about this bbUserDB in string format. Currently, this is only the number of users stored. | def __str__(self) -> str:
return "<bbUserDB: " + str(len(self.users)) + " users>" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stats_get_str(self):\n return self.stats.get_all_str()",
"def stats_get_str(self):\n return self.stats.get_all_str()",
"def to_string(self):\n return \"User: {} Description: {} Ratings: {}\".format(self.id_user, self.description, self.ratings)",
"def get_stats(self):\n result = {\n 'datetime': dt.datetime.now().strftime('%d.%m.%Y %H:%M:%S'),\n 'total': db.session.query(User). \\\n count(),\n 'unverified': db.session.query(User). \\\n filter(db.not_(User.verified)). \\\n count(),\n 'male students': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Student). \\\n count(),\n 'male employees': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Employee). \\\n count(),\n 'male alumni': db.session.query(User). \\\n filter(User.sex == Sex.Male,\n User.type == UserType.Alumni). \\\n count(),\n 'female students': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Student). \\\n count(),\n 'female employees': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Employee). \\\n count(),\n 'female alumni': db.session.query(User). \\\n filter(User.sex == Sex.Female,\n User.type == UserType.Alumni). \\\n count()\n }\n\n return result",
"def get_user_info(self) -> str:\n return self._searcher.get_user_info()",
"def user_info(self):\n response = self.query('user_info')\n return response",
"def get_users_summary(self, select=None):\n query_parameters = {}\n if select is not None:\n query_parameters['select'] = self._serialize.query('select', select, 'str')\n response = self._send(http_method='GET',\n location_id='5ae55b13-c9dd-49d1-957e-6e76c152e3d9',\n version='6.0-preview.1',\n query_parameters=query_parameters)\n return self._deserialize('UsersSummary', response)",
"def info(self):\r\n cur = self.db.cursor()\r\n cur.execute(\"select * from lic where idx='USER'\")\r\n info = cur.fetchone()\r\n cur.close()\r\n return info",
"def get_users_info(): \n \n data = user_obj.get_users_info()\n return data",
"def dump(self):\n\n\t\twith self.lock:\n\n\t\t\tassert ltrace(TRACE_USERS, '| dump()')\n\n\t\t\tuids = self.users.keys()\n\t\t\tuids.sort()\n\n\t\t\tlogins = self.login_cache.keys()\n\t\t\tlogins.sort()\n\n\t\t\tdef dump_user(uid):\n\t\t\t\treturn 'users[%s] (%s) = %s ' % (\n\t\t\t\t\tstylize(ST_UGID, uid),\n\t\t\t\t\tstylize(ST_NAME, self.users[uid]['login']),\n\t\t\t\t\tstr(self.users[uid]).replace(\n\t\t\t\t\t', ', '\\n\\t').replace('{', '{\\n\\t').replace('}','\\n}'))\n\n\t\t\tdata = '%s:\\n%s\\n%s:\\n%s\\n' % (\n\t\t\t\tstylize(ST_IMPORTANT, 'core.users'),\n\t\t\t\t'\\n'.join(map(dump_user, uids)),\n\t\t\t\tstylize(ST_IMPORTANT, 'core.login_cache'),\n\t\t\t\t'\\n'.join(['\\t%s: %s' % (key, self.login_cache[key]) \\\n\t\t\t\t\tfor key in logins ])\n\t\t\t\t)\n\n\t\t\treturn data",
"def get_summary(self):\n mask = \"\"\"mask[\n nextInvoiceTotalAmount,\n pendingInvoice[invoiceTotalAmount],\n blockDeviceTemplateGroupCount,\n dedicatedHostCount,\n domainCount,\n hardwareCount,\n networkStorageCount,\n openTicketCount,\n networkVlanCount,\n subnetCount,\n userCount,\n virtualGuestCount\n ]\n \"\"\"\n return self.client.call('Account', 'getObject', mask=mask)",
"def getuserstatistics(self):\n userstatistics = []\n userstatistics.append({'text': _('Suggestions Accepted'), 'count': self.suggester.filter(state='accepted').count()})\n userstatistics.append({'text': _('Suggestions Pending'), 'count': self.suggester.filter(state='pending').count()})\n userstatistics.append({'text': _('Suggestions Reviewed'), 'count': self.reviewer.count()})\n userstatistics.append({'text': _('Submissions Made'), 'count': self.submission_set.count()})\n return userstatistics",
"def get_user_info(self):\n user_info = self.data_source.get_user_info(self.user_id)\n\n return user_info",
"def userinfo(self):\n return self._userinfo",
"def msgStats():\n r = {}\n r[\"users\"] = User.count()\n return jsonify(r)",
"def info(self):\n if str.__str__(self) in UID_dictionary:\n return UID_dictionary[self][2]\n\n return ''",
"def info(df):\n pad = 15\n usrs = df.user_id.nunique()\n rows, cols = df.shape\n print(f\"Users: {usrs:>{pad-len('users')},}\")\n print(f\"Rows: {rows:>{pad-len('rows')},}\")\n print(f\"Cols: {cols:>{pad-len('cols')},}\")",
"def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })",
"def get_user_str(self):\n return simplejson.dumps(self.userList)",
"def user_data(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_data\")",
"def get_users_count(self):\n try:\n roles = self.db_handler.get_roles_list()\n reply = ''\n\n for role_id, role_name in roles:\n reply += f'{role_name}ів - {self.db_handler.get_staff_count_by_role(role_id)[0]}\\n'\n\n return reply\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def get_user_details():\n rv = query_db('select * from user')\n return rv[0] if rv else None",
"def GetNewUserData(self):\n usercount, user = self.__sqlData[\"SELECT u.ID,u.UID,u.Login,u.GID,u.Passwd,u.Description,i.Shell\" \\\n \" FROM AccUser AS u, AccShells AS i\" \\\n \" WHERE u.Todo='%s' AND u.active='%s' AND u.Shell = i.ID \" % ('1', '1')]\n return usercount, user",
"def get_user_statistics(self, jid):\n self.data[jid] = {}\n\n iq = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n USER_STATS)\n sessionid = iq['command']['sessionid']\n\n form = self.xmpp.plugin['xep_0004'].make_form(ftype='submit')\n field = form.add_field(\n ftype='hidden',\n type='hidden',\n var='FORM_TYPE',\n value=ADMIN)\n field['type'] = 'hidden'\n form.add_field(var='accountjid', value=jid)\n\n result = self.xmpp.plugin['xep_0050'].send_command(\n config['domain'],\n USER_STATS,\n sessionid=sessionid,\n payload=form)\n fields = result['command']['form']['fields']\n\n for field in fields.values():\n if field['type'] != 'hidden':\n if field['var'] == 'onlineresources':\n value = field['value'].split('\\n')\n elif field['var'] == 'ipaddresses':\n value = []\n for ip in field['value'].split('\\n'):\n lookup = ip_lookup(ip)\n if not lookup:\n lookup = 'Unknown'\n value.append((ip, lookup))\n else:\n value = field['value']\n self.data[jid][field['var']] = value",
"def getUserInfo(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n displayName = j['displayName']\n name = j['name']\n uid = j['id']\n isBanned = j['isBanned']\n joinDate = j['created']\n description = j['description']\n return displayName,name,uid,isBanned,joinDate,description",
"def print_summary_stats(self) -> None:\n print(\"Number of Users: {}\".format(len(self.all_users)))\n print(\"Number of Utterances: {}\".format(len(self.utterances)))\n print(\"Number of Conversations: {}\".format(len(self.conversations)))",
"def __repr__(self):\n count=0\n for item in self.books.keys():\n count += 1\n return \"User {name} with email address {email} has read {count} book(s) with an average rating of {rating}\".format(name=self.name, email=self.email, count=count, rating=self.get_average_rating())",
"def get_db_info(self):\n total = 0\n info = {\n 'count': {},\n 'types': {}\n }\n for name in self._object_types:\n id, attrs, idx = self._object_types[name]\n info['types'][name] = {\n 'attrs': attrs,\n 'idx': idx\n }\n row = self._db_query_row('SELECT COUNT(*) FROM objects_%s' % name)\n info['count'][name] = row[0]\n total += row[0]\n\n info['total'] = total\n\n info['termcounts'] = {}\n for ivtidx in self._inverted_indexes:\n row = self._db_query_row('SELECT COUNT(*) FROM ivtidx_%s_terms' % ivtidx)\n info['termcounts'][ivtidx] = int(row[0])\n\n info['file'] = self._dbfile\n return info",
"def describe_user(self):\n print(\"We have stored next information about user \" +\n self.first_name.title() + \" \" + self.last_name.title() +\n \":\")\n print(\"- Username: \" + self.username)\n print(\"- Age: \" + str(self.age))\n print(\"- Location: \" + self.location.title())",
"def user_stats(df):\n\n print('\\n#4 USER INFO\\nCalculating User Stats...\\n')\n start_time = time.time()\n \n # TO DO: Display counts of user types\n print('Count of each User type:')\n print(df['User Type'].value_counts(dropna=False))\n \n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n print('\\nCount of each Gender type:')\n print(df['Gender'].value_counts(dropna=False))\n\n \n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print('\\nBirth Year Statistics:')\n print(df['Birth Year'].value_counts(sort=True).head(1))\n print(df['Birth Year'].min())\n print(df['Birth Year'].max())\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)"
] | [
"0.6607278",
"0.6607278",
"0.65238464",
"0.6518738",
"0.6509178",
"0.6467798",
"0.641684",
"0.6371659",
"0.62588465",
"0.6195195",
"0.61339843",
"0.6110665",
"0.6098782",
"0.6087525",
"0.60621756",
"0.60452694",
"0.60246444",
"0.60194707",
"0.6006507",
"0.6000051",
"0.59721947",
"0.5928915",
"0.5914123",
"0.59006643",
"0.5895775",
"0.58894587",
"0.5863257",
"0.58623284",
"0.58583313",
"0.5838691"
] | 0.76016575 | 0 |
Construct a bbUserDB from a dictionary-serialised representation; the reverse of bbUserDB.toDict() | def fromDict(cls, userDBDict : dict, **kwargs) -> bbUserDB:
# Instance the new bbUserDB
newDB = bbUserDB()
# iterate over all user IDs to spawn
for id in userDBDict.keys():
# Construct new bbUsers for each ID in the database
# JSON stores properties as strings, so ids must be converted to int first.
newDB.addUserObj(bbUser.bbUser.fromDict(userDBDict[id], id=int(id)))
return newDB | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_dict(cls, dikt: dict) -> 'UserBase':\n return util.deserialize_model(dikt, cls)",
"def toDict(self, **kwargs) -> dict:\n data = {}\n # Iterate over all user IDs in the database\n for id in self.getIds():\n # Serialise each bbUser in the database and save it, along with its ID to dict \n # JSON stores properties as strings, so ids must be converted to str first.\n try:\n data[str(id)] = self.users[id].toDict(**kwargs)\n except Exception as e:\n bbLogger.log(\"UserDB\", \"toDict\", \"Error serialising bbUser: \" + e.__class__.__name__, trace=traceback.format_exc(), eventType=\"USERERR\")\n return data",
"def from_dict(cls, dikt: dict) -> 'User':\n return util.deserialize_model(dikt, cls)",
"def create_from_dict(user_data_dict: dict):\n empty = create_empty()\n user_data = _dict_to_dict(empty, user_data_dict)\n return user_data",
"def from_dict(cls, dikt) -> 'VotingUser':\n return util.deserialize_model(dikt, cls)",
"def user2dict(self):\n d = {}\n d['username'] = self.username\n d['level'] = self.level\n d['name'] = self.name\n d['email'] = self.email\n d['creation'] = self.creation\n d['update'] = self.update\n d['nsentences'] = self.nsentences\n d['nsessions'] = self.nsessions\n d['score'] = self.score\n d['pw_hash'] = self.pw_hash\n return d",
"def _from_db_object(user, db_user):\n foreign_key = ['project']\n for field in user.fields:\n if field not in foreign_key:\n user[field] = db_user[field]\n elif field == 'project' and db_user.project:\n user['project'] = db_user.project.name\n\n user.obj_reset_changes()\n return user",
"def init_from_db(db_repr):\n if type(db_repr) is not tuple:\n raise TypeError(\n \"Expected tuple as db_repr, received: \" + str(type(db_repr))\n )\n\n return User_Info(\n user_id=db_repr[0],\n email=db_repr[3],\n profile_pic=db_repr[4],\n google_id=db_repr[5],\n permissions=db_repr[6],\n )",
"def to_object(cls, query_dict: Dict):\n user = User()\n user.id = query_dict.get(\"id\")\n user.first_name = query_dict.get(\"firstname\")\n user.last_name = query_dict.get(\"lastname\")\n user.other_name = query_dict.get(\"othernames\")\n user.email = query_dict.get(\"email\")\n user.phone_number = query_dict.get(\"phone\")\n user.user_name = query_dict.get(\"username\")\n user.is_admin = query_dict.get(\"role\")\n user.password = query_dict.get(\"password\")\n return user",
"def get_serialized_user(cls, user):\n return {\n 'email': user.email,\n 'is_superuser': user.is_superuser,\n 'name': user.name,\n 'sodar_uuid': str(user.sodar_uuid),\n 'username': user.username,\n }",
"def load_user_map_from_db():\n user_map = {}\n\n try:\n users = get_users_from_table()\n for user in users:\n user_dict = {\n \"user_id\" : user[0],\n \"username\" : user[1],\n \"id_last_message_sent\" : user[2],\n \"id_last_message_stickered\" : user[3],\n \"count_since_last_stickered\" : user[4],\n \"is_new_user\" : False\n }\n us = user_store.UserStore(data_dict=user_dict)\n user_map[us.get_user_id()] = us\n print(\"user_map loaded\")\n except IOError:\n print(\"Database load failed. Loading empty user_map.\")\n \n return user_map",
"def from_dict(cls, dikt) -> 'AccountManagerColumn':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, json, user=None):\n if user is None:\n user = cls()\n\n user.username = json['username']\n user.first_name = json['first_name']\n user.last_name = json['last_name']\n\n user.password = json['password']\n\n return user",
"def deepwrap_userdict(obj) -> ColumnGroupSchema:\n if isinstance(obj, collections.abc.Mapping):\n # UserDict trick does not require multi-level UserDict\n return UserDict(obj)\n if isinstance(obj, collections.abc.Collection) and not isinstance(obj, str):\n return [deepwrap_userdict(value) for value in obj]\n return obj",
"def load_dict_from_json(path_to_db, convert_keys_to_int = False):\r\n with open(path_to_db, 'r') as fproc:\r\n dict_ = json.load(fproc)\r\n\r\n if not isinstance(dict_, dict):\r\n raise TypeError(\"Loaded object is not a dictionary.\")\r\n \r\n if convert_keys_to_int:\r\n try:\r\n dict_ = {int(k) : v for k, v in dict_.items()}\r\n except:\r\n raise\r\n \r\n return dict_",
"def make_from_mongo_dict(household_as_dict):\n # First do the id shuffle\n mongo_id = household_as_dict['_id']\n del household_as_dict['_id']\n household_as_dict['_Household__id'] = str(mongo_id)\n # now unpickle\n return jsonpickle.decode(json.dumps(household_as_dict))",
"def user_to_dict(self, user):\n udd = user._to_dict() #pylint: disable=protected-access\n response_dict = {}\n for arg in self.signup_args:\n response_dict[arg] = udd.get(arg)\n response_dict['user_id'] = user.get_id()\n response_dict['user_key'] = user.key.urlsafe()\n return response_dict",
"def from_dict(user_json):\n user = User()\n user.id = user_json['id']\n user.first_name = user_json['first_name']\n user.surname = user_json['surname']\n user.full_name = \"{} {}\".format(user.first_name, user.surname)\n user.email = user_json['email']\n user.organisation = user_json['organisation']\n user.roles = user_json['roles']\n user.status = user_json['status']\n user.is_admin = user.check_admin()\n user.jwt = user_json['jwt']\n if 'permissions' in user_json:\n user.permissions = user_json['permissions']\n\n return user",
"def _from_dict(cls, _dict: dict) -> 'BaseDataConnection':\n pass",
"def uid_to_obj(uid):\n serialized_b64 = uid.encode()\n object_ser = base64.decodebytes(serialized_b64)\n obj = pickle.loads(object_ser)\n return obj",
"def test_to_dict(self):\n\n user = CustomUser.objects.get(email=\"[email protected]\")\n result = user.to_dict()\n expected = {\n 'id': 7,\n 'first_name': \"TestName\",\n 'second_name': \"TestSecondName\",\n 'email': \"[email protected]\",\n 'avatar': \"dd46a756faad4727fb679320751f6dea\"\n }\n\n self.assertDictEqual(result, expected)",
"def to_dictionary(self) -> dict:\n return {\n 'id': self.id,\n 'username': self.username,\n 'password': self.password,\n 'firstname': self.firstname,\n 'surname': self.surname,\n 'currency_id': self.currency_id,\n 'has_first_sign_in': self.has_first_sign_in,\n 'account_created': self.account_created,\n 'last_sign_in': self.last_sign_in\n }",
"def _from_dict_transform(cls: Type[TElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n if 'application' in data:\n data['created_by'] = data.pop('application')\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 'created_ts' not in data:\n # some really old nin entries in the database have neither created_ts nor modified_ts\n data['_no_created_ts_in_db'] = True\n data['created_ts'] = datetime.fromisoformat('1900-01-01')\n\n if 'modified_ts' not in data:\n data['_no_modified_ts_in_db'] = True\n # Use created_ts as modified_ts if no explicit modified_ts was found\n data['modified_ts'] = data['created_ts']\n\n return data",
"def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }",
"def load_dict_from_db(path_to_db):\n with open(path_to_db, mode='rb') as handle:\n result = pickle.loads(handle.read())\n\n return result",
"def _from_dict_transform(cls: Type[TPrimaryElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'primary' in data:\n data['is_primary'] = data.pop('primary')\n\n return data",
"def from_dict(cls, uni):\n return cls(uni.get('interface'),\n uni.get('user_tag'))",
"def test_to_dict_creates_dict(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n u = Student(**user_details)\n new_d = u.to_dict()\n self.assertEqual(type(new_d), dict)\n self.assertFalse(\"_sa_instance_state\" in new_d)\n for attr in u.__dict__:\n if attr is not \"_sa_instance_state\":\n self.assertTrue(attr in new_d)\n self.assertTrue(\"__class__\" in new_d)",
"def to_dict(self):\n return self._user_data",
"def deserialize(filedb):\n details={}\n fo = open(filedb, \"rb\")\n stringdb=fo.read()\n beg=1\n end=0\n eof=stringdb.index(\"}}\")\n while end!=eof:\n keylist=[]\n valuelist=[] \n end=beg+stringdb[beg:].index(\": {\")\n uname=stringdb[beg:end]\n uname=uname.strip(\"'\")\n beg=end+3\n eor=stringdb[beg:].find(\"},\")\n if eor==-1:\n eor=eof\n else:\n eor=beg+eor\n while end!=eor:\n end=beg+stringdb[beg:].index(':')\n key=stringdb[beg:end]\n key=key.strip(\"'\")\n keylist.append(key)\n beg=end+2\n if stringdb[beg]=='[':\n listlist=[]\n beg=beg+1\n listend=beg+stringdb[beg:].index(']')\n while end<listend:\n end=stringdb[beg:listend].find(',')\n if end==-1:\n end=listend\n else:\n end+=beg\n listvalue=stringdb[beg:end]\n listvalue=listvalue.strip(\"'\")\n listlist.append(listvalue)\n beg=end+2\n beg+=1\n valuelist.append(listlist)\n else:\n end=stringdb[beg:eor].find(',')\n if end==(-1):\n end=eor\n else:\n end+=beg\n value=stringdb[beg:end]\n value=value.strip(\"'\")\n valuelist.append(value)\n beg=end+2\n beg+=1\n detail={}\n for k,v in izip_longest(keylist,valuelist):\n detail[k]=v\n details[uname]=detail \n fo.close()\n return details"
] | [
"0.67769945",
"0.6622561",
"0.65516293",
"0.63922143",
"0.63320786",
"0.62116504",
"0.6042782",
"0.6004884",
"0.5900546",
"0.5864641",
"0.5853948",
"0.578413",
"0.5688235",
"0.5666956",
"0.56637603",
"0.5660747",
"0.5650265",
"0.5648268",
"0.56327444",
"0.5618494",
"0.560434",
"0.55475587",
"0.5538976",
"0.5538566",
"0.551414",
"0.5471608",
"0.5471328",
"0.54592234",
"0.54430884",
"0.5406055"
] | 0.71973324 | 0 |
Screen with calendar for one month | def create_month_scr(self, month, toogle_today=False):
scr = Screen()
m = self.month_names_eng[self.active_date[1] - 1]
scr.name = "%s-%s" % (m, self.active_date[2]) # like march-2015
# Grid for days
grid_layout = GridLayout(cols=7, rows=7, size_hint=(1, 1), pos_hint={"top": 1})
scr.add_widget(grid_layout)
# Days abbrs
for i in range(7):
if i >= 5: # weekends
l = Label(text=self.days_abrs[i], color=(1, 0, 0, 1))
else: # work days
l = Label(text=self.days_abrs[i], text_size=(self.size[0], None), halign="center")
grid_layout.add_widget(l)
global holiday, halfday
# Buttons with days numbers
for week in month:
for day in week:
if day[1] >= 6: # weekends
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
else:
self.tbtn = ToggleBtn(text=str(day[0]), color=(0, 0, 0, 1))
for i in range(len(holiday)):
if self.active_date[2] == holiday[i][2]:
if self.active_date[1] == holiday[i][1]:
if day[0] == holiday[i][0]:
self.tbtn.background_color=(128, 0, 128, 1)
for i in range(len(halfday)):
if self.active_date[2] == halfday[i][2]:
if self.active_date[1] == halfday[i][1]:
if day[0] == halfday[i][0]:
self.tbtn.background_color=(0, 255, 255, 0.5)
self.tbtn.bind(on_press=self.get_btn_value)
if toogle_today:
# Down today button
if day[0] == self.active_date[0] and day[2] == 1:
self.tbtn.state = "down"
# Disable buttons with days from other months
if day[2] == 0:
self.tbtn.text = " "
self.tbtn.disabled = True
self.tbtn.background_color = (0, 0, 0, 0.1)
grid_layout.add_widget(self.tbtn)
self.sm.add_widget(scr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_cal(request, year=None, month=None):\n if year == None:\n # get the current comic as a starting point\n lToday = Comic.objects.filter(published=True).order_by('-date')[0].date\n year = lToday.year\n month = lToday.month\n\n return calendar(request, year, month)",
"def main():\n# year = int(input(\"Enter year for calendar: \"))\n# first_day = first_day_of_year(year)\n\n # Loop through months 1 through 12\n # for month in range(1, NUM_MONTHS + 1):\n# first_day = print_month(first_day, month, year)\n\n canvas = make_canvas(CANVAS_WIDTH, CANVAS_HEIGHT, 'Calendar')\n # present the header, today's date\n\n top_rows(canvas)\n # present two buttons: weekly display and monthly display\n weekly_display_type = True\n date_to_present = date.today()\n #button_weekly(canvas,weekly_display_type,date_to_present)\n #button_monthly(canvas, weekly_display_type, date_to_present)\n # present weekly display\n canvas.update()\n canvas.mainloop()",
"def calendar(request, pYear, pMonth):\n lYear = int(pYear)\n lMonth = int(pMonth)\n lCalendarFromMonth = datetime.date(lYear, lMonth, 1)\n lCalendarToMonth = datetime.date(lYear, lMonth, monthrange(lYear, lMonth)[1])\n lComics = Comic.objects.filter(published=True, date__gte=lCalendarFromMonth, date__lte=lCalendarToMonth).order_by('date')\n lCalendar = ArchiveCalendar(lComics).formatmonth(lYear, lMonth)\n lPreviousYear = lYear\n lPreviousMonth = lMonth - 1\n if lPreviousMonth == 0:\n lPreviousMonth = 12\n lPreviousYear = lYear - 1\n lNextYear = lYear\n lNextMonth = lMonth + 1\n if lNextMonth == 13:\n lNextMonth = 1\n lNextYear = lYear + 1\n pmn = named_month(lPreviousMonth)\n nmn = named_month(lNextMonth)\n \n # now for something fun:\n # if we have the first or last comics in a collection, we DON'T want to paginate this!\n fComic = lComics[0]\n lComic = lComics.reverse()[0]\n aComic = fComic.get_first()\n bComic = fComic.get_latest()\n \n \n if aComic is None or fComic.id == aComic.id:\n lPreviousYear = 0\n lPreviousMonth = 0\n if bComic is None or lComic.id == bComic.id:\n lNextYear = 0\n lNextMonth = 0\n \n\n return render(request, 'archive/archive_cal.html', {'Calendar' : mark_safe(lCalendar),\n 'Month' : str(lMonth),\n 'MonthName' : named_month(lMonth),\n 'Year' : str(lYear),\n 'PreviousMonth' : str(lPreviousMonth),\n 'PreviousMonthName' : pmn,\n 'PreviousYear' : str(lPreviousYear),\n 'NextMonth' : str(lNextMonth),\n 'NextMonthName' : nmn,\n 'NextYear' : str(lNextYear),\n })",
"def calendar(self):\r\n self.cal = QCalendarWidget()\r\n self.cal.setWindowTitle(\"Get Birthday\")\r\n self.cal.show()\r\n self.cal.clicked.connect(self.dateB)",
"def abrirCalendar():\n try:\n var.dlgcalendar.show()\n except Exception as error:\n print('Error: %s ' % str(error))",
"def calendar(request, year=None, month=None):\n today = datetime.date.today()\n year = int(year) if year else today.year\n month = int(month) if month else today.month\n try:\n first_of_month = datetime.date(year, month, 1)\n except ValueError: # Not a valid year and month\n raise Http404\n\n events = Event.objects.filter(event_start__year=year, event_start__month=month)\n cal = EventCalendar(events, year, month).formatmonth(year, month)\n\n user = request.user\n future_attending_events = attending_events(user, today)\n\n months = year * 12 + month - 1 # months since epoch (Christ)\n month_list = [\n datetime.date(m // 12, m % 12 + 1, 1) for m in range(months - 5, months + 7)\n ]\n\n # Get some random dates in the current, next, and previous month.\n # These dates are used load the calendar for that month.\n # * prev is some day in the previous month\n # * this is some day in this month\n # * next is some day in the next month\n context = {\n \"calendar\": mark_safe(cal),\n \"prev\": first_of_month - datetime.timedelta(27),\n \"this\": first_of_month,\n \"next\": first_of_month + datetime.timedelta(32),\n \"future_attending_events\": future_attending_events,\n \"month_list\": month_list,\n }\n\n return render(request, \"events/event_list.html\", context)",
"def calender(self, month, year):\n\n day = ['S', ' M', ' T', ' W', ' Th', 'F', ' S']\n\n days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n values = 1\n d = 1\n\n m = month\n y = year\n y0 = y - (14 - m) // 12\n x = y0 + y0 // 4 - y0 // 100 + y0 // 400\n m0 = m + 12 * ((14 - m) // 12) - 2\n d0 = (d + x + 31 * m0 // 12) % 7\n\n if utility_obj.isleap_year(str(year)):\n days[1] = 29\n row = 6\n column = 7\n two_d_array = [[0 for j in range(column)] for i in range(row)]\n\n print('Your Calender is Ready\\n')\n\n for i in range(0, 6 + 1):\n print(day[i], end=' ')\n print()\n for i in range(row):\n\n for j in range(column):\n\n if values <= days[m - 1]:\n if i == 0 and j < d0:\n two_d_array[i][j] = ' '\n continue\n\n two_d_array[i][j] = values\n values += 1\n\n for i in range(row):\n\n for j in range(column):\n if two_d_array[i][j] != 0:\n x = two_d_array[i][j]\n x1 = str(x).ljust(2)\n print(x1, end=\" \")\n\n print()",
"def refresh_calendar():\n manage.refresh_calendar()",
"def _next_month(self):\r\n self._canvas.place_forget()\r\n\r\n year, month = self._date.year, self._date.month\r\n self._date = self._date + self.timedelta(\r\n days=calendar.monthrange(year, month)[1] + 1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstruct calendar\r",
"def _next_month(self):\n self._canvas.place_forget()\n\n year, month = self._date.year, self._date.month\n self._date = self._date + self.timedelta(\n days=calendar.monthrange(year, month)[1] + 1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstruct calendar",
"def display_calendar(daze, month, year):\n log = daze.dateDict\n if not year:\n year = date.today().year # defaults to this year\n if month:\n first = date(year, month, 1)\n last = max([day for day in cal.itermonthdates(year, month) if day.month == month])\n s, ndates, firstdate, lastdate = daze.summarize(firstdate=first, lastdate=last)\n else:\n s, ndates, firstdate, lastdate = daze.summarize()\n places = sorted(s, key=s.get, reverse=True)\n colors = ['green', 'magenta', 'white', 'cyan', 'blue', 'red', 'yellow']\n months = calendar.month_name[1:]\n dates = [firstdate + timedelta(days=i) for i in range((lastdate - firstdate).days + 1)]\n\n matches = {p: c for (p, c) in zip(places, colors)}\n\n for (p, c) in matches.items():\n click.secho(\" %s \" % p, bg=c, fg='black', bold=True)\n\n for _date in dates:\n if _date.day == 1 or _date == firstdate:\n click.echo('')\n click.echo(\"\\n\" + months[_date.month - 1])\n if (_date.isoweekday() != 7):\n click.echo(\" \" * 3 * _date.isoweekday(), nl=False)\n if _date in log:\n p = log[_date]\n click.secho(\"%s\" % str(_date.day).rjust(3),\n fg='black',\n bg=matches[p],\n nl=(_date.isoweekday() == 6))\n else:\n click.secho(\"%s\" % str(_date.day).rjust(3),\n fg='black', nl=(_date.isoweekday() == 6))\n\n click.echo('\\n\\n\\n')",
"def main(request, year=None):\n\tif year: year = int(year)\n\telse: year = time.localtime()[0]\n\n\tnowYear, nowMonth = time.localtime()[:2]\n\tlst = []\n\n\tfor y in [year, year+1, year+2]:\n\t\tmonthLst = []\n\t\tfor n, month in enumerate(MONTH_NAMES):\n\t\t\tentry\t= current = False\n\t\t\tentries\t= entry.objects.filter(date__year=y, date__month=n+1)\n\n\t\t\tif entries:\n\t\t\t\tentry = True\n\t\t\tif y == nowYear and n+1 == nowMonth:\n\t\t\t\tcurrent = True\n\t\t\tmonthLst.append(dict(n=n+1, name=month, entry=entry, current=current))\n\t\tlst.append((y, monthLst))\n\n\treturn render_to_response(\"cal/\", dict(years=lst, user=request.user, year=year, reminders=reminders(request)))",
"def display_calendar_redo(daze, year, month):\n log = daze.dateDict\n\n # Set first and last dates\n if year is None:\n year = date.today().year\n if month is None:\n first = date(year, 1, 1)\n if year == date.today().year:\n last = date.today()\n else:\n last = date(year, 12, 31)\n else:\n first = date(year, month, 1)\n last = date(2016, month, calendar.monthrange(2016, month)[1])\n\n # Get summarized data\n s, ndates, firstdate, lastdate = daze.summarize()\n places = sorted(s, key=s.get, reverse=True)\n colors = ['green', 'magenta', 'white', 'cyan', 'blue', 'red', 'yellow']",
"def on_btnCalendar_clicked(self, widget):\n try:\n variables.semaforo = 1\n variables.vencalendar.connect('delete-event', lambda w, e: w.hide() or True)\n variables.vencalendar.show()\n\n except:\n print('error abrir calendario')",
"def _prev_month(self):\r\n self._canvas.place_forget()\r\n\r\n self._date = self._date - self.timedelta(days=1)\r\n self._date = self.datetime(self._date.year, self._date.month, 1)\r\n self._build_calendar() # reconstuct calendar\r",
"def click_next_month(self):\n self.action.click(self.calendar_next)\n time.sleep(3)",
"def showNextMonth(self):\n pass",
"def __init__(self, d, m, y):\n\n self.set_calendar(d, m, y)",
"def _prev_month(self):\n self._canvas.place_forget()\n\n self._date = self._date - self.timedelta(days=1)\n self._date = self.datetime(self._date.year, self._date.month, 1)\n self._build_calendar() # reconstuct calendar",
"def events_in_month(request, year, month):\n month = datetime(year=year, month=month, day=1)\n next_month = month + timedelta(months=1)\n month_events = Event.objects.filter(date__gte=month, date__lte=next_month).order_by('date')\n return render_short(request, 'adhoc_calendar/events.html', context)",
"def changeDisplayedMonth(self):\n #ho bisogno di sapere qual è il mese mostrato\n currentMonth = self.indexMonth\n currentYear = self.currentYear\n\n sender = self.sender().objectName()\n if sender == 'bot_next':\n # if currentMonth < 11:\n if self.indexMonth < 11:\n self.indexMonth += 1\n self.setBaseDate(self.baseDate.addMonths(1))\n else:\n self.indexMonth = 0\n self.setCurrentYear(currentYear+1)\n # print('baseDate before', self.baseDate)\n self.setBaseDate(self.baseDate.addMonths(1))\n # print('baseDate after', self.baseDate)\n # print('new Year: ', self.currentYear)\n\n elif sender == 'bot_prev':\n # if currentMonth > 0:\n if self.indexMonth > 0:\n self.indexMonth -= 1\n self.setBaseDate(self.baseDate.addMonths(-1))\n else:\n self.indexMonth = 11\n self.setCurrentYear(currentYear-1)\n self.setBaseDate(self.baseDate.addMonths(-1))\n # print('new Year: ', self.currentYear)\n if currentMonth != self.indexMonth:\n # print(f'currentPageChanged.emit({self.indexMonth})')\n self.currentPageChanged.emit(self.indexMonth)\n self.combo_mesi.setCurrentIndex(self.indexMonth)\n if currentYear != self.currentYear:\n # print('current year changed')\n self.setListaGiorniDellAnno(self.createDates(self.baseDate), self.indexMonth)",
"def intro():\n os.system('cls')\n print(\"-------------------------\")\n print(\" MOON PHASE CALENDAR\")\n print(\"-------------------------\")",
"def index():\n # return render_template('index.html', events=get_calendar_events_today(CALENDAR_URL))\n return render_template('index.html', events=get_calendar_events_limit(CALENDAR_URL), events_sorted=True)",
"def news_for_month(self):\n\n raise NotImplementedError",
"def on_btnCalendarResIn_clicked(self,widget):\n try:\n variables.semaforo = 2\n variables.vencalendar.connect('delete-event', lambda w, e: w.hide() or True)\n variables.vencalendar.show()\n except:\n print('error abrir calendario')",
"def print_calendar(month, year):\n print MONTH_NAME[month - 1] + ', ' + str(year)\n\n calendar = calculate_date(month, year)\n for i in DAY_NAME:\n print(i),\n\n print\n\n for i in range(len(calendar)):\n if calendar[i] == 0:\n print(align_day_block(0)),\n else:\n print(align_day_block(calendar[i])),\n\n if i % 7 == 0:\n print",
"def schedule(request):\r\n\r\n return render(request, 'editorial/schedule.html', {})",
"def calendarPageChanged(self, year, month):\n success = self.porker_thread.extendDates(datetime.date(year, month, 1))\n #if not success:\n # self.alertMessage(\"Failure!\",\"Unable to extend the thread's dates for some reason.\")\n #efficiency = self.porker_thread.getEfficiencyFor(self.getActiveDate())\n #self.porker_thread.sentDatesData = False",
"def calendar_month(year, month):\n start = datetime.datetime(year, month, 1)\n if month == 12:\n end = datetime.datetime(year+1, 1, 1)\n else:\n end = datetime.datetime(year, month+1, 1)\n print(start)\n print(end)\n return start, end",
"def calender_runner():\n utility_obj = Utility()\n\n logic_obj = Logic()\n\n print('Enter Month')\n try:\n month = utility_obj.get_int()\n except Exception as e:\n print(e)\n print(\"Enter integer only \")\n print('Enter Year')\n try:\n year = utility_obj.get_int()\n except Exception as e:\n print(e)\n print(\"Enter integer only\")\n\n logic_obj.calender_stack(month, year)"
] | [
"0.73538274",
"0.6901004",
"0.6769055",
"0.6733737",
"0.6690152",
"0.65959895",
"0.65230334",
"0.6518159",
"0.64170593",
"0.63540894",
"0.6350945",
"0.6336885",
"0.6195404",
"0.6162957",
"0.61168915",
"0.60795885",
"0.6076546",
"0.60162735",
"0.59980357",
"0.59725446",
"0.5954102",
"0.5894898",
"0.5844367",
"0.583368",
"0.58124804",
"0.5800298",
"0.5792928",
"0.5785537",
"0.57686156",
"0.57572794"
] | 0.7408204 | 0 |
Get day value from pressed button | def get_btn_value(self, inst):
self.active_date[0] = int(inst.text)
selected = [self.active_date[0], self.active_date[1], self.active_date[2]]
global selectedDates
if selected in selectedDates:
selectedDates.remove(selected)
else:
selectedDates.append(selected)
if self.as_popup:
self.parent_popup.dismiss()
#getInfo.openPopup() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_day():\n return handle_invalid_inputs(question_4, days)",
"def day(sym, date):\n return get(sym, date, date)[0][1]",
"def on_Calendar_day_selected_double_click(self, widget):\n try:\n agno, mes, dia = variables.calendar.get_date()\n fecha = \"%02d/\" % dia + \"%02d/\" % (mes + 1) + \"%s\" % agno\n if variables.semaforo == 1:\n variables.filacli[3].set_text(fecha)\n elif variables.semaforo == 2:\n variables.filareserva[2].set_text(fecha)\n elif variables.semaforo == 3:\n variables.filareserva[3].set_text(fecha)\n funcionesreser.calculardias()\n else:\n pass\n variables.vencalendar.hide()\n except:\n print('error al coger la fecha')",
"def day(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"day\")",
"def day(self):\n data = await self.get_data(LIGHT)\n return data['day']",
"def day(self) -> int:\n return pulumi.get(self, \"day\")",
"def day(self):\n return self._days",
"def day(self):\n return self._day",
"def day(self):\n return self._day",
"def day(self):\n return self._day",
"def day(self):\n return 0",
"def day(self):\n return 0",
"def day(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"day\")",
"def __get_day(self, day, month, year):\n date = self.today.today().replace(day=day, month=month, year=year).date()\n # emoji format for current date\n ret = emoji.emojize(':round_pushpin:') if self.today.date() == date else ''\n\n return ret + str(day)",
"def getDay(self):\n return _libsbml.Date_getDay(self)",
"def day(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"day\")",
"def pDay(self):\n return self._pday",
"def currentDay(self):\n day = datetime.datetime.today().day\n return day",
"def _pressed(self, evt):\n x, y, widget = evt.x, evt.y, evt.widget\n item = widget.identify_row(y)\n column = widget.identify_column(x)\n\n if not column or not item in self._items:\n # clicked in the weekdays row or just outside the columns\n return\n\n item_values = widget.item(item)['values']\n if not len(item_values): # row is empty for this month\n return\n\n text = item_values[int(column[1]) - 1]\n if not text: # date is empty\n return\n\n bbox = widget.bbox(item, column)\n if not bbox: # calendar not visible yet\n return\n\n # update and then show selection\n text = '%02d' % text\n self._selection = (text, item, column)\n self._show_selection(text, bbox)",
"def get_interactive_days(self):\n answer = input(\"Press return to get entries of past day or input number of days to go back in time: \")\n if answer == '':\n days = 1\n else:\n try:\n days = int(answer)\n except:\n print(\"You didn't enter a number, assuming 1 day.\")\n days = 1\n return days",
"def get_day(self):\n\n # First we get the first 8 bits stored in the day register\n # and translate it to an integer\n day_bcd = self.__read_register(_REGISTER_DAY)\n\n # Then we extract the digits and the tens\n tens = (day_bcd & 0x30) >> 4 # 0x30 = 0b00110000\n digit = (day_bcd & 0x0F) # 0x0F = 0b00001111\n\n # End return the last value\n return 10 * (tens) + digit",
"def clickedCell(self, row, col) ->QDate:\n itemWidget = self.table.cellWidget(row, col)\n data = itemWidget.data\n dataMonth = data.month() - 1\n dataYear = data.year()\n self.currentDate = data\n # print('cell clicked flags: ', itemWidget.dictFlags)\n # print('cell clicked flags currentDate: ', data)\n # print('cell clicked flags item date: ', itemWidget.data)\n if data not in self.daysInTheMonth:\n if dataYear > self.currentYear:\n self.bot_next.click()\n elif dataYear < self.currentYear:\n self.bot_prev.click()\n elif dataMonth > self.indexMonth:\n self.bot_next.click()\n else:\n self.bot_prev.click()\n self.singleClicked.emit(data)\n return data",
"def Day(self):\n return self._fday",
"def get_dayu_checked(self):\n return self._button_group.checkedId()",
"def day(self) -> int:\r\n return self._day",
"def get_current_day() -> int:\n return datetime.now().day",
"def getOneDay(self,day_number=0):\n return self.event_time_sequence[day_number]",
"def value(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n return round(self.data.loc[day, 'Value'], 2)\n else:\n return 0",
"def day(self):\n try:\n return self.schedule.day\n except Schedule.DoesNotExist:\n return None",
"def day(d):\n\t\tx = db.cquery(\"day\",d)\n\t\tprint \"Total:\", x[0]\n\t\tf = raw_input(\"[L]ist [N]ew overview or [B]ack to home \").lower()\n\t\tif f == \"l\":\n\t\t\tfor i in x[1]:\n\t\t\t\tprint ui.statsid(), i[0], i[1], \" \", ui.statstimein(), i[2], ui.statstimeout(), i[3]\n\t\t\traw_input(\"[Enter] to go back to search\")\n\t\t\thome_stats()\n\t\telif f == \"n\":\n\t\t\thome_stats()\n\t\telif f == \"b\":\n\t\t\thome()\n\t\telse:\n\t\t\tpass"
] | [
"0.66065156",
"0.6411967",
"0.62181884",
"0.61390465",
"0.6137295",
"0.61261487",
"0.6031694",
"0.6031341",
"0.6031341",
"0.6031341",
"0.60150236",
"0.60150236",
"0.5965724",
"0.59653014",
"0.5939489",
"0.59322006",
"0.58828443",
"0.587401",
"0.58704",
"0.58308136",
"0.58195955",
"0.5802431",
"0.5796729",
"0.5781783",
"0.57666487",
"0.5724874",
"0.5694276",
"0.56365174",
"0.5591336",
"0.5588268"
] | 0.6740263 | 0 |
Project a vector onto an L1 ball. | def project_L1_ball(x: "fasta.linalg.Vector", t: float) -> "fasta.linalg.Vector":
# By Moreau's identity, we convert to proximal of dual problem (L-inf norm)
return x - project_Linf_ball(x, t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def translate(self, vector):\n if self.blender_object:\n self.blender_object.location = vector",
"def project_vector(self, vector: array_like) -> Vector:\n point_in_space = self.point + vector\n point_on_plane = self.project_point(point_in_space)\n\n return Vector.from_points(self.point, point_on_plane)",
"def ball_increase_velocity():\n global ball_vel\n ball_vel[0] = ball_vel[0] * 1.10\n ball_vel[1] = ball_vel[1] * 1.10",
"def dot_with_light_vector(val):\n return val.dot(Vector([0, 0, 1]))",
"def project_point_along_2Dvector(): \n \n # 2d vector \n a = vec2( 1 , 1 )\n b = vec2( -1 , -1 )\n com = vec2() \n\n #fb = pixel_op() \n #fb.create_buffer(800, 800)\n #fb.graticule(pixels_per_unit)\n\n vecs = [a,b]\n pts = [com.project_pt(a, b, 2)]\n\n bloody_simple_2drender('2d_render.png', vecs=vecs, pts=pts, gridsize=40)",
"def __init__(self, posn_x, posn_y, velocity_x, velocity_y, kula): \n self.posn_x = posn_x # x position of box containing the ball (bottom). \n self.posn_y = posn_y # x position of box containing the ball (left edge). \n self.velocity_x = velocity_x # amount of x-movement each cycle of the 'for' loop. \n self.velocity_y = 100.0 # amount of y-movement each cycle of the 'for' loop. \n self.color = kula # color of the ball \n\n self.ball_width = 20.0 # size of ball - width (x-dimension). \n self.ball_height = 20.0 # size of ball - height (y-dimension). \n self.coef_restitution = 0.90",
"def ball_bounce(wall):\n if wall: # top & bottom walls\n ball_vel[1] = -ball_vel[1]\n else: # left or right walls\n ball_vel[0] = -ball_vel[0]",
"def update_ball(self):\n\t\tself.ball_x += self.velocity_x\n\t\tself.ball_y += self.velocity_y\n\t\tif self.ball_y < 0:\n\t\t\tself.ball_y = -self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_y > 1:\n\t\t\tself.ball_y = 2 - self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_x < 0:\n\t\t\tself.ball_x = -self.ball_x\n\t\t\tself.velocity_x = -self.velocity_x\n\t\tif self.ball_x < 1:\n\t\t\treturn 0\n\t\tif self.ball_y > self.paddle_y + State.paddle_height or self.ball_y < self.paddle_y:\n\t\t\treturn -1\n\t\tself.ball_x = 2 - self.ball_x\n\t\tself.velocity_x = random.uniform(-0.015, 0.015) - self.velocity_x\n\t\tif abs(self.velocity_x) < 0.03:\n\t\t\tself.velocity_x = 0.03 if self.velocity_x > 0 else -0.03\n\t\tself.velocity_y = random.uniform(-0.03, 0.03) - self.velocity_y\n\t\tself.velocity_x = max(min(self.velocity_x, 1.0), -1.0)\n\t\tself.velocity_y = max(min(self.velocity_y, 1.0), -1.0)\n\t\treturn 1",
"def add(self, vector):\n self.x += vector.x\n self.y += vector.y",
"def project_vector(u, v):\n u_np = np.array([u.get_x(), u.get_y()])\n v_np = np.array([v.get_x(), v.get_y()])\n proj = (np.dot(u_np, v_np) / np.dot(v_np, v_np)) * v_np\n return Point(proj[0], proj[1])",
"def translate(self, vector):\n \n matrix = wf.translationMatrix(*vector)\n for wireframe in self.wireframes.values():\n wireframe.transform(matrix)",
"def create_ball():\n balls.append(gen_ball())\n generate_velocity(balls)",
"def __add__(self, vector):\n return self.translated(vector)",
"def compute_coll(self, ball, step):\n m1 = self.mass\n m2 = ball.mass\n r1 = self.radius\n r2 = ball.radius\n v1 = self.velocity\n v2 = ball.velocity\n x1 = self.position\n x2 = ball.position\n di = x2-x1\n norm = np.linalg.norm(di)\n\n if norm-r1-r2 < step*abs(np.dot(v1-v2,di))/norm:\n self.vafter = v1 - 2.*m2/(m1+m2) * np.dot(v1-v2,di)/(np.linalg.norm(di)**2.) * di",
"def euclidean_proj_l1ball(v, s=1):\n assert s > 0, \"Radius s must be strictly positive (%d <= 0)\" % s\n n, = v.shape # will raise ValueError if v is not 1-D\n # compute the vector of absolute values\n u = np.abs(v)\n # check if v is already a solution #DB I commented this out since I want it to always be an equality ||v||_1 = 1\n #if u.sum() <= s:\n # L1-norm is <= s\n # return v\n # v is not already a solution: optimum lies on the boundary (norm == s)\n # project *u* on the simplex\n w = euclidean_proj_simplex(u, s=s)\n # compute the solution to the original problem on v\n w *= np.sign(v)\n return w",
"def project_Linf_ball(x: \"fasta.linalg.Vector\", t: float) -> \"fasta.linalg.Vector\":\n N = len(x)\n xabs = np.abs(x)\n\n # Reverse sort the absolute values of z\n flipped = xabs.copy()\n flipped[::-1].sort()\n\n # Magic\n alpha = np.max((np.cumsum(flipped) - t) / np.arange(1, N+1))\n\n if alpha > 0:\n return np.minimum(xabs, alpha) * np.sign(x)\n else:\n return np.zeros(N)",
"def lmap(v: float, x: Interval, y: Interval) -> float:\n return y[0] + (v - x[0]) * (y[1] - y[0]) / (x[1] - x[0])",
"def euclidean_proj_l1ball(v, s=1):\n assert s > 0, \"Radius s must be strictly positive (%d <= 0)\" % s\n n, = v.shape # will raise ValueError if v is not 1-D\n # compute the vector of absolute values\n u = np.abs(v)\n # check if v is already a solution\n if u.sum() <= s:\n # L1-norm is <= s\n return v\n # v is not already a solution: optimum lies on the boundary (norm == s)\n # project *u* on the simplex\n w = euclidean_proj_simplex(u, s=s)\n # compute the solution to the original problem on v\n w *= np.sign(v)\n return w",
"def localize_ball(x, y, radius, ball_radius, focal_length):\n # the y axis is flipped in camera coordinates\n y = -y\n\n # find three points on the circle\n center = (x, y)\n circle_points = [\n (0, -radius),\n (radius * np.cos(np.pi/6), radius * np.sin(np.pi/6)),\n (radius * np.cos(5/6 * np.pi), radius * np.sin(5/6 * np.pi)),\n ]\n npoints = 3\n\n # calculate the direction vectors of the camera rays that contain all the possible projected points\n # the formula is derived by reversing the pinhole camera model, intersecting two planes and calculating the\n # cross product of their normals\n line_directions = [np.array([u * focal_length, -v * focal_length, focal_length ** 2]) for (u, v) in circle_points]\n\n # solve a nonlinear equation to find the center of the sphere\n def unpack(x):\n # pull out the intersection points of the rays and the sphere and the center of the sphere from the root\n p = [x[i * 3:i * 3 + 3] for i in range(npoints)]\n s = x[-3:]\n return p, s\n\n def equation(x):\n p, s = unpack(x)\n print(p, s)\n\n ret = np.ravel(np.array([\n [\n # distance from the line (easier than checking if the point is on the line and better for the solver)\n np.linalg.norm(np.cross(p[i], line_directions[i])) / np.linalg.norm(line_directions[i]),\n # distance between the intersection point and the center of the ball should be its radius\n np.linalg.norm(p[i] - s) - ball_radius,\n # the direction of the ray and the radius vector to the intersection point should be orthogonal\n np.dot(s - p[i], line_directions[i])\n ]\n for i in range(npoints)\n ]))\n print(ret)\n return ret\n\n x0 = scipy.optimize.broyden1(equation, 100*np.random.random(npoints*3+3) - 50)\n _, s = unpack(x0)\n\n # flip the point if it's behind the camera projection plane (there are two solutions and we need that one)\n if s[2] < 0:\n s = -s\n\n return s",
"def feed(self, vector):\n return vector",
"def translate(self, vect):\n self.apply(lambda c: (vector(c) + vect).coords())",
"def spawn_ball(direction):\n global ball_pos, ball_vel \n ball_pos = [WIDTH / 2, HEIGHT / 2]\n ball_vel = ball_generate_velocity(direction) # Ball velocity randomization ",
"def spawn_ball(direction):\r\n \r\n global ball_pos, ball_vel # these are vectors stored as lists\r\n global strike_counter\r\n \r\n # clear strike counter each new run\r\n strike_counter = 0\r\n \r\n # determine new initial velocity\r\n ball_pos = [WIDTH / 2, HEIGHT / 2]\r\n if direction == RIGHT:\r\n ball_vel = [random.randrange(120, 240) / REFRESH_RATE, -random.randrange(60, 180) / REFRESH_RATE]\r\n elif direction == LEFT:\r\n ball_vel = [-random.randrange(120, 240) / REFRESH_RATE, -random.randrange(60, 180) / REFRESH_RATE]",
"def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)",
"def update_ball(self,X,Y,X_V,Y_V):\n if self.right_side:\n self.ball_x = self.game_dimension[0] - X\n else:\n self.ball_x = X\n\n self.ball_x -= int(self.ball_size/2)\n self.ball_y = Y - int(self.ball_size/2)\n\n self.ball_rect.x = self.ball_x\n self.ball_rect.y = self.ball_y\n return",
"def setVector(self, vector):\n self.p2 = vector(self.p1)",
"def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)",
"def forward(self):\n #print('forward\\r')\n self.linearVector = Vector3(x=1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)",
"def make_vector_laplace(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(1)\n\n laplace_r = make_laplace(bcs.extract_component(0))\n laplace_z = make_laplace(bcs.extract_component(1))\n laplace_phi = make_laplace(bcs.extract_component(2))\n\n @jit_allocate_out(out_shape=(3,) + bcs.grid.shape)\n def vector_laplace(arr, out=None):\n \"\"\"apply gradient operator to array `arr`\"\"\"\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out\n\n return vector_laplace # type: ignore",
"def vector_laplace(arr, out=None):\n laplace_r(arr[0], out=out[0])\n laplace_z(arr[1], out=out[1])\n laplace_phi(arr[2], out=out[2])\n return out"
] | [
"0.66520876",
"0.6209839",
"0.6140181",
"0.6015735",
"0.5979458",
"0.5940027",
"0.5939712",
"0.59238505",
"0.5875229",
"0.5867069",
"0.5799263",
"0.5797262",
"0.57569957",
"0.5726671",
"0.57266325",
"0.5723105",
"0.5687589",
"0.56815106",
"0.56803626",
"0.5629013",
"0.5628878",
"0.56276757",
"0.5624004",
"0.5622257",
"0.5616185",
"0.5606765",
"0.5598775",
"0.55938447",
"0.5569389",
"0.55557793"
] | 0.7311037 | 0 |
The shrink (soft-thresholding) operator, which is also the proximal operator for the L1-norm. The shrink operator reduces the magnitude of every entry of x by t, setting an entry to zero when its magnitude is already at most t. | def shrink(x: np.ndarray, t: float) -> np.ndarray:
return np.sign(x) * np.maximum(np.abs(x) - t, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_softshrink(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n threshold = _expr.const(op.attr(\"lambda\"), dtype=dtype)\n zeros = _op.zeros_like(x)\n out = _op.where(x < -threshold, x + threshold, zeros) + _op.where(\n x > threshold, x - threshold, zeros\n )\n g.add_node(op.output(\"Out\")[0], out)",
"def shrink(self):\n x = np.nan_to_num(self.X.values)\n\n # de-mean returns\n t, n = np.shape(x)\n meanx = x.mean(axis=0)\n x = x - np.tile(meanx, (t, 1))\n xmkt = x.mean(axis=1).reshape(t, 1)\n\n # compute sample covariance matrix\n sample = np.cov(np.append(x, xmkt, axis=1), rowvar=False) * (t - 1) / t\n covmkt = sample[0:n, n].reshape(n, 1)\n varmkt = sample[n, n]\n sample = sample[:n, :n]\n prior = np.dot(covmkt, covmkt.T) / varmkt\n prior[np.eye(n) == 1] = np.diag(sample)\n\n # compute shrinkage parameters\n if self.delta is None:\n c = np.linalg.norm(sample - prior, \"fro\") ** 2\n y = x ** 2\n p = 1 / t * np.sum(np.dot(y.T, y)) - np.sum(sample ** 2)\n # r is divided into diagonal\n # and off-diagonal terms, and the off-diagonal term\n # is itself divided into smaller terms\n rdiag = 1 / t * np.sum(y ** 2) - sum(np.diag(sample) ** 2)\n z = x * np.tile(xmkt, (n,))\n v1 = 1 / t * np.dot(y.T, z) - np.tile(covmkt, (n,)) * sample\n roff1 = (\n np.sum(v1 * np.tile(covmkt, (n,)).T) / varmkt\n - np.sum(np.diag(v1) * covmkt.T) / varmkt\n )\n v3 = 1 / t * np.dot(z.T, z) - varmkt * sample\n roff3 = (\n np.sum(v3 * np.dot(covmkt, covmkt.T)) / varmkt ** 2\n - np.sum(np.diag(v3).reshape(-1, 1) * covmkt ** 2) / varmkt ** 2\n )\n roff = 2 * roff1 - roff3\n r = rdiag + roff\n\n # compute shrinkage constant\n k = (p - r) / c\n shrinkage = max(0, min(1, k / t))\n self.delta = shrinkage\n else:\n # use specified constant\n shrinkage = self.delta\n\n # compute the estimator\n sigma = shrinkage * prior + (1 - shrinkage) * sample\n return self.format_and_annualise(sigma)",
"def convert_hard_shrink(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n dtype = infer_type(x).checked_type.dtype\n threshold = op.attr(\"threshold\")\n threshold = _op.const(threshold, dtype)\n out = _op.logical_or(x < _op.const(-1.0, dtype) * threshold, x > threshold)\n out = _op.cast(out, dtype) * x\n g.add_node(op.output(\"Out\")[0], out)",
"def shrink(X, tau):\n V = np.copy(X).reshape(X.size)\n for i in xrange(V.size):\n V[i] = math.copysign(max(abs(V[i]) - tau, 0), V[i])\n if V[i] == -0:\n V[i] = 0\n return V.reshape(X.shape)",
"def shrink(value):\n return (1 + value) / 2",
"def shrink(self):\n x = np.nan_to_num(self.X.values)\n\n # de-mean returns\n t, n = np.shape(x)\n meanx = x.mean(axis=0)\n x = x - np.tile(meanx, (t, 1))\n\n # compute sample covariance matrix\n sample = (1.0 / t) * np.dot(x.T, x)\n\n # compute prior\n var = np.diag(sample).reshape(-1, 1)\n sqrtvar = np.sqrt(var)\n _var = np.tile(var, (n,))\n _sqrtvar = np.tile(sqrtvar, (n,))\n r_bar = (np.sum(sample / (_sqrtvar * _sqrtvar.T)) - n) / (n * (n - 1))\n prior = r_bar * (_sqrtvar * _sqrtvar.T)\n prior[np.eye(n) == 1] = var.reshape(-1)\n\n # compute shrinkage parameters and constant\n if self.delta is None:\n\n # what we call pi-hat\n y = x ** 2.0\n phi_mat = np.dot(y.T, y) / t - 2 * np.dot(x.T, x) * sample / t + sample ** 2\n phi = np.sum(phi_mat)\n\n # what we call rho-hat\n term1 = np.dot((x ** 3).T, x) / t\n help_ = np.dot(x.T, x) / t\n help_diag = np.diag(help_)\n term2 = np.tile(help_diag, (n, 1)).T * sample\n term3 = help_ * _var\n term4 = _var * sample\n theta_mat = term1 - term2 - term3 + term4\n theta_mat[np.eye(n) == 1] = np.zeros(n)\n rho = sum(np.diag(phi_mat)) + r_bar * np.sum(\n np.dot((1.0 / sqrtvar), sqrtvar.T) * theta_mat\n )\n\n # what we call gamma-hat\n gamma = np.linalg.norm(sample - prior, \"fro\") ** 2\n\n # compute shrinkage constant\n kappa = (phi - rho) / gamma\n shrinkage = max(0.0, min(1.0, kappa / t))\n self.delta = shrinkage\n else:\n # use specified constant\n shrinkage = self.delta\n\n # compute the estimator\n sigma = shrinkage * prior + (1 - shrinkage) * sample\n return self.format_and_annualise(sigma)",
"def shrink_soft_threshold(r,rvar,theta):\n if len(theta.get_shape())>0 and theta.get_shape() != (1,):\n lam = theta[0] * tf.sqrt(rvar)\n scale=theta[1]\n else:\n lam = theta * tf.sqrt(rvar)\n scale = None\n lam = tf.maximum(lam,0)\n arml = tf.abs(r) - lam\n xhat = tf.sign(r) * tf.maximum(arml,0)\n dxdr = tf.reduce_mean(tf.to_float(arml>0),0)\n if scale is not None:\n xhat = xhat*scale\n dxdr = dxdr*scale\n return (xhat,dxdr)",
"def shrink(self):\n self.mass *= 0.8",
"def shrink(self, factor_x:int=1, factor_y:int=1):\n # vertical shrink\n shrunk = self._pixels[::factor_y]\n # horizontal shrink\n shrunk = self._outer(_row[::factor_x] for _row in shrunk)\n return type(self)(shrunk, _0=self._0, _1=self._1)",
"def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0",
"def temperature_scaling(x,t):\n n,d = x.shape\n res = np.copy(x)\n\n res *= (1./t)\n for i in range(n):\n res[i] = softmax(res[i])\n return(res)",
"def prox(self, x):\n if self.regularize:\n x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)\n return x",
"def prox(self, x):\n if self.regularize:\n x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)\n return x",
"def prox(self, x):\n if self.regularize:\n x[:self.offset(x)] /= (1 + 2 * self.tau * self.lambda_)\n return x",
"def shrink(self):\n # We assume that if an all-zero block of bytes is an interesting\n # example then we're not going to do better than that.\n # This might not technically be true: e.g. for integers() | booleans()\n # the simplest example is actually [1, 0]. Missing this case is fairly\n # harmless and this allows us to make various simplifying assumptions\n # about the structure of the data (principally that we're never\n # operating on a block of all zero bytes so can use non-zeroness as a\n # signpost of complexity).\n if not any(self.shrink_target.buffer) or self.incorporate_new_buffer(\n hbytes(len(self.shrink_target.buffer))\n ):\n return\n\n try:\n self.greedy_shrink()\n finally:\n if self.__engine.report_debug_info:\n\n def s(n):\n return \"s\" if n != 1 else \"\"\n\n total_deleted = self.initial_size - len(self.shrink_target.buffer)\n\n self.debug(\"---------------------\")\n self.debug(\"Shrink pass profiling\")\n self.debug(\"---------------------\")\n self.debug(\"\")\n calls = self.__engine.call_count - self.initial_calls\n self.debug(\n (\n \"Shrinking made a total of %d call%s \"\n \"of which %d shrank. This deleted %d byte%s out of %d.\"\n )\n % (\n calls,\n s(calls),\n self.shrinks,\n total_deleted,\n s(total_deleted),\n self.initial_size,\n )\n )\n for useful in [True, False]:\n self.debug(\"\")\n if useful:\n self.debug(\"Useful passes:\")\n else:\n self.debug(\"Useless passes:\")\n self.debug(\"\")\n for p in sorted(\n self.passes,\n key=lambda t: (-t.calls, -t.runs, t.deletions, t.shrinks),\n ):\n if p.calls == 0:\n continue\n if (p.shrinks != 0) != useful:\n continue\n\n self.debug(\n (\n \" * %s ran %d time%s, making %d call%s of which \"\n \"%d shrank, deleting %d byte%s.\"\n )\n % (\n p.name,\n p.runs,\n s(p.runs),\n p.calls,\n s(p.calls),\n p.shrinks,\n p.deletions,\n s(p.deletions),\n )\n )\n self.debug(\"\")",
"def widen(self):\n t, h = self.time, self.half_duration\n h *= self.scaling_coeff_x\n self.set_interval((t - h, t + h))",
"def free(x):\n _, p = extract_q_p(x)\n return tf.squeeze(0.5 * tf.reduce_sum(tf.square(p), axis=1))",
"def scaled_sigmoid(self, x):\r\n return (tf.keras.backend.sigmoid(x) * 30 - 5)",
"def scaled_tanh(self, x):\r\n return tf.keras.backend.tanh(x) * 3",
"def unit_scale(x, eps=1e-8):\n\tx = x.copy()\n\tx -= x.min()\n\tx *= 1.0 / (x.max() + eps)\n\treturn x",
"def ThreshSURE(self,x):\n import numpy as np\n # (N - 2 * (idx + 1) + (N - (idx + 1))*sqr_coeff + sum(sqr_coeffs[0:idx+1])) / N\n x = x.flatten()\n n = np.size(x)\n dx = np.sort(np.abs(x))\n n1 = n-2*np.arange(0,n,1)\n n2 = np.arange(n-1,-1,-1)\n cd1 = np.cumsum(dx**2,axis=0)\n risk = (n1+cd1+n2*dx**2)/n\n ichosen = np.argmin(risk)\n thr = dx[ichosen]\n\n return thr",
"def threshold_and_normalize_pixels(x, eps=1e-2):\n x = torch.clamp(x, min=eps)\n x = x / torch.sum(x, dim=1, keepdim=True)\n return x",
"def relu(x: jnp.DeviceArray) -> jnp.DeviceArray:\n return jnp.clip(x, a_min=0)",
"def shrink(self):\n for i in range(1, len(self.vertices)):\n self.vertices[i] = self.vertices[0] + self.sigma*(self.vertices[i]-self.vertices[0])",
"def unstandardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n x_scaled = x * stats['std'] + stats['mean']\n return x_scaled",
"def shrink_kernel(self, kernel, up_scale):\n up_scale = torch.tensor(up_scale).float()\n # boundary padding based on the scaling law\n pad_in = (torch.ceil(up_scale**2).int())*((kernel.shape[2]-1)//2)\n pad_h = (torch.ceil(up_scale).int())*((kernel.shape[3]-1)//2)\n pad_w = (torch.ceil(up_scale).int())*((kernel.shape[4]-1)//2)\n padded_kernel = F.pad(kernel, (pad_w, pad_w, pad_h, pad_h, pad_in, pad_in))\n delta = up_scale%1\n \n if delta == 0:\n shrink_factor = 1\n else:\n # shrink_factor for coordinates.\n shrink_factor = (((kernel.shape[4]-1))/(padded_kernel.shape[-1]-1)*(up_scale+1))\n \n # Adjustment to deal with weird filtering on the grid sample function.\n shrink_factor = 1.5*(shrink_factor-0.5)**3 + 0.57 \n\n grid = torch.meshgrid(torch.linspace(-1, 1, kernel.shape[2])*(shrink_factor**2),\n torch.linspace(-1, 1, kernel.shape[3])*shrink_factor, \n torch.linspace(-1, 1, kernel.shape[4])*shrink_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = -1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(padded_kernel, grid.to(device))\n if kernel.shape[-1] - 2*up_scale > 0:\n new_kernel = new_kernel * (kernel.shape[-1]**2/((kernel.shape[-1] - 2*up_scale)**2 + 0.01))\n return new_kernel",
"def _shrink(self):\n raise NotImplementedError(\"Should have implemented this.\")",
"def _shrink_main(self, amt):\n self.ratio -= amt\n self.ratio = max(self.min_ratio, self.ratio)",
"def half_space_cooling_waermefluss(k, T0, T1, kappa, t):\n return k * (T1 - T0) / (numpy.sqrt(math.pi * kappa * t))",
"def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))"
] | [
"0.65450245",
"0.6496893",
"0.64630055",
"0.6456143",
"0.6226922",
"0.61587155",
"0.5946745",
"0.5927948",
"0.5875077",
"0.57929087",
"0.57728153",
"0.5728194",
"0.5728194",
"0.5728194",
"0.5629461",
"0.5480745",
"0.543062",
"0.5420821",
"0.54135394",
"0.5374806",
"0.53720266",
"0.5362515",
"0.53516877",
"0.534372",
"0.53413534",
"0.53027105",
"0.52982765",
"0.5291772",
"0.5289104",
"0.52774066"
] | 0.78240633 | 0 |
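The record above pairs the soft-thresholding docstring with its one-line NumPy positive document. As a hedged illustration only (not part of the dataset), the sketch below reproduces that function and cross-checks it against the proximal-operator definition on a small vector; the test values and the brute-force grid check are assumptions introduced for this example.

import numpy as np

def shrink(x: np.ndarray, t: float) -> np.ndarray:
    # Soft-thresholding, reproduced from the positive document above:
    # the proximal operator of z -> t * ||z||_1.
    return np.sign(x) * np.maximum(np.abs(x) - t, 0)

# Entries with |x_i| <= t are zeroed; all other entries move toward zero by t.
x = np.array([3.0, -0.5, 1.2, 0.0])
print(shrink(x, 1.0))  # approximately [ 2.  -0.   0.2  0. ]

# Coarse sanity check of the prox property: per coordinate, shrink(x, t)
# should minimize 0.5*(z - x_i)^2 + t*|z| over z.
t = 1.0
z_grid = np.linspace(-4.0, 4.0, 8001)
brute = np.array([z_grid[np.argmin(0.5 * (z_grid - xi) ** 2 + t * np.abs(z_grid))] for xi in x])
assert np.allclose(brute, shrink(x, t), atol=1e-3)

Because the L1 prox separates across coordinates, the closed form and the per-coordinate grid search agree, which is why the implementation needs no loop.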
Build the ResNet backbone and position embedding according to the config. | def build_backbone(config):
assert config.MODEL.BACKBONE in ['resnet50', 'resnet101'], "backbone name is not supported!"
backbone_name = config.MODEL.BACKBONE
dilation = False
train_backbone = not config.EVAL
    return_interm_layers = False  # TODO: implement the return_interm_layers=True case (needed for segmentation)
position_embedding = build_position_encoding(config.MODEL.TRANS.HIDDEN_SIZE)
backbone = Backbone(backbone_name, train_backbone, return_interm_layers, dilation)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backbone_generator(params):\n if params.architecture.backbone == 'resnet':\n resnet_params = params.resnet\n backbone_fn = resnet.Resnet(\n resnet_depth=resnet_params.resnet_depth,\n dropblock=dropblock_generator(params.dropblock),\n activation=params.batch_norm_activation.activation,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation),\n init_drop_connect_rate=resnet_params.init_drop_connect_rate,\n space_to_depth_block_size=params.architecture.space_to_depth_block_size)\n elif params.architecture.backbone == 'spinenet':\n spinenet_params = params.spinenet\n backbone_fn = spinenet.spinenet_builder(\n model_id=spinenet_params.model_id,\n min_level=params.architecture.min_level,\n max_level=params.architecture.max_level,\n use_native_resize_op=spinenet_params.use_native_resize_op,\n activation=params.batch_norm_activation.activation,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation),\n init_drop_connect_rate=spinenet_params.init_drop_connect_rate)\n elif params.architecture.backbone == 'spinenet_mbconv':\n spinenet_mbconv_params = params.spinenet_mbconv\n backbone_fn = spinenet_mbconv.spinenet_mbconv_builder(\n model_id=spinenet_mbconv_params.model_id,\n min_level=params.architecture.min_level,\n max_level=params.architecture.max_level,\n use_native_resize_op=spinenet_mbconv_params.use_native_resize_op,\n se_ratio=spinenet_mbconv_params.se_ratio,\n activation=params.batch_norm_activation.activation,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation),\n init_drop_connect_rate=spinenet_mbconv_params.init_drop_connect_rate)\n elif 'efficientnet' in params.architecture.backbone:\n backbone_fn = efficientnet.Efficientnet(params.architecture.backbone)\n else:\n raise ValueError(\n 'Backbone model %s is not supported.' % params.architecture.backbone)\n\n return backbone_fn",
"def __init__(\n self,\n config, # the config is loaded from scratch later on anyways\n protstonkgs_model_type: str = PROTSTONKGS_MODEL_TYPE,\n lm_model_type: str = NLP_MODEL_TYPE,\n lm_vocab_size: int = 28996,\n prot_start_idx: int = 1024,\n prot_model_type: str = PROT_SEQ_MODEL_TYPE,\n prot_vocab_size: int = 30,\n kg_start_idx: int = 768,\n kg_embedding_dict_path: str = PROT_EMBEDDINGS_PATH,\n ):\n # Initialize the KG dict from the file here, rather than passing it as a parameter, so that it can\n # be loaded from a checkpoint\n kg_embedding_dict = prepare_df(kg_embedding_dict_path)\n # Initialize the BigBird config for the model architecture\n config = BigBirdConfig.from_pretrained(protstonkgs_model_type)\n # Use gradient checkpointing to save memory at the expense of speed\n config.update({\"gradient_checkpointing\": True})\n # Add the number of KG entities to the default config of a standard BigBird model\n config.update({\"lm_vocab_size\": lm_vocab_size})\n # Add the number of KG entities to the default config of a standard BigBird model\n config.update({\"kg_vocab_size\": len(kg_embedding_dict)})\n # Add the protein sequence vocabulary size to the default config as well\n config.update({\"prot_vocab_size\": prot_vocab_size})\n\n # Initialize the underlying LongformerForPreTraining model that will be used to build the STonKGs\n # Transformer layers\n super().__init__(config)\n\n # Initialize the three backbones for generating the initial embeddings for the three modalities (text, KG, prot)\n # 1. LM backbone for text (pre-trained BERT-based model to get the initial embeddings)\n # based on the specified protstonkgs_model_type (e.g. BioBERT)\n self.lm_backbone = BertModel.from_pretrained(lm_model_type)\n\n # 2. Prot backbone for protein sequences (e.g. ProtBERT)\n # do_lower_case is required, see example in https://huggingface.co/Rostlab/prot_bert\n self.prot_backbone = BertModel.from_pretrained(prot_model_type)\n self.prot_start_idx = prot_start_idx\n\n # Initialize the ProtSTonKGs tokenizer\n self.protstonkgs_tokenizer = BigBirdTokenizer.from_pretrained(protstonkgs_model_type)\n\n # In order to initialize the KG backbone: First get the separator, mask and unknown token ids from the\n # ProtSTonKGs model base (BigBird)\n self.sep_id = self.protstonkgs_tokenizer.sep_token_id\n self.mask_id = self.protstonkgs_tokenizer.mask_token_id\n self.unk_id = self.protstonkgs_tokenizer.unk_token_id\n\n # 3. 
KG backbone for KG entities (pretrained node2vec model)\n # Get numeric indices for the KG embedding vectors except for the sep, unk, mask ids which are reserved for the\n # LM [SEP] embedding vectors (see below)\n numeric_indices = list(range(len(kg_embedding_dict) + 3))\n # Keep the numeric indices of the special tokens free, don't put the kg embeds there\n for special_token_id in [self.sep_id, self.mask_id, self.unk_id]:\n numeric_indices.remove(special_token_id)\n # Generate numeric indices for the KG node names (iterating .keys() is deterministic)\n self.kg_idx_to_name = {i: key for i, key in zip(numeric_indices, kg_embedding_dict.keys())}\n # Initialize KG index to embeddings based on the provided kg_embedding_dict\n self.kg_backbone = {\n i: torch.tensor(kg_embedding_dict[self.kg_idx_to_name[i]]).to(self.lm_backbone.device)\n for i in self.kg_idx_to_name.keys()\n }\n self.kg_start_idx = kg_start_idx\n # Add the MASK, SEP and UNK (LM backbone) embedding vectors to the KG backbone so that the labels are correctly\n # identified in the loss function later on\n # [0][0][0] is required to get the shape from batch x seq_len x hidden_size to hidden_size\n with torch.no_grad():\n for special_token_id in [self.sep_id, self.mask_id, self.unk_id]:\n self.kg_backbone[special_token_id] = self.lm_backbone(\n torch.tensor([[special_token_id]]).to(self.device),\n )[0][0][0]\n\n # Override the standard MLM head: In the underlying BigBirdForPreTraining model, change the MLM head to a\n # custom ProtSTonKGsELMPredictionHead so that it can be used on the concatenated text/entity/prot sequence input\n self.cls.predictions = ProtSTonKGsPELMPredictionHead(\n config,\n kg_start_idx=kg_start_idx,\n prot_start_idx=prot_start_idx,\n )\n\n # Freeze the parameters of the LM and Prot backbones so that they're not updated during training\n # (We only want to train the ProtSTonKGs Transformer layers + prot to hidden linear layer)\n for backbone in [self.lm_backbone, self.prot_backbone]:\n for param in backbone.parameters():\n param.requires_grad = False\n\n # Add another layer that transforms the hidden size of the protein model onto the ProtSTonKGs hidden size\n self.prot_to_lm_hidden_linear = nn.Linear(\n self.prot_backbone.config.hidden_size,\n self.config.hidden_size,\n )",
"def __init__(self,embedding_size):\n super(ResNetEncoder,self).__init__()\n resnet = models.resnet50(pretrained=True)\n modules = list(resnet.children())[:-1]\n #Create a sequential models upto top fc layer add a custom fc layer compatible with embedding size of decoder RNN\n self.resnet = nn.Sequential(*modules)\n self.embed = nn.Linear(resnet.fc.in_features,embedding_size)\n self.bn = nn.BatchNorm1d(embedding_size,momentum=0.01)\n self.init_weights()",
"def build_model(cfg, char_voca, word_voca=None, gazet=None, pos_voca=None):\n\n # Build Embedder\n embedder = Embedder(\n window=cfg.window,\n char_voca=char_voca,\n word_voca=word_voca,\n jaso_dim=cfg.jaso_dim,\n char_dim=cfg.char_dim,\n word_dim=cfg.word_dim,\n gazet=gazet,\n gazet_embed=True,\n pos_enc=True,\n phoneme=True,\n pos_voca_size=len(pos_voca),\n pos_dim=cfg.pos_dim)\n\n print('Total Embedding_size: ', embedder.embed_dim)\n\n\n encoder_name, decoder_name = cfg.model_name.lower().split('-')\n\n # Build Encoder\n if encoder_name == 'fnn5':\n encoder = models.Fnn5(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn7':\n encoder = models.Cnn7(in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn8':\n encoder = models.Cnn8(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name in ['gru', 'lstm', 'sru']:\n encoder = models.RnnEncoder(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n out_dim=cfg.hidden_dim,\n cell=encoder_name)\n else:\n raise ValueError('unknown model name: %s' % cfg.model_name)\n\n # Build Decoder\n if decoder_name.lower() == 'fc':\n decoder = models.FCDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags)\n elif decoder_name in ['gru', 'lstm', 'sru']:\n decoder = models.RnnDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags,\n num_layers=cfg.num_layers,\n cell=decoder_name)\n\n model = models.Ner(embedder, encoder, decoder)\n\n return model",
"def build(self):\n self.build_inputs()\n self.image_embeddings = self.build_image_embeddings(self.images)\n self.seq_embeddings = self.build_seq_embeddings(self.input_seqs)\n self.build_model()\n self.setup_inception_initializer()\n self.setup_global_step()",
"def build_modules(self):\n self.backbone = Backbone(\n self.configs['backbone'],\n freeze_backbone=self.configs['freeze_backbone'],\n freeze_batchnorm=True\n )\n\n backbone_channel_sizes = get_backbone_channel_sizes(self.backbone)\n\n self.fpn = FeaturePyramidNetwork(\n backbone_channel_sizes=backbone_channel_sizes,\n min_feature_level=self.configs['min_feature_level'],\n max_feature_level=self.configs['max_feature_level'],\n feature_size=self.configs['pyramid_feature_size']\n )\n\n self.shared_conv_model = SharedConvModel(\n input_feature_size=self.configs['pyramid_feature_size'],\n feature_size=self.configs['shared_conv_feature_size'],\n num_layers=self.configs['shared_conv_num_layers']\n )\n\n if self.configs['shared_conv_num_layers'] > 0:\n shared_conv_output_size = self.configs['shared_conv_feature_size']\n else:\n shared_conv_output_size = self.configs['pyramid_feature_size']\n\n self.ofn = ObjectFinderNetwork(\n input_feature_size=shared_conv_output_size,\n feature_size=self.configs['finder_feature_size'],\n num_layers=self.configs['finder_num_layers']\n )\n\n self.ofn_loss_fn\n\n # self.classification_model = ClassificationModel()\n #\n # self.regression_model = RegressionModel()",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def __init__(self, voc_size=8000, embed_size=100, hid_size=100, trunc=4,\n model=None):\n\n self.log = logging.getLogger(\"TEST.Embed\")\n self.log.setLevel(logging.INFO)\n\n self.unknown_token = \"UNKNOWN_TOKEN\"\n self.sentence_start_token = \"SENTENCE_START\"\n self.sentence_end_token = \"SENTENCE_END\"\n\n if model is None:\n self.log.info(\"Initializing RNN parameters and functions...\")\n\n self.vocabulary_size = voc_size\n self.embed_size = embed_size\n self.hidden_size = hid_size\n self.bptt_truncate = trunc\n\n # Instantiate the network weights\n # I feel like the first and third are switched for some reason...\n # but it's pretty consistent in the example code. Perhaps it's\n # backwards for a purpose\n # The weights going from the input layer to the word embedding\n # layer (E, in tutorial)\n weights_ie = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (embed_size, voc_size))\n\n # The weights going from input layer to hidden layer\n # (U, in tutorial)\n weights_eh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, embed_size))\n\n # The weights going from hidden layer to hidden layer\n # (W, in tutorial)\n weights_hh = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (3, hid_size, hid_size))\n\n # The weights going from hidden layer to output layer\n # (V, in tutorial)\n weights_ho = np.random.uniform(-np.sqrt(1./voc_size),\n np.sqrt(1./voc_size),\n (voc_size, hid_size))\n\n # The bias for the hidden units (no bias applied to embedding layer)\n bias = np.zeros((3, hid_size))\n\n # The bias for the output units\n out_bias = np.zeros(voc_size)\n\n self.weights_ie = theano.shared(\n name='weights_ie',\n value=weights_ie.astype(theano.config.floatX))\n\n self.weights_eh = theano.shared(\n name='weights_eh',\n value=weights_eh.astype(theano.config.floatX))\n\n self.weights_hh = theano.shared(\n name='weights_hh',\n value=weights_hh.astype(theano.config.floatX))\n\n self.weights_ho = theano.shared(\n name='weights_ho',\n value=weights_ho.astype(theano.config.floatX))\n\n self.bias = theano.shared(\n name='bias',\n value=bias.astype(theano.config.floatX))\n\n self.out_bias = theano.shared(\n name='out_bias',\n value=out_bias.astype(theano.config.floatX))\n\n self.cache_ie = theano.shared(\n name='cache_ie',\n value=np.zeros(weights_ie.shape).astype(theano.config.floatX))\n\n self.cache_eh = theano.shared(\n name='cache_eh',\n value=np.zeros(weights_eh.shape).astype(theano.config.floatX))\n\n self.cache_hh = theano.shared(\n name='cache_hh',\n value=np.zeros(weights_hh.shape).astype(theano.config.floatX))\n\n self.cache_ho = theano.shared(\n name='cache_ho',\n value=np.zeros(weights_ho.shape).astype(theano.config.floatX))\n\n self.cache_bias = theano.shared(\n name='cache_bias',\n value=np.zeros(bias.shape).astype(theano.config.floatX))\n\n self.cache_out_bias = theano.shared(\n name='cache_out_bias',\n value=np.zeros(out_bias.shape).astype(theano.config.floatX))\n\n self.vocabulary = []\n self.word_to_index = {}\n self.index_to_word = []\n else:\n self.log.info(\"Loading model parameters from saved model...\")\n\n with open(model, \"rb\") as modelFile:\n params = cPickle.load(modelFile)\n\n self.vocabulary_size = params[0]\n self.embed_size = params[1]\n self.hidden_size = params[2]\n self.bptt_truncate = params[3]\n\n self.weights_ie = params[4]\n self.weights_eh = params[5]\n self.weights_hh = params[6]\n self.weights_ho = params[7]\n\n self.vocabulary = params[8]\n if not self.vocabulary[-1] == 
self.unknown_token:\n self.log.info(\"Appending unknown token\")\n self.vocabulary[-1] = self.unknown_token\n self.index_to_word = params[9]\n self.word_to_index = params[10]\n\n self.bias = params[11]\n self.out_bias = params[12]\n\n self.cache_ie = params[13]\n self.cache_eh = params[14]\n self.cache_hh = params[15]\n self.cache_ho = params[16]\n self.cache_bias = params[17]\n self.cache_out_bias = params[18]\n # End of if statement\n\n # Symbolic representation of one input sentence\n input = T.ivector('sentence')\n\n # Symbolic representation of the one output sentence\n output = T.ivector('sentence')\n\n # Symbolic representation of the cache decay for RMSprop\n decay = T.scalar('decay')\n\n # Stochastic Gradient Descent step\n learning_rate = T.scalar('learning_rate')\n\n def forward_propagate(word, previous_state):\n \"\"\"\n Vertically propagates one of the words.\n\n :type word: int\n :param word: the index of the current input word\n\n :type previous_state: T.dvector()\n :param word: the output of the hidden layer from the previous\n horizontal layer\n \"\"\"\n # Embedding layer\n word_vector = self.weights_ie[:, word]\n\n # GRU layer\n update_gate = T.nnet.hard_sigmoid(\n self.weights_eh[0].dot(word_vector) +\n self.weights_hh[0].dot(previous_state) +\n self.bias[0]\n )\n\n reset_gate = T.nnet.hard_sigmoid(\n self.weights_eh[1].dot(word_vector) +\n self.weights_hh[1].dot(previous_state) +\n self.bias[1]\n )\n\n hypothesis = T.tanh(\n self.weights_eh[2].dot(word_vector) +\n self.weights_hh[2].dot(previous_state * reset_gate) +\n self.bias[2]\n )\n\n current_state = (T.ones_like(update_gate) - update_gate) * hypothesis + update_gate * previous_state\n\n # Output layer\n current_output = T.nnet.softmax(\n self.weights_ho.dot(current_state) + self.out_bias\n )[0]\n\n # Not sure why current_output[0] and not just current_output...\n return [current_output, current_state]\n\n #######################################################################\n # Symbolically represents going through each input sentence word and\n # then calculating the state of the hidden layer and output word for\n # each word. 
The forward_propagate function is the one used to\n # generate the output word and hidden layer state.\n #######################################################################\n self.theano = {}\n\n [out, state], updates = theano.scan(\n forward_propagate,\n sequences=input,\n truncate_gradient=self.bptt_truncate,\n outputs_info=[None, dict(initial=T.zeros(self.hidden_size))],\n name=\"forward_propagate\"\n )\n\n # Predicts the output words for each word in the sentence\n prediction = T.argmax(out, axis=1)\n\n # Calculates the output error between the predicted output and the\n # actual output\n out_error = T.sum(T.nnet.categorical_crossentropy(out, output))\n\n # Symbolically represents gradient calculations for gradient descent\n d_weights_ie = T.grad(out_error, self.weights_ie)\n d_weights_eh = T.grad(out_error, self.weights_eh)\n d_weights_hh = T.grad(out_error, self.weights_hh)\n d_weights_ho = T.grad(out_error, self.weights_ho)\n d_bias = T.grad(out_error, self.bias)\n d_out_bias = T.grad(out_error, self.out_bias)\n\n # Symbolic theano functions\n self.forward_propagate = theano.function([input], out,\n name=\"forward_propagate\")\n self.predict = theano.function([input], prediction, name=\"predict\")\n self.calculate_error = theano.function([input, output], out_error,\n name=\"calculate_error\")\n self.bptt = theano.function([input, output],\n [d_weights_ie, d_weights_eh, d_weights_hh, d_weights_ho, d_bias,\n d_out_bias],\n name=\"bptt\")\n\n # RMSprop parameters\n cache_ie = (decay * self.cache_ie) + ((1 - decay) * d_weights_ie ** 2)\n cache_eh = (decay * self.cache_eh) + ((1 - decay) * d_weights_eh ** 2)\n cache_hh = (decay * self.cache_hh) + ((1 - decay) * d_weights_hh ** 2)\n cache_ho = (decay * self.cache_ho) + ((1 - decay) * d_weights_ho ** 2)\n cache_bias = (decay * self.cache_bias) + ((1 - decay) * d_bias ** 2)\n cache_out_bias = (decay * self.cache_out_bias) + ((1 - decay) * d_out_bias ** 2)\n eps = 1e-6 # Prevents division by 0\n\n self.sgd_step = theano.function(\n [input, output, learning_rate, theano.In(decay, value=0.9)],\n [],\n updates=[\n (self.weights_ie, self.weights_ie - learning_rate *\n d_weights_ie / (T.sqrt(self.cache_ie + eps))),\n (self.weights_eh, self.weights_eh - learning_rate *\n d_weights_eh / (T.sqrt(self.cache_eh + eps))),\n (self.weights_hh, self.weights_hh - learning_rate *\n d_weights_hh / (T.sqrt(self.cache_hh + eps))),\n (self.weights_ho, self.weights_ho - learning_rate *\n d_weights_ho / (T.sqrt(self.cache_ho + eps))),\n (self.bias, self.bias - learning_rate * d_bias /\n (T.sqrt(self.cache_bias + eps))),\n (self.out_bias, self.out_bias - learning_rate *\n d_out_bias / (T.sqrt(self.cache_out_bias + eps))),\n (self.cache_ie, cache_ie),\n (self.cache_eh, cache_eh),\n (self.cache_hh, cache_hh),\n (self.cache_ho, cache_ho),\n (self.cache_bias, cache_bias),\n (self.cache_out_bias, cache_out_bias)]\n )\n\n self.x_train = None\n self.y_train = None",
"def __init__(self, cfg, name=''):\n nn.Layer.__init__(self)\n self.cfg = cfg\n d_model = cfg['hidden_size']\n d_emb = cfg.get('emb_size', cfg['hidden_size'])\n d_vocab = cfg['vocab_size']\n d_pos = cfg['max_position_embeddings']\n # d_sent = cfg.get(\"sent_type_vocab_size\", 4) or cfg.get('type_vocab_size', 4)\n if cfg.get('sent_type_vocab_size'):\n d_sent = cfg['sent_type_vocab_size']\n else:\n d_sent = cfg.get('type_vocab_size', 2)\n self.n_head = cfg['num_attention_heads']\n self.return_additional_info = cfg.get('return_additional_info', False)\n self.initializer = nn.initializer.TruncatedNormal(std=cfg['initializer_range'])\n\n self.ln = _build_ln(d_model, name=append_name(name, 'pre_encoder'))\n self.word_emb = nn.Embedding(d_vocab,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'word_embedding'),\n initializer=self.initializer))\n self.pos_emb = nn.Embedding(d_pos,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'pos_embedding'),\n initializer=self.initializer))\n # self.sent_emb = nn.Embedding(\n # d_sent,\n # d_emb,\n # weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'), initializer=self.initializer))\n self._use_sent_id = cfg.get('use_sent_id', True)\n self._use_sent_id = False\n if self._use_sent_id:\n self.sent_emb = nn.Embedding(d_sent,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'),\n initializer=self.initializer))\n self._use_task_id = cfg.get('use_task_id', False)\n self._use_task_id = False\n if self._use_task_id:\n self._task_types = cfg.get('task_type_vocab_size', 3)\n logging.info('using task_id, #task_types:{}'.format(self._task_types))\n self.task_emb = nn.Embedding(self._task_types,\n d_emb,\n weight_attr=paddle.ParamAttr(name=append_name(name, 'task_embedding'),\n initializer=self.initializer))\n\n prob = cfg['hidden_dropout_prob']\n self.dropout = nn.Dropout(p=prob)\n\n self.encoder_stack = ErnieEncoderStack(cfg, append_name(name, 'encoder'))\n\n if cfg.get('has_pooler', True):\n self.pooler = _build_linear(cfg['hidden_size'], cfg['hidden_size'], append_name(name, 'pooled_fc'),\n self.initializer)\n else:\n self.pooler = None\n\n self.key_tag = None\n self._checkpoints = []\n self.train()",
"def build_backbone(self):\n backbone = self.arch.backbone\n self.backbone = build_blocks(backbone, 'backbone')",
"def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()",
"def build(self):\n self.build_inputs()\n self.build_image_embeddings()\n self.build_seq_embeddings()\n self.build_encoder()\n self.build_prediction_model()\n self.setup_encoder_initializer()\n self.setup_global_step()\n self.list_trainable_variables()",
"def __init__(self, embed_size):\n super(Encoder, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules) \n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)",
"def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)",
"def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.pooling = nn.MaxPool2d(2,stride = 2)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()",
"def build(self):\n self.build_inputs()\n self.build_word_embeddings()\n self.build_encoder()\n self.build_fc()\n self.build_loss()\n self.build_global_step()",
"def __init__(self, name, config):\n super(RelationalNetwork, self).__init__(name, RelationalNetwork, config)\n\n # Get key mappings.\n self.key_feature_maps = self.stream_keys[\"feature_maps\"]\n self.key_question_encodings = self.stream_keys[\"question_encodings\"]\n self.key_outputs = self.stream_keys[\"outputs\"]\n\n # Retrieve input sizes from globals.\n self.feature_maps_height = self.globals[\"feature_maps_height\"]\n self.feature_maps_width = self.globals[\"feature_maps_width\"]\n self.feature_maps_depth = self.globals[\"feature_maps_depth\"]\n self.question_encoding_size = self.globals[\"question_encoding_size\"]\n \n # Create \"object\" coordinates.\n self.obj_coords = []\n for h in range(self.feature_maps_height):\n for w in range(self.feature_maps_width):\n self.obj_coords.append((h,w))\n\n # Calculate input size to the g_theta: two \"objects\" + question (+ optionally: image size)\n input_size = 2 * self.feature_maps_depth + self.question_encoding_size\n\n # Create the module list.\n modules = []\n\n # Retrieve dropout rate value - if set, will put dropout between every layer.\n dropout_rate = self.config[\"dropout_rate\"]\n\n # Create the model, i.e. the \"relational\" g_theta network.\n g_theta_sizes = self.config[\"g_theta_sizes\"]\n if type(g_theta_sizes) == list and len(g_theta_sizes) > 1:\n # First input dim.\n input_dim = input_size\n for hidden_dim in g_theta_sizes:\n # Add linear layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n # Add activation and dropout.\n modules.append( torch.nn.ReLU() )\n if (dropout_rate > 0):\n modules.append( torch.nn.Dropout(dropout_rate) )\n # Remember input dim of next layer.\n input_dim = hidden_dim\n\n # Add output layer.\n modules.append( torch.nn.Linear(input_dim, hidden_dim) )\n\n self.logger.info(\"Created g_theta network with {} layers\".format(len(g_theta_sizes)+1))\n\n else:\n raise ConfigurationError(\"'g_theta_sizes' must contain a list with numbers of neurons in g_theta layers (currently {})\".format(self.hidden_sizes))\n\n # Export output_size to globals.\n self.output_size = g_theta_sizes[-1]\n self.globals[\"output_size\"] = self.output_size\n\n # Finally create the sequential model out of those modules.\n self.g_theta = torch.nn.Sequential(*modules)",
"def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n 
resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)",
"def _build_representation_layer(self,\n input_question_word,\n input_question_word_mask,\n input_question_subword,\n input_question_subword_mask,\n input_question_char,\n input_question_char_mask,\n input_context_word,\n input_context_word_mask,\n input_context_subword,\n input_context_subword_mask,\n input_context_char,\n input_context_char_mask): \n word_vocab_size = self.hyperparams.data_word_vocab_size\n word_embed_dim = self.hyperparams.model_representation_word_embed_dim\n word_dropout = self.hyperparams.model_representation_word_dropout if self.mode == \"train\" else 0.0\n word_embed_pretrained = self.hyperparams.model_representation_word_embed_pretrained\n word_feat_trainable = self.hyperparams.model_representation_word_feat_trainable\n word_feat_enable = self.hyperparams.model_representation_word_feat_enable\n subword_vocab_size = self.hyperparams.data_subword_vocab_size\n subword_embed_dim = self.hyperparams.model_representation_subword_embed_dim\n subword_unit_dim = self.hyperparams.model_representation_subword_unit_dim\n subword_feat_trainable = self.hyperparams.model_representation_subword_feat_trainable\n subword_window_size = self.hyperparams.model_representation_subword_window_size\n subword_hidden_activation = self.hyperparams.model_representation_subword_hidden_activation\n subword_dropout = self.hyperparams.model_representation_subword_dropout if self.mode == \"train\" else 0.0\n subword_pooling_type = self.hyperparams.model_representation_subword_pooling_type\n subword_feat_enable = self.hyperparams.model_representation_subword_feat_enable\n char_vocab_size = self.hyperparams.data_char_vocab_size\n char_embed_dim = self.hyperparams.model_representation_char_embed_dim\n char_unit_dim = self.hyperparams.model_representation_char_unit_dim\n char_feat_trainable = self.hyperparams.model_representation_char_feat_trainable\n char_window_size = self.hyperparams.model_representation_char_window_size\n char_hidden_activation = self.hyperparams.model_representation_char_hidden_activation\n char_dropout = self.hyperparams.model_representation_char_dropout if self.mode == \"train\" else 0.0\n char_pooling_type = self.hyperparams.model_representation_char_pooling_type\n char_feat_enable = self.hyperparams.model_representation_char_feat_enable\n fusion_type = self.hyperparams.model_representation_fusion_type\n fusion_num_layer = self.hyperparams.model_representation_fusion_num_layer\n fusion_unit_dim = self.hyperparams.model_representation_fusion_unit_dim\n fusion_hidden_activation = self.hyperparams.model_representation_fusion_hidden_activation\n fusion_dropout = self.hyperparams.model_representation_fusion_dropout if self.mode == \"train\" else 0.0\n fusion_trainable = self.hyperparams.model_representation_fusion_trainable\n \n with tf.variable_scope(\"representation\", reuse=tf.AUTO_REUSE):\n input_question_feat_list = []\n input_question_feat_mask_list = []\n input_context_feat_list = []\n input_context_feat_mask_list = []\n \n if word_feat_enable == True:\n self.logger.log_print(\"# build word-level representation layer\")\n word_feat_layer = WordFeat(vocab_size=word_vocab_size, embed_dim=word_embed_dim,\n dropout=word_dropout, pretrained=word_embed_pretrained, embedding=self.word_embedding,\n num_gpus=self.num_gpus, default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=word_feat_trainable)\n \n (input_question_word_feat,\n input_question_word_feat_mask) = word_feat_layer(input_question_word, input_question_word_mask)\n 
(input_context_word_feat,\n input_context_word_feat_mask) = word_feat_layer(input_context_word, input_context_word_mask)\n \n input_question_feat_list.append(input_question_word_feat)\n input_question_feat_mask_list.append(input_question_word_feat_mask)\n input_context_feat_list.append(input_context_word_feat)\n input_context_feat_mask_list.append(input_context_word_feat_mask)\n \n word_unit_dim = word_embed_dim\n self.word_embedding_placeholder = word_feat_layer.get_embedding_placeholder()\n else:\n word_unit_dim = 0\n self.word_embedding_placeholder = None\n \n if subword_feat_enable == True:\n self.logger.log_print(\"# build subword-level representation layer\")\n subword_feat_layer = SubwordFeat(vocab_size=subword_vocab_size, embed_dim=subword_embed_dim,\n unit_dim=subword_unit_dim, window_size=subword_window_size, hidden_activation=subword_hidden_activation,\n pooling_type=subword_pooling_type, dropout=subword_dropout, num_gpus=self.num_gpus,\n default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=subword_feat_trainable)\n \n (input_question_subword_feat,\n input_question_subword_feat_mask) = subword_feat_layer(input_question_subword, input_question_subword_mask)\n (input_context_subword_feat,\n input_context_subword_feat_mask) = subword_feat_layer(input_context_subword, input_context_subword_mask)\n \n input_question_feat_list.append(input_question_subword_feat)\n input_question_feat_mask_list.append(input_question_subword_feat_mask)\n input_context_feat_list.append(input_context_subword_feat)\n input_context_feat_mask_list.append(input_context_subword_feat_mask)\n else:\n subword_unit_dim = 0\n \n if char_feat_enable == True:\n self.logger.log_print(\"# build char-level representation layer\")\n char_feat_layer = CharFeat(vocab_size=char_vocab_size, embed_dim=char_embed_dim,\n unit_dim=char_unit_dim, window_size=char_window_size, hidden_activation=char_hidden_activation,\n pooling_type=char_pooling_type, dropout=char_dropout, num_gpus=self.num_gpus,\n default_gpu_id=self.default_gpu_id, regularizer=self.regularizer,\n random_seed=self.random_seed, trainable=char_feat_trainable)\n \n (input_question_char_feat,\n input_question_char_feat_mask) = char_feat_layer(input_question_char, input_question_char_mask)\n (input_context_char_feat,\n input_context_char_feat_mask) = char_feat_layer(input_context_char, input_context_char_mask)\n \n input_question_feat_list.append(input_question_char_feat)\n input_question_feat_mask_list.append(input_question_char_feat_mask)\n input_context_feat_list.append(input_context_char_feat)\n input_context_feat_mask_list.append(input_context_char_feat_mask)\n else:\n char_unit_dim = 0\n \n feat_unit_dim = word_unit_dim + subword_unit_dim + char_unit_dim\n \n feat_fusion_layer = self._create_fusion_layer(feat_unit_dim, fusion_unit_dim,\n fusion_type, fusion_num_layer, fusion_hidden_activation, fusion_dropout,\n self.num_gpus, self.default_gpu_id, self.regularizer, self.random_seed, fusion_trainable)\n input_question_feat, input_question_feat_mask = self._build_fusion_result(input_question_feat_list,\n input_question_feat_mask_list, feat_fusion_layer)\n input_context_feat, input_context_feat_mask = self._build_fusion_result(input_context_feat_list,\n input_context_feat_mask_list, feat_fusion_layer)\n \n return input_question_feat, input_question_feat_mask, input_context_feat, input_context_feat_mask",
"def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n # model_dict = model.state_dict()\n\n if pretrained:\n # pretrained_dict=model_zoo.load_url(model_urls['resnet34'],model_dir='/home/FENGsl/JBHI/Pretrain_model')\n # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n # model_dict.update(pretrained_dict)\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='/home/FENGsl/JBHI/Pretrain_model'))\n print('===> Pretrain Model Have Been Loaded, Please fasten your seat belt and get ready to take off!')\n return model",
"def __init__(self, backbone_name, config):\n\n backbone_config = Schema(\n {\n Required(\"input_shape\"): Schema((int, int, int)),\n Required(\"include_top\"): bool,\n Required(\"weights\"): str,\n Optional(\"alpha\"): float,\n }\n )\n\n config = backbone_config(config)\n\n if backbone_name == \"MobileNetV2\":\n self.model = tf.keras.applications.MobileNetV2(**config)\n elif backbone_name == \"ResNet50\":\n self.model = tf.keras.applications.ResNet50(**config)\n elif backbone_name == \"InceptionV3\":\n self.model = tf.keras.applications.InceptionV3(**config)\n\n # Remove Layers until Conv4\n for i, layer in enumerate(reversed(self.model.layers)):\n if backbone_name == \"ResNet50\" and layer._name == \"conv4_block6_out\":\n break\n elif (\n backbone_name == \"MobileNetV2\" and layer._name == \"block_13_expand_relu\"\n ):\n break\n else:\n self.model._layers.pop()\n\n self.model.layers[-1]._name = \"feature_map\"\n\n self.model = Model(\n self.model.input, self.model.layers[-1].output, name=\"Backbone\"\n )",
"def build(self, mode, config):\n assert mode in ['training', 'inference']\n\n # Image size must be dividable by 2 multiple times\n h, w = config.IMAGE_SHAPE[:2]\n if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):\n raise Exception(\"Image size must be dividable by 2 at least 6 times \"\n \"to avoid fractions when downscaling and upscaling.\"\n \"For example, use 256, 320, 384, 448, 512, ... etc. \")\n\n # Inputs\n input_image = KL.Input(\n shape=[None, None, 3], name=\"input_image\")\n input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],\n name=\"input_image_meta\")\n if mode == \"training\":\n pass\n elif mode == \"inference\":\n # Anchors in normalized coordinates\n input_anchors = KL.Input(shape=[None, 4], name=\"input_anchors\")\n\n # Build the shared convolutional layers.\n # Bottom-up Layers\n # Returns a list of the last layers of each stage, 5 in total.\n # Don't create the thead (stage 5), so we pick the 4th item in the list.\n _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,\n stage5=True, train_bn=config.TRAIN_BN)\n # Top-down Layers\n # TODO: add assert to varify feature map sizes match what's in config\n P5 = KL.Conv2D(256, (1, 1), name='fpn_c5p5')(C5)\n P4 = KL.Add(name=\"fpn_p4add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p5upsampled\")(P5),\n KL.Conv2D(256, (1, 1), name='fpn_c4p4')(C4)])\n P3 = KL.Add(name=\"fpn_p3add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p4upsampled\")(P4),\n KL.Conv2D(256, (1, 1), name='fpn_c3p3')(C3)])\n P2 = KL.Add(name=\"fpn_p2add\")([\n KL.UpSampling2D(size=(2, 2), name=\"fpn_p3upsampled\")(P3),\n KL.Conv2D(256, (1, 1), name='fpn_c2p2')(C2)])\n # Attach 3x3 conv to all P layers to get the final feature maps.\n P2 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p2\")(P2)\n P3 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p3\")(P3)\n P4 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p4\")(P4)\n P5 = KL.Conv2D(256, (3, 3), padding=\"SAME\", name=\"fpn_p5\")(P5)\n # P6 is used for the 5th anchor scale in RPN. Generated by\n # subsampling from P5 with stride of 2.\n P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name=\"fpn_p6\")(P5)\n\n # Note that P6 is used in RPN, but not in the classifier heads.\n rpn_feature_maps = [P2, P3, P4, P5, P6]\n mrcnn_feature_maps = [P2, P3, P4, P5]\n\n # Anchors\n if mode == \"training\":\n anchors = self.get_anchors(config.IMAGE_SHAPE)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)\n # A hack to get around Keras's bad support for constants\n anchors = KL.Lambda(lambda x: tf.Variable(anchors), name=\"anchors\")(input_image)\n else:\n anchors = input_anchors\n\n # RPN Model\n rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,\n len(config.RPN_ANCHOR_RATIOS), 256)\n # Loop through pyramid layers\n layer_outputs = [] # list of lists\n for p in rpn_feature_maps:\n layer_outputs.append(rpn([p]))\n # Concatenate layer outputs\n # Convert from list of lists of level outputs to list of lists\n # of outputs across levels.\n # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n pass\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in \n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n return model",
"def DontuseThis():\n BCM_outputs = ['phi','rho','theta',\n 'r_probabilityMaps','l_probabilityMaps',\n 'models']\n BCM_Models = pe.Node(interface=nio.DataGrabber(input_names=['structures'],\n outfields=BCM_outputs),\n name='10_BCM_Models')\n BCM_Models.inputs.base_directory = atlas_fname_wpath\n BCM_Models.inputs.template_args['phi'] = [['spatialImages','phi','nii.gz']]\n BCM_Models.inputs.template_args['rho'] = [['spatialImages','rho','nii.gz']]\n BCM_Models.inputs.template_args['theta'] = [['spatialImages','theta','nii.gz']]\n BCM_Models.inputs.template_args['r_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['l_probabilityMaps'] = [['structures']]\n BCM_Models.inputs.template_args['models'] = [['structures']]\n\n BRAINSCut_structures = ['caudate','thalamus','putamen','hippocampus']\n #BRAINSCut_structures = ['caudate','thalamus']\n BCM_Models.iterables = ( 'structures', BRAINSCut_structures )\n BCM_Models.inputs.template = '%s/%s.%s'\n BCM_Models.inputs.field_template = dict(\n r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',\n l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',\n models='modelFiles/%sModel*',\n )\n\n \"\"\"\n The xml creation and BRAINSCut need to be their own mini-pipeline that gets\n executed once for each of the structures in BRAINSCut_structures. This can be\n accomplished with a map node and a new pipeline.\n \"\"\"\n \"\"\"\n Create xml file for BRAINSCut\n \"\"\"\n\n\n BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),name=\"BFitAtlasToSubject\")\n BFitAtlasToSubject.inputs.costMetric=\"MMI\"\n BFitAtlasToSubject.inputs.maskProcessingMode=\"ROI\"\n BFitAtlasToSubject.inputs.numberOfSamples=100000\n BFitAtlasToSubject.inputs.numberOfIterations=[1500,1500]\n BFitAtlasToSubject.inputs.numberOfHistogramBins=50\n BFitAtlasToSubject.inputs.maximumStepLength=0.2\n BFitAtlasToSubject.inputs.minimumStepLength=[0.005,0.005]\n BFitAtlasToSubject.inputs.transformType= [\"Affine\",\"BSpline\"]\n BFitAtlasToSubject.inputs.maxBSplineDisplacement= 7\n BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter=65\n BFitAtlasToSubject.inputs.splineGridSize=[28,20,24]\n BFitAtlasToSubject.inputs.outputVolume=\"Trial_Initializer_Output.nii.gz\"\n BFitAtlasToSubject.inputs.outputTransform=\"Trial_Initializer_Output.mat\"\n cutWF.connect(SplitAvgBABC,'avgBABCT1',BFitAtlasToSubject,'fixedVolume')\n cutWF.connect(BABC,'outputLabels',BFitAtlasToSubject,'fixedBinaryVolume')\n cutWF.connect(BAtlas,'template_t1',BFitAtlasToSubject,'movingVolume')\n cutWF.connect(BAtlas,'template_brain',BFitAtlasToSubject,'movingBinaryVolume')\n cutWF.connect(BLI,'outputTransformFilename',BFitAtlasToSubject,'initialTransform')\n\n CreateBRAINSCutXML = pe.Node(Function(input_names=['rho','phi','theta',\n 'model',\n 'r_probabilityMap',\n 'l_probabilityMap',\n 'atlasT1','atlasBrain',\n 'subjT1','subjT2',\n 'subjT1GAD','subjT2GAD',\n 'subjSGGAD','subjBrain',\n 'atlasToSubj','output_dir'],\n output_names=['xml_filename','rl_structure_filename_list'],\n function = create_BRAINSCut_XML),\n overwrite = True,\n name=\"CreateBRAINSCutXML\")\n\n ## HACK Makde better directory\n CreateBRAINSCutXML.inputs.output_dir = \".\" #os.path.join(cutWF.base_dir, \"BRAINSCut_output\")\n cutWF.connect(BCM_Models,'models',CreateBRAINSCutXML,'model')\n cutWF.connect(BCM_Models,'rho',CreateBRAINSCutXML,'rho')\n cutWF.connect(BCM_Models,'phi',CreateBRAINSCutXML,'phi')\n cutWF.connect(BCM_Models,'theta',CreateBRAINSCutXML,'theta')\n 
cutWF.connect(BCM_Models,'r_probabilityMaps',CreateBRAINSCutXML,'r_probabilityMap')\n cutWF.connect(BCM_Models,'l_probabilityMaps',CreateBRAINSCutXML,'l_probabilityMap')\n cutWF.connect(BAtlas,'template_t1',CreateBRAINSCutXML,'atlasT1')\n cutWF.connect(BAtlas,'template_brain',CreateBRAINSCutXML,'atlasBrain')\n cutWF.connect(SplitAvgBABC,'avgBABCT1',CreateBRAINSCutXML,'subjT1')\n cutWF.connect(SplitAvgBABC,'avgBABCT2',CreateBRAINSCutXML,'subjT2')\n cutWF.connect(GADT1,'outputVolume',CreateBRAINSCutXML,'subjT1GAD')\n cutWF.connect(GADT2,'outputVolume',CreateBRAINSCutXML,'subjT2GAD')\n cutWF.connect(SGI,'outputFileName',CreateBRAINSCutXML,'subjSGGAD')\n cutWF.connect(BABC,'outputLabels',CreateBRAINSCutXML,'subjBrain')\n cutWF.connect(BFitAtlasToSubject,'outputTransform',CreateBRAINSCutXML,'atlasToSubj')\n #CreateBRAINSCutXML.inputs.atlasToSubj = \"INTERNAL_REGISTER.mat\"\n #cutWF.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')\n\n \"\"\"\n ResampleNACLabels\n \"\"\"\n ResampleAtlasNACLabels=pe.Node(interface=BRAINSResample(),name=\"ResampleAtlasNACLabels\")\n ResampleAtlasNACLabels.inputs.interpolationMode = \"NearestNeighbor\"\n ResampleAtlasNACLabels.inputs.outputVolume = \"atlasToSubjectNACLabels.nii.gz\"\n\n cutWF.connect(cutWF,'OutputSpec.atlasToSubjectTransform',ResampleAtlasNACLabels,'warpTransform')\n cutWF.connect(cutWF,'OutputSpec.t1_corrected',ResampleAtlasNACLabels,'referenceVolume')\n cutWF.connect(BAtlas,'template_nac_lables',ResampleAtlasNACLabels,'inputVolume')\n\n \"\"\"\n BRAINSMush\n \"\"\"\n BMUSH=pe.Node(interface=BRAINSMush(),name=\"BMUSH\")\n BMUSH.inputs.outputVolume = \"MushImage.nii.gz\"\n BMUSH.inputs.outputMask = \"MushMask.nii.gz\"\n BMUSH.inputs.lowerThresholdFactor = 1.2\n BMUSH.inputs.upperThresholdFactor = 0.55\n\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BMUSH,'inputFirstVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.t2_corrected',BMUSH,'inputSecondVolume')\n cutWF.connect(myLocalTCWF,'OutputSpec.outputLabels',BMUSH,'inputMaskVolume')\n\n \"\"\"\n BRAINSROIAuto\n \"\"\"\n BROI = pe.Node(interface=BRAINSROIAuto(), name=\"BRAINSROIAuto\")\n BROI.inputs.closingSize=12\n BROI.inputs.otsuPercentileThreshold=0.01\n BROI.inputs.thresholdCorrectionFactor=1.0\n BROI.inputs.outputROIMaskVolume = \"temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz\"\n cutWF.connect(myLocalTCWF,'OutputSpec.t1_corrected',BROI,'inputVolume')\n\n \"\"\"\n Split the implicit outputs of BABCext\n \"\"\"\n SplitAvgBABC = pe.Node(Function(input_names=['in_files','T1_count'], output_names=['avgBABCT1','avgBABCT2'],\n function = get_first_T1_and_T2), run_without_submitting=True, name=\"99_SplitAvgBABC\")\n SplitAvgBABC.inputs.T1_count = 1 ## There is only 1 average T1 image.\n\n cutWF.connect(myLocalTCWF,'OutputSpec.outputAverageImages',SplitAvgBABC,'in_files')\n\n\n\n def printFullPath(outFileFullPath):\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"=\"*80)\n print(\"{0}\".format(outFileFullPath))\n return outFileFullPath\n printOutImage = pe.Node( Function(function=printFullPath, input_names = ['outFileFullPath'], output_names = ['genoutFileFullPath']), run_without_submitting=True, name=\"99_printOutImage\")\n cutWF.connect( GADT2, 'outputVolume', printOutImage, 'outFileFullPath' )",
"def build_model(cls, args, task):\n\n # make sure all arguments are present in older models\n base_architecture(args)\n\n if getattr(args, \"max_source_positions\", None) is None:\n args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS\n\n args.ddp_rank = distributed_utils.get_data_parallel_rank()\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\"--share-all-embeddings requires a joined dictionary\")\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n \"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim\"\n )\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path\n ):\n raise ValueError(\n \"--share-all-embeddings not compatible with --decoder-embed-path\"\n )\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n else:\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, args.encoder_embed_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = cls.build_embedding(\n args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path\n )\n if getattr(args, \"offload_activations\", False):\n args.checkpoint_activations = True # offloading implies checkpointing\n\n encoder_embed_positions = (\n PositionalEmbedding(\n args.max_source_positions,\n args.encoder_embed_dim,\n src_dict.pad(),\n learned=args.encoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n decoder_embed_positions = (\n PositionalEmbedding(\n args.max_target_positions,\n args.decoder_embed_dim,\n tgt_dict.pad(),\n learned=args.decoder_learned_pos,\n )\n if not args.no_token_positional_embeddings\n else None\n )\n\n if args.share_decoder_input_output_embed:\n output_projection = torch.nn.Linear(\n decoder_embed_tokens.weight.shape[1],\n decoder_embed_tokens.weight.shape[0],\n bias=False,\n )\n output_projection.weight = decoder_embed_tokens.weight\n else:\n output_projection = torch.nn.Linear(\n args.decoder_embed_dim, len(tgt_dict), bias=False\n )\n torch.nn.init.normal_(\n output_projection.weight, mean=0, std=args.decoder_embed_dim**-0.5\n )\n\n encoder = cls.build_encoder(\n args,\n encoder_embed_tokens,\n encoder_embed_positions,\n src_dict,\n )\n decoder = cls.build_decoder(\n args,\n decoder_embed_tokens,\n decoder_embed_positions,\n output_projection,\n tgt_dict,\n )\n\n if not args.share_all_embeddings:\n min_params_to_wrap = getattr(\n args, \"min_params_to_wrap\", DEFAULT_MIN_PARAMS_TO_WRAP\n )\n # fsdp_wrap is a no-op when --ddp-backend != fully_sharded\n encoder = fsdp_wrap(encoder, min_num_params=min_params_to_wrap)\n decoder = fsdp_wrap(decoder, min_num_params=min_params_to_wrap)\n return cls(args, encoder, decoder)",
"def build_model(cls, args):\n base_architecture(args) \n return StyleGANGeneratorPretrain(args)",
"def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train",
"def build_model():",
"def build(config):",
"def __init__(self, config: Dict[str, Any], initPretrainedWeights: bool = True):\n\n super().__init__()\n if initPretrainedWeights:\n self.transformer = AutoModel.from_pretrained(config[\"transformer_type\"], cache_dir=config[\"cache\"])\n else:\n self.transformer = AutoModel.from_config(\n AutoConfig.from_pretrained(config[\"transformer_type\"], cache_dir=config[\"cache\"]))\n\n self.startEndProjection = torch.nn.Linear(self.transformer.config.hidden_size, 2, bias=False)\n\n self.selectedProjection = torch.nn.Linear(self.transformer.config.hidden_size, 1, bias=False)\n\n self.config = config\n\n self.init_weights()",
"def build_models(config):\n inputs = Input(shape=(config['patch_height'], config['patch_width'], config['depth'], config['channel']),\n name='patchimg')\n\n kernelinitfun = keras.initializers.RandomNormal(mean=0.0, stddev=0.1, seed=None)\n activationfun = 'relu'\n # kernelinitfun = 'glorot_normal'\n\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv1_1')(inputs)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv1_2')(x)\n # x = Dropout(0.3)(x)\n x = BatchNormalization(name='bn1')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp1', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv2_1')(x)\n x = Conv3D(32, (3, 3, 3), padding='same', kernel_initializer=kernelinitfun, name='conv2_2')(x)\n # x = Dropout(0.2)(x)\n x = BatchNormalization(name='bn2')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp2', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv3_1')(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv3_2')(x)\n # x = Dropout(0.5)(x)\n x = BatchNormalization(name='bn3')(x)\n x = Activation(activationfun)(x)\n x = MaxPooling3D(name='mp3', strides=(2, 2, 1))(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv4_1')(x)\n x = Conv3D(32, (3, 3, 3), strides=(1, 1, 1), padding='same', kernel_initializer=kernelinitfun, name='conv4_2')(x)\n # x = Dropout(0.5)(x)\n x = BatchNormalization(name='bn4')(x)\n x = Activation(activationfun)(x)\n\n x4 = Flatten(name='aux_fx')(x)\n\n source_classifier = Dropout(0.5)(x4)\n source_classifier = Dense(512, activation='softmax', name=\"mo1\")(source_classifier)\n source_classifier = Dropout(0.5)(source_classifier)\n source_classifier = Dense(128, activation='softmax', name=\"mo2\")(source_classifier)\n # source_classifier = Dropout(0.3)(source_classifier)\n source_classifier = Dense(1, name=\"mo\")(source_classifier)\n\n domain_classifier = Dense(32, activation='linear', name=\"do4\")(x4)\n domain_classifier = BatchNormalization(name=\"do5\")(domain_classifier)\n domain_classifier = Activation(\"elu\", name=\"do6\")(domain_classifier)\n domain_classifier = Dropout(0.5)(domain_classifier)\n\n domain_classifier = Dense(2, activation='softmax', name=\"do\")(domain_classifier)\n\n adamop = keras.optimizers.Adam(learning_rate=1e-5, beta_1=0.9, beta_2=0.999, amsgrad=False)\n comb_model = Model(inputs=inputs, outputs=[source_classifier, domain_classifier])\n comb_model.compile(optimizer=adamop,\n loss={'mo': 'mae', 'do': 'categorical_crossentropy'},\n loss_weights={'mo': 1, 'do': 2}, metrics=['accuracy'], )\n\n source_classification_model = Model(inputs=inputs, outputs=[source_classifier])\n source_classification_model.compile(optimizer=adamop,\n loss={'mo': 'mae'}, metrics=['accuracy'], )\n\n domain_classification_model = Model(inputs=inputs, outputs=[domain_classifier])\n domain_classification_model.compile(optimizer=adamop,\n loss={'do': 'categorical_crossentropy'}, metrics=['accuracy'])\n\n embeddings_model = Model(inputs=inputs, outputs=[x4])\n embeddings_model.compile(optimizer=adamop, loss='categorical_crossentropy', metrics=['accuracy'])\n\n return comb_model, source_classification_model, domain_classification_model, embeddings_model"
] | [
"0.6338696",
"0.62638956",
"0.62531716",
"0.6237589",
"0.6195611",
"0.6154484",
"0.60957676",
"0.6093641",
"0.6082765",
"0.6067601",
"0.6043595",
"0.6010979",
"0.6000153",
"0.5991751",
"0.59515923",
"0.59420633",
"0.59154165",
"0.58285683",
"0.58096284",
"0.58037734",
"0.5793482",
"0.57884026",
"0.57841206",
"0.57715225",
"0.5762704",
"0.57610136",
"0.5755669",
"0.5748564",
"0.57293427",
"0.57273597"
] | 0.66462153 | 0 |
Return the data for the supplied model with or without auxiliary data from the model. The model is needed as the order of the data depends on the order of the channels in the model. | def data(self, model, with_aux=True):
try:
observed_data = sum(
(self.observations[c] for c in model.config.channels), []
)
except KeyError:
log.error(
"Invalid channel: the workspace does not have observation data for one of the channels in the model."
)
raise
if with_aux:
observed_data += model.config.auxdata
return observed_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_input_data_for_model(self, extra_data=None):\n extra_data = {} if extra_data is None else extra_data\n if self.metadata['sample_rate'] is not None:\n if self.audio_signal.sample_rate != self.metadata['sample_rate']:\n self.audio_signal.resample(self.metadata['sample_rate'])\n\n self.audio_signal.stft_params = self.metadata['stft_params']\n self.audio_signal.stft()\n\n data = {'mix': self.audio_signal}\n data.update(extra_data)\n data = self.transform(data)\n\n for key in data:\n if torch.is_tensor(data[key]):\n data[key] = data[key].unsqueeze(0).to(self.device).float()\n if self.metadata['num_channels'] == 1:\n # then each channel is processed indep\n data[key] = data[key].transpose(0, self.channel_dim)\n self.input_data = data\n return self.input_data",
"def get_model_data(self):\n sen_info=self._get_sensor_info()\n cam_info=lib.is_GetCameraInfo(self.hcam)\n dll_ver=lib.is_GetDLLVersion()\n dll_ver=\"{}.{}.{}\".format((dll_ver>>24),(dll_ver>>16)&0xFF,dll_ver&0xFFFF)\n return self.ModelData(py3.as_str(sen_info.strSensorName),py3.as_str(cam_info.ID),py3.as_str(cam_info.SerNo),py3.as_str(cam_info.Version),\n py3.as_str(cam_info.Date),dll_ver,cam_info.Type)",
"def read_data_model(self, input_model):\n # The info4oif_dict will get pickled to disk when we write txt files of results.\n # That way we don't drag in objects like instrument_data into code that reads text results\n # and writes oifits files - a simple built-in dictionary is the only object used in this transfer.\n self.telname = \"JWST\"\n\n # To use ami_sim's eg 65.6 mas/pixel scale we hardcode it here.,,\n pscalex_deg = 65.6 / (1000 * 60 * 60)\n pscaley_deg = 65.6 / (1000 * 60 * 60)\n\n # Whatever we did set is averaged for isotropic pixel scale here\n self.pscale_mas = 0.5 * (pscalex_deg + pscaley_deg) * (60 * 60 * 1000)\n self.pscale_rad = utils.mas2rad(self.pscale_mas)\n self.mask = NRM_mask_definitions(maskname=self.arrname, chooseholes=self.chooseholes,\n holeshape=self.holeshape)\n\n return input_model.data",
"def get_model_data(self, oc): # FROM DB\r\n model_data = self.db_adapter.get_model_by_oc(oc)\r\n\r\n model, features, dummies = (None, None, None)\r\n if model_data:\r\n model = model_data['model']\r\n features = model_data['features']\r\n dummies = model_data['dummies']\r\n return (model, features, dummies)",
"def get_data_and_model_samples(self):\n model_samples = (\n self.net_.sample_fantasy(\n x=self.model_samples_[-1],\n num_mc_steps=self.num_sample_mc_steps,\n beta=self.sample_beta,\n mc_dynamics=self.sampler,\n )\n .detach()\n .cpu()\n .numpy()\n )\n data_sample_ixs = torch.randint(\n 0, self.samples.shape[0], size=(model_samples.shape[0],)\n )\n data_samples = self.samples[data_sample_ixs, ...]\n return data_samples, model_samples",
"def get_dataset_info(model):\n instrume = model.meta.instrument.name\n frame_time = model.meta.exposure.frame_time\n ngroups = model.meta.exposure.ngroups\n group_time = model.meta.exposure.group_time\n\n n_int = model.data.shape[0]\n nreads = model.data.shape[1]\n asize2 = model.data.shape[2]\n asize1 = model.data.shape[3]\n\n # If nreads and ngroups are not the same, override the value of ngroups\n # with nreads, which is more likely to be correct, since it's based on\n # the image shape.\n if nreads != ngroups:\n log.warning('The value from the key NGROUPS does not (but should) match')\n log.warning(' the value of nreads from the data; will use value of')\n log.warning(' nreads: %s' % (nreads ))\n ngroups = nreads\n\n npix = asize2 * asize1 # number of pixels in 2D array\n imshape = (asize2, asize1)\n cubeshape = (nreads,) + imshape\n\n return nreads, npix, imshape, cubeshape, n_int, instrume, frame_time, \\\n ngroups, group_time",
"def getModelData(self, model, columns=None):\n if not model in self._models:\n raise (\n PE.PyAValError(\n \"No model with parameters: \" + str(model),\n solution=\"Use, e.g., `getAvailableValues` to check what is available.\",\n )\n )\n if columns is None:\n columns = list(self.nameToCol.keys())\n # Check validity of column names\n for col in columns:\n if not col in self.nameToCol.keys():\n raise (\n PE.PyAValError(\n \"No column named '\" + str(col) + \"'.\",\n solution=\"Choose one of: \" + str(self.nameToCol.keys()),\n where=\"getModel\",\n )\n )\n # There is a valid model, return it as a recarray\n dt = []\n for col in columns:\n dt.append((col, float))\n result = np.recarray(shape=(len(self._models[model]),), dtype=dt)\n for col in columns:\n result[col] = self.dat[self._models[model], self.nameToCol[col]]\n return result",
"def get_model_and_tile_weights(model):\n weight = model.weight.data.detach().cpu().numpy()\n bias = model.bias.data.detach().cpu().numpy()\n analog_weight, analog_bias = model.analog_tile.get_weights()\n analog_weight = analog_weight.detach().cpu().numpy().reshape(weight.shape)\n analog_bias = analog_bias.detach().cpu().numpy()\n return weight, bias, analog_weight, analog_bias",
"def _get_data_dict(dataset_model):\n header = dataset_model.to_flat_dict(include_arrays=False)\n return _clean_flat_dict(header)",
"def get_twolayer_model(threelayer_model):\n\n threelayer_refl = photon_transport.forward_model.get_reflectance(threelayer_model)\n two_layer_model = [dict(threelayer_model[0]), dict(threelayer_model[1])]\n two_layer_model[1]['mua'] = inverse.estimate_muad(two_layer_model, threelayer_refl).copy()\n two_layer_model[1]['d'] = 1.0\n return two_layer_model",
"def get_unprocessed_data(self, how_many, model_settings, mode):\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = how_many\n desired_samples = model_settings['desired_samples']\n words_list = self.words_list\n data = np.zeros((sample_count, desired_samples))\n labels = []\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n foreground_volume_placeholder = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n foreground_volume_placeholder)\n for i in range(sample_count):\n if how_many == -1:\n sample_index = i\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n input_dict = {wav_filename_placeholder: sample['file']}\n if sample['label'] == SILENCE_LABEL:\n input_dict[foreground_volume_placeholder] = 0\n else:\n input_dict[foreground_volume_placeholder] = 1\n data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()\n label_index = self.word_to_index[sample['label']]\n labels.append(words_list[label_index])\n return data, labels",
"def get_model_data(request):\n modelname = request.matchdict['modelname']\n # Check that model is defined\n exists = db_model_definition(request.db)[modelname]\n if not exists:\n raise NotFound(\"Unknown model %s\" % modelname)\n # Return array of records\n results = db_model_data(request.db)[modelname]\n # TODO: should we transmit uuids or keep them secret for editing\n data = [result.value for result in results]\n return {'data': data}",
"def get_model_data_for_trace(self, trace):\n pass",
"def may_data_parallel(model):\n if torch.cuda.device_count() > 1:\n model = TransparentDataParallel(model)\n return model",
"def read_simulation_model_and_data(self, model, data, index):\n var_id = read_var_table_as_id(self.dismod_file)\n sim_model = read_simulation_model(self.dismod_file, model, var_id, index)\n sim_data = read_simulation_data(self.dismod_file, data, index)\n return sim_model, sim_data",
"def model(self, model_num = 0):\n return self.struct[model_num]",
"def generate_data(model, n_batches=5, samples_per_batch=200):\n\tcalcium, spec, yn_samples, yb_samples = [], [], [], []\n\twith torch.no_grad():\n\t\tfor i in range(n_batches):\n\t\t\tz_sample = torch.randn(samples_per_batch,1,1,model.z_dim).to(model.device)\n\t\t\tyn_sample = model.sample_yn(z_sample)\n\t\t\tyb_sample = model.sample_yb(z_sample)\n\t\t\tc_μ = model.decode_calcium(yn_sample).squeeze()\n\t\t\tif model.model_type == 'sparse_poe_finch':\n\t\t\t\tspec_rec, _ = model.decode_spec(yb_sample)\n\t\t\telse:\n\t\t\t\tspec_rec = model.decode_spec(yb_sample)\n\t\t\tspec_rec = spec_rec.squeeze()\n\t\t\tcalcium.append(c_μ)\n\t\t\tspec.append(spec_rec)\n\t\t\tyn_samples.append(yn_sample.squeeze(1).squeeze(1))\n\t\t\tyb_samples.append(yb_sample.squeeze(1).squeeze(1))\n\t\tcalcium = torch.cat(calcium, dim=0).detach().cpu().numpy()\n\t\tspec = torch.cat(spec, dim=0).detach().cpu().numpy()\n\t\tyn_samples = torch.cat(yn_samples, dim=0).detach().cpu().numpy()\n\t\tyb_samples = torch.cat(yb_samples, dim=0).detach().cpu().numpy()\n\treturn calcium, spec, yn_samples, yb_samples",
"def get_plot_data(self, model: str) -> dict:\n if model not in self._plot_data:\n raise ModelNotAssociatedError(\"{m} does not have any plot data associated with it in this \"\n \"spectrum\".format(m=model))\n\n return self._plot_data[model]",
"def model_data():\n x_train, y_train, x_val, y_val, x_test, y_test = read_data(\"src/tests/dataclassificationmodel/ferPlus_processed.pbz2\", False)\n return x_train, y_train, x_val, y_val, x_test, y_test",
"def get_model_data_from_files(self, oc):\r\n # Load model related files\r\n model_path = self.config['DATA_PATH'] + self.config['CUSTOMER_NAME'] + '/models/'\r\n\r\n features_file = model_path + self.task + '_' + str(oc) + '_features.txt'\r\n dummies_file = model_path + self.task + '_' + str(oc) + '_dummies.txt'\r\n model_file = model_path + self.task + '_' + str(oc) + '.joblib'\r\n\r\n if os.path.isfile(features_file) and os.path.isfile(dummies_file) and os.path.isfile(model_file):\r\n model = joblib.load(model_file)\r\n features = open(features_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n dummies = open(dummies_file, 'r', encoding=self.config['DATA_ENCODING']).read().rstrip('\\n').split(self.config['DATA_SEPARATOR'])\r\n return (model, features, dummies)\r\n return (None, None, None)",
"def get_model_dataset(self):\n\n model_dataset = pd.DataFrame(self._model_dataset.copy(deep=True))\n\n observation_numbers = np.arange(model_dataset.shape[0]) + 1\n\n model_dataset['Obs. number'] = observation_numbers\n\n # add the transformed response variable to the dataset\n self._add_transformed_variables(self._response_variable, model_dataset)\n\n for variable in self._explanatory_variables:\n self._add_transformed_variables(variable, model_dataset)\n\n if model_dataset.shape != (0, 0):\n model_dataset.loc[:, 'Missing'] = model_dataset.isnull().any(axis=1)\n model_dataset.loc[:, 'Excluded'] = model_dataset.index.isin(self._excluded_observations)\n\n # TODO: Order data columns\n return model_dataset",
"def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)",
"def run_model(data_loader, model):\n zs, ys, paths = [], [], []\n with torch.no_grad():\n for batch in tqdm(data_loader, total=len(data_loader)):\n image, y, path = batch\n recon, z, mu, log_var = model(image.to(device))\n zs.extend(z.cpu().tolist())\n ys.extend(y.cpu().tolist())\n paths.extend(path)\n return zs, ys, paths",
"def get_weights_from_digital_model(analog_model, digital_model):\n weights = digital_model.weight.data.detach().reshape(\n [analog_model.out_features, analog_model.in_features]).cpu()\n biases = None\n if digital_model.bias is not None:\n biases = digital_model.bias.data.detach().cpu()\n\n return weights, biases",
"def stageData(self,m):\n obs = Variable(filename = self.source,\n variable_name = self.variable,\n alternate_vars = self.alternate_vars)\n if obs.time is None: raise il.NotTemporalVariable()\n self.pruneRegions(obs)\n \n # Try to extract a commensurate quantity from the model\n mod = m.extractTimeSeries(self.variable,\n alt_vars = self.alternate_vars,\n expression = self.derived,\n initial_time = obs.time_bnds[ 0,0],\n final_time = obs.time_bnds[-1,1],\n lats = None if obs.spatial else obs.lat,\n lons = None if obs.spatial else obs.lon)\n obs,mod = il.MakeComparable(obs,mod,\n mask_ref = True,\n clip_ref = True,\n extents = self.extents,\n logstring = \"[%s][%s]\" % (self.longname,m.name))\n \n # Check the order of magnitude of the data and convert to help avoid roundoff errors\n def _reduceRoundoffErrors(var):\n if \"s-1\" in var.unit: return var.convert(var.unit.replace(\"s-1\",\"d-1\"))\n if \"kg\" in var.unit: return var.convert(var.unit.replace(\"kg\" ,\"g\" ))\n return var\n def _getOrder(var):\n return np.log10(np.abs(var.data).clip(1e-16)).mean()\n order = _getOrder(obs)\n count = 0\n while order < -2 and count < 2:\n obs = _reduceRoundoffErrors(obs)\n order = _getOrder(obs)\n count += 1\n \n # convert the model data to the same unit\n mod = mod.convert(obs.unit)\n\n return obs,mod",
"def get_data(self):\n if self.config['model'] == 'vggnet':\n if self.is_training:\n return self.data.shuffle(self.shuffle).batch(self.batch_size)\n elif self.is_testing:\n return self.data.batch(self.batch_size)\n elif not self.is_testing and not self.is_training:\n return self.data.batch(self.batch_size)\n else:\n raise NotImplementedError('In dataset.py: default input not specified for this model!')",
"def get(self, channels): \n data = []\n for chan, d in zip(self.channels, self.data):\n if chan not in channels:\n continue\n data.append(d)\n data = np.stack(data)\n return data",
"def feature_extraction(self, model):\n\n # Filter out all trainable parameters (from every layer)\n # This works differently for PyTorch and TensorFlow. Raise TypeError if model is neither of both.\n if isinstance(model.model, torch.nn.Module):\n model_parameters = list(\n filter(lambda p: p.requires_grad, model.model.parameters())\n )\n # Store the remaining parameters in a concatenated 1D numPy-array\n model_parameters = np.concatenate(\n [el.detach().numpy().flatten() for el in model_parameters]\n ).flatten()\n return model_parameters\n\n elif isinstance(model.model, tf.keras.Model):\n model_parameters = np.concatenate(\n [el.numpy().flatten() for el in model.model.trainable_variables]\n ).flatten()\n return model_parameters\n else:\n raise TypeError(\n f\"Expected model to be an instance of {str(torch.nn.Module)} or {str(tf.keras.Model)}, received {str(type(model.model))} instead.\"\n )",
"def get_data(self, how_many, offset, model_settings, background_frequency,\n background_volume_range, time_shift, mode, sess):\n # Pick one of the partitions to choose samples from.\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = max(0, min(how_many, len(candidates) - offset))\n # Data and labels will be populated and returned.\n data = np.zeros((sample_count, model_settings['fingerprint_size']))\n # labels = np.zeros((sample_count, model_settings['label_count']))\n fnames = []\n desired_samples = model_settings['desired_samples']\n # use_background = self.background_data and (mode == 'training')\n # pick_deterministically = (mode != 'training')\n # Use the processing graph we created earlier to repeatedly to generate the\n # final output sample data we'll use in training.\n for i in xrange(offset, offset + sample_count):\n # Pick which audio sample to use.\n # if how_many == -1 or pick_deterministically:\n # sample_index = i\n # else:\n # sample_index = np.random.randint(len(candidates))\n sample_index = i\n sample = candidates[sample_index]\n # # If we're time shifting, set up the offset for this sample.\n # if time_shift > 0:\n # time_shift_amount = np.random.randint(-time_shift, time_shift)\n # else:\n # time_shift_amount = 0\n # if time_shift_amount > 0:\n # time_shift_padding = [[time_shift_amount, 0], [0, 0]]\n # time_shift_offset = [0, 0]\n # else:\n # time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n # time_shift_offset = [-time_shift_amount, 0]\n time_shift_amount = 0\n time_shift_padding = [[0, -time_shift_amount], [0, 0]]\n time_shift_offset = [-time_shift_amount, 0]\n input_dict = {\n self.wav_filename_placeholder_: sample['file'],\n self.time_shift_padding_placeholder_: time_shift_padding,\n self.time_shift_offset_placeholder_: time_shift_offset,\n }\n # # Choose a section of background noise to mix in.\n # if use_background:\n # background_index = np.random.randint(len(self.background_data))\n # background_samples = self.background_data[background_index]\n # background_offset = np.random.randint(\n # 0, len(background_samples) - model_settings['desired_samples'])\n # background_clipped = background_samples[background_offset:(\n # background_offset + desired_samples)]\n # background_reshaped = background_clipped.reshape([desired_samples, 1])\n # if np.random.uniform(0, 1) < background_frequency:\n # background_volume = np.random.uniform(0, background_volume_range)\n # else:\n # background_volume = 0\n # else:\n # background_reshaped = np.zeros([desired_samples, 1])\n # background_volume = 0\n background_reshaped = np.zeros([desired_samples, 1])\n background_volume = 0\n input_dict[self.background_data_placeholder_] = background_reshaped\n input_dict[self.background_volume_placeholder_] = background_volume\n # If we want silence, mute out the main sample but leave the background.\n # if sample['label'] == SILENCE_LABEL:\n # input_dict[self.foreground_volume_placeholder_] = 0\n # else:\n # input_dict[self.foreground_volume_placeholder_] = 1\n input_dict[self.foreground_volume_placeholder_] = 1\n # Run the graph to produce the output audio.\n data[i - offset, :] = sess.run(self.mfcc_, feed_dict=input_dict).flatten()\n # label_index = self.word_to_index[sample['label']]\n # labels[i - offset, label_index] = 1\n fnames.append(os.path.split(sample['file'])[1])\n # return data, labels\n return data, fnames",
"def prepopulate(self, model, exclude=[]):\n for col in model.columns():\n if col not in exclude and hasattr(self, col):\n setattr(getattr(self, col), 'data', getattr(model, col))"
] | [
"0.6538062",
"0.6490415",
"0.62376046",
"0.6171267",
"0.6053518",
"0.58562785",
"0.580033",
"0.5770171",
"0.57479113",
"0.5745328",
"0.56002873",
"0.5590875",
"0.555867",
"0.5480762",
"0.54566306",
"0.54521996",
"0.54192096",
"0.5412202",
"0.53936917",
"0.5341618",
"0.5324312",
"0.53089285",
"0.52525806",
"0.5225355",
"0.522513",
"0.52133757",
"0.5212115",
"0.5199256",
"0.51954556",
"0.51601213"
] | 0.72179353 | 0 |
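As an illustration of the channel-ordering behaviour described in the query of the record above, the following is a minimal self-contained Python sketch. The Toy* classes are hypothetical stand-ins, not the real workspace/model implementation; only the attribute names observations, config.channels and config.auxdata are taken from the record.

import logging

log = logging.getLogger(__name__)

class ToyModelConfig:
    # Hypothetical stand-in: ordered channel names plus auxiliary data.
    def __init__(self, channels, auxdata):
        self.channels = channels
        self.auxdata = auxdata

class ToyModel:
    # Hypothetical stand-in for the model object expected by data().
    def __init__(self, config):
        self.config = config

class ToyWorkspace:
    # Hypothetical stand-in mirroring the accessor shown in the record above.
    def __init__(self, observations):
        self.observations = observations  # channel name -> list of observed counts

    def data(self, model, with_aux=True):
        try:
            # Channel order in the model drives the order of the returned data.
            observed = sum((self.observations[c] for c in model.config.channels), [])
        except KeyError:
            log.error("Invalid channel: no observation data for one of the model channels.")
            raise
        if with_aux:
            observed = observed + model.config.auxdata
        return observed

ws = ToyWorkspace({"SR": [10, 12], "CR": [50]})
model = ToyModel(ToyModelConfig(channels=["SR", "CR"], auxdata=[1.0]))
print(ws.data(model))                  # [10, 12, 50, 1.0]
print(ws.data(model, with_aux=False))  # [10, 12, 50]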
This page shows detailed stats on an individual switch queried by serial number | def switch_info(serial):
detail = getSwitchDetail(serial)
intdetail = getInterfaceDetail(serial)
try:
raw_data = open(f"raw_output/{serial}.txt", "r").read().splitlines()
except:
raw_data = "None collected yet"
return render_template(
"detail.html",
title=serial,
switch=detail,
interfaces=intdetail,
raw_data=raw_data,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSwitchDetail(serial):\n swDB = switchdb.DB()\n raw_info = swDB.getSwitchDetail(serial)\n switch = {}\n for row in raw_info:\n switch[\"name\"] = row[0]\n switch[\"serial\"] = row[1]\n switch[\"model\"] = row[2]\n switch[\"swver\"] = row[3]\n switch[\"ip\"] = row[4]\n switch[\"check\"] = row[5]\n switch[\"total\"] = row[6]\n switch[\"up\"] = row[7]\n switch[\"down\"] = row[8]\n switch[\"disabled\"] = row[9]\n switch[\"int10m\"] = row[10]\n switch[\"int100m\"] = row[11]\n switch[\"int1g\"] = row[12]\n switch[\"int10g\"] = row[13]\n switch[\"int25g\"] = row[14]\n switch[\"int40g\"] = row[15]\n switch[\"int100g\"] = row[16]\n switch[\"copper\"] = row[17]\n switch[\"sfp\"] = row[18]\n switch[\"virtual\"] = row[19]\n if switch[\"total\"] == 0:\n switch[\"capacity\"] = 0\n else:\n switch[\"capacity\"] = int((switch[\"up\"] / switch[\"total\"]) * 100)\n swDB.close()\n return switch",
"def getSwitchInfo():\n swDB = switchdb.DB()\n raw_info = swDB.getAllSummary()\n switchList = []\n for row in raw_info:\n row = list(row)\n switch = {}\n switch[\"name\"] = row[0]\n switch[\"serial\"] = row[1]\n switch[\"swver\"] = row[2]\n switch[\"ip\"] = row[3]\n switch[\"check\"] = row[4]\n switch[\"total\"] = row[5]\n switch[\"up\"] = row[6]\n switch[\"down\"] = row[7]\n switch[\"disabled\"] = row[8]\n if switch[\"total\"] == 0:\n switch[\"capacity\"] = 0\n else:\n switch[\"capacity\"] = (switch[\"up\"] / switch[\"total\"]) * 100\n switchList.append(switch)\n swDB.close()\n return switchList",
"def get_switch_details_from_mgmt(self, using):\n ret_output = {}\n #Get the console mgmt handle\n console = self.connect_mgmt_ip(using)\n console.sendline('terminal length 0')\n console.expect(SWITCH_PROMPT)\n console.sendline('show inventory | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['inv'] = console.before\n console.sendline('show system uptime | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['uptime'] = console.before\n console.sendline('show accounting log | grep \"configure\" | last 1')\n console.expect(SWITCH_PROMPT,120)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['idletime'] = console.before\n console.sendline('terminal length 15')\n console.expect(SWITCH_PROMPT)\n console.sendline('show clock | last 1')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['clock'] = console.before\n console.close()\n return ret_output",
"def stp_detail(switch):\n\n\tifloop = False\n\tstp_split = []\n\n\tgetdata = switch.conf('show spanning-tree detail | inc Number')\n\n\tif debug:\n\t\tprint getdata\n\n\tshow_stp = xmltodict.parse(getdata[1])\n\n\tstp = show_stp ['ins_api']['outputs']['output']['body']\n\n\ttcn_change = re.findall('(?<=occurred\\s).*(?=\\:)', stp)\n\tfor each in tcn_change:\n\t\tfor time in tcn_change:\n\n\t\t\tfirst_hour = re.findall(r'^(.*):',time)\n\t\t\tfor hour in first_hour:\n\t\t\t\tif int(hour) == 0:\n\t\t\t\t\tifloop = True\n\t\t\t#pulls the hour as an integer from the time listed in the message body\n\n\t\t\tfirst_minute = re.findall(r'\\:(.*)',time)\n\t\t\tfor minute in first_minute:\n\t\t\t\tif int(minute) <= 5:\n\t\t\t\t\tifloop = True\n\t\t\t#pulls the minute as an integer from the time listed in the message body\n\n\t\t\tstp_time = hour + ':' + minute\n\t\t\tif debug:\n\t\t\t\tprint stp_time\n\n\t\tif debug:\n\t\t\tprint \"Last topology change happened \" + stp_time + \" hours ago\"\n\n\ttcn_number = re.findall('(?<=changes\\s).*(?=\\last)', stp)\n\tfor number in tcn_number:\n\t\tstp_number = number\n\t#pulls ths number of topology changes that have occurred if tcn_change returns a value in the specified range\n\n\t\tif debug:\n\t\t\tprint \"Number of topology changes = \" + stp_number\n\n\tif ifloop:\n\t\tprint \"Last topology change happened \" + stp_time + \" hours ago\"\n\t\tprint \"Number of topology changes = \" + stp_number\n\telse:\n\t\tprint \"No STP topology changes.\"",
"def print_polling_traffic_stats(device_int):\n print \"previous counter {}\".format(device_int[\"previous_counter\"])\n print \"current_counter {}\".format(device_int[\"current_counter\"])\n print \"bits_out {}\".format(device_int[\"bits_out\"])\n print \"time_of poll {}\".format(device_int[\"update_time\"])\n print \"previous_update {}\".format(device_int[\"previous_update\"])\n print \"secounds since {}\".format(device_int[\"seconds_since\"])\n print \"bits_per_sec {}\".format(device_int[\"bits_per_sec\"])\n print \"speed {}\".format(device_int[\"speed\"])\n print \"util_percentage {}\".format(device_int[\"util_percentage\"])\n print \"util_percentage after round {}\".format(device_int[\"util_percentage\"])",
"def get_stats(db_table, device, db):\n\n print \"[%s] %s(%s):\" % (time.ctime(), db_table, device),\n exe = \"stats\"\n\n # Connect to serial device\n ser = Serial(device, config.baudrate, timeout=config.timeout)\n # Write command\n ser.write(commands[exe])\n # And wait for answer\n read = ser.read(com_length[exe])\n ser.close()\n\n # If the length is 0, and the response was empty, then the soladin is (most\n # likely) sleeping because there is no power (at night).\n if len(read) > 0:\n printbytes(read)\n dec = decode(read, exe)\n dec['table'] = db_table\n\n # Put it in our MySQL db\n db.execute((\"INSERT INTO `%(table)s` (`time`, `flags`, `pv_volt`, \" +\n \"`pv_amp`, `grid_freq`, `grid_volt`, `grid_pow`, \" +\n \"`total_pow`, `temp`, `optime`) VALUES \" +\n \"(NULL, '%(flags)d', '%(pv_volt)d', '%(pv_amp)d', \" +\n \"'%(grid_freq)d', '%(grid_volt)d', '%(grid_pow)d', \" +\n \"'%(total_pow)d', '%(temp)d', '%(optime)d')\") % dec)\n else:\n print \"Soladin not responding, sun down? :-(\"\n # HACK: Just write null-data\n db.execute(\"SELECT `total_pow` FROM `%s` ORDER BY `time` DESC LIMIT 1\" %\n db_table)\n prev_pow = db.fetchone()['total_pow']\n db.execute((\"INSERT INTO `%s` (`time`, `flags`, `pv_volt`, `pv_amp`, \" +\n \"`grid_freq`, `grid_volt`, `grid_pow`, `total_pow`, \" +\n \"`temp`, `optime`, `hasdata`) VALUES (NULL, '0', '0', \" +\n \"'0', '0', '0', '0', '%d', '0', '0', '0')\") % (db_table,\n prev_pow))",
"def get_switches_stats(self, site_id: str) -> List:\n try:\n stats = self.api.get(host=self.host, endpoint=f\"/api/v1/sites/{site_id}/stats/devices?type=switch\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting switch stats:{TextColors.ENDC} {e}\")\n raise e\n return stats",
"def show():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE')\n port_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PORT')\n port_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PORT_BUFFER_DROP)\n rif_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'RIF')\n queue_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE_WATERMARK')\n pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK')\n pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP)\n buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK)\n acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL)\n tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL')\n trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP')\n route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE')\n\n header = (\"Type\", \"Interval (in ms)\", \"Status\")\n data = []\n if queue_info:\n data.append([\"QUEUE_STAT\", queue_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), queue_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_info:\n data.append([\"PORT_STAT\", port_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), port_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_drop_info:\n data.append([PORT_BUFFER_DROP, port_drop_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), port_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if rif_info:\n data.append([\"RIF_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if queue_wm_info:\n data.append([\"QUEUE_WATERMARK_STAT\", queue_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), queue_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_wm_info:\n data.append([\"PG_WATERMARK_STAT\", pg_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), pg_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_drop_info:\n data.append(['PG_DROP_STAT', pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), pg_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if buffer_pool_wm_info:\n data.append([\"BUFFER_POOL_WATERMARK_STAT\", buffer_pool_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), buffer_pool_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if acl_info:\n data.append([ACL, pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), acl_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if tunnel_info:\n data.append([\"TUNNEL_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if trap_info:\n data.append([\"FLOW_CNT_TRAP_STAT\", trap_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), trap_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if route_info:\n data.append([\"FLOW_CNT_ROUTE_STAT\", route_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC),\n route_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n\n click.echo(tabulate(data, headers=header, tablefmt=\"simple\", missingval=\"\"))",
"def get_switch_serial(ip_addr, meraki_net):\n serial_id = \"\"\n api_uri = f\"/v1/networks/{meraki_net}/devices\"\n data = get_meraki_api_data(api_uri)\n for device in data:\n device_type = decode_meraki_model(device[\"model\"])\n if \"switch\" in device_type: \n if ip_addr in device[\"lanIp\"]:\n serial_id = str(device[\"serial\"]).strip()\n switch_name = str(device[\"name\"]).strip()\n logger.info(\"Switch Found! Serial %s\" , serial_id) \n return serial_id, switch_name",
"def getInterfaceDetail(serial):\n swDB = switchdb.DB()\n raw_info = swDB.getInterfaceDetail(serial)\n interfaceList = []\n for row in raw_info:\n row = list(row)\n interface = {}\n interface[\"name\"] = row[0]\n interface[\"description\"] = row[1]\n interface[\"physical_address\"] = row[2]\n interface[\"oper_status\"] = row[3]\n interface[\"oper_speed\"] = row[4]\n interface[\"oper_duplex\"] = row[5]\n interfaceList.append(interface)\n return interfaceList",
"def portstatsshow(obj, content):\n global _portstats_to_api\n\n port_obj, port_stats_d, switch_obj = None, None, obj.r_switch_obj()\n\n for buf in content:\n buf = buf.replace('er_single_credit_loss', 'er_single_credit_loss ')\n buf = buf.replace('er_multi_credit_loss', 'er_multi_credit_loss ')\n buf = buf.replace('fec_corrected_rate', 'fec_corrected_rate ')\n buf = buf.replace('latency_dma_ts', 'latency_dma_ts ')\n tl = gen_util.remove_duplicate_char(buf.replace('\\t',' '), ' ').split(' ')\n if len(tl) < 2:\n continue\n\n if tl[0] == 'port:':\n port_obj = brcddb_port.port_obj_for_index(switch_obj, int(tl[1].strip()))\n if port_obj is None:\n brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log\n raise Exception('Could not find port matching: ' + buf)\n port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)\n if port_stats_d is None:\n port_stats_d = dict(name=port_obj.r_obj_key())\n port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)\n\n elif tl[0] in _portstatsshow_special:\n _portstatsshow_special[tl[0]](port_obj)\n\n else:\n key = _portstats_to_api.get(tl[0])\n if key is not None:\n port_stats_d.update({key: int(tl[1])})",
"def get_switch_details_from_console(self):\n ret_output = {}\n #Get the console port\n console = self.telnet_console_port()\n console.sendline('terminal length 0')\n console.expect(SWITCH_PROMPT)\n console.sendline('show inventory | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['inv'] = console.before\n console.sendline('show system uptime | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['uptime'] = console.before\n console.sendline('show accounting log | grep \"configure\" | last 1')\n console.expect(SWITCH_PROMPT,120)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['idletime'] = console.before\n console.sendline('terminal length 15')\n console.expect(SWITCH_PROMPT)\n console.sendline('show clock | last 1')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['clock'] = console.before\n console.close()\n return ret_output",
"def get_switch(self,host):\n switch_list = self.__graph_dict[host]\n switch_num = switch_list[0]\n return switch_num",
"def showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])",
"def get_meter_info(apt_no):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n payload = (\"select uuid, Metadata/Instrument/SupplyType \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=payload)\n # logger.debug (\"%s\",r)\n payload_body = r.json()\n # logger.debug (\"Payload:\\n%s\", payload_body)\n\n meters = []\n for i in range(0, len(payload_body)):\n meter = payload_body[i]\n\n meters.append({'uuid': meter['uuid'], 'type': meter[\n 'Metadata']['Instrument']['SupplyType']})\n\n return meters",
"def getDeviceSwitchPortsStatuses(self, serial: str, **kwargs):\n\n kwargs.update(locals())\n\n metadata = {\n 'tags': ['switch', 'monitor', 'ports', 'statuses'],\n 'operation': 'getDeviceSwitchPortsStatuses',\n }\n resource = f'/devices/{serial}/switch/ports/statuses'\n\n query_params = ['t0', 'timespan']\n params = {k: v for (k, v) in kwargs.items() if k in query_params}\n\n return self._session.get(metadata, resource, params)",
"def startup_info(serial_port):\n top()\n programflow(serial_port)",
"def desc_stats_reply_handler(self, ev):\n body = ev.msg.body\n datapath = ev.msg.datapath\n dpid = datapath.id\n self.logger.info('event=DescStats Switch dpid=%s is mfr_desc=\"%s\" '\n 'hw_desc=\"%s\" sw_desc=\"%s\" serial_num=\"%s\" dp_desc=\"%s\"',\n dpid, body.mfr_desc, body.hw_desc, body.sw_desc,\n body.serial_num, body.dp_desc)",
"def sunpower_fetch(sunpower_monitor):\n try:\n sunpower_data = sunpower_monitor.device_list()\n _LOGGER.debug(\"got data %s\", sunpower_data)\n data = {}\n # Convert data into indexable format data[device_type][serial]\n for device in sunpower_data[\"devices\"]:\n if device[\"DEVICE_TYPE\"] not in data:\n data[device[\"DEVICE_TYPE\"]] = {device[\"SERIAL\"]: device}\n else:\n data[device[\"DEVICE_TYPE\"]][device[\"SERIAL\"]] = device\n return data\n except ConnectionException as error:\n raise UpdateFailed from error",
"def show_devices_status(releaser):\n\n devices = releaser.get_devices_by_status()\n for tag in devices:\n tag_devices = \", \".join([c[\"uuid\"][:6] for c in devices[tag].values()])\n click.echo(f\"{tag}: {tag_devices}\")",
"def device_counters(system_ip):\n\n click.secho(\"Retrieving the Device Counters\")\n\n url = base_url + \"/device/counters?deviceId={0}\".format(system_ip)\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get device Counters \" + str(response.text))\n exit()\n\n print(\"\\nDevice Counters for device = \",system_ip)\n\n\n headers = [\"OMP Peers Up\", \"OMP Peers Down\", \"vSmart connections\", \"BFD Sessions Up\", \"BFD Sessions Down\"]\n table = list()\n\n for item in items:\n try:\n tr = [item['ompPeersUp'], item['ompPeersDown'], item['number-vsmart-control-connections'], item['bfdSessionsUp'], item['bfdSessionsDown']]\n table.append(tr)\n except KeyError:\n pass\n\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def collect_switch_info(device_id_list):\n\n all_switches_info_list = []\n for device_id in device_id_list: # loop to collect data from each device\n info_list = []\n print('device id ', device_id) # print device id, printing messages will show progress\n host_name = get_hostname_devicetype_serialnumber(device_id)[0]\n serial_number = get_hostname_devicetype_serialnumber(device_id)[2]\n info_list.append(host_name)\n info_list.append(serial_number)\n device_license = get_license_device(device_id) # call the function to provide active licenses\n for licenses in device_license: # loop to append the provided active licenses to the device list\n info_list.append(licenses)\n all_switches_info_list.append(info_list) # append the created list for this device to the list of lists\n switchport_info_list = collect_switchport_info(device_id)\n for switchports in switchport_info_list:\n all_switches_info_list.append(switchports)\n all_switches_info_list.append('')\n return all_switches_info_list",
"def showStat(self):\n print \">>[Stat Information]:\"\n if self.gid != DEFALUT_GROUP_ID:\n print \"Gid = %u\" % self.gid\n print \"[Queries] Arp = %u, Original_to_controller= %u, Current_to_controller = %u\" % (self.query_arp, self.query_control_origin, self.query_control_current)\n print \"TP = %u, TN = %u, FP = %u\" % (self.tp, self.tn, self.fp)\n print \"[Flow] local_switch = %u, within the group = %u,across groups = %u\" % (self.flow_local, self.flow_within_group, self.flow_cross_group)\n print \"[Traffic] local_switch = %u byte, within the group = %u byte,across groups = %u byte\" % (self.byte_local, self.byte_within_group, self.byte_cross_group)",
"def show_meraki_switch(self, job_req):\n logger.info(\"Job Received : %s\", job_req)\n api_uri = f\"/v1/networks/{self.meraki_net}/devices\"\n data = get_meraki_api_data(api_uri)\n # Parse the JSON\n message = \"Here is the detail: \\n\"\n device_counter = 0\n check_icon = chr(0x2705)\n for device in data:\n device_type = decode_meraki_model(device[\"model\"])\n if \"switch\" in device_type:\n message += f\"* **{device['name']}** | IP: **{device['lanIp']}** | Serial: **{device['serial']}** \\n\"\n device_counter += 1\n message += f\"{check_icon} Total: **{device_counter}** \\n\" \n return message",
"def port_show(switch, port):\n print client.port.show(switch, port)",
"def getSwitch(self, projNumber):",
"def test_retrieve_1_by_1(self):\n swa = frontend.SupplyWinApi()\n query_dict = dict(\n dev=\"rrenaud\",\n targets=\"Council Room\",\n interaction=\"Farming Village\",\n unconditional=\"true\",\n )\n\n card_stats = swa.retrieve_data(query_dict)\n\n self.assertEquals(len(card_stats), 2)\n\n self.assertEquals(card_stats[0]['card_name'], 'Council Room')\n self.assertEquals(card_stats[0]['condition'][0], 'Farming Village')\n\n self.assertEquals(card_stats[1]['card_name'], 'Council Room')\n self.assertEquals(len(card_stats[1]['condition']), 0)\n\n json = swa.readable_json_card_stats(card_stats)\n self.assertEquals(json[0:14], '[{\"card_name\":')",
"def portstats64show(obj, content):\n global _portstats_to_api\n\n i, x, chassis_obj = 0, len('portstats64show'), obj.r_chassis_obj()\n while len(content) > i:\n\n # Get the port object\n buf = gen_util.remove_duplicate_char(content[i].replace('\\t', ' '), ' ')\n if len(buf) == 0:\n i += 1\n continue\n if len(buf) < x or buf[0:x] != 'portstats64show':\n break\n index = int(buf.split(' ')[1])\n port_obj = brcddb_port.port_obj_for_index(chassis_obj, int(buf.split(' ')[1]))\n if port_obj is None:\n brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log\n raise Exception('Could not find port matching: ' + buf)\n port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)\n if port_stats_d is None:\n port_stats_d = dict()\n port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)\n\n # Parse the port statistics\n i += 1\n while len(content) > i and len(content[i]) > 0:\n buf = gen_util.remove_duplicate_char(content[i].replace('\\t', ' '), ' ')\n cl = buf.split(' ')\n key = _portstats_to_api.get(cl[0])\n if key is not None:\n if 'top_int :' in buf:\n i += 1\n lv = int(gen_util.remove_duplicate_char(content[i].replace('\\t', ' ').strip().split(' ')[0], ' '))\n v = int('{:x}'.format(int(cl[1])) + '{:08x}'.format(lv), 16)\n else:\n v = int(cl[1])\n port_stats_d.update({key: v})\n i += 1\n\n return i",
"def all_info(stdscr, jetson, key):\n # Screen size\n height, width = stdscr.getmaxyx()\n line_counter = 1\n # Plot Status CPU\n line_counter = plot_CPUs(stdscr, line_counter, jetson.stats['CPU'], width)\n # Plot MTS\n if 'MTS' in jetson.stats:\n line_counter += 1\n stdscr.addstr(line_counter, 0, \"MTS \", curses.color_pair(5))\n MTS_FG = {'name': 'FG',\n 'value': int(jetson.stats['MTS']['fg']),\n }\n linear_percent_gauge(stdscr, MTS_FG, width // 2 - 2,\n offset=line_counter, start=4, color_name=5)\n MTS_BG = {'name': 'BG',\n 'value': int(jetson.stats['MTS']['bg']),\n }\n linear_percent_gauge(stdscr, MTS_BG, width // 2 - 2,\n offset=line_counter, start=2 + width // 2, color_name=5)\n # RAM linear gauge info\n ram_status = jetson.stats['RAM']['RAM']\n lfb_status = jetson.stats['RAM']['lfb']\n RAM_VALUE = {'name': \"Mem\",\n 'value': int(ram_status['used'][-1] / float(ram_status['total']) * 100.0),\n 'label': \"(lfb \" + str(lfb_status['nblock']) + \"x\" + str(lfb_status['size']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(ram_status['used'][-1] / 1000.0, ram_status['total'] / 1000.0),\n }\n line_counter += 1\n linear_percent_gauge(stdscr, RAM_VALUE, width, offset=line_counter)\n # EMC linear gauge info\n if 'EMC' in jetson.stats:\n line_counter += 1\n linear_percent_gauge(stdscr, make_gauge_from_percent(jetson.stats['EMC']), width, offset=line_counter)\n # IRAM linear gauge info\n iram_status = jetson.stats['IRAM']\n if iram_status:\n line_counter += 1\n IRAM_VALUE = {'name': \"Imm\",\n 'value': int(iram_status['used'][-1] / float(iram_status['total']) * 100.0),\n 'label': \"(lfb \" + str(iram_status['size']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(iram_status['used'][-1] / 1000.0,\n iram_status['total'] / 1000.0),\n }\n linear_percent_gauge(stdscr, IRAM_VALUE, width, offset=line_counter)\n # SWAP linear gauge info\n swap_status = jetson.stats['SWAP']\n if swap_status:\n SWAP_VALUE = {'name': \"Swp\",\n 'value': int(swap_status['used'][-1] / float(swap_status['total']) * 100.0),\n 'label': \"(cached \" + str(swap_status['cached']) + \"MB)\",\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(swap_status['used'][-1] / 1000.0,\n swap_status['total'] / 1000.0),\n }\n else:\n SWAP_VALUE = {'name': \"Swp\"}\n line_counter += 1\n linear_percent_gauge(stdscr, SWAP_VALUE, width, offset=line_counter)\n # GPU linear gauge info\n line_counter += 1\n if 'GR3D' in jetson.stats:\n linear_percent_gauge(stdscr, make_gauge_from_percent(jetson.stats['GR3D']), width, offset=line_counter + 1)\n line_counter += 2\n # Status disk\n disk_status = jetson.disk\n DISK_STATUS = {'name': \"Dsk\",\n 'value': int(float(disk_status['used']) / float(disk_status['total']) * 100.0),\n 'percent': \"{0:2.1f}GB/{1:2.1f}GB\".format(disk_status['used'], disk_status['total']),\n }\n linear_percent_gauge(stdscr, DISK_STATUS, width, offset=line_counter, type_bar=\"#\", color_name=3)\n # Last part of information\n split = 1.0\n split += 1.0 if jetson.stats['temperatures'] else 0.0\n split += 1.0 if jetson.stats['voltages'] else 0.0\n column_width = int(float(width - 4) / split)\n line_counter += 1\n # List of all mini menu\n mini_menu = [compact_info, plot_temperatures, plot_voltages]\n # Evaluate column width\n column_width = int(float(width) / len(mini_menu))\n for idx, mini in enumerate(mini_menu):\n # Run mini page\n mini(stdscr, idx * column_width, line_counter, column_width, jetson)",
"def stats(short_url):\n stats = get_stats(short_url)\n click.echo(stats)"
] | [
"0.747413",
"0.6891839",
"0.5926545",
"0.58273053",
"0.57952845",
"0.5792791",
"0.57800347",
"0.56501704",
"0.564042",
"0.5636971",
"0.56230223",
"0.54754305",
"0.5463351",
"0.5437777",
"0.5394672",
"0.53712904",
"0.536182",
"0.53433543",
"0.53223366",
"0.52944213",
"0.5273669",
"0.52659166",
"0.52504444",
"0.52339715",
"0.5211656",
"0.52101105",
"0.52059805",
"0.52038944",
"0.51954377",
"0.5164739"
] | 0.7308758 | 1 |
Check DB for last runtime of backend script. This is published on the main page to see when stats were last updated | def getLastUpdate():
swDB = switchdb.DB()
lastupdate = swDB.getLastUpdate()
swDB.close()
return lastupdate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_last_init() -> str:\n return db[\"last_init\"]",
"def test_log_last_started_datetime(self):\n initial_count = CostUsageReportStatus.objects.count()\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_started_datetime()\n self.assertIsNotNone(saver.get_last_started_datetime())\n saver.delete()\n self.assertEqual(CostUsageReportStatus.objects.count(), initial_count)",
"def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)",
"def main():\n print get_latest_data()",
"def test_log_last_completed_datetime(self):\n initial_count = CostUsageReportStatus.objects.count()\n saver = ReportStatsDBAccessor(\"myreport\", self.manifest_id)\n saver.log_last_completed_datetime()\n self.assertIsNotNone(saver.get_last_completed_datetime())\n saver.delete()\n self.assertEqual(CostUsageReportStatus.objects.count(), initial_count)",
"def get_newest_status():\n TrackedSite.objects.monitor_sites()",
"def last_updated() -> str:\n return GLOBAL.get(\"last_update\")",
"def dbstats_api():\n if not config.DEBUG:\n limit_to_localhost()\n\n return jsonify(status='ok', stats=sqlalchemy_pool_status()) # cant be async, used by the reboot script",
"def get_stats(self, current_time, last_print):\n logs_for_stats = self.database.loc[\n (current_time >= self.database[\"date\"]) & (self.database[\"date\"] > last_print)\n ]\n if len(logs_for_stats):\n section_stat = logs_for_stats[\"section\"].value_counts().idxmax()\n user_stat = logs_for_stats[\"authuser\"].value_counts().idxmax()\n failed_request = len(logs_for_stats.loc[logs_for_stats[\"status\"] >= 400])\n num_logs = len(logs_for_stats)\n last_print = logs_for_stats[\"date\"].unique().max()\n print(\"{}: Number of logs: {}, Top hit section: {}, Top user: {}, Failed request: {}\".format(\n current_time,\n num_logs,\n section_stat,\n user_stat,\n failed_request)\n )\n return last_print",
"def update_server_stats(self):\n try:\n aio.run(self.client.execute, 'ANALYZE')\n except Exception:\n pass # swallow; CrateDB 4.1.0+ is required to run ANALYZE",
"async def do_lastupdated():\n\n download = urllib.request.urlopen(server_api)\n data = json.loads(download.read())\n timestamp = data['last_updated']\n time = datetime.datetime.fromtimestamp(int(timestamp)).strftime('%Y-%m-%d %H:%M:%S')\n await bot.send_message(c, 'The last time the server API updated was at: {}'.format(time))",
"def check_load(cursor):\n cursor.execute(\"\"\"\n select pid from pg_stat_activity where query ~* 'FETCH'\n and datname = 'asos'\"\"\")\n if cursor.rowcount > 9:\n sys.stderr.write((\"/cgi-bin/request/metars.py over capacity: %s\"\n ) % (cursor.rowcount,))\n ssw(\"Content-type: text/plain\\n\")\n ssw('Status: 503 Service Unavailable\\n\\n')\n ssw(\"ERROR: server over capacity, please try later\")\n sys.exit(0)",
"def load_last_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\"\n if os.path.isfile(\"last_time_run\"): #\n # If the file exists\n f = open(\"last_time_run\", \"r\")\n last_run_time = datetime.datetime.strptime(f.read(), \"%Y-%m-%d %H:%M:%S\")\n f.close()\n return last_run_time\n save_current_run_time()\n # If file doesn't exist (possible if it's the first run), return current time\n return datetime.datetime.now()",
"def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()",
"def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.curr_exec_time = self.my_rand.gauss(self.runtime, self.stddev)\n except:\n if self.fwk.debug:\n print(\"not varying the execution time\")\n self.curr_exec_time = self.runtime\n raise\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_work':\n # this is a sandia style work task\n next_ckpt = self.sim.next_ckpt # relative work time\n work_todo = self.sim.total_work - self.sim.completed_work\n self.curr_exec_time = min(work_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_rework':\n next_ckpt = self.sim.next_ckpt # relative work time\n self.curr_exec_time = min(self.sim.rework_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_ckpt' or self.type == 'sandia_restart':\n self.curr_exec_time = self.runtime\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n else:\n print('error error error!!! problem with component type in get_curr_exec_time')\n raise",
"def OnUpdate(self, event):\n # Check remote - TODO\n # Query database for status of processing\n # 2018-04-11 13:25:56.914000\n self.controller.checkRemote()\n seriesprocesses = self.controller.db.getActiveProcesses()\n self.m_dataViewListCtrlCloud.DeleteAllItems()\n for series in seriesprocesses:\n # time delta\n t1 = datetime.datetime.strptime(series[4], '%Y-%m-%d %H:%M:%S.%f')\n if series[5] is not None:\n t2 = datetime.datetime.strptime(series[5], '%Y-%m-%d %H:%M:%S.%f')\n else:\n t2 = datetime.datetime.now()\n tdiff = t2 - t1\n # Load to window\n self.m_dataViewListCtrlCloud.AppendItem(\n [False, series[0], series[1], series[2].upper(), self.getStatus(series[3]), str(tdiff)])",
"def get_post_stats(self):\n stats = self.stats\n stats.results = self.job.result().get_counts(stats.iteration)\n stats.datetime = str(datetime.now())",
"def get_host_stats(self, refresh=False):",
"def time_to_process_last_submission(self) -> int:\n return self.snapshot['time_to_process_last_submission']",
"def periodicUpdate(self):\n try:\n logging.info(f'{self.cn} periodicUpdate = Start')\n isHaz = JsonSettings.parseJson('settings.json','isHazelcast')\n if self.db.isDb():\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n else:\n self.db.initDb()\n self.insertSys()\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n except Exception as e:\n logging.critical(f'{self.cn} Exception: {e}')\n logging.critical(f'{self.cn} StackTrace: \\n', exc_info=1)\n finally:\n logging.info(f'{self.cn} periodicUpdate = End')",
"def dbCurrentTime():\n return datetime.datetime.utcnow()",
"def fetch_time_server(self):\n processes = self.get_processes()\n if processes:\n server = processes.itervalues().next()\n host = server.get_leader()\n if host:\n self.global_time_server = xmlrpclib.ServerProxy(\"http://%s:%d\"%(host[0], host[1]))\n self.time_server_set = True\n return True if host else False\n else:\n print \"Not enough servers up yet. Cannot fetch a time server.\"\n return False",
"def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")",
"def getLastFinishedBuild():",
"def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)",
"def validity_by_time(self):\n conn = psycopg2.connect(self.conn)\n permissable_maximum_age_secs = 600 # 600s = 10mins\n query = \"SELECT time FROM steve_sense_sensor_logs ORDER BY time DESC LIMIT 1\"\n cur = conn.cursor()\n cur.execute(query)\n queryResult = cur.fetchall()\n age_seconds = (datetime.datetime.now(\n timezone.utc) - queryResult[0][0]).seconds\n cur.close()\n conn.close()\n if age_seconds > permissable_maximum_age_secs:\n print(\"Check Sensor, last sample is \"+str(age_seconds)+\" old\")\n return False\n else:\n return True",
"def last_update(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_update')",
"def last_run(self):\r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n cursor = c.execute('SELECT max(id) FROM tbl_lastRun') \r\n max_id = cursor.fetchone()[0]\r\n cursor = c.execute('SELECT col_timestamp FROM tbl_lastRun')\r\n #timeLastRun = cursor.fetchone()[0]\r\n tLR_str = time.strftime('%Y-%m-%d %H:%M %z', time.localtime(cursor.fetchone()[0])) \r\n self.txt_lastRun.delete(0, 'end')\r\n self.txt_lastRun.insert(0, tLR_str)",
"def _check_if_statistics_calculation_is_needed():\n expiration_date = datetime.datetime.now(tz=datetime.timezone.utc) - datetime.timedelta(\n seconds=UploadHandler.EXPIRATION_TIME_IN_SECONDS)\n not_expired_data = UnprocessedData.objects.filter(uploaded_at__gte=expiration_date)\n sites_of_not_expired_data = not_expired_data.values_list('site_id', flat=True).distinct()\n all_sites = UnprocessedData.objects.filter(uploaded_at__lte=expiration_date).values_list('site_id',\n flat=True).distinct()\n for s in all_sites:\n if s not in sites_of_not_expired_data:\n from_date = UnprocessedData.objects.filter(site_id_id=s).order_by('from_date')[0].from_date\n to_date = UnprocessedData.objects.filter(site_id_id=s).order_by('-to_date')[0].to_date\n logger.info(\"should create stats for {} from {} to {}\".format(s, from_date, to_date))\n site_obj = get_object_or_404(Site, pk=s)\n UploadHandler.create_statistics(site=site_obj, from_date=from_date, to_date=to_date)\n UnprocessedData.objects.filter(site_id_id=s).delete()\n\n if len(sites_of_not_expired_data):\n Timer(UploadHandler.INTERVAL, UploadHandler._check_if_statistics_calculation_is_needed).start()\n else:\n UploadHandler.is_interval_running = False",
"def get_last_runtime(self, file_path) -> float | None:\n stat = self._file_stats.get(file_path)\n return stat.last_duration.total_seconds() if stat and stat.last_duration else None"
] | [
"0.575571",
"0.5727403",
"0.56392384",
"0.5554743",
"0.5544511",
"0.5508971",
"0.5502992",
"0.54756486",
"0.54550827",
"0.5454201",
"0.54325056",
"0.54227924",
"0.5381801",
"0.5367871",
"0.53669065",
"0.5365385",
"0.536471",
"0.5316883",
"0.53158337",
"0.53126395",
"0.5308993",
"0.52858573",
"0.5261974",
"0.5243597",
"0.524065",
"0.52359676",
"0.5235283",
"0.5217812",
"0.5214207",
"0.521415"
] | 0.6065848 | 0 |
Query DB for summary info on all switches currently monitored | def getSwitchInfo():
swDB = switchdb.DB()
raw_info = swDB.getAllSummary()
switchList = []
for row in raw_info:
row = list(row)
switch = {}
switch["name"] = row[0]
switch["serial"] = row[1]
switch["swver"] = row[2]
switch["ip"] = row[3]
switch["check"] = row[4]
switch["total"] = row[5]
switch["up"] = row[6]
switch["down"] = row[7]
switch["disabled"] = row[8]
if switch["total"] == 0:
switch["capacity"] = 0
else:
switch["capacity"] = (switch["up"] / switch["total"]) * 100
switchList.append(switch)
swDB.close()
return switchList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_switches_stats(self, site_id: str) -> List:\n try:\n stats = self.api.get(host=self.host, endpoint=f\"/api/v1/sites/{site_id}/stats/devices?type=switch\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting switch stats:{TextColors.ENDC} {e}\")\n raise e\n return stats",
"def show():\n configdb = ConfigDBConnector()\n configdb.connect()\n queue_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE')\n port_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PORT')\n port_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PORT_BUFFER_DROP)\n rif_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'RIF')\n queue_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'QUEUE_WATERMARK')\n pg_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'PG_WATERMARK')\n pg_drop_info = configdb.get_entry('FLEX_COUNTER_TABLE', PG_DROP)\n buffer_pool_wm_info = configdb.get_entry('FLEX_COUNTER_TABLE', BUFFER_POOL_WATERMARK)\n acl_info = configdb.get_entry('FLEX_COUNTER_TABLE', ACL)\n tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL')\n trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP')\n route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE')\n\n header = (\"Type\", \"Interval (in ms)\", \"Status\")\n data = []\n if queue_info:\n data.append([\"QUEUE_STAT\", queue_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), queue_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_info:\n data.append([\"PORT_STAT\", port_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), port_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if port_drop_info:\n data.append([PORT_BUFFER_DROP, port_drop_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), port_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if rif_info:\n data.append([\"RIF_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_1_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if queue_wm_info:\n data.append([\"QUEUE_WATERMARK_STAT\", queue_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), queue_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_wm_info:\n data.append([\"PG_WATERMARK_STAT\", pg_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), pg_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if pg_drop_info:\n data.append(['PG_DROP_STAT', pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), pg_drop_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if buffer_pool_wm_info:\n data.append([\"BUFFER_POOL_WATERMARK_STAT\", buffer_pool_wm_info.get(\"POLL_INTERVAL\", DEFLT_60_SEC), buffer_pool_wm_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if acl_info:\n data.append([ACL, pg_drop_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), acl_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if tunnel_info:\n data.append([\"TUNNEL_STAT\", rif_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), rif_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if trap_info:\n data.append([\"FLOW_CNT_TRAP_STAT\", trap_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC), trap_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n if route_info:\n data.append([\"FLOW_CNT_ROUTE_STAT\", route_info.get(\"POLL_INTERVAL\", DEFLT_10_SEC),\n route_info.get(\"FLEX_COUNTER_STATUS\", DISABLE)])\n\n click.echo(tabulate(data, headers=header, tablefmt=\"simple\", missingval=\"\"))",
"def get_host_stats(self, refresh=False):",
"def get_switch_details_from_mgmt(self, using):\n ret_output = {}\n #Get the console mgmt handle\n console = self.connect_mgmt_ip(using)\n console.sendline('terminal length 0')\n console.expect(SWITCH_PROMPT)\n console.sendline('show inventory | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['inv'] = console.before\n console.sendline('show system uptime | xml')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['uptime'] = console.before\n console.sendline('show accounting log | grep \"configure\" | last 1')\n console.expect(SWITCH_PROMPT,120)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['idletime'] = console.before\n console.sendline('terminal length 15')\n console.expect(SWITCH_PROMPT)\n console.sendline('show clock | last 1')\n console.expect(SWITCH_PROMPT)\n if any(i in console.before for i in INVALID_CLI): raise InvalidCliError('show cmd failure') \n ret_output['clock'] = console.before\n console.close()\n return ret_output",
"def getSwitchDetail(serial):\n swDB = switchdb.DB()\n raw_info = swDB.getSwitchDetail(serial)\n switch = {}\n for row in raw_info:\n switch[\"name\"] = row[0]\n switch[\"serial\"] = row[1]\n switch[\"model\"] = row[2]\n switch[\"swver\"] = row[3]\n switch[\"ip\"] = row[4]\n switch[\"check\"] = row[5]\n switch[\"total\"] = row[6]\n switch[\"up\"] = row[7]\n switch[\"down\"] = row[8]\n switch[\"disabled\"] = row[9]\n switch[\"int10m\"] = row[10]\n switch[\"int100m\"] = row[11]\n switch[\"int1g\"] = row[12]\n switch[\"int10g\"] = row[13]\n switch[\"int25g\"] = row[14]\n switch[\"int40g\"] = row[15]\n switch[\"int100g\"] = row[16]\n switch[\"copper\"] = row[17]\n switch[\"sfp\"] = row[18]\n switch[\"virtual\"] = row[19]\n if switch[\"total\"] == 0:\n switch[\"capacity\"] = 0\n else:\n switch[\"capacity\"] = int((switch[\"up\"] / switch[\"total\"]) * 100)\n swDB.close()\n return switch",
"def status_summary(self):\n base_query_set = super(PeeringSessionManager, self).get_queryset()\n summary = base_query_set.annotate(\n label=models.Case(\n models.When(provisioning_state=2, then=models.Case(\n models.When(admin_state=2, then=models.Case(\n models.When(operational_state=6,\n then=models.Value('Up')),\n default=models.Value('Down')\n )),\n default=models.Value('Admin Down')\n )),\n models.When(provisioning_state=1,\n then=models.Value('Provisioning')),\n default=models.Value('None'),\n output_field=models.CharField()\n )).values('label').annotate(value=models.Count('label'))\n return summary",
"def get_data_router_statuses(request):\n data_router_status_list = []\n for router in DataRouter.objects.filter(experiment=request.experiment):\n router_res = {\n 'success': True,\n 'pk': router.pk,\n 'is_online': router.is_online,\n 'is_clean': router.staging_directory_is_clean,\n }\n data_router_status_list.append(router_res)\n\n return data_router_status_list",
"async def get_monitor_data(self):\n json = await self._api_call(\"app/monitors/%s/overview\" % self.sense_monitor_id)\n if \"monitor_overview\" in json and \"monitor\" in json[\"monitor_overview\"]:\n self._monitor = json[\"monitor_overview\"][\"monitor\"]\n return self._monitor",
"def db_stats(self):\n return { \"search_and_get\": self.db_search_and_get }",
"def measurements_lookup(client, database):\n client.switch_database(database)\n mlist_dict = client.get_list_measurements()\n # print(\"def measurements_lookup 010:\", mlist_dict[:10])\n return mlist_dict",
"def summary(self):\n\t\tprint \"Summary--------------------------------------:\"\n\t\tprint \"Available data sources are:\"\n\t\tfor path in self.available_databases:\n\t\t\tprint path",
"def latest_monitoring_statistics(self, **_):\n raise NotImplementedError(\"{} doesn't support statistics.\".format(self.__class__.__name__))",
"def showStatistics(self):\n\n deviceName = self.deviceName()\n\n if deviceName:\n stats = a.sys.net.lnx.device.DeviceUtils.getStatistics(self.name, self._log, deviceName) \n if stats:\n for key in stats:\n print \"%s: %s\" % (key, stats[key])",
"def list_health_monitors(self, retrieve_all=True, **_params):\r\n # Pass filters in \"params\" argument to do_request\r\n return self.list('health_monitors', self.health_monitors_path,\r\n retrieve_all, **_params)",
"def get_monitor_details():\n monitor_id = paranoid_clean(request.args.get('id'))\n monitors = mongo.db[app.config['MONITORS_COLLECTION']]\n monitor = monitors.find_one({'hashed': monitor_id}, {'_id': 0})\n if not monitor:\n return jsonify({'success': False, 'error': 'Monitor was not found.'})\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n link = monitor['metadata']['rss_link']\n articles = list(articles.find({'feed_source': link}, {'_id': 0}))\n for idx, item in enumerate(articles):\n articles[idx]['title'] = html.unescape(item['title'])\n articles[idx]['date'] = item['collected'][:10]\n articles.sort(key=lambda x: x['collected'], reverse=True)\n return jsonify({'success': True, 'monitor': monitor, 'articles': articles})",
"def fetch_metrics(self):\n\n self.explain_all_indices()",
"def all_statistics(self):\n statistics_database = CurrentProject().db_client.statistics\n collections = [getattr(statistics_database, name) for name in statistics_database.collection_names()]\n return AttributeDict.attributize_dict({collection.name: list(collection.find()) for collection in collections})",
"def mmo_replication_status_summary(self, mmo_connection):\n replication_summary = []\n primary_info = {}\n o = self.mmo_replication_status(mmo_connection)\n o = o + self.mmo_configsrv_replication_status(mmo_connection)\n replset_hosts_up_down = {}\n for shard in self.shards:\n replset_hosts_up_down[shard] = 0\n for replicaset in o:\n if \"Error\" not in replicaset[\"command_output\"].keys():\n for member in replicaset[\"command_output\"][\"members\"]:\n if member[\"stateStr\"] == \"PRIMARY\":\n primary_info[replicaset[\"command_output\"][\"set\"]] = member[\"optimeDate\"]\n\n replication_summary.append( { \"replicaset\": replicaset[\"command_output\"][\"set\"],\n \"hostname\": member[\"name\"],\n \"state\": member[\"stateStr\"],\n \"uptime\": member[\"uptime\"],\n \"configVersion\": member[\"configVersion\"],\n \"optimeDate\": member[\"optimeDate\"] } )\n for doc in replication_summary:\n if doc[\"state\"] == \"PRIMARY\":\n doc[\"lag\"] = \"NA\" # not relevant here\n else: # calculate the slave lag from the PRIMARY optimeDate\n if doc[\"replicaset\"] in primary_info.keys(): # is there a primary in the replset?\n try:\n if hasattr((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]), \"total_seconds\"): # Does not exist in python 2.6\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).total_seconds())\n else: # for python 2.6 that does not have total_seconds attribute\n # Will only be correct for delays of up to 24 hours\n doc[\"lag\"] = abs((doc[\"optimeDate\"] - primary_info[doc[\"replicaset\"]]).seconds) # Primary needs ot be first in this case\n except:\n doc[\"lag\"] = \"ERR\"\n else:\n doc[\"lag\"] = \"UNK\" # We cannot know what the delay is if there is no primary\n else:\n replset_hosts_up_down[replicaset[\"shard\"]] += 1\n\n #else: Probably redundant code now. Removed ot fix https://github.com/rhysmeister/mmo/issues/34\n # We cannot know the state of much of the replicaset at this point\n # replication_summary.append({\"replicaset\": replicaset[\"shard\"],\n # \"hostname\": \"UNK\",\n # \"state\": \"UNK\",\n # \"uptime\": \"UNK\",\n # \"configVersion\": \"UNK\",\n # \"optimeDate\": \"UNK\"})\n\n\n shard_server_count = {}\n # how many servers in each shard\n for shard in self.shards:\n shard_server_count[shard] = 0\n for s in self.shard_servers:\n shard_server_count[s['shard']] += 1\n # are all the hosts of any shard down?\n for shard in self.shards:\n if replset_hosts_up_down[shard] > 0:\n if replset_hosts_up_down[shard] == shard_server_count[shard]:\n replication_summary.append({\"replicaset\": shard,\n \"hostname\": \"UNK\",\n \"state\": \"UNK\",\n \"uptime\": \"UNK\",\n \"configVersion\": \"UNK\",\n \"optimeDate\": \"UNK\",\n \"lag\": \"UNK\"})\n deduped_replication_summary = []\n for d in replication_summary:\n if d not in deduped_replication_summary:\n deduped_replication_summary.append(d)\n return deduped_replication_summary",
"def monitor(self):\n logging.debug(\"monitor entered\")\n # monitor machines...\n # first, get a list of machine IDs\n res = progress_table(self.machines)\n return res",
"def get_health_dashboard(self):\n result = {}\n fabric_switches_dns, fabric_switches_rns = self.get_fabric_switches()\n for fabric_switch in fabric_switches_rns:\n result[fabric_switch] = {}\n # Switch health\n Health_Inst_mo = self.moDir.lookupByDn('topology/pod-1/' + fabric_switch + '/sys/health')\n result[fabric_switch]['Health'] = Health_Inst_mo.cur\n\n # Switch Policy CAM table\n cam_usage_mo = self.moDir.lookupByDn('topology/pod-1/' + str(fabric_switch) +\n '/sys/eqptcapacity/CDeqptcapacityPolUsage5min')\n result[fabric_switch]['Policy CAM table'] = cam_usage_mo.polUsageCum + ' of ' + cam_usage_mo.polUsageCapCum\n\n # Switch MAC table\n multicast_usage_mo = self.moDir.lookupByDn('topology/pod-1/' + str(fabric_switch) +\n '/sys/eqptcapacity/CDeqptcapacityMcastUsage5min')\n result[fabric_switch]['Multicast'] = multicast_usage_mo.localEpCum + ' of ' + multicast_usage_mo.localEpCapCum\n\n # VLAN\n vlan_usage_mo = self.moDir.lookupByDn('topology/pod-1/' + str(fabric_switch) +\n '/sys/eqptcapacity/CDeqptcapacityVlanUsage5min')\n result[fabric_switch]['VLAN'] = vlan_usage_mo.totalCum + ' of ' + vlan_usage_mo.totalCapCum\n return result",
"def _monitor(self):\n # while CONF.weight == 'bw':\n while True:\n self._send_echo_request()\n self.create_link_delay()\n # self.get_loss()\n self.stats['flow'] = {}\n self.stats['port'] = {}\n for dp in self.datapaths.values():\n self.port_features.setdefault(dp.id, {})\n self.link_loss.setdefault(dp.id,{})\n self._request_stats(dp)\n # refresh data.\n self.capabilities = None\n self.best_paths = None\n hub.sleep(setting.MONITOR_PERIOD)\n self.show_stat()",
"def mmo_database_stats(self, mmo_connection, database):\n command = { \"dbstats\" : 1 }\n return self.mmo_execute_on_mongos(mmo_connection, command, database)",
"def summary():\n\n summary_result = session.query(Summary.Count, Summary.Total).all()\n session.close()\n\n # Return a List of Column Names (Sample Names)\n return jsonify(summary_result)",
"def query_summary(self):\n return self.details[KEY_QUERY_SUMMARY]",
"def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results",
"def dbstats_api():\n if not config.DEBUG:\n limit_to_localhost()\n\n return jsonify(status='ok', stats=sqlalchemy_pool_status()) # cant be async, used by the reboot script",
"def mysql_status(self):\n stamp = int(time.time())\n\n # get data\n conn = self.object.connect()\n result = {}\n try:\n with conn.cursor() as cursor:\n for key in REQUIRED_STATUS_FIELDS:\n cursor.execute('SHOW GLOBAL STATUS LIKE \"%s\";' % key)\n row = cursor.fetchone()\n result[row[0]] = row[1]\n except Exception as e:\n exception_name = e.__class__.__name__\n context.log.debug('failed to collect MySQLd metrics due to %s' % exception_name)\n context.log.debug('additional info:', exc_info=True)\n finally:\n conn.close()\n\n # counters\n counted_vars = {}\n for metric, variable_name in METRICS['counters'].items():\n if variable_name in result:\n counted_vars[metric] = int(result[variable_name])\n\n # compound counter\n counted_vars['mysql.global.writes'] = \\\n counted_vars['mysql.global.insert'] + \\\n counted_vars['mysql.global.update'] + \\\n counted_vars['mysql.global.delete']\n\n self.aggregate_counters(counted_vars, stamp=stamp)\n\n # gauges\n tracked_gauges = {}\n for metric, variable_name in METRICS['gauges'].items():\n if variable_name in result:\n tracked_gauges[metric] = {\n self.object.definition_hash: int(result[variable_name])\n }\n\n # compound gauges\n pool_util = 0\n if ('mysql.global.innodb_buffer_pool_pages_total' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] > 0):\n pool_util = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] -\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_free'][self.object.definition_hash]) /\n tracked_gauges['mysql.global.innodb_buffer_pool_pages_total'][self.object.definition_hash] * 100\n )\n tracked_gauges['mysql.global.innodb_buffer_pool_util'] = {\n self.object.definition_hash: pool_util\n }\n\n hit_ratio = 0\n if ('mysql.global.innodb_buffer_pool_read_requests' in tracked_gauges and\n tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] > 0):\n hit_ratio = (\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] /\n (tracked_gauges['mysql.global.innodb_buffer_pool_read_requests'][self.object.definition_hash] +\n tracked_gauges['mysql.global.innodb_buffer_pool_reads'][self.object.definition_hash])) * 100\n )\n\n tracked_gauges['mysql.global.innodb_buffer_pool.hit_ratio'] = {\n self.object.definition_hash: hit_ratio\n }\n\n self.aggregate_gauges(tracked_gauges, stamp=stamp)\n\n # finalize\n self.increment_counters()\n self.finalize_gauges()",
"def get_all_switch(self, conf):\n\t\tpass",
"def get_sensor_summary_info(self):\n import statistics\n info_dict = dict()\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Find the scene count.\")\n vld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == False).count()\n invld_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Invalid == True).count()\n dwn_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True).count()\n ard_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.ARDProduct == True).count()\n dcload_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.DCLoaded == True).count()\n arch_scn_count = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Archived == True).count()\n info_dict['n_scenes'] = dict()\n info_dict['n_scenes']['n_valid_scenes'] = vld_scn_count\n info_dict['n_scenes']['n_invalid_scenes'] = invld_scn_count\n info_dict['n_scenes']['n_downloaded_scenes'] = dwn_scn_count\n info_dict['n_scenes']['n_ard_processed_scenes'] = ard_scn_count\n info_dict['n_scenes']['n_dc_loaded_scenes'] = dcload_scn_count\n info_dict['n_scenes']['n_archived_scenes'] = arch_scn_count\n logger.debug(\"Calculated the scene count.\")\n\n logger.debug(\"Find the scene file sizes.\")\n file_sizes = ses.query(EDDSentinel1ASF.Total_Size).filter(EDDSentinel1ASF.Invalid == False).all()\n if file_sizes is not None:\n if len(file_sizes) > 0:\n file_sizes_nums = list()\n for file_size in file_sizes:\n if file_size[0] is not None:\n file_sizes_nums.append(file_size[0])\n if len(file_sizes_nums) > 0:\n total_file_size = sum(file_sizes_nums)\n info_dict['file_size'] = dict()\n info_dict['file_size']['file_size_total'] = total_file_size\n if total_file_size > 0:\n info_dict['file_size']['file_size_mean'] = statistics.mean(file_sizes_nums)\n info_dict['file_size']['file_size_min'] = min(file_sizes_nums)\n info_dict['file_size']['file_size_max'] = max(file_sizes_nums)\n if len(file_sizes_nums) > 1:\n info_dict['file_size']['file_size_stdev'] = statistics.stdev(file_sizes_nums)\n info_dict['file_size']['file_size_median'] = statistics.median(file_sizes_nums)\n if (len(file_sizes_nums) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['file_size']['file_size_quartiles'] = statistics.quantiles(file_sizes_nums)\n logger.debug(\"Calculated the scene file sizes.\")\n\n logger.debug(\"Find download and processing time stats.\")\n download_times = []\n ard_process_times = []\n scns = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.Downloaded == True)\n for scn in scns:\n download_times.append((scn.Download_End_Date - scn.Download_Start_Date).total_seconds())\n if scn.ARDProduct:\n ard_process_times.append((scn.ARDProduct_End_Date - scn.ARDProduct_Start_Date).total_seconds())\n\n if len(download_times) > 0:\n info_dict['download_time'] = dict()\n info_dict['download_time']['download_time_mean_secs'] = statistics.mean(download_times)\n info_dict['download_time']['download_time_min_secs'] = min(download_times)\n info_dict['download_time']['download_time_max_secs'] = max(download_times)\n if len(download_times) > 1:\n info_dict['download_time']['download_time_stdev_secs'] = statistics.stdev(download_times)\n info_dict['download_time']['download_time_median_secs'] = statistics.median(download_times)\n if (len(download_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n 
info_dict['download_time']['download_time_quartiles_secs'] = statistics.quantiles(download_times)\n\n if len(ard_process_times) > 0:\n info_dict['ard_process_time'] = dict()\n info_dict['ard_process_time']['ard_process_time_mean_secs'] = statistics.mean(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_min_secs'] = min(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_max_secs'] = max(ard_process_times)\n if len(ard_process_times) > 1:\n info_dict['ard_process_time']['ard_process_time_stdev_secs'] = statistics.stdev(ard_process_times)\n info_dict['ard_process_time']['ard_process_time_median_secs'] = statistics.median(ard_process_times)\n if (len(ard_process_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['ard_process_time']['ard_process_time_quartiles_secs'] = statistics.quantiles(\n ard_process_times)\n logger.debug(\"Calculated the download and processing time stats.\")\n\n if self.calc_scn_usr_analysis():\n plgin_lst = self.get_usr_analysis_keys()\n info_dict['usr_plugins'] = dict()\n for plgin_key in plgin_lst:\n info_dict['usr_plugins'][plgin_key] = dict()\n scns = ses.query(EDDSentinel1ASFPlugins).filter(EDDSentinel1ASFPlugins.PlugInName == plgin_key).all()\n n_err_scns = 0\n n_complete_scns = 0\n n_success_scns = 0\n plugin_times = []\n for scn in scns:\n if scn.Completed:\n plugin_times.append((scn.End_Date - scn.Start_Date).total_seconds())\n n_complete_scns += 1\n if scn.Success:\n n_success_scns += 1\n if scn.Error:\n n_err_scns += 1\n info_dict['usr_plugins'][plgin_key]['n_success'] = n_success_scns\n info_dict['usr_plugins'][plgin_key]['n_completed'] = n_complete_scns\n info_dict['usr_plugins'][plgin_key]['n_error'] = n_err_scns\n if len(plugin_times) > 0:\n info_dict['usr_plugins'][plgin_key]['processing'] = dict()\n info_dict['usr_plugins'][plgin_key]['processing']['time_mean_secs'] = statistics.mean(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_min_secs'] = min(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_max_secs'] = max(plugin_times)\n if len(plugin_times) > 1:\n info_dict['usr_plugins'][plgin_key]['processing']['time_stdev_secs'] = statistics.stdev(plugin_times)\n info_dict['usr_plugins'][plgin_key]['processing']['time_median_secs'] = statistics.median(plugin_times)\n if (len(plugin_times) > 1) and (eodatadown.py_sys_version_flt >= 3.8):\n info_dict['usr_plugins'][plgin_key]['processing']['time_quartiles_secs'] = statistics.quantiles(plugin_times)\n ses.close()\n return info_dict",
"def switches(self) -> List[dict]:\n return self.items_by_domain(\"switch\")"
] | [
"0.59377617",
"0.58443993",
"0.58337426",
"0.569104",
"0.5689093",
"0.56877404",
"0.5678661",
"0.56559193",
"0.56524736",
"0.5558809",
"0.5531664",
"0.55013895",
"0.5465066",
"0.5456181",
"0.54546607",
"0.54425275",
"0.5420958",
"0.5404441",
"0.53917074",
"0.53812945",
"0.53646237",
"0.5351872",
"0.5322026",
"0.5319231",
"0.5301285",
"0.5301087",
"0.52984625",
"0.52966523",
"0.52945364",
"0.5278292"
] | 0.71028024 | 0 |
Call to DB to delete a device by serial number | def deleteDevice(serial):
swDB = switchdb.DB()
swDB.deleteBySerial(serial)
swDB.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def db_delete_device_record(db_path, rec_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare and execute SQL statement (make sure the 'record_name'\n # parameter follows with comma to make it a tuple)\n sql = \"DELETE FROM Devices WHERE name=?\"\n cursor.execute(sql, (rec_name,))\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))",
"def db_delete_device_record(db_path, record_name):\n path_exist = os.path.exists(db_path)\n if path_exist is False:\n print '!!!Error, database does not exist.'\n return\n\n try:\n with db.connect(db_path) as conn:\n cursor = conn.cursor()\n # Prepare SQL query statement (make sure the 'record_name'\n # parameter follows with comma to make it a tuple)\n sql = \"DELETE FROM Devices WHERE name=?\"\n # Execute SQL query statement\n cursor.execute(sql, (record_name,))\n except (db.OperationalError) as e:\n print(\"!!!Error, %s\" % repr(e))",
"def test_delete_device_by_id(self):\n pass",
"def test_delete_device_by_id1(self):\n pass",
"def test_delete_device(self):\n pass",
"def test_delete_device(self):\n pass",
"def delete_device(device_id):\n netAdminToolDB = app.config['DATABASE']\n device = netAdminToolDB.get_device(device_id)\n\n if device == None:\n return jsonify({'error': 'Device_id not found'}), 404\n\n netAdminToolDB.delete_device(device_id)\n return jsonify({'result': True})",
"def delete(no):\n\n conn = sqlite3.connect(\"person_database.bd\")\n c = conn.cursor()\n\n # delete a record\n c.execute(f\"DELETE from person_info WHERE oid= \" + str(no))\n\n conn.commit()\n conn.close()",
"def delete_device(device):\n if device in devices.list():\n devices.delete(device)\n return '', 204\n else:\n raise BadRequest('The given device name does not exist')",
"def delete_device(self):\n # PROTECTED REGION ID(AsyncTabata.delete_device) ENABLED START #\n # PROTECTED REGION END # // AsyncTabata.delete_device",
"def delete_device(cls, device_uuid):\n cls.dbdriver.delete_device(device_uuid)",
"async def delete(self):\r\n\r\n data = await self.request.json()\r\n system_uuid = data.get(\"sys_id\")\r\n sys_del = System.get(System.uuid == system_uuid)\r\n if not sys_del:\r\n response_obj = {\"status\": \"failed\", \"reason\": \"System not Present\"}\r\n return web.Response(text=str(response_obj), status=500)\r\n try:\r\n sys_del.delete_instance()\r\n logger.info(\"System deleted successfully!!!\")\r\n return web.Response(text=\"Successful\", status=200)\r\n except Exception as ex:\r\n response_obj = {\"status\": \"failed\", \"reason\": str(ex)}\r\n error_message = str(ex)\r\n logger.error(error_message)\r\n return web.Response(text=str(response_obj), status=500)",
"def test_gwservice_deletedevice(self, setup_controller):\n configuration = {'uuid': '1'}\n payload = {'serialNumber': 'DEADBEEF0011',\n 'UUID': '123456',\n 'configuration': configuration,\n 'deviceType': 'AP',\n 'location': '',\n 'macAddress': 'DE:AD:BE:EF:00:11',\n 'manufacturer': 'Testing',\n 'owner': ''}\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"POST\", None, json.dumps(payload))\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw create devices\", body=body)\n if resp.status_code != 200:\n assert False\n devices = json.loads(resp.text)\n print (devices)\n\n\n resp = setup_controller.request(\"gw\", \"device/DEADBEEF0011\", \"DELETE\", None, None)\n body = resp.url + \",\" + str(resp.status_code) + ',' + resp.text\n allure.attach(name=\"gw get device\", body=body)\n if resp.status_code != 200:\n assert False",
"def delete():",
"def delete_id(cls, uuid):\n session = connection.get_session()\n keyspace = radon.cfg.dse_keyspace\n session.set_keyspace(keyspace)\n query = SimpleStatement(\"\"\"DELETE FROM data_object WHERE uuid=%s\"\"\")\n session.execute(query, (uuid,))",
"def del_record(self, args):\n\n mac = MacAddress(args.mac)\n desc = self.dhcp_client_state[mac.as_redis_key()]\n print(\"Deleted mac %s with DHCP rec %s\" % (str(mac), desc))\n self.dhcp_client_state[mac.as_redis_key()] = None",
"def delete(device):\n delete_subject(device)\n return redirect_back('index')",
"def eliminarServicio(codigo):\n try:\n conexion.cur.execute('delete from servicios where codigoServicio = ?', (codigo,))\n conexion.conex.commit()\n\n except sqlite3.OperationalError as e:\n print(e)\n conexion.conex.rollback()",
"def delete_device(cls, device_id, token):\n\n tenant = init_tenant_context(token, db)\n orm_device = assert_device_exists(device_id)\n data = serialize_full_device(orm_device, tenant)\n\n kafka_handler_instance = cls.kafka.getInstance(cls.kafka.kafkaNotifier)\n kafka_handler_instance.remove(data, meta={\"service\": tenant})\n\n db.session.delete(orm_device)\n db.session.commit()\n\n results = {'result': 'ok', 'removed_device': data}\n return results",
"def delete(self, request, pk):\n sensor_obj = get_object_or_404(SensorData, id=pk)\n sensor_obj.delete()\n return Response({'message': 'Deleted Successfully', \"status\":200})",
"def delete(self):\n key = f'https://plex.tv/devices/{self.id}.xml'\n self._server.query(key, self._server._session.delete)",
"def test_do_delete(test_dao):\r\n DUT = dtmFunction(test_dao, test=True)\r\n DUT.do_select_all(revision_id=1)\r\n DUT.do_insert(revision_id=1, parent_id=1)\r\n\r\n _error_code, _msg = DUT.do_delete(DUT.last_id)\r\n\r\n assert _error_code == 0\r\n assert _msg == (\"RAMSTK SUCCESS: Deleting an item from the RAMSTK Program \"\r\n \"database.\")",
"def removeDevice(self, node, fullDeviceName):",
"def delete(self, request, registration_id):\n Device.objects.filter(registration_id=registration_id).delete()\n return Response(status=rest_framework.status.HTTP_200_OK)",
"def deleterecord(phones,username,phonenum):\r\n if username in phones:\r\n del phones[username]\r\n else:\r\n raise ValueError(\"This username are not exist\")",
"def delete(self, application_id):",
"def delete_record(uuid):\n\n collection[uuid].delete()\n return redirect('/')",
"def deleteDocumentFromPhone(file):\n\tprint \"Removing %s from target device...\" % file\n\tcmd =r\"adb shell rm -r %s\" % file\n\tos.system(cmd)\n\tprint \"Finished removing file from phone.\"",
"def do_nic_delete(cc, args):\n cc.nic.delete(args.uuid)\n print(_(\"%s deleted\" % args.uuid))",
"def delete_record(records):\n delete_record()"
] | [
"0.71832097",
"0.70881724",
"0.6902768",
"0.6834857",
"0.67007685",
"0.67007685",
"0.66466284",
"0.6552417",
"0.65387774",
"0.6451662",
"0.64388514",
"0.6282438",
"0.6280378",
"0.62574196",
"0.6221847",
"0.621336",
"0.61614496",
"0.61176556",
"0.6114799",
"0.6113734",
"0.6084292",
"0.6069952",
"0.60305893",
"0.60004985",
"0.5984841",
"0.595846",
"0.5952401",
"0.5933473",
"0.59269667",
"0.59131086"
] | 0.82801366 | 0 |
Does the user have the permission to publish a data. | def has_perm_publish_data(user):
has_perm_publish(user, rights.PUBLISH_DATA) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def canPublish(id, userId):\n db = core.connect()\n theShift = db[id]\n return user.isAdmin(userId) or (userId == theShift['createdBy'])",
"def has_add_permission(self, request):\n if not settings.PUBLISHER_CODE:\n return False\n return super().has_add_permission(request)",
"def has_permission(self, request, view):\n return True",
"def has_permission(self, request, view):\n return False",
"def has_write_permission(request):\n # TODO: Stop users from writing other users' pageranks. Why should that be so hard?\n return request.user.is_authenticated",
"def current_user_has_permission(query: 'Query') -> bool:\n return acl.current_user_has_permission(data_set_acl_resources[query.data_set.id])",
"def has_permission(self):\n\n org_id = self.kwargs.get(\"pk1\", False)\n # i'm checking the org_id is truthy here since some org user views don't\n # have `/org/pk1/` for example the initial publish landing page.\n if org_id and not self.request.user.organisations.filter(id=org_id).exists():\n return False\n\n return super().has_permission() and self.request.user.is_org_user",
"def isPublic(id):\n db = core.connect()\n theShift = db[id]\n publishData = theShift[\"publishData\"]\n return (not publishData[\"draft\"]) and (not publishData[\"private\"])",
"def has_permission(self, request, view):\n return request.user.group == 'admin'",
"def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public",
"def has_permission(self, request, view):\n usuario = request.user\n return str(usuario.grupo) == \"Vendedor\"",
"def has_permission(self, request, view):\n if request.user.is_authenticated():\n return True\n return False",
"def has_write_permission(request):\n user = request.user\n return user.is_superuser",
"def check_permission(self, action, username, resource, perm):\n if not resource:\n return\n if resource.realm == 'blog' and resource.id:\n the_post = BlogPost(self.env, resource.id, resource.version)\n for category in the_post.category_list:\n if category in self.draft and the_post.author != username:\n # Block all access regardless\n return False",
"def has_object_permission(self, request, view, obj):\n # if the user is trying to retrieve to create a item.. it will return true\n if request.method in permissions.SAFE_METHODS:\n return True\n # check if the user is trying to don't do a SAFE_METHODS, put,patch,delete and if the feed owner is doing it or another different user.. and it will return true if match or false if not\n return obj.user_profile.id == request.user.id",
"def has_permission(self, request, view):\n usuario = request.user\n return str(usuario) == \"AnonymousUser\"",
"def has_permission(self):\n return super().has_permission()",
"def has_permission(self, request):\n\t\treturn request.user.is_active",
"def has_permission(self, request, view):\n if request.method == \"POST\":\n return not (request.user and is_authenticated(request.user))\n\n return request.user and is_authenticated(request.user)",
"def has_permission(self, request, view):\n\n # Fallback to has_object_permission unless it's a POST\n if request.method != 'POST':\n return True\n\n # Need this information to make a decision\n if 'privileged_access' not in request.data and \\\n 'document' in request.data:\n return False\n\n document = request.data['document']\n privileged_access = request.data['privileged_access']\n\n found = Document.objects.filter(id=document).first()\n\n if not found:\n return False\n\n if found.create_user.organization != request.user.organization and \\\n not request.user.is_government_user:\n return False\n\n return DocumentCommentPermissions.user_can_comment(\n request.user,\n found,\n privileged_access\n )",
"def has_permission(self, request, view):\n return request.user.group != 'patient'",
"def should_publish(self, create, extracted, **kwargs):\n if create and extracted:\n return True\n return False",
"def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")",
"def user_has_permission(self, id: int, user: User) -> bool:\n return self.get_queryset().filter(pk=id).filter_for_user(user).exists()",
"def has_permission(self, request, view):\n if request.method == \"POST\":\n return self.model_admin_config.has_add_permission(self, request)\n return True",
"def has_object_permission(self, request, view, obj):\n if Contributor.objects.filter(project=obj, user=request.user):\n return True\n else:\n return False",
"def can_write_data_workspace(func, data, workspace, user):\n return can_write_in_workspace(\n func, data, workspace, user, rights.PUBLISH_DATA\n )",
"def has_object_permission(self, request, view, obj):\n if Contributor.objects.filter(project=obj, user=request.user):\n permission = Contributor.objects.filter(project=obj, user=request.user)[0]\n return permission.permission == \"author\"\n else:\n return False",
"def is_visible(cls, request):\n if cls.permission_required:\n return request.user.has_perm(cls.permission_uri)\n else:\n return True",
"def has_object_permission(self, request, view, obj):\n return not obj.permission == \"author\""
] | [
"0.7401715",
"0.68930787",
"0.6854572",
"0.68086797",
"0.6761256",
"0.67495507",
"0.6707519",
"0.6702206",
"0.6684865",
"0.66546744",
"0.6624943",
"0.66060966",
"0.6596229",
"0.65667605",
"0.6543968",
"0.6537075",
"0.6512933",
"0.65074724",
"0.6503338",
"0.64965105",
"0.6494102",
"0.64899516",
"0.64501077",
"0.64437324",
"0.6442235",
"0.64420617",
"0.6437126",
"0.6426635",
"0.64204025",
"0.64203376"
] | 0.889468 | 0 |
Can read list of data. | def can_read_list_data_id(func, list_data_id, user):
if user.is_superuser:
return func(list_data_id, user)
# check anonymous access
_check_anonymous_access(user)
list_data = func(list_data_id, user)
check_can_read_list(list_data, user)
return list_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_list(self, register, length):\n raise NotImplementedError",
"def _read_data(self):",
"def canread(self):\n return False",
"def storage_can_read(self):\n return True",
"def read_data(self) -> List[BaseRecord]:\n pass",
"def read(self) -> List[str]:\n pass",
"def test_read_allowlist(self):\n\n curdir = os.path.dirname(os.path.abspath(__file__))\n allowlist_file = os.path.join(curdir, \"data\", \"ima-allowlist-short.txt\")\n allowlist_sig = os.path.join(curdir, \"data\", \"ima-allowlist-short.sig\")\n allowlist_bad_sig = os.path.join(curdir, \"data\", \"ima-allowlist-bad.sig\")\n allowlist_gpg_key = os.path.join(curdir, \"data\", \"gpg-sig.pub\")\n allowlist_checksum = \"8b7c2c6a1d7af2568cc663905491bda829c04c397cdba38cc4fc4d8d8a3e69d4\"\n allowlist_bad_checksum = \"4c143670836f96535d9e617359b4d87c59e89e633e2773b4d7feae97f561b3dc\"\n\n # simple read, no fancy verification\n al_data = ima.read_allowlist(allowlist_file)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertIsNotNone(al_data[\"meta\"], \"AllowList metadata is present\")\n self.assertEqual(al_data[\"meta\"][\"version\"], 1, \"AllowList metadata version is correct\")\n self.assertEqual(al_data[\"meta\"][\"generator\"], \"keylime-legacy-format-upgrade\", \"AllowList metadata generator is correct\")\n self.assertNotIn(\"checksum\", al_data[\"meta\"], \"AllowList metadata no checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")\n\n # validate checkum\n al_data = ima.read_allowlist(allowlist_file, allowlist_checksum)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertEqual(al_data[\"meta\"][\"checksum\"], allowlist_checksum, \"AllowList metadata correct checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")\n\n # test with a bad checksum\n with self.assertRaises(Exception) as bad_checksum_context:\n ima.read_allowlist(allowlist_file, allowlist_bad_checksum)\n self.assertIn('Checksum of allowlist does not match', str(bad_checksum_context.exception))\n\n # validate GPG signature\n al_data = ima.read_allowlist(allowlist_file, None, allowlist_sig, allowlist_gpg_key)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertNotIn(\"checksum\", al_data[\"meta\"], \"AllowList metadata no checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")\n\n # test with a bad GPG sig\n with self.assertRaises(Exception) as bad_sig_context:\n ima.read_allowlist(allowlist_file, None, allowlist_bad_sig, allowlist_gpg_key)\n self.assertIn('GPG signature verification failed', str(bad_sig_context.exception))\n\n # validate everything together\n al_data = ima.read_allowlist(allowlist_file, allowlist_checksum, allowlist_sig, allowlist_gpg_key)\n self.assertIsNotNone(al_data, \"AllowList data is present\")\n self.assertEqual(al_data[\"meta\"][\"checksum\"], allowlist_checksum, \"AllowList metadata correct checksum\")\n self.assertIsNotNone(al_data[\"hashes\"], \"AllowList hashes are present\")\n 
self.assertEqual(len(al_data[\"hashes\"]), 21, \"AllowList hashes are correct length\")\n self.assertEqual(al_data[\"hashes\"][\"/boot/grub2/i386-pc/testload.mod\"][0], \"68e1d012e3f193dcde955e6ffbbc80e22b0f8778\", \"AllowList sample hash is correct\")",
"def read_all(self):\r\n pass",
"def read_data(self):\n raise NotImplementedError",
"def can_read_list(self, auth_params: List[str]) -> Dict[str, bool]:\n perms = self._get_workspace_permissions(auth_params)\n ret_perms = dict()\n for p in auth_params:\n ret_perms[p] = self._has_read_perm(perms.get(p, WorkspacePermission.NONE))\n return ret_perms",
"def is_list(self) -> bool:\n return False",
"def get_can_read(self):\n\t\tif not self.can_read:\n\t\t\tself.build_permissions()\n\t\treturn self.can_read",
"def _read_all(self):\n return self._connector.read_all()",
"def getListData(self):\n # by default no list is present\n return None",
"def canRead(*args, **kwargs):\n return MultiFileTileSource.canRead(*args, **kwargs)",
"def is_list(self):\n answer = self._call('is_list')\n return answer.yes",
"def is_list(self) -> bool:\n if self.is_list_of_list: # pylint: disable=R1705\n return False\n else:\n return bool(AnnotationWrapper.list_field_re.match(self.data))",
"def list_reads(cls) -> list:\n return [cls.FWREAD, cls.RVREAD];",
"def read(self):\n return list(self.pile_list)",
"def read(self):",
"def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False",
"def can_read_data_query(\n func,\n query,\n user,\n workspace_filter=None,\n user_filter=None,\n order_by_field=DATA_SORTING_FIELDS,\n):\n # check anonymous access\n _check_anonymous_access(user)\n # update the query\n query = _update_can_read_query(query, user, workspace_filter, user_filter)\n # get list of data\n data_list = func(query, user, order_by_field=order_by_field)\n # if superuser, return list of data\n if user.is_superuser:\n return data_list\n # TODO: check if necessary because it is time consuming (checking that user has access to list of returned data)\n # check that user can access the list of data\n if VERIFY_DATA_ACCESS:\n check_can_read_list(data_list, user)\n return data_list",
"def read_all(self, *args, **kwargs):\n pass",
"def test_list(self, array: dict) -> None:\r\n item = read_items(array)\r\n if read_type(item) == 'object':\r\n logger.debug('list -> dict')\r\n self.test_dict(obj=item)\r\n elif read_type(item) == 'array':\r\n logger.debug('list -> list')\r\n self.test_list(array=item)",
"def readOneData(self):\n\t\tpass",
"def _list(self):\n raise NotImplementedError",
"def load_data_list(self, list=[]):\n self.panel.load_data_list(list=list)",
"def read(self):\n raise NotImplementedError",
"def test_get_list(self):\n pass",
"def read_allowed(self, ui, req):\n\n user = req.env.get('REMOTE_USER')\n\n deny_read = ui.configlist('web', 'deny_read', untrusted=True)\n if deny_read and (not user or ismember(ui, user, deny_read)):\n return False\n\n allow_read = ui.configlist('web', 'allow_read', untrusted=True)\n # by default, allow reading if no allow_read option has been set\n if (not allow_read) or ismember(ui, user, allow_read):\n return True\n\n return False"
] | [
"0.662519",
"0.6616855",
"0.6610192",
"0.6583547",
"0.6576495",
"0.64457273",
"0.6304789",
"0.62909526",
"0.6290033",
"0.6213072",
"0.616153",
"0.61129534",
"0.60768044",
"0.59920454",
"0.5950494",
"0.5942837",
"0.59417313",
"0.593891",
"0.59161913",
"0.59137505",
"0.5912139",
"0.59063077",
"0.5906032",
"0.5890838",
"0.58870167",
"0.58696294",
"0.5864591",
"0.58251315",
"0.58016944",
"0.5799253"
] | 0.6868034 | 0 |
Can read a data, given a query. | def can_read_data_query(
func,
query,
user,
workspace_filter=None,
user_filter=None,
order_by_field=DATA_SORTING_FIELDS,
):
# check anonymous access
_check_anonymous_access(user)
# update the query
query = _update_can_read_query(query, user, workspace_filter, user_filter)
# get list of data
data_list = func(query, user, order_by_field=order_by_field)
# if superuser, return list of data
if user.is_superuser:
return data_list
# TODO: check if necessary because it is time consuming (checking that user has access to list of returned data)
# check that user can access the list of data
if VERIFY_DATA_ACCESS:
check_can_read_list(data_list, user)
return data_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, query):\r\n t1 = time.time()\r\n if self.database in ['redshift', 'postgres']:\r\n ret = postgres_helper.fetchall(config=self.conf, sql=query)\r\n else:\r\n raise Exception(\"database not supported yet: '{}'\"\r\n .format(self.database))\r\n t2 = time.time()\r\n t = t2 - t1\r\n print('Finished in {:.2f} seconds.'.format(t))\r\n return ret",
"def runQueryRead(d, query):\n with d.session() as s:\n results = s.read_transaction(runQuery, query, True)\n return results",
"async def exec_read(self, query, *args, only_one=False):",
"def can_read_aggregate_query(func, query, user):\n if user.is_superuser:\n return func(query, user)\n\n # check anonymous access\n _check_anonymous_access(user)\n\n # update the query\n query = _update_can_read_aggregate_query(query, user)\n # get list of data\n data = func(query, user)\n\n return data",
"def abstract_read(self, search_param):\n \n #print(\"Search\", search_param)\n section = self.table_section_from_parameter(search_param)\n tag = self.id_from_parameter(search_param)\n\n for table_row in section:\n # Assuming that if first word in a block is valid, the other is too\n if table_row is None:\n continue\n\n if table_row.tag == tag and table_row.valid:\n table_row.access()\n return True\n \n return False",
"def can_read(self, user):\n raise Return(True)",
"def _can_handle_query(cls, *query):\n chkattr = [\"Time\", \"Instrument\", \"SatelliteNumber\"]\n chklist = [x.__class__.__name__ in chkattr for x in query]\n for x in query:\n if x.__class__.__name__ == \"Instrument\" and x.value.lower() in (\n \"xrs\",\n \"goes\",\n ):\n return all(chklist)\n return False",
"def _can_handle_query(cls, *query):\n # Import here to prevent circular imports\n from sunpy.net import attrs as a\n\n required = {a.Time, a.Instrument}\n optional = {a.Wavelength, a.Level, a.goes.SatelliteNumber}\n all_attrs = {type(x) for x in query}\n\n ops = all_attrs - required\n # check to ensure that all optional requirements are in approved list\n if ops and not all(elem in optional for elem in ops):\n return False\n\n # if we get this far we have either Instrument and Time\n # or Instrument, Time and Wavelength\n check_var_count = 0\n for x in query:\n if isinstance(x, a.Instrument) and x.value.lower() == 'suvi':\n check_var_count += 1\n\n if check_var_count == 1:\n return True\n else:\n return False",
"def _read_query(self):\n try:\n # Open Google Drive and read the sql file\n self.query = GDrive().read_drive_file(self.input_source_id)\n except Exception as e:\n raise e",
"def read_data(datastore_url, read_request, verbose):\n\n logging.info('Checking if data is available')\n\n if verbose:\n pprint.pprint(read_request)\n\n read_url = datastore_url + DATASTORE_PREFIX + 'ReadData'\n\n timeout = time.time() + 60 * 2 # 2 minutes from now\n\n while True:\n print('.', end='', flush=True)\n r = requests.post(read_url, headers=headers(), json=read_request)\n if r.status_code != 200:\n logging.error(f'ERROR: Unexpected response: {r.status_code}')\n pprint.pprint(r.json())\n return False\n\n resp = r.json()\n\n if verbose:\n pprint.pprint(resp)\n\n if len(resp['events']) > 0:\n print('')\n logging.info('SUCCESS: Read encrypted data')\n return decrypt_data(resp['events'][0], verbose)\n\n if time.time() > timeout:\n print('')\n logging.warning(\n 'ERROR: Failed to read any data for the policy. Please check device token and try again'\n )\n return False\n\n time.sleep(10)\n\n return True",
"def execute_default_index(self, query, **kwargs):\n data = None\n\n try:\n data = pd.read_sql(query, self.engine, **kwargs)\n except exc.ResourceClosedError:\n return True\n except:\n logging.exception(f'Something went wrong while executing query. Check trace.')\n params = kwargs['params'] if 'params' in kwargs else None\n return False\n\n return data.where((pd.notnull(data)), None)",
"def query(self):\r\n raise NotImplementedError",
"def current_user_has_permission(query: 'Query') -> bool:\n return acl.current_user_has_permission(data_set_acl_resources[query.data_set.id])",
"def storage_can_read(self):\n return True",
"def _can_handle_query(cls, *query):\n required = {a.Time, a.Instrument}\n optional = {a.Wavelength}\n all_attrs = {type(x) for x in query}\n\n ops = all_attrs - required\n # If ops is empty or equal to optional we are ok, otherwise we don't\n # match\n if ops and ops != optional:\n return False\n\n # if we get this far we have either Instrument and Time\n # or Instrument, Time and Wavelength\n for x in query:\n if isinstance(x, a.Instrument) and x.value.lower() == 'norh':\n return True\n\n return False",
"def execute(self, query, attempt=0, **kwargs):\n data = None\n\n params = kwargs['params'] if 'params' in kwargs else None\n logging.debug(f'Query: {query}')\n logging.debug(f'Params: {params}')\n try:\n data = pd.read_sql(query, self.engine, index_col='id', **kwargs)\n except exc.ResourceClosedError:\n logging.warning('Query does not have any value to return.')\n return True\n except exc.IntegrityError as e:\n logging.warning(f'Integrity Error - {e}')\n return None\n except (exc.StatementError, OperationalError) as e:\n logging.warning(f'Creating new connection. Engine/Connection is probably None. [{e}]')\n attempt += 1\n if attempt <= 5:\n self.connect()\n logging.debug(f'Attempt #{attempt}')\n return self.execute(query, attempt=attempt, **kwargs)\n else:\n logging.debug(f'Maximum attempts reached. ({5})')\n return False\n except:\n logging.exception('Something went wrong executing query. Check trace.')\n return False\n\n return data.where((pd.notnull(data)), None)",
"def execute_(self, query, attempt= 0, **kwargs):\n data = None\n\n try:\n data = pd.read_sql(query, self.engine, **kwargs)\n except exc.ResourceClosedError:\n return True\n except exc.IntegrityError as e:\n logging.warning(f'Integrity Error - {e}')\n return None\n except (exc.StatementError, OperationalError) as e:\n logging.warning(f'Creating new connection. Engine/Connection is probably None. [{e}]')\n attempt += 1\n if attempt <= 5:\n self.connect()\n logging.debug(f'Attempt #{attempt}')\n return self.execute_(query, attempt=attempt, **kwargs)\n else:\n logging.debug(f'Maximum attempts reached. ({5})')\n return False\n except:\n logging.exception(f'Something went wrong while connecting. Check trace.')\n params = kwargs['params'] if 'params' in kwargs else None\n return False\n\n return data.replace({pd.np.nan: None})",
"def canread(self):\n return False",
"def canRead(id, userId):\n db = core.connect()\n theShift = db[id]\n if user.isAdmin(userId):\n return True\n if theShift[\"createdBy\"] == userId:\n return True\n if theShift[\"publishData\"][\"draft\"]:\n return False\n theUser = db[userId]\n if not theShift[\"publishData\"][\"private\"]:\n return True\n if theUser[\"privateStream\"] in theShift[\"publishData\"][\"streams\"]:\n return True\n shiftStreams = theShift[\"publishData\"][\"streams\"]\n readableStreams = permission.readableStreams(userId)\n allowed = set(shiftStreams).intersection(readableStreams)\n return len(allowed) > 0",
"def check_access(ident):\n resource = data_service.resource_load(uniq = ident)\n log.debug('Result from the database: %s'%resource)\n if resource is None:\n return False\n return True",
"def canRead(self, user, data):\n # possible matches\n possibilities = [data['to'], data['from']['name']]\n if data['from']['hasAccount']:\n possibilities.append(data['from']['id'])\n if len(self.match_user(user, possibilities)) > 0:\n return True\n # check puppet\n return 'puppet' in user and user['puppet'] in possibilities",
"def is_read_only(self):\n return self.__aceQLHttpApi.is_read_only()",
"def execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except psycopg2.OperationalError as e:\n print(f\"The error '{e}' occurred\")",
"def isDataSourceReadable(self):\r\n\r\n readable = True\r\n start, stop = self.getReadParameters(\\\r\n numpy.array(0, dtype=numpy.int64), self.chunk_size)\r\n try:\r\n self.data_source.read(start, stop)\r\n except tables.HDF5ExtError:\r\n readable = False\r\n print(translate('Buffer',\r\n \"\"\"\\nError: problems reading records. The dataset seems \"\"\"\r\n \"\"\"to be compressed with the {0} library. Check that it \"\"\"\r\n \"\"\"is installed in your system, please.\"\"\",\r\n 'A dataset readability error').\\\r\n format(self.data_source.filters.complib))\r\n\r\n return readable",
"def test_filteredQuery(self):\n answer, authority, additional = self._queryTest(True)\n self.assertEqual(\n answer,\n [RRHeader(b'foo.example.com', payload=Record_A('5.8.13.21', ttl=0))])\n self.assertEqual(authority, [])\n self.assertEqual(additional, [])",
"def get(self, query, as_dict=True):\r\n self.log.debug('using query \"%s\"', query)\r\n if self.adapter == 'odbc':\r\n res = self._get_odbc(query=query, as_dict=as_dict)\r\n elif self.adapter == 'mssql':\r\n res = self._get_odbc(query=query, as_dict=as_dict)\r\n return res",
"def get_check_read(dbname, nrows=False, complete=True):\n db = get_db(dbname, complete=complete)\n if db is None:\n raise KeyError(\"no such database\")\n if not has_read_access(db):\n raise ValueError(\"may not read the database\")\n set_nrows(db, targets=nrows)\n return db",
"def query(self, query):",
"def get_data(self, query):\n result = input(\"{}: \".format(query))\n return result",
"def select(self, query):\n\n if query.isId():\n # simple\n url = '%s/%s/%i' % (self.uri, query.table(), query._where[0].value)\n else:\n # real query\n url = '%s/%s/filter?%s' % (self.uri, query.table(), query.encode())\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data"
] | [
"0.64174247",
"0.6388573",
"0.6367381",
"0.6172934",
"0.5943755",
"0.59167004",
"0.5904258",
"0.5697214",
"0.5670645",
"0.56312513",
"0.5612141",
"0.555337",
"0.55376184",
"0.5506616",
"0.550447",
"0.54996186",
"0.5492912",
"0.5484353",
"0.5476029",
"0.5465419",
"0.5450928",
"0.5436018",
"0.5434893",
"0.5410332",
"0.5407767",
"0.5407222",
"0.53698504",
"0.536955",
"0.536755",
"0.53574735"
] | 0.71578825 | 0 |
Can read a data, given an aggregate query. | def can_read_aggregate_query(func, query, user):
if user.is_superuser:
return func(query, user)
# check anonymous access
_check_anonymous_access(user)
# update the query
query = _update_can_read_aggregate_query(query, user)
# get list of data
data = func(query, user)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_read_data_query(\n func,\n query,\n user,\n workspace_filter=None,\n user_filter=None,\n order_by_field=DATA_SORTING_FIELDS,\n):\n # check anonymous access\n _check_anonymous_access(user)\n # update the query\n query = _update_can_read_query(query, user, workspace_filter, user_filter)\n # get list of data\n data_list = func(query, user, order_by_field=order_by_field)\n # if superuser, return list of data\n if user.is_superuser:\n return data_list\n # TODO: check if necessary because it is time consuming (checking that user has access to list of returned data)\n # check that user can access the list of data\n if VERIFY_DATA_ACCESS:\n check_can_read_list(data_list, user)\n return data_list",
"def _update_can_read_aggregate_query(query, user):\n\n accessible_workspaces = _get_read_accessible_workspaces_by_user(user)\n # update query with workspace criteria\n query = mongo_raw_query.add_aggregate_access_criteria(\n query, accessible_workspaces, user\n )\n return query",
"def read(self, query):\r\n t1 = time.time()\r\n if self.database in ['redshift', 'postgres']:\r\n ret = postgres_helper.fetchall(config=self.conf, sql=query)\r\n else:\r\n raise Exception(\"database not supported yet: '{}'\"\r\n .format(self.database))\r\n t2 = time.time()\r\n t = t2 - t1\r\n print('Finished in {:.2f} seconds.'.format(t))\r\n return ret",
"def runQueryRead(d, query):\n with d.session() as s:\n results = s.read_transaction(runQuery, query, True)\n return results",
"def test_ipam_aggregates_read(self):\n pass",
"def aggregate_query(self):\n raise NotImplementedError",
"def execute_default_index(self, query, **kwargs):\n data = None\n\n try:\n data = pd.read_sql(query, self.engine, **kwargs)\n except exc.ResourceClosedError:\n return True\n except:\n logging.exception(f'Something went wrong while executing query. Check trace.')\n params = kwargs['params'] if 'params' in kwargs else None\n return False\n\n return data.where((pd.notnull(data)), None)",
"async def exec_read(self, query, *args, only_one=False):",
"def abstract_read(self, search_param):\n \n #print(\"Search\", search_param)\n section = self.table_section_from_parameter(search_param)\n tag = self.id_from_parameter(search_param)\n\n for table_row in section:\n # Assuming that if first word in a block is valid, the other is too\n if table_row is None:\n continue\n\n if table_row.tag == tag and table_row.valid:\n table_row.access()\n return True\n \n return False",
"def can_read(self, user):\n raise Return(True)",
"def test_aggregateby(self):\n result = export.processExport(houseId=1,\n aggregate=\"1D\",\n aggregateby=[\"min\"])\n\n #So this will just show the minimum value\n self.assertEqual(result.shape, (10, 2))\n\n\n\n result = export.processExport(houseId=1,\n aggregate=\"1D\",\n aggregateby=[\"min\",\"mean\",\"max\"])\n\n #print result.head()\n #So this will have 3 readings for each location (6 in total()\n self.assertEqual(result.shape, (10, 6))\n #And the second sample should be 10 minutes in\n #self.assertEqual(result.index[1], datetime.datetime(2013, 01, 01, 1, 00, 00))",
"def canRead(id, userId):\n db = core.connect()\n theShift = db[id]\n if user.isAdmin(userId):\n return True\n if theShift[\"createdBy\"] == userId:\n return True\n if theShift[\"publishData\"][\"draft\"]:\n return False\n theUser = db[userId]\n if not theShift[\"publishData\"][\"private\"]:\n return True\n if theUser[\"privateStream\"] in theShift[\"publishData\"][\"streams\"]:\n return True\n shiftStreams = theShift[\"publishData\"][\"streams\"]\n readableStreams = permission.readableStreams(userId)\n allowed = set(shiftStreams).intersection(readableStreams)\n return len(allowed) > 0",
"def _can_handle_query(cls, *query):\n chkattr = [\"Time\", \"Instrument\", \"SatelliteNumber\"]\n chklist = [x.__class__.__name__ in chkattr for x in query]\n for x in query:\n if x.__class__.__name__ == \"Instrument\" and x.value.lower() in (\n \"xrs\",\n \"goes\",\n ):\n return all(chklist)\n return False",
"def test_checkread(self):\n user1 = {'uid': 1, 'gid': 1}\n self.assertTrue(self.m._checkread(user1, {}))\n mock_image = {\n 'userACL': None,\n 'groupACL': None\n }\n # Test a public image with ACLs set to None\n self.assertTrue(self.m._checkread(user1, mock_image))\n # Now empty list instead of None. Treat it the same way.\n mock_image['userACL'] = []\n mock_image['groupACL'] = []\n self.assertTrue(self.m._checkread(user1, mock_image))\n self.assertTrue(self.m._checkread(user1, {'private': False}))\n # Private false should trump other things\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'userACL': [2]}))\n self.assertTrue(self.m._checkread(user1,\n {'private': False, 'groupACL': [2]}))\n # Now check a protected image that the user should\n # have access to\n mock_image['userACL'] = [1]\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 2, 'gid': 1}, mock_image))\n # Now check by groupACL\n mock_image['groupACL'] = [1]\n self.assertTrue(self.m._checkread({'uid': 3, 'gid': 1}, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 3, 'gid': 2}, mock_image))\n # What about an image with a list\n mock_image = {\n 'userACL': [1, 2, 3],\n 'groupACL': [4, 5, 6]\n }\n self.assertTrue(self.m._checkread(user1, mock_image))\n # And Not\n self.assertFalse(self.m._checkread({'uid': 7, 'gid': 7}, mock_image))",
"def can_read_blob(func, data, user):\n if user.is_superuser:\n return func(data, user)\n\n if data._blob is not None:\n _check_can_read(data._blob, user)\n\n return func(data, user)",
"def _query(self, mapping, from_date=None, to_date=None, max_count=None,\n offset=None, ascendingly=True, describe=False):\n group, key = mapping.data_var.split(self._data_var_separator)\n\n # build params\n params = 'describe={describe}&keys={key}'.format(describe=str(describe).lower(), key=key)\n if self._api['token'] is not None:\n params += '&apitoken={}'.format(self._api['token'])\n if from_date is not None:\n params += '&from-date={}'.format(from_date.isoformat())\n if to_date is not None:\n params += '&to-date={}'.format(to_date.isoformat())\n\n # build url\n url = '{}{}?{}'.format(self._api['host'], self._api['url'], params).format(group=group)\n\n r = requests.get(url)\n if r.status_code == 200:\n data = json.loads(r.content.decode('utf-8'))\n # return query result\n if not describe:\n # sort\n data = sorted(\n data,\n key=lambda k: k.get(self._timestampkey),\n reverse=(not ascendingly))\n # apply constraints\n if offset is not None:\n data = data[offset:]\n if max_count is not None:\n data = data[:max_count]\n # process to query result\n res = QueryResult(mapping.obs_uri)\n for r in data:\n res.add_row(\n dateutil.parser.parse(r.get(self._timestampkey)),\n r.get(self._valuekey))\n # return\n return res\n # return query result description\n else:\n min = data.get('mindate', None)\n if min is not None:\n min = dateutil.parser.parse(min)\n max = data.get('maxdate', None)\n if max is not None:\n max = dateutil.parser.parse(max)\n return QueryResultDescription(mapping.obs_uri, min, max, data.get('count', 0))\n else:\n # empty/erronous response\n self.pyerr(\"Failed calling API: {}\".format(url))\n if not describe:\n return QueryResult(mapping.obs_uri)\n return QueryResultDescription(mapping.obs_uri, None, None, 0)",
"def can_access_all_queries(self) -> bool:\n\n return self.can_access(\"all_query_access\", \"all_query_access\")",
"def isDataSourceReadable(self):\r\n\r\n readable = True\r\n start, stop = self.getReadParameters(\\\r\n numpy.array(0, dtype=numpy.int64), self.chunk_size)\r\n try:\r\n self.data_source.read(start, stop)\r\n except tables.HDF5ExtError:\r\n readable = False\r\n print(translate('Buffer',\r\n \"\"\"\\nError: problems reading records. The dataset seems \"\"\"\r\n \"\"\"to be compressed with the {0} library. Check that it \"\"\"\r\n \"\"\"is installed in your system, please.\"\"\",\r\n 'A dataset readability error').\\\r\n format(self.data_source.filters.complib))\r\n\r\n return readable",
"def getAggregateData(self, pipeline: t.Mapping[t.Text, t.Any],\n filter: t.Mapping[t.Text, t.Any] = {},\n ) -> DatasetData:\n session = self.session_maker()\n\n mongoquery = self._mongo_query(session).query(\n filter=filter,\n aggregate=pipeline,\n ).end()\n\n schema = self.general_schema\n data_dir = self.data_dir\n\n #TODO: Make sure this is tested (above vars are unused, why?)\n\n data = [row._asdict() for row in mongoquery.all()]\n\n # close the ORM session when done\n session.close()\n\n return DatasetData(data=data)",
"def storage_can_read(self):\n return True",
"def execute_read_query(connection, query):\n cursor = connection.cursor()\n result = None\n try:\n cursor.execute(query)\n result = cursor.fetchall()\n return result\n except psycopg2.OperationalError as e:\n print(f\"The error '{e}' occurred\")",
"def get_check_read(dbname, nrows=False, complete=True):\n db = get_db(dbname, complete=complete)\n if db is None:\n raise KeyError(\"no such database\")\n if not has_read_access(db):\n raise ValueError(\"may not read the database\")\n set_nrows(db, targets=nrows)\n return db",
"def query(self):\r\n raise NotImplementedError",
"def query(self, __template_id=\"*\", __query=None, *, read_as=None):\n raise NotImplementedError",
"def is_read_only(self):\n return self.__aceQLHttpApi.is_read_only()",
"def current_user_has_permission(query: 'Query') -> bool:\n return acl.current_user_has_permission(data_set_acl_resources[query.data_set.id])",
"def canread(self):\n return False",
"def can_read_list_data_id(func, list_data_id, user):\n if user.is_superuser:\n return func(list_data_id, user)\n\n # check anonymous access\n _check_anonymous_access(user)\n\n list_data = func(list_data_id, user)\n check_can_read_list(list_data, user)\n\n return list_data",
"def test_aggregate_ds(self, thredds_catalog):\n ncml_ns = \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"\n\n top_level_ds = [el for el in thredds_catalog if el.tag == get_full_tag(\"dataset\")]\n agg_ds = None\n for el in top_level_ds[0]:\n if el.tag == get_full_tag(\"dataset\"):\n for subel in el:\n if subel.tag == get_full_tag(\"netcdf\", ns=ncml_ns):\n agg_ds = el\n break\n\n assert agg_ds is not None, \"Aggregation dataset not found\"\n assert self.has_access_method(agg_ds, \"wms\")\n assert self.has_access_method(agg_ds, \"wcs\")\n assert self.has_access_method(agg_ds, \"OpenDAPServer\")\n\n properties = agg_ds.findall(get_full_tag(\"property\"))\n assert len(properties) == 1\n assert \"name\" in properties[0].attrib\n assert \"value\" in properties[0].attrib\n assert \"jasmin.eofrom.space\" in properties[0].attrib[\"value\"]",
"def query_many(self, *q, read_as=None):\n raise NotImplementedError"
] | [
"0.68077844",
"0.6013364",
"0.5804927",
"0.57963204",
"0.5774573",
"0.5536843",
"0.5486568",
"0.54431725",
"0.534709",
"0.53435445",
"0.52316743",
"0.5172882",
"0.5158935",
"0.5156636",
"0.51558",
"0.5118686",
"0.5110658",
"0.507409",
"0.50682276",
"0.5029962",
"0.50261956",
"0.5025011",
"0.4995894",
"0.49862227",
"0.49776834",
"0.4975058",
"0.49697426",
"0.49612606",
"0.496102",
"0.49595448"
] | 0.7802571 | 0 |
Update query with access control parameters. | def _update_can_read_query(
query, user, workspace_filter=None, user_filter=None
):
accessible_workspaces = _get_read_accessible_workspaces_by_user(user)
# update query with workspace criteria
query = django_raw_query.add_access_criteria(
query, accessible_workspaces, user, workspace_filter, user_filter
)
return query | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, params):",
"def update(self, query: str, *args, **kwargs):\n cursor = self._cursor()\n try:\n self._execute(cursor, query, args, kwargs)\n return cursor.rowcount\n finally:\n cursor.close()",
"def _update(self, values):\n if self.query.is_sliced:\n raise TypeError(\"Cannot update a query once a slice has been taken.\")\n query = self.query.chain(sql.UpdateQuery)\n query.add_update_fields(values)\n # Clear any annotations so that they won't be present in subqueries.\n query.annotations = {}\n self._result_cache = None\n return query.get_compiler(self.db).execute_sql(CURSOR)",
"def _execute_update(self, updateQuery, updateValues):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(updateQuery, updateValues)",
"def updateParameters(self, parameters):",
"def set_query(self, query):\n return self.set_param(\"query\", query)",
"def _set_query_params(query, key, value):\n query.update({str(key): str(value)})",
"def modify_query(**values):\n args = request.args.copy()\n\n for attr, new_value in values.items():\n if new_value is not None:\n args[attr] = new_value\n elif attr in args:\n del args[attr]\n\n if args:\n return request.base_url + \"?\" + url_encode(args)\n else:\n return request.base_url",
"def update_query(self, **updates):\r\n self._url_updates.update(updates)",
"def update_params(self):\n pass",
"def access_info_update(context, storage_id, values):\n session = get_session()\n with session.begin():\n _access_info_get(context, storage_id, session).update(values)\n return _access_info_get(context, storage_id, session)",
"def edit_db(self, query, args=()):\n conn = self.get_db()\n try:\n cur = conn.execute(query, args)\n conn.commit()\n cur.close()\n except Exception as e:\n print(e)\n return False\n return True",
"def updateView(request, query, exquery, wild_card_str):\n query = copy.deepcopy(query)\n exquery = copy.deepcopy(exquery)\n\n if 'modificationtime__castdate__range' in query:\n query['creationdate__castdate__range'] = query['modificationtime__castdate__range']\n del query['modificationtime__castdate__range']\n if 'workinggroup' in query and 'preset' in request.session['requestParams'] and \\\n request.session['requestParams']['preset'] == 'MC' and ',' in query['workinggroup']:\n # excludeWGList = list(str(wg[1:]) for wg in request.session['requestParams']['workinggroup'].split(','))\n # exquery['workinggroup__in'] = excludeWGList\n try:\n del query['workinggroup']\n except:\n pass\n if 'status' in request.session['requestParams'] and request.session['requestParams']['status'] == '':\n try:\n del query['status']\n except:\n pass\n if 'site' in request.session['requestParams'] and request.session['requestParams']['site'] == 'hpc':\n try:\n del query['site']\n except:\n pass\n exquery['site__isnull'] = True\n if 'currentpriority__gte' in query and 'currentpriority__lte' in query:\n query['priority__gte'] = query['currentpriority__gte']\n query['priority__lte'] = query['currentpriority__lte']\n del query['currentpriority__gte']\n del query['currentpriority__lte']\n\n if 'runnumber' in request.session['requestParams'] and request.session['requestParams']['runnumber']:\n try:\n query['runnumber'] = int(request.session['requestParams']['runnumber'])\n except:\n _logger.exception('Provided runnumber is not valid. It should be int')\n\n jedi_tasks_fields = [field.name for field in JediTasks._meta.get_fields() if field.get_internal_type() == 'CharField']\n running_prod_fields = (set([\n field.name for field in RunningProdTasksModel._meta.get_fields() if field.get_internal_type() == 'CharField'\n ])).difference(set(jedi_tasks_fields))\n\n for f in running_prod_fields:\n if f in request.session['requestParams'] and request.session['requestParams'][f] and f not in query and f not in wild_card_str:\n if f == 'hashtags':\n wild_card_str += ' and ('\n wildCards = request.session['requestParams'][f].split(',')\n currentCardCount = 1\n countCards = len(wildCards)\n for card in wildCards:\n if '*' not in card:\n card = '*' + card + '*'\n elif card.startswith('*'):\n card = card + '*'\n elif card.endswith('*'):\n card = '*' + card\n wild_card_str += preprocess_wild_card_string(card, 'hashtags')\n if currentCardCount < countCards:\n wild_card_str += ' and '\n currentCardCount += 1\n wild_card_str += ')'\n elif f == 'scope' and (\n '!' in request.session['requestParams'][f] or '*' in request.session['requestParams'][f]):\n wild_card_str += ' and ({})'.format(preprocess_wild_card_string(request.session['requestParams'][f], f))\n else:\n query[f] = request.session['requestParams'][f]\n\n return query, exquery, wild_card_str",
"def update(self, query_conditions, cols_vals_to_update):\n matched_queries = self.__return_query('query', query_conditions)\n if matched_queries == None:\n raise Exception('Sorry, your query did not match any data.')\n else:\n #Loop through and update each row where the query returned true\n for found_row in matched_queries:\n #Check to make sure all the column names given by user match the column names in the table.\n row_id = found_row['row_id']\n self.update_row(row_id, cols_vals_to_update)",
"def update(self, query, callback=None, query_args=None):\r\n data = self.db.execute(query, query_args)\r\n return data",
"def updateParameters(self):\n\n return",
"def query(self, query):\n self._query = query",
"def updated_query(request, *args):\n # NOTE: it returns a dict not a QueryDict\n\n # recall query_to_dict returns key-val sequence\n # filter out the search key\n updated = {k: v for k, v in query_to_dict(request.GET.copy()) if\n k != \"search\"}\n\n # the args must at least have a key + value\n if len(args) < 2:\n return updated\n\n # helper function to update key-in\n def key_in(dic, keys, val):\n k = keys[0]\n # TODO : broken in the sense that I seem to be only updating\n # lists\n if len(keys) == 1:\n if isinstance(dic[k], list) and val not in dic[k]:\n dic[k].append(val)\n else:\n key_in(dic[k], keys[1:], val)\n\n # call key_in to update\n key_in(updated, args[:-1], args[-1])\n\n # return the updated dict (NOTE: this is not\n # a query dict\n return updated",
"def update(self, **kwargs):\n expr = self.model.__table__.update().where(self.query).values(**kwargs)\n return self._runquery(expr)",
"def _update_params(self):\n pass",
"def update(\n self,\n *args: Union[dict, Mapping],\n session: Optional[ClientSession] = None\n ):\n self.set_session(session=session)\n return (\n self.UpdateQueryType(\n document_model=self.document_model,\n find_query=self.get_filter_query(),\n )\n .update(*args)\n .set_session(session=self.session)\n )",
"def test_update_visibility_query(self):\n pass",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return"
] | [
"0.6119067",
"0.6095496",
"0.60731256",
"0.6058203",
"0.58836794",
"0.58246744",
"0.5813785",
"0.58044815",
"0.5776203",
"0.57282144",
"0.57191473",
"0.5662358",
"0.5660843",
"0.5659255",
"0.5619666",
"0.560143",
"0.5595878",
"0.55811363",
"0.55611885",
"0.5551484",
"0.55443144",
"0.5523215",
"0.5522959",
"0.5522959",
"0.5522959",
"0.5522959",
"0.5522959",
"0.5522959",
"0.5522959",
"0.5522959"
] | 0.65168345 | 0 |
Get read accessible workspaces by user. | def _get_read_accessible_workspaces_by_user(user):
if not settings.CAN_ANONYMOUS_ACCESS_PUBLIC_DOCUMENT and user.is_anonymous:
accessible_workspaces = []
else:
# workspace case
# list accessible workspaces
accessible_workspaces = [
workspace.id
for workspace in workspace_api.get_all_workspaces_with_read_access_by_user(
user
)
]
return accessible_workspaces | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_accessible_spaces(user):\n if not user:\n return []\n obj_list = get_objects_for_user(user, 'access_space',Space)\\\n .order_by('-created_at')\n return obj_list",
"def get_own_spaces(user):\n if not user:\n return []\n own_spaces = []\n accessible_spacs = get_accessible_spaces(user)\n for space in accessible_spacs:\n group_ids = [space.get_team().id, space.get_members().id, space.get_admins().id]\n if user.groups.filter(id__in = group_ids):\n own_spaces.append(space)\n return own_spaces",
"def get_workspace(self, user_id=None, alias=None, unique_id=None, include_deleted=False):\n # Get UUID for workspace\n if alias == 'default_workspace':\n unique_id = 'default_workspace'\n else:\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n status = self.include_status[:]\n if include_deleted:\n status.append('deleted')\n if not unique_id:\n unique_id = uuid_mapping.get_uuid(alias, user_id, status=status)\n if not unique_id:\n return False\n # return matching workspace \n self._logger.debug('Getting workspace \"{}\" with alias \"{}\"'.format(unique_id, alias)) \n \n return self.workspaces.get(unique_id, None)",
"def list_workspaces(self, user_id=None, include_default=True):\n workspace_list = []\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n for unique_id in uuid_mapping.get_uuid_list_for_user(user_id, status=self.all_status):\n workspace_list.append(self.dict_workspace(unique_id=unique_id, user_id=user_id))\n \n if include_default:\n workspace_list.append(self.dict_workspace(unique_id='default_workspace', user_id='default')) \n \n return workspace_list",
"def get_workspaces(user, layout_uid=None, workspace=None, public=False,\n different_layouts=False):\n # We need to show workspaces\n q_kwargs = {'user': user}\n if not different_layouts:\n q_kwargs.update({'layout_uid': layout_uid})\n if public:\n q_kwargs.update({'is_public': public})\n\n workspaces = list(\n DashboardWorkspace._default_manager\n .filter(**q_kwargs)\n .only('id', 'name', 'slug')\n .order_by('position')[:]\n )\n\n next = None\n previous = None\n current = None\n current_not_found = False\n\n if workspace:\n\n # Slugifying the workspace\n workspace_slug = slugify_workspace(workspace)\n num_workspaces = len(workspaces)\n\n for index, ws in enumerate(workspaces):\n if workspace_slug == ws.slug:\n current = ws\n\n if index == 0:\n # No previous workspace (previous is default).\n try:\n next = workspaces[1]\n except IndexError:\n pass\n\n elif num_workspaces == index:\n # No next workspace (next is default).\n try:\n previous = workspaces[index - 1]\n except IndexError:\n pass\n\n else:\n # Getting previous and next workspaces.\n try:\n previous = workspaces[index - 1]\n except IndexError:\n pass\n\n try:\n next = workspaces[index + 1]\n except IndexError:\n pass\n\n if current is None:\n current_not_found = True\n\n else:\n try:\n previous = workspaces[-1]\n except IndexError:\n pass\n\n try:\n next = workspaces[0]\n except IndexError:\n pass\n\n return {\n 'workspaces': workspaces,\n 'next_workspace': next,\n 'previous_workspace': previous,\n 'current_workspace': current,\n 'current_workspace_not_found': current_not_found\n }",
"def request_workspace_list(self, request):\n \n user_id = request['user_id'] \n \n response = {'workspaces': []}\n response['workspaces'] = self.list_workspaces(user_id=user_id)\n \n return response",
"def get_user_workspaces(request):\n post = request.POST.dict()\n user = post.get('user_id')\n if not user:\n response = {'status': -1, 'status_message': 'No user supplied'}\n return HttpResponse(json.dumps(response))\n response = {\n 'status': 1,\n 'status_message': 'Success',\n 'workspaces':[]\n }\n qs = user_to_workspace.objects.filter(user_id=user)\n for row in qs:\n response['workspaces'].append({\n 'workspace_id':row.workspace_id.workspace_id,\n 'workspace_name':row.workspace_id.workspace_name,\n 'description':row.workspace_id.description\n })\n return HttpResponse(json.dumps(response))",
"def get_shared_worlds(self, username):\n\t\treturn self._send_command_to_entity_server(us.SERVER_COMMAND_GET_SHARED_WORLDS, username)",
"def get_user_stream(user):\n spaces = get_accessible_spaces(user)\n ret = model_stream(Space, target_object_id__in=spaces)[:10]\n return ret",
"def workspaces_members(\n self,\n context,\n request: TracimRequest,\n hapic_data=None\n ) -> typing.List[UserRoleWorkspaceInContext]:\n app_config = request.registry.settings['CFG']\n rapi = RoleApi(\n current_user=request.current_user,\n session=request.dbsession,\n config=app_config,\n )\n \n roles = rapi.get_all_for_workspace(request.current_workspace)\n return [\n rapi.get_user_role_workspace_with_context(user_role)\n for user_role in roles\n ]",
"def get(self, user_id):\n longitude = self.get_argument(\"longitude\", False)\n latitude = self.get_argument(\"latitude\", False)\n assert user_id and longitude and latitude\n\n nearby_available_parking_spaces = yield AvailableParkingSpacePool.read_many(\n longitude=longitude,\n latitude=latitude\n )\n\n _available_parking_spaces = [AvailableParkingSpaceMapper.to_record(\n entity=single_available_parking_space\n ) for single_available_parking_space in nearby_available_parking_spaces]\n\n self.set_status(httplib.OK)\n self.write({\n 'available_parking_spaces': _available_parking_spaces\n })",
"def get(self, name):\n res = self.rpc.call(MsfRpcMethod.DbGetWorkspace, [name])\n if 'workspace' in res:\n return res['workspace']\n else:\n return",
"def get_allowed_topologies(user):\n try:\n up = user.get_profile()\n except AttributeError:\n return db.Topology.objects.none()\n\n if user.has_perm(\"vnswww.topology_use_any\"):\n # We can view and use any templates\n topos = db.Topology.objects.filter()\n else:\n q_own = Q(owner=user)\n q_permitted = Q(allowed_users=user)\n q_org = Q(org=user.get_profile().org)\n q_public = Q(public=True)\n if user.has_perm(\"vnswww.topology_use_org\"):\n print \"Allowed all topos in own org\"\n # We can view and use any from the user's organization\n topos = db.Topology.objects.filter(q_permitted | q_org | q_own)\n else:\n print \"NOT allowed all topos in own org\"\n # We can view any from our own organization which are protected\n topos = db.Topology.objects.filter(q_permitted | q_own)\n\n return topos",
"def load_all_workspaces_for_user(self, user_id, with_status=None):\n self._logger.debug('Trying to load all workspaces for user \"{}\"'.format(user_id))\n status = self.include_status[:]\n if with_status:\n if type(with_status) != list:\n with_status = [with_status]\n status = with_status\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n workspace_list = ['default_workspace'] + uuid_mapping.get_uuid_list_for_user(user_id, status=status) \n for unique_id in workspace_list: \n self.load_workspace(unique_id=unique_id)",
"def get_workspace(client):\n return client._creoson_post(\"windchill\", \"get_workspace\", key_data=\"workspace\")",
"def getGSWorkspace(self, desired=None):\n role = self.getRole(desired)\n ws = role[\"roleName\"]\n return ws",
"def get_rooms(user_id, org_id):\n\n helper = DataStorage()\n helper.organization_id = org_id\n query = {\"room_user_ids\":user_id}\n options = {\"sort\":{\"created_at\":-1}}\n response = helper.read_query(\"dm_rooms\", query=query, options=options)\n\n if response and \"status_code\" not in response:\n return response\n return []",
"def get_workspace(client):\n workspace = getattr(settings, 'ASANA_WORKSPACE', None)\n if not workspace:\n workspaces = [\n workspace for workspace in client.workspaces.find_all(item_limit=1)\n ]\n if not workspaces:\n logger.error('Any workspaces was not found')\n return\n workspace = workspaces[0]['gid']\n return workspace",
"def get_user_folders(user):\n folders = Folder.objects.filter(user=user)\n return folders",
"def workspaces(self):\n return WorkspaceCollection(client=self)",
"def get_access_rights(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetAccessRights', self.handle))",
"def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):\n # get all cmake prefix paths\n env_name = 'CMAKE_PREFIX_PATH'\n value = environ[env_name] if env_name in environ else ''\n paths = [path for path in value.split(os.pathsep) if path]\n # remove non-workspace paths\n workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]\n return workspaces",
"def get_user_access(self, user):\n return self._access_lists.get_user_access(user)",
"def get_accessible_projects(user):\n query = Q(deprecated_files=False)\n\n query &= get_public_projects_query()\n\n if user.is_authenticated:\n query |= get_restricted_projects_query(user)\n\n if user.is_credentialed:\n query |= get_credentialed_projects_query(user)\n\n query |= get_projects_accessible_through_events(user)\n\n return PublishedProject.objects.filter(query).distinct()",
"def get_readable_dag_ids(self, user) -> set[str]:\n return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])",
"def get_workspace(self):\n\n # Our AML config file\n with open(\"/usr/src/api/config.json\", \"r\") as json_file:\n config_data = json.load(json_file)\n\n # Let's connect to our workspace\n sp = ServicePrincipalAuthentication(tenant_id=config_data['tenant_id'], # tenantID\n service_principal_id=config_data['service_principal_id'], # clientId\n service_principal_password=config_data[\n 'service_principal_password']) # clientSecret\n\n ws = Workspace.get(name=config_data['workspace_name'],\n auth=sp,\n subscription_id=config_data['subscription_id'],\n resource_group=config_data['resource_group'])\n\n return ws",
"def get_projects_user_can_view(user):\n if hasattr(user, 'worker'):\n # Workers need to be able to view all data\n projects = Project.objects.all()\n else:\n projects = get_objects_for_user(\n user,\n 'view_project_data',\n klass=Project)\n sites = get_objects_for_user(user, 'view_site_data', klass=Site)\n site_projects = Project.objects\\\n .filter(id__in=[i.project_id for i in sites])\\\n .exclude(id__in=[p.id for p in projects])\n\n return projects | site_projects",
"def get_repos_user(user='xmonader'):\n u = ghclient.get_user(login=user)\n repos = u.get_repos()\n repos_list = []\n for i in range(20):\n page = repos.get_page(i)\n if len(page) == 0:\n break\n repos_list.extend(repos.get_page(i))\n return repos_list",
"def get_workspaces(db_connection):\n workspaces_dict = {}\n\n cursor = db_connection.cursor()\n query = \"use \" + query_on\n cursor.execute(query)\n\n query = (\n \"select ws_id, ws.username as username, initial_save_date, mod_date, \"\n \"is_deleted, is_public \"\n \"from metrics_reporting.workspaces_current ws \"\n \"inner join metrics.user_info ui on ws.username = ui.username \"\n \"where ui.kb_internal_user = 0 and ws.number_of_shares > 0 \"\n \"and narrative_version > 0;\"\n )\n cursor.execute(query)\n for record in cursor:\n workspaces_dict[record[0]] = {\n \"username\": record[1],\n \"creation_date\": record[2],\n \"mod_date\": record[3],\n \"is_deleted\": record[4],\n \"is_public\": record[5],\n \"shares_list\": list(),\n }\n return workspaces_dict",
"def list_user_access(self):\n return self.manager.list_user_access(self)"
] | [
"0.6719666",
"0.66262877",
"0.6177913",
"0.61746544",
"0.61572975",
"0.59542626",
"0.5788981",
"0.5735323",
"0.5701356",
"0.5671635",
"0.5643736",
"0.5525086",
"0.55015767",
"0.54937226",
"0.549314",
"0.54608184",
"0.5457025",
"0.54482085",
"0.54317015",
"0.54259753",
"0.5395578",
"0.53368545",
"0.53345627",
"0.53259885",
"0.5298573",
"0.5268821",
"0.52624655",
"0.52601594",
"0.52480894",
"0.5232334"
] | 0.82568294 | 0 |
Can user write data in workspace. | def can_write_data_workspace(func, data, workspace, user):
return can_write_in_workspace(
func, data, workspace, user, rights.PUBLISH_DATA
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def can_write(self, auth_param: str) -> bool:\n perms = self._get_workspace_permissions([auth_param])\n return self._has_write_perm(perms.get(auth_param, WorkspacePermission.NONE))",
"def canwrite(self):\n return False",
"def set_data_writable(self):\n pass",
"def isWriteable(self, name):\n pass",
"def _check_writable_(self):\n self._check_within_context_()\n if self._mode != 'w':\n raise Exception('Cannot update database: read only mode')",
"def is_writable(self):\n raise NotImplementedError()",
"def has_write_permission(request):\n user = request.user\n return user.is_superuser",
"def has_write_permission(request):\n # TODO: Stop users from writing other users' pageranks. Why should that be so hard?\n return request.user.is_authenticated",
"def can_write_list(self, auth_params: List[str]) -> Dict[str, bool]:\n perms = self._get_workspace_permissions(auth_params)\n ret_perms = dict()\n for p in auth_params:\n ret_perms[p] = self._has_write_perm(perms.get(p, WorkspacePermission.NONE))\n return ret_perms",
"def _has_write_perm(self, perm: WorkspacePermission) -> bool:\n write_permissions = [\n WorkspacePermission.ADMINISTRATOR,\n WorkspacePermission.READ_WRITE,\n ]\n return perm in write_permissions",
"def check_p4gf_user_write_permission(self):\n gf_client_map = P4.Map()\n gf_client_map.insert(\"//...\", \"//client/...\")\n utp = p4gf_protect.UserToProtect(self.ctx.p4)\n prot = utp.user_to_protect(p4gf_const.P4GF_USER)\n gf_write_filter = prot.map_for_perm(p4gf_protect.WRITE)\n gf_write_filter = P4.Map.join(gf_write_filter, gf_client_map)\n if not gf_write_filter.includes('//{depot}/...'.format(depot=p4gf_const.P4GF_DEPOT)):\n raise RuntimeError(_('permission denied'))",
"def writable(self):\n return True",
"def add_user_right_to_workspace(request):\n workspace_id = request.POST.get('workspace_id', None)\n users_ids = request.POST.getlist('users_id[]', [])\n is_read_checked = request.POST.get('read', None) == 'true'\n is_write_checked = request.POST.get('write', None) == 'true'\n\n if len(users_ids) == 0:\n return HttpResponseBadRequest(\"You need to select at least one user.\")\n if not is_read_checked and not is_write_checked:\n return HttpResponseBadRequest(\"You need to select at least one permission (read and/or write).\")\n\n try:\n workspace = workspace_api.get_by_id(str(workspace_id))\n for user in user_api.get_all_users_by_list_id(users_ids):\n if is_read_checked:\n workspace_api.add_user_read_access_to_workspace(workspace, user, request.user)\n if is_write_checked:\n workspace_api.add_user_write_access_to_workspace(workspace, user, request.user)\n except AccessControlError, ace:\n return HttpResponseBadRequest(ace.message)\n except DoesNotExist, dne:\n return HttpResponseBadRequest(dne.message)\n except Exception, exc:\n return HttpResponseBadRequest('Something wrong happened.')\n\n return HttpResponse(json.dumps({}), content_type='application/javascript')",
"def _testIdWriteAccess(self):\n from Products.CMFCore.utils import getToolByName\n mt = getToolByName(self.portal, 'portal_membership')\n user = mt.getAuthenticatedMember()\n perm = self.person.schema.get('id').write_permission\n return user.checkPermission(perm, self.person)",
"def is_writable(self, object, content_type):\n return False",
"def test_project_writer(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n return False",
"def writable(self):\n return 'gmail.modify' in self.scopes",
"def test_write(self):\n userEdited = self.env['res.users'].browse(\n self.user.id).write({'user_profile_id': self.user_profile2.id})\n self.assertEqual(userEdited, True)",
"def saveProgrammingValuesToDatabase(self):\n #print(\"save programming values\")\n #print(\"username: \", self.user.username)\n #self.user.data.printData()\n self.dbManager.setUserProgramData(self.user.username, self.user.data)",
"def writable(self):\n ...",
"def _validate_can_write(self):\n if self._mode not in WRITE_MODES:\n raise IOError(\"File is not writable\")\n if self.Writable == 'no':\n raise IOError(\"'Writable' flag is 'no'\")",
"def write_authorize(cls, user, obj):\n if not cls._meta.model.published_where_is_examiner(user).filter(id=obj.id):\n raise PermissionDenied()\n if obj.id == None:\n raise PermissionDenied() # We only allow update",
"def can_update(self, user, **data):\n raise Return((True, set([])))",
"def writable(name):",
"def write_data():",
"def has_perm_publish_data(user):\n has_perm_publish(user, rights.PUBLISH_DATA)",
"def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")",
"def writable(self) -> bool:\n return self._writable",
"def is_mutable_by(self, user, perm='site.change_localsite'):\n return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()",
"def testUpdateAccessAllowed(self):\n for user in (self.contributor, self.delegate, self.owner, self.root):\n response = self.runPut(user, sequencer=self.hiseq2000.sodar_uuid, data=self.post_data)\n self.response_200(response)\n data = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(data[\"vendor_id\"], self.post_data[\"vendor_id\"])"
] | [
"0.72511923",
"0.70142853",
"0.6588434",
"0.6476887",
"0.6420118",
"0.63587517",
"0.6332469",
"0.63128155",
"0.62503433",
"0.6213865",
"0.6140603",
"0.61385965",
"0.60728014",
"0.59627616",
"0.59163034",
"0.5911914",
"0.5841909",
"0.5827193",
"0.5817602",
"0.57675314",
"0.57560205",
"0.56526715",
"0.55812323",
"0.556942",
"0.55568635",
"0.5538468",
"0.542843",
"0.5419943",
"0.54042834",
"0.53945327"
] | 0.81769687 | 0 |
Sets the calculated_at of this StandardizedTierTier. | def calculated_at(self, calculated_at):
self._calculated_at = calculated_at | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tier(self, tier):\n\n self._tier = tier",
"def scheduled_at(self, scheduled_at):\n\n self._scheduled_at = scheduled_at",
"def set_tier(self, tier):\n self.single_selection_from_static_kendo_dropdown(self.tier_kendo_dropdown_locator, tier)",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def created_at(self, created_at):\n\n self._created_at = created_at",
"def scheduled_reset_at(self, scheduled_reset_at):\n\n self._scheduled_reset_at = scheduled_reset_at",
"def created_at(self, created_at):\n self._created_at = created_at",
"def modified_at(self, modified_at):\n\n self._modified_at = modified_at",
"def modified_at(self, modified_at):\n\n self._modified_at = modified_at",
"def tier_explanation(self, tier_explanation):\n\n self._tier_explanation = tier_explanation",
"def effective_at(self, effective_at):\n if self.local_vars_configuration.client_side_validation and effective_at is None: # noqa: E501\n raise ValueError(\"Invalid value for `effective_at`, must not be `None`\") # noqa: E501\n if (self.local_vars_configuration.client_side_validation and\n effective_at is not None and len(effective_at) < 1):\n raise ValueError(\"Invalid value for `effective_at`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._effective_at = effective_at",
"def set_analysis_time(self, t):\n for z in self.zones:\n z.set_demand_rate_per_t(t)",
"def set_etacalc(self, etacalc):\n self.__etacalc = etacalc",
"def observed_at(self, observed_at):\n\n self._observed_at = observed_at",
"def updated_at(self, updated_at):\n\n self._updated_at = updated_at"
] | [
"0.58256483",
"0.5355169",
"0.5262881",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.51667094",
"0.5163938",
"0.51372296",
"0.509025",
"0.509025",
"0.5018493",
"0.5005989",
"0.4978487",
"0.49382493",
"0.49160874",
"0.4901448"
] | 0.7393106 | 0 |
Sets the explanation of this StandardizedTierTier. | def explanation(self, explanation):
self._explanation = explanation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tier_explanation(self, tier_explanation):\n\n self._tier_explanation = tier_explanation",
"def tier(self, tier):\n\n self._tier = tier",
"def set_description(self, descr):\n self._current_test_descr = descr",
"def description(self, value):\n self.definition.description = value",
"def explain(self):\n return self.description + f\" ({self.value:.3f} eV)\"",
"def set_description(self, desc):\n super().set_description(desc, refresh=True)\n if self._pbar:\n self._pbar._set_description(self.desc)",
"def set_description(self, description):\r\n self.__description = description",
"def set_tier(self, tier):\n self.single_selection_from_static_kendo_dropdown(self.tier_kendo_dropdown_locator, tier)",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def description(self, description):\n self._description = description",
"def SetDescription(self, description):\n self.description = str(description)",
"def set_description(self, description):\n self.description = description",
"def hypertension(self, hypertension):\n\n self.logger.debug(\"In 'hypertension' setter.\")\n\n self._hypertension = hypertension",
"def tier_2160p(self, tier_2160p):\n\n self._tier_2160p = tier_2160p",
"def set_description(self, description):\n self.__description = description",
"def tier_number(self, tier_number):\n\n self._tier_number = tier_number",
"def __init__(self):\n self.label = \"Calculate response\"\n self.description = \"Use this tool to combine the evidence weighted by their associated generalization in the weights-of-evidence table. This tool calculates the posterior probability, standard deviation (uncertainty) due to weights, variance (uncertainty) due to missing data, and the total standard deviation (uncertainty) based on the evidence and how the evidence is generalized in the associated weights-of-evidence tables.The calculations use the Weight and W_Std in the weights table from Calculate Weights.\"\n self.canRunInBackground = False\n self.category = \"Weights of Evidence\"",
"def set_desc(self, desc: str):\n self._desc = desc",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description",
"def description(self, description):\n\n self._description = description"
] | [
"0.79957366",
"0.5871474",
"0.5464915",
"0.54482603",
"0.5392081",
"0.53604364",
"0.52467775",
"0.52365154",
"0.5220184",
"0.5220184",
"0.5220184",
"0.5220184",
"0.52154154",
"0.5203799",
"0.51837766",
"0.5157714",
"0.51374966",
"0.5122163",
"0.5120805",
"0.51175475",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977",
"0.5111977"
] | 0.6389624 | 1 |
This function returns the number of files in the ISO as an integer | def NumberOfFilesInISO(XISOPath, XSystemUpdateFolder):
command='./extract-xiso -l '
if XSystemUpdateFolder == True:
command = command + '-s '
command = command + '"' + XISOPath + '"'
print(command)
commandOut = commands.getstatusoutput(command)
commandOut = commandOut[1].split('\n')
global Logs
Logs.append(commandOut)
NOF = int(commandOut[-1].split(' ')[0])
print('Number of files')
print(NOF)
global NISOFiles
NISOFiles = NOF
    return NOF # Return the number of files | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_file(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=True):\n n += 1\n return n",
"def getFileCount(self) -> int:\n ...",
"def n_subfile(self):\n self.assert_is_dir_and_exists()\n n = 0\n for _ in self.select_file(recursive=False):\n n += 1\n return n",
"def numberFiles(self):\n return self.n",
"def fileCount(self):\n pass",
"def getnrfiles(self):\n return len(self.filenames)",
"def __number_of_files(self):\n self.__get_files()\n return len(self.files)",
"def file_count(self) -> str:\n return pulumi.get(self, \"file_count\")",
"def get_number_of_files(directory: str):\n\n number_of_files = len([item for item in os.listdir(directory) if os.path.isfile(os.path.join(directory, item))])\n print(number_of_files)\n return number_of_files",
"def n_total_files(self):\n return len(self.fileinfo)",
"def file_num(self):\n command = \"SELECT COUNT(id) FROM files;\"\n return self.c.execute(command)",
"def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n",
"def svn_fs_file_length(*args):\r\n return _fs.svn_fs_file_length(*args)",
"def get_number_files(dataset):\n HOME = os.environ['HOME']\n # cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json',\n # '--key=%s/.globus/userkey.pem' % HOME, '--cert=%s/.globus/usercert.pem' % HOME]\n cmds = ['das_client.py', '--query', 'summary dataset=%s' % dataset, '--format=json']\n output = subprocess.check_output(cmds, stderr=subprocess.STDOUT)\n summary_dict = json.loads(output)\n return int(summary_dict['data'][0]['summary'][0]['nfiles'])",
"def totalfiles(self):\n return len([sz for sz in self.iterate()])",
"def fileCounter(directory):",
"def getCountFiles():\n result = 0\n session = Queries.createSession()\n try:\n result = session.execute(func.count(FileTable.id)).fetchone()[0]\n except sqlalchemy.exc.ArgumentError:\n print 'SQLAlchemy ERROR: Invalid or conflicting function argument is supplied'\n except sqlalchemy.exc.CompileError:\n print 'SQLAlchemy ERROR: Error occurs during SQL compilation'\n finally:\n session.close()\n return result",
"def count_files_dir(self,full_path):\n try:\n num_files = len([name for name in os.listdir(full_path) if os.path.isfile(self.FILENAME)])\n print(f\"Number of files in {full_path} is {num_files}\")\n return num_files\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")",
"def get_num_files(self, file_type):\n return self.file_type_counter.get(file_type, 0)",
"def _count_data(path):\n matcher = re.compile(r'[0-9]+\\.dec')\n match = lambda name: bool(matcher.match(name))\n names = os.listdir(path)\n n_data = len(list(filter(match, names)))\n return n_data",
"def countgenes():\n directory = openfile('db_directory.txt')\n no_genes_file = directory+'GENES_IN_HPO.txt'\n GENES_IN_HPO = openfile(no_genes_file)\n #GENES_IN_HPO = openfile(numbergenes_file)\n return int(GENES_IN_HPO)",
"def size(path):",
"def FileLen(filename):\n return os.stat(str(filename))[6]",
"def countFiles(samweb, dimensions=None, defname=None):\n if defname is not None:\n result = samweb.getURL('/definitions/name/%s/files/count' % escape_url_component(defname))\n else:\n result = samweb._callDimensions('/files/count', dimensions)\n return long(result.text.strip())",
"def get_num_files(self):\n\t\tif self.num_files_in_set is None and self.set_type == FAST5SET_TARBALL:\n\t\t\tself.num_files_in_set = len(self.files)\n\t\treturn self.num_files_in_set",
"def len(self):\n # print(self.processed_file_names)\n return self.len_",
"def get_lenght(self):\n return len(self.filelist)",
"def file_length(fileName):\n with open(f_pass) as f:\n for i, l in enumerate(f):\n pass\n return i + 1",
"def __len__(self):\n return len(self.files[self.split])",
"def max_files(self):\n\n return 10 ** self.int_len(self.cnt_files())"
] | [
"0.7400887",
"0.7315209",
"0.72290283",
"0.7170745",
"0.701988",
"0.7000022",
"0.688257",
"0.68750906",
"0.681171",
"0.6706613",
"0.66990584",
"0.66621566",
"0.6575327",
"0.6553808",
"0.65233034",
"0.6516356",
"0.6504051",
"0.64615285",
"0.6457432",
"0.64490885",
"0.64460576",
"0.6354166",
"0.6319753",
"0.6303407",
"0.63029623",
"0.62992543",
"0.6298164",
"0.62976044",
"0.6270981",
"0.6243483"
] | 0.76097965 | 0 |
Return a dataframe from a parquet file. | def parquet(path, *args, **kwargs):
try:
df = Spark.instance.spark.read.parquet(path, *args, **kwargs)
except IOError as error:
logging.error(error)
raise
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _pq2df(data_file):\n df = pd.read_parquet(data_file)\n return df",
"def read_parquet(self, filename):\n if filename is None:\n raise ValueError(\"File name is None\")\n self.logger.info(\"# Reading a Parquet file \" + filename)\n sqlContext = SQLContext(self.spark)\n return sqlContext.read.parquet(filename)",
"def get_dataframe(data_path: PathLike) -> pd.DataFrame:\n path = get_local_data_path(data_path, download_if_missing=True)\n df = pd.read_parquet(path)\n return df",
"def parquet_reader(filename):\n\n return ParquetFile(source = filename)",
"def load(file):\n return pq.read_table(file).to_pandas()",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def read_data(self, file_path, format, **kwargs) -> DataFrame:\n return self.spark.read.format(format).options(**kwargs).load(file_path)",
"def parquet(filepath, mode=DataSaver.MODE_OVERWRITE, compression='snappy'):\n format_file = DataSaver.FORMAT_PARQUET\n kwargs = locals()\n _apply_datasaver(format_file, kwargs, last_uuid)\n return None",
"def load_parquet(self):\n \n if len(self.sample_stocks) > 0:\n \n stocks = set(self.sample_stocks)\n book_df, trade_df = [__read_pq_data__(path, stocks) for path in\n (self.book_dir, self.trade_dir)]\n return book_df, trade_df\n \n else:\n \n print(\"Select stocks first using pick_stocks method!\")\n return",
"def df_read(path: pathlib.Path, **kwargs) -> pd.DataFrame:\n # Always specify the datatype so pandas does not have to infer it--much\n # faster.\n return pd.read_csv(path, sep=';', float_precision='high', **kwargs)",
"def _to_dask(self):\n import dask.dataframe as dd\n urlpath = self._get_cache(self._urlpath)[0]\n self._df = dd.read_parquet(urlpath,\n storage_options=self._storage_options, **self._kwargs)\n self._load_metadata()\n return self._df",
"def from_parquet(cls,parquet_dir,timeSeriesCol,mainCategoryCol,columns = None,partitioning='hive',filters=None,filesystem=None):\n data = io.from_parquet(\n parquet_dir,\n timeSeriesCol,\n mainCategoryCol,\n columns,\n partitioning,\n filters,\n filesystem\n )\n return cls(data,timeSeriesCol,mainCategoryCol)",
"def get_file_df(filepath):\n dd = [json.loads(f) for f in open(filepath).readlines()]\n return pd.DataFrame(dd)",
"def load_multi_parquet():\n files = glob.glob('temp/*.parquet')\n df = pd.concat([pd.read_parquet(fp) for fp in files])\n return df",
"def read_parfile(parfile):\n if not os.path.exists(parfile):\n raise Exception(\n \"pst_utils.read_parfile: parfile not found: {0}\".format(parfile)\n )\n f = open(parfile, \"r\")\n header = f.readline()\n par_df = pd.read_csv(\n f, header=None, names=[\"parnme\", \"parval1\", \"scale\", \"offset\"], sep=r\"\\s+\"\n )\n par_df.index = par_df.parnme\n return par_df",
"def gp_dataframe_import(filename):\n path = os.path.join('..', 'data', filename)\n frame = pd.read_csv(path)\n return frame",
"def parse_parquet_file(dataframe, **kwargs):\n\n # check if the provided dataframe is valid\n validate_required_fields(dataframe)\n\n # log all arguments provided by client\n logging.debug(kwargs)\n\n # get all provided args\n arg_header_count = kwargs[\"header\"]\n arg_tail_count = kwargs[\"tail\"]\n args_total_dataframe_count = kwargs[\"total_dataframe_size\"]\n args_drop_count = kwargs[\"drop_rows\"]\n selected_list_columns = kwargs[\"selected_columns\"]\n\n if not is_empty_dataframe(dataframe):\n filtered_dataframe = None\n total_count_dataframe = None\n\n if args_drop_count:\n execute_drop_strategy(dataframe, args_drop_count)\n\n if selected_list_columns:\n dataframe = execute_select_dataframe_columns(\n dataframe, selected_list_columns)\n\n if args_total_dataframe_count:\n total_count_dataframe = execute_total_dataframe_count_strategy(\n dataframe)\n elif arg_tail_count:\n filtered_dataframe = execute_tail_strategy(\n dataframe, arg_tail_count)\n else:\n filtered_dataframe = execute_header_strategy(\n dataframe, arg_header_count)\n\n # print out the result set\n if filtered_dataframe is not None:\n print_dataframe_content(filtered_dataframe)\n else:\n print(\"Total Rows:\", total_count_dataframe)\n else:\n print(\"It was provided a empty dataframe.\")",
"def creat_data_frame(spark, file_path):\n df = spark.read \\\n .format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .option(\"inferSchema\", \"true\")\\\n .option(\"samplingRatio\", 0.01) \\\n .option(\"delimiter\", \",\") \\\n .load(file_path)\n return df",
"def covert_df_to_parquet(df, s3_path, database_name, table):\n wr.s3.to_parquet(\n df=df,\n path=f\"s3://{s3_path}\",\n dataset=True,\n mode=\"append\",\n database=database_name,\n table=table\n )",
"def csv_file_to_parquet(input_file, output_file):\n df = pd.read_csv(input_file)\n df.to_parquet(output_file)",
"def read_table(file_name: Union[str, Path], **kwargs):\n\tfile_name = Path(file_name)\n\textension = file_name.suffix\n\tdefault_args = {\n\t\t'.csv': {'delimiter': ','},\n\t\t'.tsv': {'delimiter': '\\t'}\n\t}\n\n\t# arguments = self._cleanArguments(extension, arguments)\n\tfile_name = str(file_name.absolute())\n\tif extension in {'.xls', '.xlsx', '.xlsm'}: # .xlsm is not a typo.\n\n\t\tdf = pandas.read_excel(file_name, **kwargs)\n\telif extension in {'.csv', '.tsv', '.fsv', '.txt'}:\n\t\targuments = {**default_args.get(extension), **kwargs}\n\t\tif 'sheetname' in arguments: arguments.pop('sheetname')\n\t\tdf = pandas.read_table(file_name, **arguments)\n\telif extension == '.pkl':\n\t\tdf = pandas.read_pickle(file_name)\n\telse:\n\t\traise NameError(\"{} does not have a valid extension!\".format(file_name))\n\treturn df",
"def convert_sql_to_parquet():\n logging.basicConfig(\n format='%(levelname)-8s | %(asctime)s | %(name)s: %(message)s',\n level=logging.INFO\n )\n\n zipped_sql_file, output_dir = parse_command_line_args()\n\n if not output_dir.exists():\n output_dir.mkdir()\n\n parquet_file = output_dir / (zipped_sql_file.stem + '.parquet')\n\n market_data = MarketData(zipped_sql_file)\n market_data.load_data()\n\n data = merge_market_data(market_data)\n del market_data\n\n logger.info(\"Saving the data into Parquet file %s\", parquet_file.absolute())\n data.to_parquet(parquet_file.as_posix())",
"def path_to_df(\n\t\t\tpath:str\n\t\t\t, source_file_format:str\n\t\t\t, column_names:list\n\t\t\t, skip_header_rows:int\n\t\t): \n\t\t\tif not os.path.exists(path):\n\t\t\t\traise ValueError(f\"\\nYikes - The path you provided does not exist according to `os.path.exists(path)`:\\n{path}\\n\")\n\n\t\t\tif not os.path.isfile(path):\n\t\t\t\traise ValueError(f\"\\nYikes - The path you provided is not a file according to `os.path.isfile(path)`:\\n{path}\\n\")\n\n\t\t\tif (source_file_format == 'tsv') or (source_file_format == 'csv'):\n\t\t\t\tif (source_file_format == 'tsv') or (source_file_format is None):\n\t\t\t\t\tsep='\\t'\n\t\t\t\t\tsource_file_format = 'tsv' # Null condition.\n\t\t\t\telif (source_file_format == 'csv'):\n\t\t\t\t\tsep=','\n\n\t\t\t\tdf = pd.read_csv(\n\t\t\t\t\tfilepath_or_buffer = path\n\t\t\t\t\t, sep = sep\n\t\t\t\t\t, names = column_names\n\t\t\t\t\t, header = skip_header_rows\n\t\t\t\t)\n\t\t\telif (source_file_format == 'parquet'):\n\t\t\t\tif (skip_header_rows != 'infer'):\n\t\t\t\t\traise ValueError(dedent(\"\"\"\n\t\t\t\t\tYikes - The argument `skip_header_rows` is not supported for `source_file_format='parquet'`\n\t\t\t\t\tbecause Parquet stores column names as metadata.\\n\n\t\t\t\t\t\"\"\"))\n\t\t\t\ttbl = pyarrow.parquet.read_table(path)\n\t\t\t\tif (column_names is not None):\n\t\t\t\t\ttbl = tbl.rename_columns(column_names)\n\t\t\t\t# At this point, still need to work with metadata in df.\n\t\t\t\tdf = tbl.to_pandas()\n\t\t\treturn df",
"def df(self):\n if os.path.isfile(self.path):\n df = io.parquet_to_df(self.path)\n else:\n df = self.refresh()\n\n if self.cols:\n for col in self.cols:\n if col not in df:\n log.warning(f\"Col {col} missing. Not in the sources?\")\n\n return df",
"def get_database_data(file_name=''):\n if not os.path.exists(file_name):\n raise IOError(\"File {} does not exist!\".format(file_name))\n df = pd.read_csv(file_name, header=1)\n return df",
"def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df",
"def pandas_vertices_from_plyfile(filename):\n xyz = vertex_dict_from_plyfile(filename)\n return pd.DataFrame(xyz)",
"def create_data_frame(input_filepath):\n df = pd.read_json(input_filepath)\n logger = logging.getLogger(__name__)\n logger.info('Imported dataframe:')\n logger.info(df.info())\n logger.info(df.describe())\n logger.info(df.head())\n return df",
"def dataFrame(self):\n\n memory_file = StringIO(initial_value=self.sparql_result.decode('utf-8'), newline='\\n')\n reader = DictReader(memory_file)\n\n schema = StructType(\n list(map(lambda f: StructField(f, StringType()), reader.fieldnames))\n )\n\n data = list(map(lambda d: [d[f] for f in reader.fieldnames], list(reader)))\n\n return self.spark.createDataFrame(data, schema)",
"def read_metafile(path: PathType) -> dd.DataFrame:\n with bgen_metafile(path) as mf:\n divisions = [mf.partition_size * i for i in range(mf.npartitions)] + [\n mf.nvariants - 1\n ]\n dfs = [\n dask.delayed(_read_metafile_partition)(path, i)\n for i in range(mf.npartitions)\n ]\n meta = dd.utils.make_meta(METAFILE_DTYPE)\n return dd.from_delayed(dfs, meta=meta, divisions=divisions)"
] | [
"0.8080801",
"0.7680819",
"0.75473124",
"0.71808255",
"0.70952356",
"0.66877913",
"0.65009177",
"0.64348716",
"0.642109",
"0.64079404",
"0.63399845",
"0.63359207",
"0.62860304",
"0.624169",
"0.6233218",
"0.62246",
"0.6219255",
"0.61767995",
"0.61527157",
"0.6148135",
"0.61377245",
"0.61257917",
"0.6118444",
"0.6054342",
"0.60467786",
"0.60406786",
"0.6032734",
"0.6027442",
"0.5996635",
"0.5987427"
] | 0.7916956 | 1 |
Returns GP mean and variance in "scaled" space (same as gpflow model.predict_f with bounds applied) | def predict(self, x_scaled):
model_var_with_prior = self.mult_var_by_prior(x_scaled)
scaled_space_mean = self.y_scaler.transform(tf.reshape(tf.math.exp(self.log_prob(x_scaled)), [-1, 1]))
return scaled_space_mean, tf.reshape(model_var_with_prior, [-1, 1]).numpy() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fit_gp(self, inputs):\n x_context, y_context, x_data = inputs\n kernel = GP.kernels.RBF(length_scale=self._l_scale, length_scale_bounds=(1e-2, 1e3))\n gp = GP.GaussianProcessRegressor(kernel=kernel).fit(x_context[-1, :], y_context[-1, :])\n y_prediction, y_prediction_std = gp.predict(x_data[-1, :], return_std=True)\n return y_prediction[np.newaxis, :], y_prediction_std[np.newaxis, :, np.newaxis]",
"def pred_single_gp(x_train, y_train, x_pred):\n y_mean = np.mean(y_train[~np.isnan(y_train)])\n y_std = np.std(y_train[~np.isnan(y_train)])\n if y_std == 0:\n # print('ystd 0', y_train)\n y_std = 1\n y_train = y_train - y_mean\n # assert (y_std > 0), 'no std; zscore error'\n\n y_train = (y_train - y_mean) / y_std\n\n mf = NegLinear(1, 1)\n kernel = GPy.kern.RBF(input_dim=1, variance=1, lengthscale=4)\n\n kernel.lengthscale.set_prior(GPy.priors.Gamma.from_EV(4., 9.), warning=False)\n kernel.variance.set_prior(GPy.priors.Gamma.from_EV(1., .5), warning=False)\n\n # m = GPy.models.GPRegression(x_train.reshape(-1,1), y_train.reshape(-1,1), kernel)\n m = GPy.models.GPRegression(x_train.reshape(-1, 1), y_train.reshape(-1, 1), kernel=kernel, mean_function=mf)\n\n m.likelihood.variance.set_prior(GPy.priors.Gamma.from_EV(0.75, 0.25 ** 2), warning=False)\n m.mean_function.set_prior(GPy.priors.Gamma.from_EV(2 / 3, 0.2), warning=False)\n\n m.optimize()\n y_pred = m.predict(x_pred.reshape(-1, 1))[0]\n\n y_pred = (y_pred * y_std) + y_mean\n\n return y_pred",
"def predict_mean_and_var(self, Fmu, Fvar):\n gh_x, gh_w = hermgauss(self.num_gauss_hermite_points)\n gh_w /= np.sqrt(np.pi)\n gh_w = gh_w.reshape(-1, 1)\n shape = tf.shape(Fmu)\n Fmu, Fvar = [tf.reshape(e, (-1, 1)) for e in (Fmu, Fvar)]\n X = gh_x[None, :] * tf.sqrt(2.0 * Fvar) + Fmu\n\n # here's the quadrature for the mean\n E_y = tf.reshape(tf.matmul(self.conditional_mean(X), gh_w), shape)\n\n # here's the quadrature for the variance\n integrand = self.conditional_variance(X) \\\n + tf.square(self.conditional_mean(X))\n V_y = tf.reshape(tf.matmul(integrand, gh_w), shape) - tf.square(E_y)\n\n return E_y, V_y",
"def mse_with_var_regularization(y_true, y_pred):\n return K.mean(y_true)",
"def test_predict_mean_variance(self):\n lik = self._standard_likelihood()\n input_mean = Variable(TensorType([0.0]))\n input_variance = Variable(TensorType([1.0]))\n expected_output_mean = input_mean\n expected_output_variance = input_variance + self._expected_likelihood_variance\n\n # API\n output_mean, output_variance = lik.predict_mean_variance(\n input_mean, input_variance\n )\n assert isinstance(output_mean, Variable)\n assert isinstance(output_variance, Variable)\n\n # Value\n assert output_mean.data.numpy() == expected_output_mean.data.numpy()\n assert output_variance.data.numpy() == pytest.approx(\n expected_output_variance.data.numpy()\n )",
"def prediction_aggregation(self, xt_s,mu_s,var_s, method='PoE', weighting='uniform', power=26):\n\n nt = xt_s.shape[0]\n mu = np.zeros([nt, self.C],dtype='float64')\n var = np.zeros([nt, self.C],dtype='float64')\n\n prior_var = self.experts[0].kernel(xt_s[0], xt_s[0])\n\n \n #Process each latent gp individually \n for j in range(self.C):\n \n mu_s_c = mu_s[:, :, j]\n var_s_c = var_s[:, :, j]\n \n weight_matrix = compute_weights(mu_s_c, var_s_c, power, weighting, prior_var)\n \n prec_s= 1/var_s_c\n\n if method == 'PoE':\n \n prec = tf.reduce_sum(prec_s, axis=0)\n \n\n if method == 'gPoE':\n \n weight_matrix = normalize_weights(weight_matrix)\n\n prec = tf.reduce_sum(weight_matrix * prec_s , axis=0)\n \n\n if method == 'BCM':\n \n prec = tf.reduce_sum(prec_s, axis=0) + (1 - self.M) / prior_var \n\n if method == 'rBCM':\n \n \n prec = tf.reduce_sum(weight_matrix * prec_s, axis=0) \\\n + (1 - tf.reduce_sum(weight_matrix, axis=0)) / prior_var\n \n \n \n if method != 'bar':\n \n var[:, j] = 1 / prec\n\n mu[:, j] = var[:, j] * tf.reduce_sum(weight_matrix * prec_s * mu_s_c, axis=0)\n \n else:\n \n weight_matrix = normalize_weights(weight_matrix)\n\n mu[:, j] = tf.reduce_sum(weight_matrix * mu_s_c, axis=0)\n var[:, j] = tf.reduce_sum(weight_matrix * var_s_c, axis=0)\n \n \n return self.lik_aggregation(mu, var)",
"def predict_mean_and_var(self, Fmu, Fvar, epsilon=None):\n integrand2 = lambda *X: self.conditional_variance(*X) + tf.square(\n self.conditional_mean(*X))\n E_y, E_y2 = self._mc_quadrature([self.conditional_mean, integrand2],\n Fmu,\n Fvar,\n epsilon=epsilon)\n V_y = E_y2 - tf.square(E_y)\n return E_y, V_y # [N, D]",
"def _variance(self,gp):\r\n p = self.gp_link.transf(gp)\r\n return p*(1.-p)",
"def _variance(self,gp):\r\n return self.gp_link.transf(gp)/self.beta",
"def _variance(self,gp):\r\n return self.gp_link.transf(gp)**2",
"def _variance(self,gp):\r\n return self.variance",
"def conditional_variance(self, gp):\n raise NotImplementedError",
"def __call__(self, **kwargs):\n stddev = self.predictive_distribution.stddev(**kwargs)\n mean = self.predictive_distribution.mean(**kwargs)\n return normal_upper_confidence_bound(\n mean, stddev, exploration=self.exploration)",
"def _variance(self,gp):\r\n return self.gp_link.transf(gp)",
"def p_mean_variance(self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n model_kwargs=None):\n if model_kwargs is None:\n model_kwargs = {}\n\n batch, channels = x.shape[:2]\n assert t.shape == (batch,)\n model_output = model(x, t, **model_kwargs)\n if isinstance(model_output, tuple):\n model_output, extra = model_output\n else:\n extra = None\n\n assert model_output.shape == (batch, channels * 2, *x.shape[2:])\n model_output, model_var_values = torch.split(model_output, channels, dim=1)\n min_log = extract_into_tensor(self.posterior_log_variance_clipped_th, t,\n x.shape)\n max_log = extract_into_tensor(self.log_betas_th, t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = torch.exp(model_log_variance)\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n return x.clamp(-1, 1)\n return x\n\n # pylint: disable=protected-access\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output))\n # pylint: enable=protected-access\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t)\n\n assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n \"extra\": extra,\n }",
"def predict_mean_and_var(self, Fmu, Fvar):\n\n def integrand(*X):\n return self.conditional_variance(*X) + self.conditional_mean(*X)**2\n\n integrands = [self.conditional_mean, integrand]\n nghp = self.num_gauss_hermite_points\n E_y, E_y2 = ndiagquad(integrands, nghp, Fmu, Fvar)\n V_y = E_y2 - E_y**2\n return E_y, V_y",
"def test_predict_uncertain_inputs(self):\n X = np.linspace(-5,5, 10)[:, None]\n Y = 2*X + np.random.randn(*X.shape)*1e-3\n m = GPy.models.BayesianGPLVM(Y, 1, X=X, kernel=GPy.kern.Linear(1), num_inducing=1)\n m.Gaussian_noise[:] = 1e-4\n m.X.mean[:] = X[:]\n m.X.variance[:] = 1e-5\n m.X.fix()\n m.optimize()\n X_pred_mu = np.random.randn(5, 1)\n X_pred_var = np.random.rand(5, 1) + 1e-5\n from GPy.core.parameterization.variational import NormalPosterior\n X_pred = NormalPosterior(X_pred_mu, X_pred_var)\n # mu = \\int f(x)q(x|mu,S) dx = \\int 2x.q(x|mu,S) dx = 2.mu\n # S = \\int (f(x) - m)^2q(x|mu,S) dx = \\int f(x)^2 q(x) dx - mu**2 = 4(mu^2 + S) - (2.mu)^2 = 4S\n Y_mu_true = 2*X_pred_mu\n Y_var_true = 4*X_pred_var\n Y_mu_pred, Y_var_pred = m.predict_noiseless(X_pred)\n np.testing.assert_allclose(Y_mu_true, Y_mu_pred, rtol=1e-3)\n np.testing.assert_allclose(Y_var_true, Y_var_pred, rtol=1e-3)",
"def test_scaling():\n rng = np.random.RandomState(42)\n shape = (400, 10)\n u = rng.standard_normal(size=shape)\n mean = 100 * rng.uniform(size=shape[1]) + 1\n Y = u + mean\n Y_, mean_ = mean_scaling(Y)\n assert_almost_equal(Y_.mean(0), 0, 5)\n assert_almost_equal(mean_, mean, 0)\n assert Y.std() > 1",
"def scale_data(self, data):\n return (data - self.mean)/self.std",
"def gaussian_process_pointwise_variance(kernel, pred_samples, train_samples,\n nugget = 0):\n K_train = kernel(train_samples.T)\n # add small number to diagonal to ensure covariance matrix is\n # positive definite\n ntrain_samples = train_samples.shape[1]\n K_train[np.arange(ntrain_samples), np.arange(ntrain_samples)] += nugget\n k_pred = kernel(train_samples.T, pred_samples.T)\n L = np.linalg.cholesky(K_train)\n tmp = solve_triangular(L, k_pred, lower=True)\n variance = kernel.diag(pred_samples.T) - np.sum(tmp*tmp, axis=0)\n return variance",
"def NgNormalization2(Pin,g=10.0):\n Pmean = np.mean(Pin,axis=1,keepdims=True) \n Pstd = np.sqrt(np.var(Pin,axis=1,keepdims=True)+g ) # g = 10 for images of brightness 0...255 \n O = (Pin - Pmean) / Pstd\n return O",
"def transform(self, sess, xs):\n return sess.run( [self.z_mean, self.z_log_sigma_sq],\n feed_dict={self.x: xs} )",
"def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, keepdims=True)\n return (X-mu)/s",
"def _ave(self):\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()",
"def minibatch_mean_variance(x):\n mean = tf.reduce_mean(x, 0, keepdims=True)\n vals = tf.sqrt(tf.reduce_mean(tf.squared_difference(x, mean), 0) + 1e-8)\n vals = tf.reduce_mean(vals)\n return vals",
"def standardize(x, axis=-1):\n stds_avg = np.std(x, axis=axis, keepdims=True)\n x -= np.mean(x, axis=axis, keepdims=True)\n x /= (stds_avg + 1e-8)\n return x",
"def _ave(self):\n\n return np.asarray(np.mean(self.model_estim.x, axis=0)).flatten()",
"def fit_model(self):\r\n\t\tself.mu = np.mean(self.x, axis = 0)\r\n\t\tself.sig = np.std(self.x, axis = 0)",
"def predict(self, X_test):\n if self.basis_func is not None:\n X_transformed = self.basis_func(X_test)\n else:\n X_transformed = X_test\n\n # Marginalise predictions over hyperparameters\n mu = np.zeros([len(self.hypers), X_transformed.shape[0]])\n var = np.zeros([len(self.hypers), X_transformed.shape[0]])\n\n for i, h in enumerate(self.hypers):\n mu[i] = np.dot(self.models[i][0].T, X_transformed.T)\n var[i] = 1. / h[1] + np.diag(np.dot(np.dot(X_transformed, self.models[i][1]), X_transformed.T))\n\n m = mu.mean(axis=0)\n v = var.mean(axis=0)\n # Clip negative variances and set them to the smallest\n # positive float value\n if v.shape[0] == 1:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n else:\n v = np.clip(v, np.finfo(v.dtype).eps, np.inf)\n v[np.where((v < np.finfo(v.dtype).eps) & (v > -np.finfo(v.dtype).eps))] = 0\n\n return m, v",
"def conditional_mean(self, gp):\n raise NotImplementedError"
] | [
"0.60946023",
"0.5923903",
"0.5884754",
"0.58357656",
"0.57872975",
"0.56988716",
"0.56963426",
"0.56526315",
"0.56245965",
"0.5610475",
"0.56104165",
"0.55741924",
"0.5556511",
"0.5541308",
"0.55345446",
"0.55169386",
"0.54802036",
"0.54369193",
"0.5432692",
"0.5422389",
"0.5409307",
"0.53966415",
"0.53779095",
"0.536072",
"0.535099",
"0.5337384",
"0.53267795",
"0.53247315",
"0.5320744",
"0.5317128"
] | 0.62195873 | 0 |
Connect social account to existing account, if existing email found | def pre_social_login(self, request, sociallogin):
if sociallogin.is_existing:
return
email_addresses = sociallogin.email_addresses
for email in email_addresses:
try:
user_email = EmailAddress.objects.get(email__iexact=email.email)
except EmailAddress.DoesNotExist:
continue
user = user_email.user
sociallogin.connect(request, user) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _external_login_or_signup(request,\r\n external_id,\r\n external_domain,\r\n credentials,\r\n email,\r\n fullname,\r\n retfun=None):\r\n # see if we have a map from this external_id to an edX username\r\n try:\r\n eamap = ExternalAuthMap.objects.get(external_id=external_id,\r\n external_domain=external_domain)\r\n log.debug('Found eamap=%s', eamap)\r\n except ExternalAuthMap.DoesNotExist:\r\n # go render form for creating edX user\r\n eamap = ExternalAuthMap(external_id=external_id,\r\n external_domain=external_domain,\r\n external_credentials=json.dumps(credentials))\r\n eamap.external_email = email\r\n eamap.external_name = fullname\r\n eamap.internal_password = generate_password()\r\n log.debug('Created eamap=%s', eamap)\r\n eamap.save()\r\n\r\n log.info(u\"External_Auth login_or_signup for %s : %s : %s : %s\", external_domain, external_id, email, fullname)\r\n uses_shibboleth = settings.FEATURES.get('AUTH_USE_SHIB') and external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX)\r\n uses_certs = settings.FEATURES.get('AUTH_USE_CERTIFICATES')\r\n internal_user = eamap.user\r\n if internal_user is None:\r\n if uses_shibboleth:\r\n # If we are using shib, try to link accounts\r\n # For Stanford shib, the email the idp returns is actually under the control of the user.\r\n # Since the id the idps return is not user-editable, and is of the from \"[email protected]\",\r\n # use the id to link accounts instead.\r\n try:\r\n link_user = User.objects.get(email=eamap.external_id)\r\n if not ExternalAuthMap.objects.filter(user=link_user).exists():\r\n # if there's no pre-existing linked eamap, we link the user\r\n eamap.user = link_user\r\n eamap.save()\r\n internal_user = link_user\r\n log.info('SHIB: Linking existing account for %s', eamap.external_id)\r\n # now pass through to log in\r\n else:\r\n # otherwise, there must have been an error, b/c we've already linked a user with these external\r\n # creds\r\n failure_msg = _(dedent(\"\"\"\r\n You have already created an account using an external login like WebAuth or Shibboleth.\r\n Please contact %s for support \"\"\"\r\n % getattr(settings, 'TECH_SUPPORT_EMAIL', '[email protected]')))\r\n return default_render_failure(request, failure_msg)\r\n except User.DoesNotExist:\r\n log.info('SHIB: No user for %s yet, doing signup', eamap.external_email)\r\n return _signup(request, eamap, retfun)\r\n else:\r\n log.info('No user for %s yet. 
doing signup', eamap.external_email)\r\n return _signup(request, eamap, retfun)\r\n\r\n # We trust shib's authentication, so no need to authenticate using the password again\r\n uname = internal_user.username\r\n if uses_shibboleth:\r\n user = internal_user\r\n # Assuming this 'AUTHENTICATION_BACKENDS' is set in settings, which I think is safe\r\n if settings.AUTHENTICATION_BACKENDS:\r\n auth_backend = settings.AUTHENTICATION_BACKENDS[0]\r\n else:\r\n auth_backend = 'django.contrib.auth.backends.ModelBackend'\r\n user.backend = auth_backend\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info('Linked user.id: {0} logged in via Shibboleth'.format(user.id))\r\n else:\r\n AUDIT_LOG.info('Linked user \"{0}\" logged in via Shibboleth'.format(user.email))\r\n elif uses_certs:\r\n # Certificates are trusted, so just link the user and log the action\r\n user = internal_user\r\n user.backend = 'django.contrib.auth.backends.ModelBackend'\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info('Linked user_id {0} logged in via SSL certificate'.format(user.id))\r\n else:\r\n AUDIT_LOG.info('Linked user \"{0}\" logged in via SSL certificate'.format(user.email))\r\n else:\r\n user = authenticate(username=uname, password=eamap.internal_password, request=request)\r\n if user is None:\r\n # we want to log the failure, but don't want to log the password attempted:\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.warning('External Auth Login failed')\r\n else:\r\n AUDIT_LOG.warning('External Auth Login failed for \"{0}\"'.format(uname))\r\n return _signup(request, eamap, retfun)\r\n\r\n if not user.is_active:\r\n if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):\r\n # if BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH, we trust external auth and activate any users\r\n # that aren't already active\r\n user.is_active = True\r\n user.save()\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info('Activating user {0} due to external auth'.format(user.id))\r\n else:\r\n AUDIT_LOG.info('Activating user \"{0}\" due to external auth'.format(uname))\r\n else:\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.warning('User {0} is not active after external login'.format(user.id))\r\n else:\r\n AUDIT_LOG.warning('User \"{0}\" is not active after external login'.format(uname))\r\n # TODO: improve error page\r\n msg = 'Account not yet activated: please look for link in your email'\r\n return default_render_failure(request, msg)\r\n\r\n login(request, user)\r\n request.session.set_expiry(0)\r\n\r\n # Now to try enrollment\r\n # Need to special case Shibboleth here because it logs in via a GET.\r\n # testing request.method for extra paranoia\r\n if uses_shibboleth and request.method == 'GET':\r\n enroll_request = _make_shib_enrollment_request(request)\r\n student.views.try_change_enrollment(enroll_request)\r\n else:\r\n student.views.try_change_enrollment(request)\r\n if settings.FEATURES['SQUELCH_PII_IN_LOGS']:\r\n AUDIT_LOG.info(\"Login success - user.id: {0}\".format(user.id))\r\n else:\r\n AUDIT_LOG.info(\"Login success - {0} ({1})\".format(user.username, user.email))\r\n if retfun is None:\r\n return redirect('/')\r\n return retfun()",
"def email_signin(email, password):\n user = User.get_byemail(email)\n\n if not user:\n return \"Account doesn't exists, please register\", 401\n\n # check if authentication method by email exists for this user\n auth_id = 'email${}'.format(user.id)\n user_auth = UserAuth.exists(auth_id)\n if not user_auth:\n return \"Existing user with google or facebook account, not email\", 401\n\n # check password validity\n if not pbkdf2_sha256.verify(password, user_auth.password):\n return \"Incorrect password\", 401\n\n user.update_apikey(User.generate_apikey(user.email))\n\n resp = {\n 'auth_id': auth_id,\n }\n resp.update(user.json)\n\n return resp, 200",
"def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')",
"def existing_email(cls, email):\n user_db = User.get_by('email', email)\n if not user_db:\n raise ValueError('This email is not in our database.')\n return email",
"def authorise_signup(self, username, password, email):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"SELECT username FROM users\\\n WHERE username = %s\", (username,))\n user = cursor.fetchone()\n if user is not None:\n return False\n cursor.execute(\"SELECT password FROM users\\\n WHERE password = %s\", (password,))\n pass_word = cursor.fetchone()\n if pass_word is not None:\n return False\n cursor.execute(\"SELECT email FROM users WHERE email = %s\", (email,))\n mail = cursor.fetchone()\n cursor.close()\n con.commit()\n con.close()\n if mail is not None:\n return False\n return True",
"def register(email, display_name=None):",
"def test_manage_user_with_email(self):\r\n # First with a new user\r\n user_data = dict(id=1, username='facebook',\r\n email='[email protected]', name='name')\r\n token = 't'\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Second with the same user\r\n user = manage_user(token, user_data, None)\r\n assert user.email_addr == user_data['email'], user\r\n assert user.name == user_data['username'], user\r\n assert user.fullname == user_data['name'], user\r\n assert user.facebook_user_id == user_data['id'], user\r\n\r\n # Finally with a user that already is in the system\r\n user_data = dict(id=10, username=self.name,\r\n email=self.email_addr, name=self.fullname)\r\n token = 'tA'\r\n user = manage_user(token, user_data, None)\r\n err_msg = \"It should return the same user\"\r\n assert user.facebook_user_id == 10, err_msg",
"def _find_account_for(self, name, email):\r\n\r\n try:\r\n # Look for an account we have cached\r\n account = self.username_mapping[(name, email)]\r\n except KeyError:\r\n # Look for an existing account that was created due to a previous import\r\n account = self._query_account(Account.c.ob_account_name == name,\r\n Account.c.email == email)\r\n if not account:\r\n # Look for an existing account based on derivations of the name\r\n candidates = (\r\n name,\r\n name.replace(' ', ''),\r\n self._username_from_name(name)\r\n )\r\n\r\n account = None\r\n for candidate in candidates:\r\n account = self._query_account(Account.c.name == candidate,\r\n Account.c.email == email)\r\n if account:\r\n account.ob_account_name = name\r\n account._commit()\r\n break\r\n\r\n # Cache the result for next time\r\n self.username_mapping[(name, email)] = account\r\n\r\n if not account:\r\n raise NotFound\r\n\r\n return account",
"def email_exist(email):\n return User.objects.filter(email=email).first()",
"def is_existing_user(email):\n if not email:\n return False\n user = session.query(KlaxerUser).filter(KlaxerUser.email==email).first()\n return True if user else False",
"def social_auth_user(backend, uid, user=None, *args, **kwargs):\n if not user and backend.name not in ('twitter', 'facebook'):\n raise StopPipeline()\n social_user = UserSocialAuth.get_social_auth(backend.name, uid)\n if social_user:\n if user and social_user.user != user:\n msg = ugettext('This %(provider)s account is already in use.')\n raise AuthAlreadyAssociated(backend, msg % {\n 'provider': backend.name\n })\n elif not user:\n user = social_user.user\n return {'social_user': social_user, 'user': user}",
"def acctLogin(self):\n self.acctObj.email = \"[email protected]\"\n self.password = \"default\"\n self._displayName = \"defaultUser\"\n return True",
"async def login(self, email: str, password: str) -> Tuple[bool, str]:\n\n data: Any = {\n \"namespace\": \"email\",\n \"id\": email,\n \"password\": password,\n }\n\n reply = await self._connection.send(\"login\", data)\n data = self._extract_data(reply)\n\n success: bool = data[\"success\"]\n account_id_or_reason = data.get(\"account_id\") or data[\"reason\"]\n\n if success:\n logger.info(f\"&{self.name}: Logged in as {account_id_or_reason}\")\n else:\n logger.info(f\"&{self.name}: Failed to log in with {email} because {account_id_or_reason}\")\n\n await self._connection.reconnect()\n\n return success, account_id_or_reason",
"def change_email(self, token):\n app = current_app._get_current_object()\n serializer = Serializer(app.config[\"SECRET_KEY\"])\n try:\n data = serializer.loads(token.encode(\"utf-8\"))\n except:\n return False\n if data.get(\"user_id\") != self.id:\n return False\n new_email = data.get(\"new_email\")\n if new_email is None:\n return False\n # check to see if another user has this email\n if self.query.filter_by(email=new_email).first() is not None:\n return False\n self.email = data.get(\"new_email\")\n db.session.add(self)\n return True",
"def sign_in_existing_user(self, email, password):\r\n signin_url = \"https://www.googleapis.com/identitytoolkit/v3/relyingparty/verifyPassword?key=\" + self.wak\r\n signin_payload = {\"email\": email, \"password\": password, \"returnSecureToken\": True}\r\n signin_request = requests.post(signin_url, data=signin_payload)\r\n sign_up_data = json.loads(signin_request.content.decode())\r\n app = App.get_running_app()\r\n print(signin_request.ok)\r\n print(signin_request.content.decode())\r\n\r\n if signin_request.ok == True:\r\n refresh_token = sign_up_data['refreshToken']\r\n localId = sign_up_data['localId']\r\n idToken = sign_up_data['idToken']\r\n\r\n # Save refreshToken to a file\r\n with open(\"refresh_token.txt\", \"w\") as f:\r\n f.write(refresh_token)\r\n\r\n # Save localId to a variable in main app class\r\n # Save idToken to a variable in main app class\r\n app.local_id = localId\r\n app.id_token = idToken\r\n\r\n # Create new key in database from localI\r\n #app.change_screen(\"sandwiches\")\r\n app.on_start()\r\n elif signin_request.ok == False:\r\n error_data = json.loads(signin_request.content.decode())\r\n error_message = error_data[\"error\"]['message']\r\n app.root.ids['login'].ids['login_message'].text = \"EMAIL EXISTS - \" + error_message.replace(\"_\", \" \")",
"def test_returns_existing_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n existing_user = User.objects.create(email=email)\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n self.assertEquals(user, existing_user)",
"def get_user_account_from_email(email, default='raise', active_only=True):\n email = email.strip()\n try:\n return auth.models.User.objects.get(email__iexact=email,\n is_active=active_only)\n except auth.models.User.DoesNotExist:\n # User does not exist\n if default == 'raise':\n raise\n else:\n return default\n except auth.models.User.MultipleObjectsReturned:\n # The system expects to only have one user record per email,\n # so let's reraise the error to have it fixed in the database.\n raise auth.models.User.MultipleObjectsReturned(\n 'Found multiple records for user with email %r' % email)",
"def user_profile_setemail(token, email):\n users = database.get_users()\n for user in users:\n if user['email'] is email:\n raise error.InputError(description=\"This email is already taken\")\n u_id = database.get_current_user(token)\n user = database.get_user_data(u_id)\n user['email'] = email\n database.set_user_data(user)",
"def save(self):\n email = self.validated_data['email']\n name = self.validated_data['name']\n password = self.validated_data['password']\n\n email_query = models.Email.objects.filter(address=email)\n if email_query.exists():\n email_instance = email_query.get()\n\n # If the email is already verified, we send a duplicate\n # notification and exit.\n if email_instance.is_verified:\n logger.info(\n \"Not registering a new user because the email address %r \"\n \"is already verified.\",\n email_instance,\n )\n email_instance.send_duplicate_notification()\n\n return\n\n # If the email is not verified, we send a new verification\n # token to the address.\n logger.info(\n \"Not registering a new user because the email address %r \"\n \"already exists. Sending a new verification token instead.\"\n )\n verification = models.EmailVerification.objects.create(\n email=email_instance,\n )\n verification.send_email()\n\n return\n\n # The email doesn't exist, so we create a new user and email,\n # then send a verification token to the email.\n user = models.User.objects.create_user(name, password)\n email_instance = models.Email.objects.create(address=email, user=user)\n\n # The user's primary email is their only email. This is the only\n # time the primary email can be unverified.\n user.primary_email = email_instance\n user.save()\n\n logger.info(\n \"Registered new user %r with email address %r\",\n user,\n email_instance,\n )\n\n verification = models.EmailVerification.objects.create(\n email=email_instance,\n )\n verification.send_email()",
"def get_account_for_email(cls, email):\n assert email\n key = '<%s>' % email\n return cls.get_by_key_name(key)",
"def user_exists(self, email):\n user = UserModels.fetch_user_by_email(email)\n if user:\n return {\n \"status\": 400,\n \"error\": \"That email already exists\"\n }",
"def test_optional_email(self):\n\n consumer = LTIConsumerFactory(slug=\"consumer\")\n passport = LTIPassportFactory(title=\"consumer1_passport1\", consumer=consumer)\n\n user_count = get_user_model().objects.count()\n\n new_user = self._authenticate(\n {\n \"user_id\": \"7275a984-1e77-4084-9fe6-e54d0deba0e7\",\n \"lti_message_type\": \"basic-lti-launch-request\",\n \"lti_version\": \"LTI-1p0\",\n \"resource_link_id\": \"aaa\",\n \"context_id\": \"course-v1:fooschool+authbackend+0001\",\n \"lis_person_sourcedid\": \"user_without_email\",\n },\n passport,\n )\n\n self.assertEqual(\"user_without_email\", new_user.public_username)\n self.assertEqual(\"\", new_user.email)\n self.assertEqual(consumer, new_user.lti_consumer)\n self.assertEqual(\"user_without_email@consumer\", new_user.username)\n self.assertEqual(user_count + 1, get_user_model().objects.count())",
"def create_account_from_email(email):\n passwd = User.objects.make_random_password(length=8)\n return create_account('', '', email, passwd), passwd",
"def test_create_email_account_twice(self):\n email_addr = 'testcreatetwins@' + self.email_dom\n acc = SpokeEmailAccount(self.org_name, self.user_id)\n self.assertRaises(error.AlreadyExists, acc.create, email_addr)",
"def test_create_account_failed_existing_email(self):\n data = self.user_data.copy()\n data['email'] = '[email protected]'\n response = self.client.post(self.url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data.get('message').get('email')[0], 'user with this email already exists.')",
"def test_retire_user_with_email():\n test_email = \"[email protected]\"\n\n user = UserFactory.create(email=test_email, is_active=True)\n UserSocialAuthFactory.create(user=user, provider=\"edX\")\n\n assert user.is_active is True\n assert \"retired_email\" not in user.email\n assert UserSocialAuth.objects.filter(user=user).count() == 1\n\n COMMAND.handle(\"retire_users\", users=[test_email])\n\n user.refresh_from_db()\n assert user.is_active is False\n assert \"retired_email\" in user.email\n assert UserSocialAuth.objects.filter(user=user).count() == 0",
"def register(self):\n try:\n sha = sha1(self.email).hexdigest()\n except TypeError:\n raise SleekException(\"Could not register user.\", 401)\n\n if not redis.sadd(\"sl:account:ids\", sha):\n raise SleekException(\"Could not register new user.\", 401)\n self.save(register=True)",
"def test_returns_new_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n new_user = User.objects.get(email=email)\r\n self.assertEquals(user, new_user)",
"def user_exist(email, pwd):\r\n session = tables.get_session()\r\n uid = -1\r\n account_name = ''\r\n if session is None:\r\n return uid, account_name\r\n try:\r\n user_account = UserAccount()\r\n password = user_account.get_field_by_key(UserAccount.password, UserAccount.email, email,\r\n session)\r\n if password is not None and password == pwd:\r\n uid = user_account.get_field_by_key(UserAccount.user_id, UserAccount.email, email,\r\n session)\r\n account_name = user_account.get_field_by_key(UserAccount.account_name,\r\n UserAccount.email, email, session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('User login failed: %s', err)\r\n return -1, account_name\r\n finally:\r\n session.close()\r\n return uid, account_name",
"def authenticate(self, facebook_id=None):\n print facebook_id\n if facebook_id:\n user, created = User.objects.get_or_create(username=facebook_id)\n return user\n return None"
] | [
"0.6368004",
"0.60966545",
"0.6048381",
"0.59991693",
"0.59486884",
"0.5934243",
"0.5869946",
"0.58662754",
"0.5855204",
"0.5833579",
"0.5811763",
"0.57687676",
"0.57521755",
"0.56753784",
"0.5648578",
"0.5643133",
"0.5633166",
"0.56330556",
"0.56206214",
"0.5619708",
"0.5619379",
"0.56108487",
"0.5606494",
"0.5589435",
"0.5587153",
"0.55836654",
"0.5582024",
"0.55764264",
"0.55592763",
"0.55482084"
] | 0.7319161 | 0 |
Save the trained vectorizer for future use. | def pickle_vectorizer(self, path='models/TFIDFVectorizer.pkl'):
with open(path, 'wb') as f:
pickle.dump(self.vectorizer, f)
print("Pickled vectorizer at {}".format(path)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_vectorizer(self, vectorizer_filepath):\n with open(vectorizer_filepath, \"w\") as fp:\n json.dump(self._vectorizer.to_serializable(), fp)",
"def save(self, tfidf_vectorizer_path):\n with open(tfidf_vectorizer_path, \"wb\") as fw:\n pickle.dump(self, fw)",
"def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)",
"def save(self, dirname=None):\n self.genio.save(dirname)\n logging.info(\n f'Saved word vectorizations for {dirname}')",
"def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)",
"def save(self):\n if self.loaded:\n full_file_name = self.resource_manager.get_dataset(self.corpus, self.embeddings.vsm_name)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')",
"def save(self, save_dir='models'):\n with open(os.path.join(save_dir, 'model_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.model, f)\n with open(os.path.join(save_dir, 'vectorizer_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.vectorizer, f)\n with open(os.path.join(save_dir, 'userid2name.pkl'), 'wb') as f:\n pickle.dump(self.userid2name, f)\n with open(os.path.join(save_dir, 'name2userid.pkl'), 'wb') as f:\n pickle.dump(self.name2userid, f)",
"def save(self, path):\n file = open(path, 'wb')\n pickle.dump(optimizers.unpack_optimizer_state(self.opt_state), file, -1)\n file.close()",
"def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')",
"def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')",
"def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)",
"def save(self, path):\n torch.save({\n 'model_state_dict': self.state_dict(),\n 'optimizer_state_dict': self.optimizer.state_dict(),\n }, path)",
"def save(self, path):\n if path is None:\n return\n\n logging.info(\"Save model to {}\".format(path))\n self.model.save_pretrained(path)\n self.tokenizer.save_pretrained(path)",
"def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)",
"def save(self, path=\"./trained_model.checkpoint\"):\n torch.save({\"state_dict\":self.working_q.state_dict}, path)",
"def save(self, filename):\n model_dict = {'model_state_dict': self.state_dict(),\n 'init_args': {\"vocab_size\": self.vocab_size,\n \"embeddings_size\": self.embeddings_size,\n \"hidden_size\": self.hidden_size,\n \"mlp_hidden_size\": self.mlp_hidden_size,\n \"dropout\": self.dropout}}\n torch.save(model_dict, filename)",
"def save_model(self, filename) -> None:\n #t.save(self, filename)\n traced=t.jit.script(self)\n t.jit.save(traced,filename)",
"def __savePreProcessedData(self):\n np.savetxt(self.X_filename, self.X, delimiter=',')\n np.savetxt(self.y_filename, self.le.fit_transform(self.y), delimiter=',')\n #Need to save the label Enconder to inverse transform later\n joblib.dump(self.le, self.le_filename)\n\n print(\"Saved X and y\")",
"def save(self, path):\n save_dict = {\n 'model': {\n 'vocabulary': self.vocabulary,\n 'max_sequence_length': self.max_sequence_length\n },\n 'decorator': {\n 'params': self.network.get_params(),\n 'state': self.network.state_dict()\n }\n }\n torch.save(save_dict, path)",
"def save(self, path=\"word2vec_keras.tar.gz\"):\n tokenizer_path = os.path.join(tempfile.gettempdir(), \"tokenizer.pkl\")\n label_encoder_path = os.path.join(tempfile.gettempdir(), \"label_encoder.pkl\")\n params_path = os.path.join(tempfile.gettempdir(), \"params.pkl\")\n keras_path = os.path.join(tempfile.gettempdir(), \"model.h5\")\n w2v_path = os.path.join(tempfile.gettempdir(), \"model.w2v\")\n\n # Dump pickle\n pickle.dump(self.tokenizer, open(tokenizer_path, \"wb\"))\n pickle.dump(self.label_encoder, open(label_encoder_path, \"wb\"))\n pickle.dump(self.__attributes__(), open(params_path, \"wb\"))\n pickle.dump(self.w2v_model, open(w2v_path, \"wb\"))\n self.k_model.save(keras_path)\n # self.w2v_model.save(w2v_path)\n\n # Create Tar file\n tar = tarfile.open(path, \"w:gz\")\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n tar.add(name, arcname=os.path.basename(name))\n tar.close()\n\n # Remove temp file\n for name in [tokenizer_path, label_encoder_path, params_path, keras_path, w2v_path]:\n os.remove(name)",
"def save_model(cls, vocab, path, filename):\n return super().save_model(vocab, path, filename)",
"def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)",
"def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")\n # self.Model_claim.save(\"Model_claim.h5\")\n # Model_made = self.Model_made\n # Model_claim = self.Model_claim\n # self.Model_made = None\n # self.Model_claim = None\n with open('pricing_model.p', 'wb') as target:\n pickle.dump(self, target)\n\n # self.Model_made = Model_made\n # self.Model_claim = Model_claim\n\n # zipObj = ZipFile(\"model.zip\",\"w\")\n # zipObj.write(\"Model_made.h5\")\n # zipObj.write(\"Model_claim.h5\")\n # zipObj.write(\"pricing_model.p\")\n # zipObj.close()",
"def save(self, path):\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n np.save(os.path.join(path, 'V.npy'), self.V.cpu().numpy())\n\n if self.W is not None:\n np.save(os.path.join(path, 'W.npy'), self.W.cpu().numpy())\n\n if self.vb is not None:\n np.save(os.path.join(path, 'v_bias.npy'), self.vb.cpu().numpy())\n\n if self.wb is not None:\n np.save(os.path.join(path, 'w_bias.npy'), self.wb.cpu().numpy())\n\n if self.dictionary is not None:\n self.dictionary.save(os.path.join(path, 'dictionary'))",
"def finalise(self):\n self.logger.info(\"Saving final versions of model...\")\n self.save_checkpoint(filename='final.pth.tar')",
"def save(self):\r\n # torch.save(self.model.state_dict, os.path.join(self.ckpt_dir, 'best_model_state_dict.pt'))\r\n torch.save(self.model, os.path.join(self.ckpt_dir, 'best_model_INN.pt'))",
"def update_predictor(self):\n estimator = tf.estimator.Estimator(self.model_fn,\n self.model_dir,\n params={},\n )\n self.saved_path = estimator.export_saved_model('saved_model', \n self._serv_input_fn(),\n )\n self._build_predictor()",
"def save(self):\n\n try:\n joblib.dump(self._clf, self._modelFile)\n except:\n return False\n\n return True",
"def save(self, idx2vec: numpy.ndarray = None, filepath: str = None) -> str:\n if filepath is not None:\n self.filepath = filepath\n\n if not os.path.isdir(os.path.dirname(self.filepath)):\n os.mkdir(os.path.dirname(self.filepath))\n\n self.idx2word: list = self.vocab.idx2word\n self.idx2freq: numpy.ndarray = self.vocab.idx2freq\n if idx2vec is not None:\n self.idx2vec = idx2vec\n\n with open(self.filepath, 'wb') as f:\n pickle.dump(self, f)\n return self.filepath",
"def transform_word_vectors(self):\n print('Transforming word vectors')\n \n self.train_X_tfidfvec = self.get_word_vectors(self.train_X)\n self.val_X_tfidfvec = self.get_word_vectors(self.val_X)\n self.test_X_tfidfvec = self.get_word_vectors(self.test_X)\n if self.savename is not None:\n with open(self.savename + '_X_tfidfvec.obj','wb') as f:\n pickle.dump((self.train_X_tfidfvec,self.val_X_tfidfvec,self.test_X_tfidfvec),f) \n print('Done transforming word vectors')"
] | [
"0.76096624",
"0.73109317",
"0.7164249",
"0.70169145",
"0.66974753",
"0.6566269",
"0.6522058",
"0.64056844",
"0.63931715",
"0.6316061",
"0.6313552",
"0.6313552",
"0.6276785",
"0.62720037",
"0.623774",
"0.62127876",
"0.62069654",
"0.6184085",
"0.6171327",
"0.61685747",
"0.6127885",
"0.6122308",
"0.61023724",
"0.60992193",
"0.6098343",
"0.6053443",
"0.6031845",
"0.60276747",
"0.6025877",
"0.60167736"
] | 0.7430699 | 1 |
Initializes the object using the list of technology dictionaries that are copied and formatted. Takes an optional parameter for the datetime.date object of the last full BuiltWith scan. | def __init__(self, technologies_list, last_full_builtwith_scan_date=None):
self._technologies_by_name = {}
for technologies_dict in technologies_list:
copied_technologies_dict = copy.deepcopy(technologies_dict)
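# Convert each timestamp field named in DATETIME_INFORMATION_NAMES to a UTC datetime.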
for name in DATETIME_INFORMATION_NAMES:
copied_technologies_dict[name] = _convert_timestamp_to_utc_datetime(technologies_dict[name])
# According to the team at BuiltWith, it's best to just use the last "FULL" scan
# time in the CurrentlyLive determination since BuiltWith doesn't publish their
# smaller "TOPSITE" list. Downside is that this client will say some technologies were
# successfully detected on "TOPSITE" sites on the the last BuiltWith scan when that's
# not in fact accurate.
if last_full_builtwith_scan_date:
copied_technologies_dict['CurrentlyLive'] = (
last_full_builtwith_scan_date <= copied_technologies_dict['LastDetected'].date())
self._technologies_by_name[technologies_dict['Name']] = copied_technologies_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(\n self, datetime,\n provider, asset_license,\n ext_properties\n ):\n self.ext_properties = ext_properties\n self.license = asset_license\n self.provider = provider\n self.datetime = datetime",
"def __init__(self, *args):\n this = _libsbml.new_Date(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, date=None, calculate='time', wsid=None, api_token=None):\n self.given_date = date or datetime.date.today()\n self.calculate = calculate\n self.wsid = wsid or TOGGLR_CONFIG.get_internal('wsid')\n self.api_token = api_token or TOGGLR_CONFIG.get_internal('api_token')\n\n self.url = 'https://toggl.com/reports/api/v2/weekly'\n self.starting_date = utils.get_first_day_of_week_for_date(self.given_date)\n self.ending_date = self.starting_date + datetime.timedelta(days=6)",
"def __init__(self):\n this = _libsbml.new_ListWrapperDate()\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, prefix, date, county):\n# self._dummyivo = DUMMYIVO\n\n # this to keep pylint happy\n self._ballots = []\n self._filename = ''\n self._pctname = ''\n self._pctnumber = ''\n self._registered = 0\n\n self.readdata(prefix, date, county)",
"def __init__(\n self,\n data_dict: Dict[str, pd.DataFrame],\n sampling_rate_dict: Dict[str, int],\n start_time: Optional[pd.Timestamp] = None,\n event_markers: Optional[Sequence[bioread.reader.EventMarker]] = None,\n tz: Optional[str] = None,\n ):\n self._data = data_dict\n for name, data in data_dict.items():\n setattr(self, name, data)\n for name, sampling_rate in sampling_rate_dict.items():\n setattr(self, f\"sampling_rate_hz_{name}\", sampling_rate)\n setattr(self, \"channels\", list(self._data.keys()))\n self._sampling_rate = sampling_rate_dict\n self._start_time_unix = start_time\n self._event_markers = event_markers\n self._tz = tz",
"def __init__(self, date: dt_date, style: str, partners: list, notes: str, climb: Climb):\r\n self._date = date\r\n self._styles = {\r\n 'Lead RP': 'read point',\r\n 'AltLd O/S': 'onsight',\r\n 'Solo O/S': 'onsight',\r\n 'Lead rpt': 'no log',\r\n 'Lead O/S': 'onsight',\r\n '2nd β': 'flash',\r\n 'Solo rpt': 'no log',\r\n 'Lead Flash': 'flash',\r\n 'Lead dog': 'no send',\r\n '2nd O/S': 'onsight',\r\n 'AltLd rpt': 'no log',\r\n 'AltLd': 'no log',\r\n '2nd': 'no log',\r\n 'Sent x': 'read point',\r\n 'Sent Flash': 'flash',\r\n '-': 'summit',\r\n 'Solo': 'no log',\r\n 'Sent O/S': 'onsight',\r\n 'AltLd dnf': 'no send',\r\n 'Lead dnf': 'no send',\r\n 'DWS': 'no log',\r\n '2nd rpt': 'no log',\r\n '2nd dog': 'no send',\r\n 'AltLd dog': 'no send',\r\n 'Sent rpt': 'no log',\r\n 'Lead G/U': 'ground up',\r\n 'Sent': 'no log',\r\n 'Solo dnf': 'no send',\r\n 'Lead': 'no log'} # A matcher of different style types\r\n self._style = self.match_style(style) # Correct the style for a more readable format\r\n self._partners = partners\r\n self._notes = notes\r\n self._climb = climb",
"def __init__(self, start: datetime.date) -> None:\n self.start = start\n self.bill = None",
"def __init__(self, vardict):\n \n datevars = []\n for v in vardict:\n vf = v.VariableFormat\n for fmt in [\"DATE\", \"TIME\", \"QYR\", \"MOYR\", \"MONTH\"]:\n if vf.find(fmt) >= 0:\n datevars.append(v.VariableName)\n break\n self.datevars = \" \".join(datevars)",
"def __init__(self, data):\n\n self.produce_csv = data['produce_csv']\n self.produce_graphics = data['produce_graphics']\n self.report_name = data['report_name']\n self.file_name = self.report_name + '.csv'\n self.annual_file_name = self.report_name + '_annual.csv'\n self.csv_dir = ''\n self.diagnostic_dir = ''\n\n self.daily_variables = {\n 'year': ['time.cal_year', '', []],\n 'j_day': ['time.day', '', []]\n }\n\n self.annual_variables = {\n 'year': ['time.cal_year', '', 0]\n }",
"def __init__(__self__, *,\n date: Optional[int] = None,\n is_last: Optional[bool] = None):\n if date is not None:\n pulumi.set(__self__, \"date\", date)\n if is_last is not None:\n pulumi.set(__self__, \"is_last\", is_last)",
"def __init__(self, name=\"\", date=None):\n super().__init__(\"date\", name)\n self.date = date",
"def __init__(self, **kwargs):\n self.suspension = kwargs[\"suspension\"] # Suspension Status\n self.license = kwargs[\"license\"] # License Number\n self.first_name = kwargs[\"first_name\"] # First Name\n self.last_name = kwargs[\"last_name\"] # Last Name\n self.city = kwargs[\"city\"] # City\n self.state = kwargs[\"state\"] # State\n self.zipcode = kwargs[\"zipcode\"] # Zip Code\n self.gender = kwargs[\"gender\"] # Gender\n self.racing_age = kwargs[\"racing_age\"] # Racing Age\n self.expire_date = kwargs[\"expire_date\"] # License Expiration Date\n self.intl_team = kwargs[\"intl_team\"] # Intl Team\n self.road_cat = kwargs[\"road_cat\"] # Road Category\n self.track_cat = kwargs[\"track_cat\"] # Track Category\n self.xc_cat = kwargs[\"xc_cat\"] # XC Category\n self.dh_cat = kwargs[\"dh_cat\"] # DH Category\n self.ot_cat = kwargs[\"ot_cat\"] # OT Category\n self.mx_cat = kwargs[\"mx_cat\"] # MX Category\n self.cx_cat = kwargs[\"cx_cat\"] # CX Category\n self.birth_date = kwargs[\"birth_date\"] # Birthday\n self.citizenship = kwargs[\"citizenship\"] # Citizenship\n self.road_club_id = kwargs[\"road_club_id\"]\n self.road_team_id = kwargs[\"road_team_id\"] # RD Club/Team ID\n self.track_club_id = kwargs[\"track_club_id\"]\n self.track_team_id = kwargs[\"track_team_id\"] # Track Club/Team ID\n self.mtn_club_id = kwargs[\"mtn_club_id\"]\n self.mtn_team_id = kwargs[\"mtn_team_id\"] # MTN Club/Team ID\n self.cx_club_id = kwargs[\"cx_club_id\"]\n self.cx_team_id = kwargs[\"cx_team_id\"] # CX Club/Team ID\n self.coll_club_id = kwargs[\"coll_club_id\"] # Collegiate Club ID\n self.uci_code = kwargs[\"uci_code\"] # UCI Code\n self.cx_rank = kwargs[\"cx_rank\"] # CX Rank\n self.hs_club_id = kwargs[\"hs_club_id\"]\n self.hs_team_id = kwargs[\"hs_team_id\"] # HS Club/Team ID",
"def __init__(self, context, t):\n ## A hack to deal with _bibtex' incomplete handling of the \"month\" field\n self.date = [0,0] # Month, Year\n ##\n self.handle = t[0]\n self.entry_type = t[1]\n t[2] #Mystery!\n t[3] #Mystery!\n self.typemap = {} #If we know that some fields should be a particular type.\n self.data = {}\n items = t[4]\n for k in items.keys():\n ty = self.typemap.get(k, -1)\n x = _bibtex.expand(context, items[k], ty)\n if k == \"month\":\n month = BIBTEX_MONTHS[_bibtex.get_native(items[k])]\n self.date[0] = month\n self.data[\"month\"] = month\n continue\n if ty == -1: ty = x[0]\n self.data[k] = self.build(ty, x)\n self.date[1] = self.data[\"year\"]",
"def __init__(self, measurement, tags, fields, time_stamp):\n self.measurement = measurement\n self.tags = tags\n self.fields = fields\n self.time = time_stamp",
"def __init__(self, **kwargs):\n self._defined = dict(_DEFAULT_TASKS_CHARACTERISTICS)\n self._defined.update(kwargs)\n self.clean_dates()\n self._parent = None",
"def __init__(\n self,\n name: str,\n source: str,\n start_date: np.ndarray,\n ):\n self.name = name\n self.start_date = start_date\n super().__init__(source)",
"def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)",
"def __init__(__self__, *,\n end_date: Optional[str] = None,\n start_date: Optional[str] = None,\n term_unit: Optional[str] = None):\n if end_date is not None:\n pulumi.set(__self__, \"end_date\", end_date)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if term_unit is not None:\n pulumi.set(__self__, \"term_unit\", term_unit)",
"def __init__(self, **attrs):\n \n self.part_id = None\n self.category_id = None\n self.external_ids = {}\n self.name = None\n \n self.year_from = None\n self.year_to = None\n \n self.url = None\n self.img_url = None\n \n self.print_of = None\n self.prints = []\n self.molds = []\n self.alternates = []\n \n super().__init__(**attrs)",
"def __init__(self):\n self.date = str(date.today())\n today_date = str(date.today())\n today_date = today_date.split(\"-\")\n self.curr_year = int(today_date[0])\n self.curr_month = int(today_date[1])\n self.curr_date = int(today_date[2])",
"def _set_date_times(self):\n if self._report_key in (ReportTypes.SEARCH_DETAIL_REPORT, ReportTypes.SEARCH_BODY_REPORT):\n self._report_data['searchDateTime'] = Report._to_report_datetime(self._report_data['searchDateTime'])\n if self._report_data['totalResultsSize'] > 0:\n for detail in self._report_data['details']:\n detail['createDateTime'] = Report._to_report_datetime(detail['createDateTime'])\n if detail.get('declaredDateTime'):\n detail['declaredDateTime'] = Report._to_report_datetime(detail['declaredDateTime'], False)\n declared_value = str(detail['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n detail['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n detail['declaredValue'] = ''\n if detail.get('description') and detail['description'].get('engineerDate'):\n if detail['description']['engineerDate'] == '0001-01-01':\n detail['description']['engineerDate'] = ''\n else:\n detail['description']['engineerDate'] = \\\n Report._to_report_datetime(detail['description']['engineerDate'], False)\n else:\n detail['description']['engineerDate'] = ''\n if detail.get('location') and detail['location'].get('taxExpiryDate'):\n detail['location']['taxExpiryDate'] = \\\n Report._to_report_datetime(detail['location']['taxExpiryDate'], False)\n elif self._report_key == ReportTypes.MHR_REGISTRATION:\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('description') and reg['description'].get('engineerDate'):\n if reg['description']['engineerDate'] == '0001-01-01':\n reg['description']['engineerDate'] = ''\n else:\n reg['description']['engineerDate'] = \\\n Report._to_report_datetime(reg['description']['engineerDate'], False)\n else:\n reg['description']['engineerDate'] = ''\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'], False)\n elif self._report_key in (ReportTypes.MHR_TRANSFER, ReportTypes.MHR_EXEMPTION,\n ReportTypes.MHR_TRANSPORT_PERMIT, ReportTypes.MHR_NOTE,\n ReportTypes.MHR_ADMIN_REGISTRATION):\n reg = self._report_data\n reg['createDateTime'] = Report._to_report_datetime(reg['createDateTime'])\n if reg.get('declaredValue'):\n declared_value = str(reg['declaredValue'])\n if declared_value.isnumeric() and declared_value != '0':\n reg['declaredValue'] = '$' + '{:0,.2f}'.format(float(declared_value))\n else:\n reg['declaredValue'] = ''\n if reg.get('transferDate'):\n reg['transferDate'] = Report._to_report_datetime(reg['transferDate'], False)\n if self._report_key == ReportTypes.MHR_TRANSPORT_PERMIT and reg.get('newLocation'):\n reg['location'] = reg.get('newLocation')\n if reg.get('location') and reg['location'].get('taxExpiryDate'):\n reg['location']['taxExpiryDate'] = Report._to_report_datetime(reg['location']['taxExpiryDate'],\n False)",
"def __init__(self, code, start_date=\"1900-01-01\", end_date=\"2020-01-01\"):\n base = Base()\n self.datas = base.getData(\n code=code, start_date=start_date, end_date=end_date)\n self._index = 0\n self.period = 14",
"def __init__(__self__, *,\n date: Optional[pulumi.Input[int]] = None,\n is_last: Optional[pulumi.Input[bool]] = None):\n if date is not None:\n pulumi.set(__self__, \"date\", date)\n if is_last is not None:\n pulumi.set(__self__, \"is_last\", is_last)",
"def __init__(self):\n self.TECRDB_compounds_data_dict = {}\n self.TECRDB_compounds_pH7_species_id_dict = {}\n self.TECRDB_compounds_least_H_sid_dict = {}\n self.get_TECRDB_compounds_data()",
"def __init__(self):\n self.current_year = datetime.date.today().year\n self.random_column_mappings = collections.defaultdict(dict)",
"def __init__(self, price_of_previous_sale=None, date_of_previous_sale=None, leasehold_or_freehold=None, wall_type=None, wall_insulation=None, wall_energy_efficiency=None, roof_type=None, roof_insulation=None, roof_energy_efficiency=None, window_glazing_type=None, window_energy_efficiency=None, current_energy_rating=None, potential_energy_rating=None, annual_heating_cost_in_pounds=None, annual_hot_water_cost_in_pounds=None, annual_lighting_cost_in_pounds=None, annual_energy_consumption_in_k_wh=None, floor_height_in_foot=None, built_form=None): # noqa: E501 # noqa: E501\n\n self._price_of_previous_sale = None\n self._date_of_previous_sale = None\n self._leasehold_or_freehold = None\n self._wall_type = None\n self._wall_insulation = None\n self._wall_energy_efficiency = None\n self._roof_type = None\n self._roof_insulation = None\n self._roof_energy_efficiency = None\n self._window_glazing_type = None\n self._window_energy_efficiency = None\n self._current_energy_rating = None\n self._potential_energy_rating = None\n self._annual_heating_cost_in_pounds = None\n self._annual_hot_water_cost_in_pounds = None\n self._annual_lighting_cost_in_pounds = None\n self._annual_energy_consumption_in_k_wh = None\n self._floor_height_in_foot = None\n self._built_form = None\n self.discriminator = None\n\n if price_of_previous_sale is not None:\n self.price_of_previous_sale = price_of_previous_sale\n if date_of_previous_sale is not None:\n self.date_of_previous_sale = date_of_previous_sale\n if leasehold_or_freehold is not None:\n self.leasehold_or_freehold = leasehold_or_freehold\n if wall_type is not None:\n self.wall_type = wall_type\n if wall_insulation is not None:\n self.wall_insulation = wall_insulation\n if wall_energy_efficiency is not None:\n self.wall_energy_efficiency = wall_energy_efficiency\n if roof_type is not None:\n self.roof_type = roof_type\n if roof_insulation is not None:\n self.roof_insulation = roof_insulation\n if roof_energy_efficiency is not None:\n self.roof_energy_efficiency = roof_energy_efficiency\n if window_glazing_type is not None:\n self.window_glazing_type = window_glazing_type\n if window_energy_efficiency is not None:\n self.window_energy_efficiency = window_energy_efficiency\n if current_energy_rating is not None:\n self.current_energy_rating = current_energy_rating\n if potential_energy_rating is not None:\n self.potential_energy_rating = potential_energy_rating\n if annual_heating_cost_in_pounds is not None:\n self.annual_heating_cost_in_pounds = annual_heating_cost_in_pounds\n if annual_hot_water_cost_in_pounds is not None:\n self.annual_hot_water_cost_in_pounds = annual_hot_water_cost_in_pounds\n if annual_lighting_cost_in_pounds is not None:\n self.annual_lighting_cost_in_pounds = annual_lighting_cost_in_pounds\n if annual_energy_consumption_in_k_wh is not None:\n self.annual_energy_consumption_in_k_wh = annual_energy_consumption_in_k_wh\n if floor_height_in_foot is not None:\n self.floor_height_in_foot = floor_height_in_foot\n if built_form is not None:\n self.built_form = built_form",
"def __init__(self, us):\n self.subject = us[\"subject\"]\n self.epic = us[\"epics\"][0][\"subject\"] if us[\"epics\"] else []\n self.tags = us[\"tags\"][0] if us[\"tags\"] else []\n self.section = self._section\n self.subtasks = us[\"tasks\"]\n # if us[\"due_date\"]:\n # self.due_date = dt.datetime.strptime(us[\"due_date\"],\n # \"%Y-%m-%dT%H:%M:%S.%fZ\")\n # else:\n # self.due_date = None",
"def __init__(self, **kw_args):\n self._isoFmt = \"%Y%m%dT%H%M%S%z\"\n\n self._init_client_id(kw_args)\n self._init_shared_secret(kw_args)\n self._init_counter_from_time(kw_args)\n self._init_last_count(kw_args)\n self._init_last_count_update_time(kw_args)\n self._init_period(kw_args)\n self._init_password_length(kw_args)\n self._init_tags(kw_args)\n self._init_note(kw_args)",
"def __init__(\n self, *, term: str, date: datetime = None, max_timestamp_delta: timedelta = None\n ):\n self.term = term\n self.date = date or datetime.now()\n self.max_timestamp_delta = max_timestamp_delta"
] | [
"0.58997256",
"0.58272433",
"0.57844007",
"0.5710227",
"0.55624354",
"0.55044675",
"0.5499629",
"0.5475405",
"0.541306",
"0.5392969",
"0.53880745",
"0.53873634",
"0.5373261",
"0.53632164",
"0.53545606",
"0.5312174",
"0.5298981",
"0.52789253",
"0.52789253",
"0.52765673",
"0.52729434",
"0.52653384",
"0.5250143",
"0.5226036",
"0.5218868",
"0.5217761",
"0.52164155",
"0.5205125",
"0.5202117",
"0.5195638"
] | 0.7597779 | 0 |
Lookup BuiltWith results for the given domain. If API version 2 is used and the get_last_full_query flag is enabled, it also queries for the date of the last full BuiltWith scan. | def lookup(self, domain, get_last_full_query=True):
data = {}
try:
last_full_builtwith_scan_date = None
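# API version 7 accepts several domains per lookup, passed as a comma-separated list.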
if self.api_version == 7 and isinstance(domain, list):
domain = ','.join(domain)
if self.api_version in [2, 7]:
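# UPDATE=1 asks the API for the dates of its most recent scans; 'FULL' is the last full crawl.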
last_updates_resp = requests.get(ENDPOINTS_BY_API_VERSION[self.api_version], params={'UPDATE': 1})
last_updated_data = last_updates_resp.json()
if get_last_full_query and last_updated_data['FULL']:
last_full_builtwith_scan_date = datetime.datetime.strptime(last_updated_data['FULL'],
'%Y-%m-%d').date()
print "last_full_builtwith_scan_date >", last_full_builtwith_scan_date
params = {'KEY': self.key, 'LOOKUP': domain,}
response = requests.get(ENDPOINTS_BY_API_VERSION[self.api_version], params=params)
if self.api_version == 1:
data = response.json()
elif self.api_version == 2:
data = BuiltWithDomainInfo(response.json(), last_full_builtwith_scan_date)
elif self.api_version == 7:
domain_info = list()
for result in response.json()['Results']:
domain_info.append(BuiltWithDomainInfo(result['Result'], last_full_builtwith_scan_date))
return domain_info
elif self.api_version == 12:
data = response.json()
except Exception as e:
# Some API errors expose a "Message" field on a dict-like error object;
# fall back to the exception text for everything else.
try:
error = e.get("Message")
except AttributeError:
error = str(e)
data["Errors"] = error
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _query(self, mapping, from_date=None, to_date=None, max_count=None,\n offset=None, ascendingly=True, describe=False):\n group, key = mapping.data_var.split(self._data_var_separator)\n\n # build params\n params = 'describe={describe}&keys={key}'.format(describe=str(describe).lower(), key=key)\n if self._api['token'] is not None:\n params += '&apitoken={}'.format(self._api['token'])\n if from_date is not None:\n params += '&from-date={}'.format(from_date.isoformat())\n if to_date is not None:\n params += '&to-date={}'.format(to_date.isoformat())\n\n # build url\n url = '{}{}?{}'.format(self._api['host'], self._api['url'], params).format(group=group)\n\n r = requests.get(url)\n if r.status_code == 200:\n data = json.loads(r.content.decode('utf-8'))\n # return query result\n if not describe:\n # sort\n data = sorted(\n data,\n key=lambda k: k.get(self._timestampkey),\n reverse=(not ascendingly))\n # apply constraints\n if offset is not None:\n data = data[offset:]\n if max_count is not None:\n data = data[:max_count]\n # process to query result\n res = QueryResult(mapping.obs_uri)\n for r in data:\n res.add_row(\n dateutil.parser.parse(r.get(self._timestampkey)),\n r.get(self._valuekey))\n # return\n return res\n # return query result description\n else:\n min = data.get('mindate', None)\n if min is not None:\n min = dateutil.parser.parse(min)\n max = data.get('maxdate', None)\n if max is not None:\n max = dateutil.parser.parse(max)\n return QueryResultDescription(mapping.obs_uri, min, max, data.get('count', 0))\n else:\n # empty/erronous response\n self.pyerr(\"Failed calling API: {}\".format(url))\n if not describe:\n return QueryResult(mapping.obs_uri)\n return QueryResultDescription(mapping.obs_uri, None, None, 0)",
"def get_domain_dns_records(domain):\n url_suffix = \"v1/domains/{}/records\".format(domain)\n ret = _call_endpoint(url_suffix)\n if isinstance(ret, dict) and ret.get('code', None) == \"UNKNOWN_DOMAIN\":\n # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}\n raise Exception(f\"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}\")\n return ret",
"def lookup(self, period=None, **kwargs):\n return self._backend.lookup(period, **kwargs)",
"def whois_parsed(self, domain):\n return self.apiquery('/v1/{}/whois/parsed/'.format(domain))",
"def domain_lookup(domain):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': domain\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def feed_company_from_db1(output_data, domain):\n companyfl = CompanyFromdb1.objects.using('db1').filter(\n company_domain__iexact=domain,\n active=1\n )[0]\n\n if companyfl.company_name:\n output_data['name'] = companyfl.company_name\n\n if companyfl.company_phone:\n output_data['phone'] = companyfl.company_phone\n\n if companyfl.company_website:\n analyzed_url = urlparse(companyfl.company_website)\n if analyzed_url.netloc and analyzed_url.scheme:\n website_url = \"%s://%s\" % (\n analyzed_url.scheme,\n analyzed_url.netloc\n )\n elif analyzed_url.netloc and not analyzed_url.scheme:\n website_url = analyzed_url.netloc\n else:\n website_url = analyzed_url.path\n output_data['website_url'] = website_url\n\n if (companyfl.company_size and\n company_size_mapping_dict.get(companyfl.company_size)):\n output_data['size'] = company_size_mapping_dict.get(\n companyfl.company_size\n )\n\n if companyfl.company_remarks:\n output_data['description'] = (\n companyfl.\n company_remarks.\n replace('\\n', ' ').\n replace('\\r', '')\n )\n\n if companyfl.company_social:\n output_data['linkedin_url'] = companyfl.company_social\n\n if companyfl.sectors:\n output_data['industries'] = companyfl.sectors.split(u'§')\n\n if companyfl.profiles:\n output_data['types'] = companyfl.profiles.split(u'§')\n\n if companyfl.updated_on:\n output_data['last_updated'] = companyfl.updated_on\n\n # only retrieving email if email_status=VAL and row was updated less than\n # 365days ago\n if companyfl.company_email_status == \"VAL\" and companyfl.updated_on:\n duration_in_days = (timezone.now() - companyfl.updated_on).days\n if duration_in_days <= 365:\n output_data['email'] = companyfl.company_email\n\n if companyfl.street_name and companyfl.city and companyfl.country:\n # TODO: if street_number or postcode are None, we do not add it but it\n # leaves 2 spaces...find a way to solve it intelligently\n output_data['formatted_address'] = \"%s %s, %s %s, %s\" % (\n companyfl.street_number if companyfl.street_number else '',\n companyfl.street_name,\n companyfl.postcode if companyfl.postcode else '',\n companyfl.city,\n companyfl.country.country_name\n )\n\n return output_data",
"def for_domain(cls, domain, **kwargs):\n return cls.all(**kwargs).ancestor(domain)",
"def __call__(self, *filters):\n return self.client._get_and_filter(Domain, *filters)",
"def lookup_whois(self, inc_raw=False, retry_count=3, get_referral=False,\r\n extra_blacklist=None, ignore_referral_errors=False,\r\n field_list=None, asn_alts=None):\r\n\r\n from .whois import Whois\r\n\r\n # Create the return dictionary.\r\n results = {}\r\n\r\n # Retrieve the ASN information.\r\n log.debug('ASN lookup for {0}'.format(self.address_str))\r\n asn_data, response = self.net.lookup_asn(retry_count, asn_alts)\r\n\r\n # Add the ASN information to the return dictionary.\r\n results.update(asn_data)\r\n\r\n # Retrieve the whois data and parse.\r\n whois = Whois(self.net)\r\n log.debug('WHOIS lookup for {0}'.format(self.address_str))\r\n whois_data = whois.lookup(\r\n inc_raw, retry_count, response, get_referral, extra_blacklist,\r\n ignore_referral_errors, asn_data, field_list\r\n )\r\n\r\n # Add the RDAP information to the return dictionary.\r\n results.update(whois_data)\r\n\r\n return results",
"def fetch(api_key, query, page=1, from_date=False, to_date=False):\n\n # construct url\n url = create_guardian_search_url(api_key, query, page, from_date, to_date)\n\n # do the fetch request\n request_response = fetch_url.fetch(url)\n\n # did we get a response\n if (request_response):\n return request_response['response']\n else:\n return False",
"def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])",
"def domain_info(self, domain):\n endpoint = '/Domain/Info'\n\n params = {\n 'Domain' : domain\n }\n\n response = self.__perform_get_request(endpoint, params)\n \n if response.status_code == 200:\n parsed_response = response.json()\n return parsed_response",
"def search(self, query: str):\n from googlesearch import search\n from urllib.error import HTTPError\n search_successful = False\n result = None\n\n # top level domains for the google search\n tld_array = [\"com\", \"co.in\", \"co.za\", \"co.uk\", \"co.de\", \"co.id\"]\n # the index of the top level domains to start off with\n tld_index = 0\n\n # if getting too many requests, change tld to co.in and com, co.za\n while not search_successful:\n try:\n urls = search(query, tld=tld_array[tld_index], num=1, stop=1, pause=2,\n # domains=[\"\"],\n user_agent=\"GoogleSearchBotThing/1.0\")\n for url in urls:\n result = url\n\n search_successful = True\n except HTTPError as error:\n tld_index = (tld_index + 1) % len(tld_array)\n printer = General(self.verbosity)\n printer.pprint(8, \"Too many requests from TLD. Switching to\", tld_array[tld_index], error)\n search_successful = False\n pass\n return result",
"def find_domain_range(record):\n response = {\"domain\": [], \"range\": []}\n if \"http://schema.org/domainIncludes\" in record:\n if isinstance(record[\"http://schema.org/domainIncludes\"], dict):\n response[\"domain\"] = [record[\"http://schema.org/domainIncludes\"][\"@id\"]]\n elif isinstance(record[\"http://schema.org/domainIncludes\"], list):\n response[\"domain\"] = [\n _item[\"@id\"] for _item in record[\"http://schema.org/domainIncludes\"]\n ]\n if \"http://schema.org/rangeIncludes\" in record:\n if isinstance(record[\"http://schema.org/rangeIncludes\"], dict):\n response[\"range\"] = [record[\"http://schema.org/rangeIncludes\"][\"@id\"]]\n elif isinstance(record[\"http://schema.org/rangeIncludes\"], list):\n response[\"range\"] = [\n _item[\"@id\"] for _item in record[\"http://schema.org/rangeIncludes\"]\n ]\n return (response[\"domain\"], response[\"range\"])",
"async def _async_query_domain() -> Domain | None:\n try:\n return await hass.async_add_executor_job(\n whois_query, entry.data[CONF_DOMAIN]\n )\n except UnknownTld as ex:\n raise UpdateFailed(\"Could not set up whois, TLD is unknown\") from ex\n except (FailedParsingWhoisOutput, WhoisCommandFailed, UnknownDateFormat) as ex:\n raise UpdateFailed(\"An error occurred during WHOIS lookup\") from ex",
"def domain_profile(self, domain):\n return self.apiquery('/v1/{}'.format(domain))",
"def full_contact_company(self,domain):\n if self.contact_api_key is None:\n click.secho(\"[!] No Full Contact API key, so skipping company lookup.\",fg=\"red\")\n return None\n else:\n headers = {\"Authorization\": \"Bearer %s\" % self.contact_api_key}\n payload = {\"domain\": domain}\n try:\n resp = requests.post(self.company_api_uri,data=json.dumps(payload),headers=headers,timeout=self.requests_timeout)\n if resp.status_code == 200:\n return resp.json()\n elif resp.status_code == 401:\n click.secho(\"[!] Full Contact says the provided API key is no good. Make sure you are using a valid key for API v3.\",fg=\"red\")\n return None\n except requests.exceptions.Timeout:\n click.secho(\"\\n[!] The connection to Full Contact timed out!\",fg=\"red\")\n except requests.exceptions.TooManyRedirects:\n click.secho(\"\\n[!] The connection to Full Contact encountered too many redirects!\",fg=\"red\")\n except requests.exceptions.RequestException as error:\n click.secho(\"\\n[!] The connection to Full Contact encountered an error!\",fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")\n return None",
"def run_whois(self,domain):\n try:\n who = whois.whois(domain)\n results = {}\n # Check if info was returned before proceeding because sometimes records are protected\n if who.registrar:\n results['domain_name'] = who.domain_name\n results['registrar'] = who.registrar\n results['expiration_date'] = who.expiration_date\n results['registrant'] = who.name\n results['org'] = who.org\n results['admin_email'] = who.emails[0]\n results['tech_email'] = who.emails[1]\n results['address'] = \"{}, {}{}, {}, {}\".format(who.address,who.city,who.zipcode,who.state,who.country)\n results['dnssec'] = who.dnssec\n else:\n click.secho(\"[*] WHOIS record for {} came back empty. You might try looking at dnsstuff.com.\".format(domain),fg=\"yellow\")\n return results\n except Exception as error:\n click.secho(\"[!] The WHOIS lookup for {} failed!\".format(domain),fg=\"red\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"red\")",
"def generateDRQFor(self, domain):\n block = BasicRangeQuery.generateBaseDRQ(self, domain)\n head = block[0] # First Set of Queries\n tail = set() # Remaining Queries\n for set_of_queries in block[1:]: # Add all elements from the tailing query blocks to big query block\n tail.update(set_of_queries)\n return (head, tail)",
"def search(url, domain_list):\n resp = requests.get(url)\n if not resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)",
"def domain_search(self, terms, page=1, exclude='', max_length=25, min_length=1, has_hyphen='true',\n has_number='true', active_only='false', deleted_only='false', anchor_left='false',\n anchor_right='false'):\n params = {'query':terms,\n 'page':page,\n 'exclude_query':exclude,\n 'max_length':max_length,\n 'min_legnth':min_length,\n 'has_hyphen':has_hyphen,\n 'has_number':has_number,\n 'active_only':active_only,\n 'deleted_only':deleted_only,\n 'anchor_left':anchor_left,\n 'anchor_right':anchor_right}\n return self.apiquery('/v2/domain-search/', params=params)",
"def api_query(self, **kwargs):\n with self._api_lock:\n return self._api_query(kwargs)",
"def domain_check(self, domain, raw=False):\n endpoint = '/Domain/Check'\n \n params = {\n 'Domain' : domain,\n }\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response.get('status') == 'AVAILABLE'",
"def run_whoxy_company_search(self,company):\n if self.whoxy_api_key:\n try:\n results = requests.get(self.reverse_whoxy_api_endpoint.format(self.whoxy_api_key,company),timeout=self.requests_timeout).json()\n if results['status'] == 1 and results['total_results'] > 0:\n whois_results = {}\n total_results = results['total_results']\n for domain in results['search_result']:\n domain_name = domain['domain_name']\n temp = self.parse_whoxy_results(domain,True)\n whois_results[domain_name] = temp\n return whois_results,total_results\n else:\n click.secho(\"[*] WhoXY returned status code 0, error/no results, for reverse company search.\",fg=\"yellow\")\n except requests.exceptions.Timeout:\n click.secho(\"\\n[!] The connection to WhoXY timed out!\",fg=\"red\")\n except requests.exceptions.TooManyRedirects:\n click.secho(\"\\n[!] The connection to WhoXY encountered too many redirects!\",fg=\"red\")\n except requests.exceptions.RequestException as error:\n click.secho(\"[!] Error connecting to WhoXY for reverse company search!\",fg=\"yellow\")\n click.secho(\"L.. Details: {}\".format(error),fg=\"yellow\")",
"def whois_live(self, domain):\n return self.apiquery('/v1/{}/whois/live/'.format(domain))",
"def query(self,\n query: str,\n datatype: str = \"response\",\n page: int = 0,\n indices: str = \"\") -> dict:\n endpoint = \"/api/responses/\"\n if datatype == \"cert\":\n endpoint = \"/api/certs/\"\n elif datatype == \"domain\":\n endpoint = \"/api/domains/\"\n ret = self._request(\n endpoint=endpoint,\n params={\n \"q\": query,\n \"indices\": indices,\n \"start\": page * 20,\n },\n )\n return ret",
"def query(self, wireframe):\n\n headers = {\n 'content-type': 'application/dns-message'\n }\n\n dnsdata = dnslib.DNSRecord.parse(wireframe)\n dnsdomain = dnsdata.q.get_qname()\n qtype = dnslib.QTYPE.get(k=dnsdata.q.qtype)\n\n print(f\"Handling query: ({qtype}) {dnsdomain}\")\n\n retval = None\n domconfig = self.get_domain_config(globals.config.default, wireframe)\n\n if 'static' in domconfig:\n # handle \"static\" domain configuration\n\n if qtype in domconfig.static:\n # reply for static configured domain match\n d = dnsdata.reply()\n qanswer = domconfig.static[qtype]\n d.add_answer(*dnslib.RR.fromZone(f\"{dnsdomain} 60 {qtype} {qanswer}\"))\n\n d.header.id = dnsdata.header.id\n d.q.qtype = dnsdata.q.qtype\n d.header.qr = 1\n\n return d.pack()\n \n else:\n # return NXDOMAIN\n r = dnsdata.reply()\n r.header.rcode = dnslib.RCODE.NXDOMAIN\n return r.pack()\n\n for retries in range(0, domconfig.doh_max_retries):\n if domconfig.doh_url_select == \"random\":\n url = self.get_random_doh(domconfig.doh_urls)\n elif domconfig.doh_url_select == \"roundrobin\":\n url = self.get_roundrobin_doh(domconfig.doh_urls)\n else:\n print(\"Error, no DOH url select method\")\n r = dnsdata.reply()\n r.header.rcode = dnslib.RCODE.NXDOMAIN\n return r.pack()\n\n print(\"Using\", url)\n\n try:\n r = requests.post(url, headers=headers, data=wireframe, stream=True, verify=globals.config.service.check_doh_ssl)\n assert r.status_code == 200\n retval = r.content\n break\n\n except Exception as ex:\n print(\"Error requesting DOH: \", ex)\n continue\n\n return retval",
"def test_client_get_domain(mocker, client_domain_input):\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mocker.MagicMock(return_value=client_domain_input)\n\n domain = test_client.get_domain(\"foo.bar\")\n\n test_client.execute_query.assert_called_once_with(\n queries.GET_DOMAIN, {\"domain\": \"foo.bar\"}\n )\n assert domain.domain_name == \"foo.bar\"\n assert domain.dmarc_phase == \"not implemented\"\n assert domain.last_ran == \"2021-01-27 23:24:26.911236\"\n assert domain.dkim_selectors == []",
"def lookup(self, inc_raw=False, retry_count=3, response=None,\n get_referral=False, extra_blacklist=None,\n ignore_referral_errors=False, asn_data=None,\n field_list=None, is_offline=False):\n\n # Create the return dictionary.\n results = {\n 'query': self._net.address_str,\n 'nets': [],\n 'raw': None,\n 'referral': None,\n 'raw_referral': None\n }\n\n # The referral server and port. Only used if get_referral is True.\n referral_server = None\n referral_port = 0\n\n # Only fetch the response if we haven't already.\n if response is None or (not is_offline and\n asn_data['asn_registry'] is not 'arin'):\n\n log.debug('Response not given, perform WHOIS lookup for {0}'\n .format(self._net.address_str))\n\n # Retrieve the whois data.\n response = self._net.get_whois(\n asn_registry=asn_data['asn_registry'], retry_count=retry_count,\n extra_blacklist=extra_blacklist\n )\n\n if get_referral:\n\n # Search for a referral server.\n for match in re.finditer(\n r'^ReferralServer:[^\\S\\n]+(.+:[0-9]+)$',\n response,\n re.MULTILINE\n ):\n\n try:\n\n temp = match.group(1)\n if 'rwhois://' not in temp: # pragma: no cover\n raise ValueError\n\n temp = temp.replace('rwhois://', '').split(':')\n\n if int(temp[1]) > 65535: # pragma: no cover\n raise ValueError\n\n referral_server = temp[0]\n referral_port = int(temp[1])\n\n except (ValueError, KeyError): # pragma: no cover\n\n continue\n\n break\n\n # Retrieve the referral whois data.\n if get_referral and referral_server:\n\n log.debug('Perform referral WHOIS lookup')\n\n response_ref = None\n\n try:\n\n response_ref = self._net.get_whois(\n asn_registry='', retry_count=retry_count,\n server=referral_server, port=referral_port,\n extra_blacklist=extra_blacklist\n )\n\n except (BlacklistError, WhoisLookupError):\n\n if ignore_referral_errors:\n\n pass\n\n else:\n\n raise\n\n if response_ref:\n\n log.debug('Parsing referral WHOIS data')\n\n if inc_raw:\n\n results['raw_referral'] = response_ref\n\n temp_rnet = self.parse_fields(\n response_ref,\n RWHOIS['fields'],\n field_list=field_list\n )\n\n # Add the networks to the return dictionary.\n results['referral'] = temp_rnet\n\n # If inc_raw parameter is True, add the response to return dictionary.\n if inc_raw:\n\n results['raw'] = response\n\n nets = []\n\n if asn_data['asn_registry'] == 'arin':\n\n nets_response = self.get_nets_arin(response)\n\n elif asn_data['asn_registry'] == 'lacnic':\n\n nets_response = self.get_nets_lacnic(response)\n\n else:\n\n nets_response = self.get_nets_other(response)\n\n nets.extend(nets_response)\n\n # Iterate through all of the network sections and parse out the\n # appropriate fields for each.\n log.debug('Parsing WHOIS data')\n for index, net in enumerate(nets):\n\n section_end = None\n if index + 1 < len(nets):\n\n section_end = nets[index + 1]['start']\n\n try:\n\n dt_format = RIR_WHOIS[results['asn_registry']]['dt_format']\n\n except KeyError:\n\n dt_format = None\n\n temp_net = self.parse_fields(\n response,\n RIR_WHOIS[asn_data['asn_registry']]['fields'],\n section_end,\n net['end'],\n dt_format,\n field_list\n )\n\n # Merge the net dictionaries.\n net.update(temp_net)\n\n # The start and end values are no longer needed.\n del net['start'], net['end']\n\n # Add the networks to the return dictionary.\n results['nets'] = nets\n\n return results",
"def query_api(term, location):\n response = search(term, location)\n\n businesses = response.get('businesses')\n\n if not businesses:\n print 'No businesses for {0} in {1} found.'.format(term, location)\n return\n\n business_id = businesses[0]['id']\n \n print '{0} businesses found, querying business info for the top result \"{1}\" ...'.format(\n len(businesses),\n business_id\n )\n \n response=[]\n for biz in range(len(businesses)):\n response.append(get_business(businesses[biz]['id']))\n #response = get_business(business_id)\n return response"
] | [
"0.5364037",
"0.5217802",
"0.5124252",
"0.5091811",
"0.50653636",
"0.5045531",
"0.50369877",
"0.49869594",
"0.49551797",
"0.48993677",
"0.48925823",
"0.4868528",
"0.48650628",
"0.48334965",
"0.48261112",
"0.4820986",
"0.47899625",
"0.47878402",
"0.4784079",
"0.47710827",
"0.47541344",
"0.47249177",
"0.47201723",
"0.471616",
"0.47156987",
"0.47127128",
"0.47057796",
"0.46885487",
"0.4684896",
"0.46835297"
] | 0.80236036 | 0 |
Does this lesson or any of its descendants need feedback? 0 = no feedback required, 1 = feedback not yet provided, 2 = feedback provided | def needs_feedback(lesson, course_id):
descendants = lesson.get_descendants(include_self=True)
provided = False
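# Track whether any descendant both required feedback and already has it recorded.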
for descendant in descendants:
if descendant.feedback_required:
# If feedback is needed, check if already provided
try:
# pylint: disable=E1101
feedback = Feedback.objects.get(lesson_id=descendant.id,
course_id=course_id)
except Feedback.DoesNotExist:
# feedback is required but not yet provided
return 1
else:
if feedback.negative == 0 and feedback.positive == 0:
# Object has been created but no data provided
return 1
# Feedback has been provided
provided = True
if provided:
return 2
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def should_ask_if_examiner_want_to_give_another_chance(self):\n if self.assignment.is_electronic:\n return (self.delivery_status == \"corrected\" and not self.feedback.is_passing_grade) \\\n or self.delivery_status == 'closed-without-feedback'\n else:\n return False",
"def check_feedback(self, step):\r\n world.wait_for_visible(self.active_problem_selector('.tag-status.correct'))\r\n assert_equals(len(world.css_find(self.active_problem_selector('.tag-status.correct'))), 1)\r\n assert_equals(len(world.css_find(self.active_problem_selector('.show'))), 1)",
"def _check_feedback_func(self, assessment_type):\r\n if assessment_type == 'ai':\r\n section_name = 'AI-Assessed'\r\n elif assessment_type == 'peer':\r\n section_name = self.peer_problem_name\r\n else:\r\n raise ValueError('Assessment type not recognized. Must be either \"ai\" or \"peer\"')\r\n\r\n def _inner_check():\r\n self.course_nav.go_to_sequential('Self-Assessed')\r\n self.course_nav.go_to_sequential(section_name)\r\n\r\n try:\r\n feedback = self.open_response.rubric.feedback\r\n\r\n # Unsuccessful if the rubric hasn't loaded\r\n except BrokenPromise:\r\n return False, None\r\n\r\n # Successful if `feedback` is a non-empty list\r\n else:\r\n return bool(feedback), feedback\r\n\r\n return _inner_check",
"def test_weighted_exam(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(0.75)",
"def check_hint_condition(self, hxml_set, student_answers):\r\n pass",
"def should_pay_attention(self):\n return random.randint(1,100) > self.wander",
"def is_waiting_for_feedback(self):\n if self.is_corrected:\n return False\n return self.cached_data.last_feedbackset_deadline_datetime < timezone.now()",
"def get_learn_after_each_decision(self):\r\n return 0",
"def test_create_negative_feedback_removal(self):\n pass",
"def do_targeted_feedback(self, tree):\r\n # Note that the modifications has been done, avoiding problems if called twice.\r\n if hasattr(self, 'has_targeted'):\r\n return\r\n self.has_targeted = True # pylint: disable=W0201\r\n\r\n for mult_choice_response in tree.xpath('//multiplechoiceresponse[@targeted-feedback]'):\r\n show_explanation = mult_choice_response.get('targeted-feedback') == 'alwaysShowCorrectChoiceExplanation'\r\n\r\n # Grab the first choicegroup (there should only be one within each <multiplechoiceresponse> tag)\r\n choicegroup = mult_choice_response.xpath('./choicegroup[@type=\"MultipleChoice\"]')[0]\r\n choices_list = list(choicegroup.iter('choice'))\r\n\r\n # Find the student answer key that matches our <choicegroup> id\r\n student_answer = self.student_answers.get(choicegroup.get('id'))\r\n expl_id_for_student_answer = None\r\n\r\n # Keep track of the explanation-id that corresponds to the student's answer\r\n # Also, keep track of the solution-id\r\n solution_id = None\r\n for choice in choices_list:\r\n if choice.get('name') == student_answer:\r\n expl_id_for_student_answer = choice.get('explanation-id')\r\n if choice.get('correct') == 'true':\r\n solution_id = choice.get('explanation-id')\r\n\r\n # Filter out targetedfeedback that doesn't correspond to the answer the student selected\r\n # Note: following-sibling will grab all following siblings, so we just want the first in the list\r\n targetedfeedbackset = mult_choice_response.xpath('./following-sibling::targetedfeedbackset')\r\n if len(targetedfeedbackset) != 0:\r\n targetedfeedbackset = targetedfeedbackset[0]\r\n targetedfeedbacks = targetedfeedbackset.xpath('./targetedfeedback')\r\n for targetedfeedback in targetedfeedbacks:\r\n # Don't show targeted feedback if the student hasn't answer the problem\r\n # or if the target feedback doesn't match the student's (incorrect) answer\r\n if not self.done or targetedfeedback.get('explanation-id') != expl_id_for_student_answer:\r\n targetedfeedbackset.remove(targetedfeedback)\r\n\r\n # Do not displace the solution under these circumstances\r\n if not show_explanation or not self.done:\r\n continue\r\n\r\n # The next element should either be <solution> or <solutionset>\r\n next_element = targetedfeedbackset.getnext()\r\n parent_element = tree\r\n solution_element = None\r\n if next_element is not None and next_element.tag == 'solution':\r\n solution_element = next_element\r\n elif next_element is not None and next_element.tag == 'solutionset':\r\n solutions = next_element.xpath('./solution')\r\n for solution in solutions:\r\n if solution.get('explanation-id') == solution_id:\r\n parent_element = next_element\r\n solution_element = solution\r\n\r\n # If could not find the solution element, then skip the remaining steps below\r\n if solution_element is None:\r\n continue\r\n\r\n # Change our correct-choice explanation from a \"solution explanation\" to within\r\n # the set of targeted feedback, which means the explanation will render on the page\r\n # without the student clicking \"Show Answer\" or seeing a checkmark next to the correct choice\r\n parent_element.remove(solution_element)\r\n\r\n # Add our solution instead to the targetedfeedbackset and change its tag name\r\n solution_element.tag = 'targetedfeedback'\r\n targetedfeedbackset.append(solution_element)",
"def check_feedback(current_lesson, submitted_answer, status, unlock_next):\n type = 'DEF'\n if status == 'success':\n headline = 'Correct'\n text = current_lesson.correct_feedback\n type = 'COR'\n else:\n if current_lesson.sub_lessons_available:\n type = check_type(current_lesson, submitted_answer, status)\n try:\n headline = current_lesson.feedback.get(feedback_type=type).headline\n text = current_lesson.feedback.get(feedback_type=type).feedback_text\n except:\n type = 'DEF'\n finally:\n headline = current_lesson.feedback.get(feedback_type=type).headline\n text = current_lesson.feedback.get(feedback_type=type).feedback_text\n else:\n if status == 'failure':\n headline = current_lesson.feedback.get(feedback_type='DEF').headline\n text = current_lesson.feedback.get(feedback_type='DEF').feedback_text\n type = 'DEF'\n else:\n return{'resultsHeader': \"<h3>Something went wrong</h3>\",\n 'resultDetails': 'Try again or contact us.',\n 'status': status}\n\n return {'resultsHeader': headline,\n 'resultDetails': text,\n 'status': status,\n 'sub': current_lesson.sub_lessons_available,\n 'type': type,\n 'unlock_next': unlock_next\n }",
"def set_as_not_feedback(self):\n self.feedback = False",
"def test_targeted_feedback_id_typos(self):\r\n xml_str = textwrap.dedent(\"\"\"\r\n <problem>\r\n <p>What is the correct answer?</p>\r\n <multiplechoiceresponse targeted-feedback=\"\">\r\n <choicegroup type=\"MultipleChoice\">\r\n <choice correct=\"false\" explanation-id=\"feedback1TYPO\">wrong-1</choice>\r\n <choice correct=\"false\" explanation-id=\"feedback2\">wrong-2</choice>\r\n <choice correct=\"true\" explanation-id=\"feedbackCTYPO\">correct-1</choice>\r\n <choice correct=\"false\" explanation-id=\"feedback3\">wrong-3</choice>\r\n </choicegroup>\r\n </multiplechoiceresponse>\r\n\r\n <targetedfeedbackset>\r\n <targetedfeedback explanation-id=\"feedback1\">\r\n <div class=\"detailed-targeted-feedback\">\r\n <p>Targeted Feedback</p>\r\n <p>This is the 1st WRONG solution</p>\r\n </div>\r\n </targetedfeedback>\r\n\r\n <targetedfeedback explanation-id=\"feedback2\">\r\n <div class=\"detailed-targeted-feedback\">\r\n <p>Targeted Feedback</p>\r\n <p>This is the 2nd WRONG solution</p>\r\n </div>\r\n </targetedfeedback>\r\n\r\n <targetedfeedback explanation-id=\"feedback3\">\r\n <div class=\"detailed-targeted-feedback\">\r\n <p>Targeted Feedback</p>\r\n <p>This is the 3rd WRONG solution</p>\r\n </div>\r\n </targetedfeedback>\r\n\r\n <targetedfeedback explanation-id=\"feedbackC\">\r\n <div class=\"detailed-targeted-feedback-correct\">\r\n <p>Targeted Feedback</p>\r\n <p>Feedback on your correct solution...</p>\r\n </div>\r\n </targetedfeedback>\r\n\r\n </targetedfeedbackset>\r\n\r\n <solution explanation-id=\"feedbackC\">\r\n <div class=\"detailed-solution\">\r\n <p>Explanation</p>\r\n <p>This is the solution explanation</p>\r\n <p>Not much to explain here, sorry!</p>\r\n </div>\r\n </solution>\r\n </problem>\r\n \"\"\")\r\n\r\n # explanation-id does not match anything: fall back to empty targetedfeedbackset\r\n problem = new_loncapa_problem(xml_str)\r\n problem.done = True\r\n problem.student_answers = {'1_2_1': 'choice_0'}\r\n the_html = problem.get_html()\r\n self.assertRegexpMatches(the_html, r\"<targetedfeedbackset>\\s*</targetedfeedbackset>\")\r\n\r\n # New problem with same XML -- try the correct choice.\r\n problem = new_loncapa_problem(xml_str)\r\n problem.done = True\r\n problem.student_answers = {'1_2_1': 'choice_2'} # correct\r\n the_html = problem.get_html()\r\n self.assertRegexpMatches(the_html, r\"<targetedfeedbackset>\\s*</targetedfeedbackset>\")",
"def need_attention(self):\n msg = [\"not staged\", \"behind\", \"ahead\", \"Untracked\"]\n status_msg = self.status()\n if any([each in status_msg for each in msg]):\n return True\n return False",
"def passes(self) -> bool:\n ...",
"def feedback_enable(self):\n return self._feedback_enable",
"def test_never_show_correctness(self):\r\n\r\n conditions = [\r\n {'input_type': 'radio', 'status': Status('correct'), 'value': ''},\r\n {'input_type': 'radio', 'status': Status('correct'), 'value': '2'},\r\n {'input_type': 'radio', 'status': Status('correct'), 'value': ['2']},\r\n {'input_type': 'radio', 'status': Status('incorrect'), 'value': '2'},\r\n {'input_type': 'radio', 'status': Status('incorrect'), 'value': []},\r\n {'input_type': 'radio', 'status': Status('incorrect'), 'value': ['2']},\r\n {'input_type': 'checkbox', 'status': Status('correct'), 'value': []},\r\n {'input_type': 'checkbox', 'status': Status('correct'), 'value': ['2']},\r\n {'input_type': 'checkbox', 'status': Status('incorrect'), 'value': []},\r\n {'input_type': 'checkbox', 'status': Status('incorrect'), 'value': ['2']}]\r\n\r\n self.context['show_correctness'] = 'never'\r\n self.context['submitted_message'] = 'Test message'\r\n\r\n for test_conditions in conditions:\r\n self.context.update(test_conditions)\r\n xml = self.render_to_xml(self.context)\r\n\r\n # Should NOT mark the entire problem correct/incorrect\r\n xpath = \"//div[@class='indicator_container']/span[@class='status correct']\"\r\n self.assert_no_xpath(xml, xpath, self.context)\r\n\r\n xpath = \"//div[@class='indicator_container']/span[@class='status incorrect']\"\r\n self.assert_no_xpath(xml, xpath, self.context)\r\n\r\n # Should NOT mark individual options\r\n self.assert_no_xpath(xml,\r\n \"//label[@class='choicegroup_incorrect']\",\r\n self.context)\r\n\r\n self.assert_no_xpath(xml,\r\n \"//label[@class='choicegroup_correct']\",\r\n self.context)\r\n\r\n # Expect to see the message\r\n self.assert_has_text(xml, \"//div[@class='capa_alert']\",\r\n self.context['submitted_message'])",
"def still_has_questions(self):\n return self.question_number < len(self.question_list)",
"def test_return_start_discussion_display_weight_question(self):\n # create user\n user_created = self.create_user_questionnaire_in_progress()\n\n # data\n data_dict = {\"height\": False, \"actual_weight\": False,\n \"cruising_weight\": False, \"weight_goal\": False}\n old_robot_question = \"Grignotes-tu après les repas ?\"\n user_answer = \"non\"\n\n # call method\n context = self.new_controller.return_start_discussion(user_created.id, old_robot_question,\n data_dict, user_answer)\n\n dict_questions = {\"height\": \"Quelle taille fais-tu ? (au format x,xx)\",\n \"actual_weight\": \"Quel est ton poids actuel ?\",\n \"cruising_weight\": \"Quel est ton poids de croisière \"\n \"(poids le plus longtemps \"\n \"maintenu sans effort) ?\",\n \"weight_goal\": \"Quel est ton poids d'objectif ?\"}\n self.assertEqual(context[\"robot_answer\"], None)\n self.assertEqual(context[\"goal_weight_text\"], \"Nous allons maintenant \"\n \"définir ton objectif.\")\n self.assertEqual(len(context[\"dict_questions\"]), 4)\n self.assertEqual(context[\"dict_questions\"], dict_questions)",
"def still_has_questions(self):\n return self.question_number < len(self.question_list) #returns boolean value",
"def test_weighted_total(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('H1P1', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(1.0)",
"def check_responses(self, button):\n with self.out:\n clear_output()\n\n for i, question in enumerate(self.questions):\n self.create_feedback(i+1, question.correct())",
"def test_no_message_before_submission(self):\r\n\r\n conditions = [\r\n {'input_type': 'radio', 'status': Status('unsubmitted'), 'value': ''},\r\n {'input_type': 'radio', 'status': Status('unsubmitted'), 'value': []},\r\n {'input_type': 'checkbox', 'status': Status('unsubmitted'), 'value': []},\r\n\r\n # These tests expose bug #365\r\n # When the bug is fixed, uncomment these cases.\r\n #{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},\r\n #{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},\r\n #{'input_type': 'radio', 'status': 'unsubmitted', 'value': '2'},\r\n #{'input_type': 'radio', 'status': 'unsubmitted', 'value': ['2']},\r\n #{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']},\r\n #{'input_type': 'checkbox', 'status': 'unsubmitted', 'value': ['2']}]\r\n ]\r\n\r\n self.context['show_correctness'] = 'never'\r\n self.context['submitted_message'] = 'Test message'\r\n\r\n for test_conditions in conditions:\r\n self.context.update(test_conditions)\r\n xml = self.render_to_xml(self.context)\r\n\r\n # Expect that we do NOT see the message yet\r\n self.assert_no_xpath(xml, \"//div[@class='capa_alert']\", self.context)",
"def receive_feedback(self, winner):\r\n # No implementation needed since player is not a learning agent.\r\n pass",
"def get_learn_after_each_trial(self):\r\n return 0",
"def __do_essential_memebers_exist__(self):\n assert self.element_type is not None\n assert self.elements is not None\n assert self.points is not None",
"def check_if_student_has_done_needed_grading(self):\r\n student_id = self.system.anonymous_student_id\r\n success = False\r\n allowed_to_submit = True\r\n try:\r\n response = self.peer_gs.get_data_for_location(self.location, student_id)\r\n count_graded = response['count_graded']\r\n count_required = response['count_required']\r\n student_sub_count = response['student_sub_count']\r\n count_available = response['count_available']\r\n success = True\r\n except GradingServiceError:\r\n # This is a dev_facing_error\r\n log.error(\"Could not contact external open ended graders for location {0} and student {1}\".format(\r\n self.location, student_id))\r\n # This is a student_facing_error\r\n error_message = \"Could not contact the graders. Please notify course staff.\"\r\n return success, allowed_to_submit, error_message\r\n except KeyError:\r\n log.error(\"Invalid response from grading server for location {0} and student {1}\".format(self.location, student_id))\r\n error_message = \"Received invalid response from the graders. Please notify course staff.\"\r\n return success, allowed_to_submit, error_message\r\n if count_graded >= count_required or count_available==0:\r\n error_message = \"\"\r\n return success, allowed_to_submit, error_message\r\n else:\r\n allowed_to_submit = False\r\n # This is a student_facing_error\r\n error_string = (\"<h4>Feedback not available yet</h4>\"\r\n \"<p>You need to peer grade {0} more submissions in order to see your feedback.</p>\"\r\n \"<p>You have graded responses from {1} students, and {2} students have graded your submissions. </p>\"\r\n \"<p>You have made {3} submissions.</p>\")\r\n error_message = error_string.format(count_required - count_graded, count_graded, count_required,\r\n student_sub_count)\r\n return success, allowed_to_submit, error_message",
"def feedback(self):\n self.storeState()\n\n return None",
"def check_submit_feedback(context, url):\n payload = {\n \"stack_id\": \"1234-569586048\",\n \"recommendation_type\": \"companion\",\n \"package_name\": \"blah-blah\",\n \"feedback_type\": True,\n \"ecosystem\": None\n }\n context.response = requests.post(context.coreapi_url + url,\n headers=authorization(context),\n data=payload)",
"def test_get_feedback_none(self):\n result = ''\n self.xblock.credit_dict = None\n test_result = self.xblock.get_feedback_message()\n self.assertEquals(result, test_result)"
] | [
"0.6186025",
"0.61752695",
"0.61344266",
"0.5998169",
"0.59937",
"0.5966056",
"0.58796865",
"0.5829263",
"0.58252865",
"0.56954336",
"0.56225294",
"0.555234",
"0.55216324",
"0.5501553",
"0.54851127",
"0.5417297",
"0.5385953",
"0.5377694",
"0.5376452",
"0.53695625",
"0.53657264",
"0.5353647",
"0.5347686",
"0.53219736",
"0.5318329",
"0.5272671",
"0.5271305",
"0.5260312",
"0.5256285",
"0.5247241"
] | 0.7998184 | 0 |
Get meta information about a lesson | def get_lesson_meta(lesson_id):
# pylint: disable=E1101
lesson_meta_list = (LessonMetaData.objects.filter(lesson=lesson_id)
.prefetch_related())
result = []
for item in lesson_meta_list:
result.append({
'description': item.description.description,
'value': item.value,
'icon': item.description.icon,
'sort' : item.sort
})
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def showLesson(self):\n lesson = \"\"\n lesson += self.__title + \"\\n\\n\"\n lesson += self.__description + \"\\n\"\n lesson += self.__lesson_text + \"\\n\"\n lesson += self.getLink()\n return lesson",
"def meta(id):\n db = core.connect()\n return db[id][\"meta\"]",
"def get_meta_information() -> Dict:\n return {'name': 'NAS-Bench-201',\n 'references': ['Xuanyi Dong, Yi Yang',\n 'NAS-Bench-201: Extending the Scope of Reproducible Neural Architecture Search',\n 'https://openreview.net/forum?id=HJxyZkBKDr',\n 'https://github.com/D-X-Y/AutoDL-Projects'],\n }",
"def meta(self):\n title = 'Месторасположение: {0}'.format(self.object.emplacement)\n return {\n 'title': title\n }",
"def get_meta(meta):\n thumb_url = '{}{}?X-Plex-Token={}'.format(\n meta._server._baseurl, meta.thumb, meta._server._token)\n\n meta_dict = {'title': meta.title,\n 'rating': meta.rating if\n meta.rating is not None else 0.0,\n 'genres': [x.tag for x in meta.genres],\n 'server': [meta._server.friendlyName],\n 'thumb': [thumb_url]\n }\n if meta.guid:\n # guid will return (com.plexapp.agents.imdb://tt4302938?lang=en)\n # Agents will differ between servers.\n agent = meta.guid\n source_name = agent.split('://')[0].split('.')[-1]\n source_id = agent.split('://')[1].split('?')[0]\n meta_dict[source_name] = source_id\n\n if meta.type == 'movie':\n # For movies with same titles\n meta_dict['title'] = u'{} ({})'.format(meta.title, meta.year)\n return meta_dict",
"def GetMetadata(self):\n return self.dict['meta']",
"def meta(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'meta')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def meta(self):\n title = 'Передача оборудования: {0}'.format(self.object.model)\n return {\n 'title': title\n }",
"def get_lessons(self, course: str):\n\n lesson_link: Any = self.courses[course][\"link\"]\n lesson_data = self._parse_lesson(lesson_link)\n # self.courses[course][\"lessons\"] = lesson_data\n self.lessons = lesson_data",
"def get_meta():\n meta = {\n 'pages': _get_pages()\n }\n return meta",
"def meta(self):\n title = 'Оборудование: {0}'.format(self.object.value)\n return {\n 'title': title\n }",
"def meta(self):\n title = 'Оборудование на баллансе'\n return {\n 'title': title\n }",
"def _parse_lesson(self, lesson_link: str):\n\n lesson_page: Soup = self._get_soup(lesson_link)\n lesson_content: NavigableString = lesson_page.find(\"div\", \n {\"class\": \"moduleContent\"})\n course_divs: ResultSet = lesson_content.find_all(\"div\")\n\n data = {}\n module = \"\"\n for div in course_divs:\n if div[\"class\"][0] == \"courseModule\":\n module = div.p.get_text().split(\":\")[-1].strip()\n data[module] = []\n elif div[\"class\"][0] == \"courseLesson\":\n lesson_title = div.find(\"span\", {\"class\": \"courseLessonTitle\"})\n data[module].append(lesson_title.get_text().strip())\n \n return data",
"def get_lessons(lesson_id):\n url = '{0}?cat={1}'.format(BASE_URL, lesson_id)\n page = requests.get(url, verify=False)\n soup = BeautifulSoup(page.content)\n output = []\n\n for item in soup.find(id='playlist').findAll('dd'):\n video_id = item.find('a')['href'].split('=')[-1]\n title = item.find('a').text\n output.append({\n 'title': title, 'lesson_id': lesson_id,\n 'video_id': video_id})\n\n return output",
"def showmeta(self,\r\n index):\r\n\r\n return self.get_metadata_from_note(index)",
"def get_technical_details(self):\n\n url = \"https://www.imdb.com/title/%s/reference\" % (self.film_id)\n return Scraper(url).scrape_technical_data()",
"def getInfo(self):\n self.name, self.description = achievements[self.id]",
"def get_lessons(course_id, lesson=None):\n lesson_list = []\n if lesson is None:\n lesson_list = Lesson.objects.filter(\n id__in=get_root_lesson_ids(course_id))\n else:\n lesson_list = lesson.get_children()\n result = []\n for lesson_item in lesson_list:\n result.append({\n 'id': lesson_item.id,\n 'title': lesson_item.title,\n 'needs_feedback': needs_feedback(lesson_item, course_id),\n 'mandatory': lesson_item.mandatory,\n })\n return result",
"def _metadata(self):\n meta = super()._metadata\n meta.update({\n \"name\": self.name,\n \"lead_in_time\": self.lead_in_time,\n \"amplification\": self.amplification,\n \"amplifier_clipping\": self.amplifier_clipping,\n \"power_threshold\": self.power_threshold,\n })\n return meta",
"def get_details(self):",
"def extract_meta(self, id: str) -> dict:\r\n\r\n raw = self.session.get(f\"{self.host}/{id}\")\r\n soup = self.soup(raw)\r\n\r\n meta = self.MetaSet()\r\n meta[\"image\"] = soup.find(class_=\"wp-post-image\")[\"src\"]\r\n if (content := soup.find(class_=\"entry-content\")):\r\n meta.register(r\"(?i){id} *: *(.+?)\\n\", content.text)\r\n\r\n meta.setItem(\"judul anime\", \"judul\")\r\n meta.setItem(\"judul alternatif\")\r\n meta.setItem(\"tipe anime\", \"tipe\")\r\n meta.setItem(\"status anime\", \"status\")\r\n meta.setItem(\"total episode\")\r\n meta.setItem(\"musim rilis\", \"musim\")\r\n meta.setItem(\"studio yang memproduksi\", \"studio\")\r\n meta.setItem(\"genre\")\r\n meta.setItem(\"durasi per episode\", \"durasi\")\r\n meta.setItem(\"Skor di MyAnimeList\", \"score\")\r\n\r\n if (h2 := soup.find(\"h2\", text=self.re.compile(r\"Sinopsis[^>]+\"))):\r\n desc = []\r\n for p in h2.findAllNext(\"p\"):\r\n if p.center:\r\n break\r\n desc.append(p.text)\r\n meta[\"sinopsis\"] = \" \".join(desc)\r\n elif (ogDesc := soup.find(r\"meta\", property=\"og:description\")):\r\n content = self.re.sub(r\"\\[[^]]+?]\\s*$\", \"[^>]+\", ogDesc[\"content\"])\r\n if (fullDesc := soup.find(text=self.re.compile(content))):\r\n meta[\"sinopsis\"] = fullDesc\r\n\r\n if (h2 := soup.find(\"h2\", text=\"Main Character\")):\r\n meta[\"karakter\"] = [\r\n figure.text for figure in h2.findAllNext(\"figure\")]\r\n\r\n return meta",
"def meta(self):\n return self._meta",
"def meta(self):\n return self._meta",
"def meta(self):\n return self._meta",
"def _get_canonicality_info(lesson):\n # XXX: This could be made much more fancy\n lessons_course = g.model.get_course('lessons')\n is_canonical_lesson = (lessons_course == lesson.course)\n if is_canonical_lesson:\n canonical_url = None\n else:\n if lessons_course._has_lesson(lesson.slug):\n canonical = lessons_course.lessons[lesson.slug]\n canonical_url = canonical.get_url(external=True)\n else:\n canonical_url = None\n return is_canonical_lesson, canonical_url",
"def get_info(hit):\n mention = Mention(hit)\n return dict(\n url = mention.info[\"url\"],\n title = mention.info[\"title\"],\n date = mention.info[\"datetime_date\"] or datetime.date(1970, 1, 1),\n type = 'news' if mention.in_the_news else 'print',\n author = '(need author)',\n media = mention.info[\"media\"],\n )",
"def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE",
"def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)",
"def meta_extract(doc):\n title_search = re.compile(r'(title:\\s*)(?P<title>.*(\\n *\\w.*)*)(\\nauthor:)', re.IGNORECASE)\n author_search = re.compile(r'(author:)(?P<author>.*)', re.IGNORECASE)\n translator_search = re.compile(r'(translator:)(?P<translator>.*)', re.IGNORECASE)\n illustrator_search = re.compile(r'(illustrator:)(?P<illustrator>.*)', re.IGNORECASE)\n title = re.search(title_search, doc).group('title')\n author = re.search(author_search, doc)\n translator = re.search(translator_search, doc)\n illustrator = re.search(illustrator_search, doc)\n if author: \n author = author.group('author')\n if translator:\n translator = translator.group('translator')\n if illustrator:\n illustrator = illustrator.group('illustrator')\n print \"Title: {}\".format(title)\n print \"Author(s): {}\".format(author)\n print \"Translator(s): {}\".format(translator)\n print \"Illustrator(s): {}\\n\".format(illustrator)\n # return title, author, illustrator, translator",
"def extract_movie_meta(soup: BeautifulSoup) -> Tuple[int, str, Optional[str]]:\n\n meta = soup.find(\"p\", class_=\"text-muted\")\n\n runtime_with_suffix = meta.find(\"span\", class_=\"runtime\").get_text()\n runtime = runtime_with_suffix[:-4]\n runtime = int(runtime)\n\n genres = meta.find(\"span\", class_=\"genre\").get_text().split(\", \")\n genres = [genre.strip() for genre in genres]\n\n certificate = None\n if certificate_element := meta.find(\"span\", class_=\"certificate\"):\n certificate = certificate_element.get_text()\n\n return runtime, genres, certificate"
] | [
"0.6347282",
"0.63374656",
"0.6238976",
"0.6156013",
"0.6148112",
"0.61196655",
"0.6065378",
"0.58501846",
"0.5849513",
"0.58236367",
"0.58219254",
"0.5817728",
"0.5810855",
"0.5797458",
"0.57406706",
"0.5716694",
"0.5701135",
"0.5665865",
"0.564084",
"0.56000566",
"0.5573098",
"0.5567617",
"0.5567617",
"0.5567617",
"0.55627245",
"0.5550843",
"0.55417496",
"0.5539576",
"0.55316967",
"0.5513933"
] | 0.6979437 | 0 |
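An editorial aside on the record above: `get_lesson_meta` returns a plain list of dicts, so its natural consumer is a view that hands that list straight to a template. The sketch below shows one way that might look; it is illustrative only, assumed to live in the same module as `get_lesson_meta`, and the view name, sort handling and template path are invented for the example rather than taken from the original project.

```python
# Hypothetical consumer of get_lesson_meta(); view name and template path are assumed.
from django.shortcuts import render

def lesson_detail(request, lesson_id):
    # get_lesson_meta() yields dicts with 'description', 'value', 'icon' and 'sort'.
    meta = get_lesson_meta(lesson_id)
    meta.sort(key=lambda item: item['sort'])  # respect the per-item sort order
    return render(request, 'lessons/detail.html', {'lesson_meta': meta})
```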
Get attachments for a lesson | def get_lesson_attachments(lesson_id):
# pylint: disable=E1101
lesson_attachments = Attachment.objects.filter(lesson=lesson_id)
result = []
for attachment in lesson_attachments:
url = attachment.attached_file.url
result.append({'title': attachment.title,
'url': url,
'is_image': (
True if url.lower().endswith(".jpg")
or url.lower().endswith(".png")
or url.lower().endswith(".jpeg")
else False),
'attached_file': attachment.attached_file, })
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attachments(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_ATTACHMENTS.format(expense_id))",
"def attachments(self):\n return self._attachments",
"def attachments(self):\n return self.properties.get('attachments',\n AttachmentCollection(self.context, ResourcePath(\"attachments\", self.resource_path)))",
"def attachments(self):\r\n return Attachments(self)",
"def attachments(self):\n if \"attachments\" in self._prop_dict:\n return AttachmentsCollectionPage(self._prop_dict[\"attachments\"])\n else:\n return None",
"def get_attachments(service, user_id, msg_id, save_path):\n try:\n message = service.users().messages().get(userId=user_id, id=msg_id).execute()\n\n if 'parts' not in message['payload']:\n if message['payload']['body']['size'] > 0:\n print(\"Downloading single-part attachment...\")\n file_data = base64.urlsafe_b64decode(message['payload']['body']['data'].encode('UTF-8'))\n path = ''.join([save_path, sanitize_string(message['snippet'][0:70])])\n write_file_to_location(file_data, path)\n elif 'parts' in message['payload']:\n for part in message['payload']['parts']:\n print(\"Downloading multi-part attachment...\")\n if part['filename']:\n data = get_data_from_part(service, user_id, msg_id, part)\n file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))\n path = ''.join([save_path, part['filename']])\n write_file_to_location(file_data, path)\n # Nothing to download\n else:\n return None\n\n except errors.HttpError as error:\n print(f\"An error occurred: {error}\")\n\n return msg_id",
"def l10n_mx_edi_retrieve_attachments(self):\n self.ensure_one()\n if not self.l10n_mx_edi_cfdi_name:\n return []\n domain = [\n ('res_id', '=', self.id),\n ('res_model', '=', self._name),\n ('name', '=', self.l10n_mx_edi_cfdi_name )]\n return self.env['ir.attachment'].search(domain)",
"def attachments(self):\n return [Attachment(part) for part in self._parts]",
"def parse_attachments(request):\n attachments = []\n for attachment in request.files.getlist('attachment'):\n attachments.append(Attachment(attachment.filename, attachment))\n return attachments",
"def attachments(self):\n return attachment_collection.AttachmentCollectionRequestBuilder(self.append_to_request_url(\"attachments\"), self._client)",
"def get_and_send_attachments(self, session, mid, message_payload_parts, context, m_chat_id):\r\n\r\n store_dir_1 = os.getcwd()\r\n\r\n for part in message_payload_parts:\r\n if part['filename']:\r\n attachment_id = part['body']['attachmentId']\r\n\r\n response = session.get(f'https://www.googleapis.com/gmail/v1/users/me/'\r\n f'messages/{mid}/attachments/{attachment_id}')\r\n\r\n data = response.content\r\n encoded_data_dict = ast.literal_eval(data.decode('utf-8'))\r\n file_data = base64.urlsafe_b64decode(encoded_data_dict['data'].encode('UTF-8'))\r\n\r\n path = os.path.join(store_dir_1, part['filename'])\r\n\r\n # запись данных в файловую систему, чтение, отправка и удаление\r\n with open(path, 'wb') as file_object:\r\n file_object.write(file_data)\r\n with open(path, 'rb') as f:\r\n context.bot.send_document(m_chat_id, f)\r\n os.remove(path)",
"def getPostAttachment(self,id,filename):\n # GET /posts/$id/attachments/$filename\n pass",
"def find_by_task(self, task, params={}, **options):\n path = \"/tasks/%s/attachments\" % (task)\n return self.client.get_collection(path, params, **options)",
"def getAttachment(mail, directory=detach_dir):#Download attachment to directory & return filename\n filename = []\n for part in mail.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('Content-Disposition') is None:\n continue\n\n filename = part.get_filename()\n att_path = os.path.join(directory, filename)\n\n if not os.path.isfile(att_path) :\n fp = open(att_path, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n\n return filename",
"def download_submission_attachment(self, url):\n\n r = requests.get(url)\n return r.content",
"def Get_Attachments(service, userId, msg_id, store_dir):\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)",
"def get_attachments_for(parser, token):\n def next_bit_for(bits, key, if_none=None):\n try:\n return bits[bits.index(key)+1]\n except ValueError:\n return if_none\n\n bits = token.contents.split()\n args = {\n 'obj': next_bit_for(bits, 'get_attachments_for'),\n 'var_name': next_bit_for(bits, 'as', '\"attachments\"'),\n }\n return AttachmentsForObjectNode(**args)",
"def get_attachment(dataset, question_value, main_key=\"_attachments\"):\n if question_value is not None:\n for attachment in dataset.get(main_key, []):\n if attachment.get(\"filename\", \"\").endswith(question_value):\n return attachment\n return None",
"def add_mms_attachments(db, mms, backup_dir, thread_dir):\n qry = db.execute(\n \"SELECT _id, ct, unique_id, voice_note, width, height, quote \"\n \"FROM part WHERE mid=?\",\n (mms._id,),\n )\n for _id, ct, unique_id, voice_note, width, height, quote in qry:\n a = Attachment(\n contentType=ct,\n unique_id=unique_id,\n fileName=get_attachment_filename(\n _id, unique_id, backup_dir, thread_dir\n ),\n voiceNote=voice_note,\n width=width,\n height=height,\n quote=quote,\n )\n mms.attachments.append(a)",
"def attachments_get(self,\r\n document_id,\r\n attachment_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id,\r\n attachment_id=attachment_id)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments/{attachmentId}'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id,\r\n 'attachmentId': attachment_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, AttachmentResponse.from_dictionary)",
"def attachments(self):\n for part in self.email.walk():\n filename = part.get_filename()\n if filename:\n yield {\n 'type': part.get_content_type(),\n 'name': filename,\n 'content': part.get_payload()\n }",
"def get_queryset(self, **kwargs):\n print(\"inside attachmentlistview for object %s\" % self.gfk_object)\n attachments = self.gfk_object.attachments.all()\n self.checker.prefetch_perms(attachments)\n return attachments",
"def attachments(self):\r\n return GlobalAttachments(self)",
"def find_video_attachments(document_attachments):\n if isinstance(document_attachments, dict):\n document_attachments = [document_attachments]\n video_info_list = []\n for collection in document_attachments:\n if \"video\" in collection['contentType']:\n size = round(collection['size']/1048576, 2)\n video_info_list.append({\"download_url\": collection['url'], \"size\": size})\n return video_info_list",
"def attachment_names(self, ticketnum):\n ticketnum = int(ticketnum)\n attachments = self._anonymous_server_proxy.ticket.listAttachments(ticketnum)\n return [a[0] for a in attachments]",
"def test_get_file_attachment(self, incident_id, artifact_id, task_id, attachment_id, expected_results_1, expected_results_2):\n\n results = get_file_attachment(mocked_res_client(), incident_id, artifact_id, task_id, attachment_id)\n\n data_content = results[\"content\"]\n file_name = results[\"filename\"]\n assert expected_results_1 == file_name\n assert expected_results_2 == data_content",
"def attachments_list(self,\r\n document_id):\r\n\r\n # Validate required parameters\r\n self.validate_parameters(document_id=document_id)\r\n\r\n # Prepare query URL\r\n _query_builder = Configuration.get_base_uri()\r\n _query_builder += '/signature/documents/{documentId}/attachments'\r\n _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, { \r\n 'documentId': document_id\r\n })\r\n _query_url = APIHelper.clean_url(_query_builder)\r\n\r\n # Prepare headers\r\n _headers = {\r\n 'accept': 'application/json'\r\n }\r\n\r\n # Prepare and execute request\r\n _request = self.http_client.get(_query_url, headers=_headers)\r\n OAuth2.apply(_request)\r\n _context = self.execute_request(_request)\r\n self.validate_response(_context)\r\n\r\n # Return appropriate type\r\n return APIHelper.json_deserialize(_context.response.raw_body, AttachmentListItem.from_dictionary)",
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def fusion_api_get_storage_volume_attachments(self, uri=None, param='', api=None, headers=None):\n return self.volume_attachment.get(uri=uri, param=param, api=api, headers=headers)",
"def listAttachments(self, page):\n return tuple(AttachmentProxy.AttachmentProxy(attachmentDict) for attachmentDict in self.pm_getSpaceManager().listAttachments(self._unbox(page)))"
] | [
"0.6443785",
"0.62607515",
"0.6257542",
"0.61732197",
"0.613358",
"0.60169816",
"0.5984727",
"0.5975392",
"0.59665823",
"0.589185",
"0.58910084",
"0.582533",
"0.5795188",
"0.57261866",
"0.57246155",
"0.56012267",
"0.5595344",
"0.5585614",
"0.55696243",
"0.5561719",
"0.55461925",
"0.5498191",
"0.549244",
"0.54439056",
"0.54293466",
"0.54128754",
"0.5374322",
"0.5366934",
"0.533592",
"0.531478"
] | 0.7814485 | 0 |
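One small note on `get_lesson_attachments` above: the `True if ... else False` wrapper around the chained `endswith` calls is redundant, since `str.endswith` already returns a bool and accepts a tuple of suffixes. The snippet below is only a suggested, behaviour-preserving alternative, not the project's actual code; `IMAGE_SUFFIXES` and `is_image_url` are assumed names.

```python
# Suggested equivalent of the is_image check; behaviour is unchanged.
IMAGE_SUFFIXES = ('.jpg', '.jpeg', '.png')

def is_image_url(url):
    # str.endswith accepts a tuple, so no chained `or` or ternary is needed.
    return url.lower().endswith(IMAGE_SUFFIXES)
```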
Get a list of lessons. If a lesson is provided, its sublessons are returned instead of the course's root lessons. | def get_lessons(course_id, lesson=None):
lesson_list = []
if lesson is None:
lesson_list = Lesson.objects.filter(
id__in=get_root_lesson_ids(course_id))
else:
lesson_list = lesson.get_children()
result = []
for lesson_item in lesson_list:
result.append({
'id': lesson_item.id,
'title': lesson_item.title,
'needs_feedback': needs_feedback(lesson_item, course_id),
'mandatory': lesson_item.mandatory,
})
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_lessons(lesson_id):\n url = '{0}?cat={1}'.format(BASE_URL, lesson_id)\n page = requests.get(url, verify=False)\n soup = BeautifulSoup(page.content)\n output = []\n\n for item in soup.find(id='playlist').findAll('dd'):\n video_id = item.find('a')['href'].split('=')[-1]\n title = item.find('a').text\n output.append({\n 'title': title, 'lesson_id': lesson_id,\n 'video_id': video_id})\n\n return output",
"async def get_lessons(\n self,\n last_sync: datetime = None,\n deleted=False,\n date_from=None,\n date_to=None,\n **kwargs,\n ) -> Union[AsyncIterator[Lesson], List[int]]:\n return Lesson.get(self._api, last_sync, deleted, date_from, date_to, **kwargs)",
"def get_all_lessons(module) -> list:\n from core.models import DetailPage, TopicPage\n\n return [\n lesson\n for lesson in DetailPage.objects.live().specific().descendant_of(module)\n if isinstance(lesson.get_parent().specific, TopicPage)\n ]",
"def get_lessons(self, course: str):\n\n lesson_link: Any = self.courses[course][\"link\"]\n lesson_data = self._parse_lesson(lesson_link)\n # self.courses[course][\"lessons\"] = lesson_data\n self.lessons = lesson_data",
"def get_root_lesson_ids(course_id):\n # pylint: disable=E1101\n lesson_id_list = (CourseLesson.objects.filter(course=course_id)\n .values_list('lesson', flat=True))\n if (len(lesson_id_list) == 1 and Lesson.objects\n .get(id=lesson_id_list[0])\n .get_children()):\n # if a course has only one lesson, but this lesson has\n # children, then show them directly\n lesson_id_list = (Lesson.objects.get(id=lesson_id_list[0])\n .get_children().values_list('id', flat=True))\n return lesson_id_list",
"def get_lesson_meta(lesson_id):\n # pylint: disable=E1101\n lesson_meta_list = (LessonMetaData.objects.filter(lesson=lesson_id)\n .prefetch_related())\n result = []\n for item in lesson_meta_list:\n result.append({\n 'description': item.description.description,\n 'value': item.value,\n 'icon': item.description.icon,\n 'sort' : item.sort\n })\n return result",
"def get_lesson_attachments(lesson_id):\n # pylint: disable=E1101\n lesson_attachments = Attachment.objects.filter(lesson=lesson_id)\n result = []\n for attachment in lesson_attachments:\n url = attachment.attached_file.url\n result.append({'title': attachment.title,\n 'url': url,\n 'is_image': (\n True if url.lower().endswith(\".jpg\")\n or url.lower().endswith(\".png\")\n or url.lower().endswith(\".jpeg\")\n else False),\n 'attached_file': attachment.attached_file, })\n return result",
"def lessons_for_day(self, day, schedule_id=None, eager=True):\n q = Lesson.query_current(schedule_id)\n q = q.filter(Lesson.day == day).\\\n filter(Lesson.teacher_id == self.id)\n\n if eager:\n q = q.options(eagerload('group'), eagerload('group.year'))\n\n return q.all()",
"def test_get_skills_multiple_lessons(self):\n skill_graph = SkillGraph.load()\n\n skill_1 = skill_graph.add(Skill.build(SKILL_NAME, SKILL_DESC))\n unit = self.course.add_unit()\n unit.title = 'Test Unit'\n lesson1 = self.course.add_lesson(unit)\n lesson1.title = 'Test Lesson 1'\n lesson2 = self.course.add_lesson(unit)\n lesson2.title = 'Test Lesson 2'\n self.course.save()\n lesson1.properties[SKILLS_KEY] = [skill_1.id]\n lesson2.properties[SKILLS_KEY] = [skill_1.id]\n self.course.save()\n\n actions.login(ADMIN_EMAIL)\n response = transforms.loads(self.get(self.URL).body)\n self.assertEqual(200, response['status'])\n\n skills = transforms.loads(response['payload'])['skills']\n self.assertEqual(1, len(skills))\n # All lessons listed\n self.assertEqual(2, len(skills[0]['lessons']))",
"def get(self, id):\n\n lesson = Lesson.get_by_id(int(id))\n if lesson:\n t = jinja_env.get_template(\"lesson.html\")\n response = t.render(lesson=lesson)\n else:\n error = \"there is no lesson with id %s\" % id\n t = jinja_env.get_template(\"404.html\")\n response = t.render(error=error)\n\n self.response.out.write(response)",
"def getCharacterStories(self, character_id):\n story_ids = list()\n parms = {'limit': '100'}\n try:\n result = self.__apiRequest('http://gateway.marvel.com/v1/public/characters/{}/stories'.format(character_id), parms)\n except ApiCommunicationError:\n raise\n total_stories = int(result['data']['total'])\n if total_stories < 1:\n raise ApiCommunicationError('The character ID {} did not return any stories'.format(character_id))\n else:\n total_steps = math.ceil(total_stories/100)\n for it in range(total_steps):\n for sit in result['data']['results']:\n story_ids.append(sit['id'])\n parms = {'limit': '100', 'offset': str((it+1)*100)}\n try:\n result = self.__apiRequest('http://gateway.marvel.com/v1/public/characters/{}/stories'.format(character_id), parms)\n except ApiCommunicationError:\n raise\n return story_ids",
"def _get_questions_from_attempt_lesson(self, event_data):\n unit_id, lesson_id = self._get_unit_and_lesson_id_from_url(\n event_data['location'])\n if unit_id is None or lesson_id is None:\n return []\n\n return self._summarize_multiple_questions(\n event_data, 'u.%s.l.%s' % (unit_id, lesson_id))",
"def showLesson(self):\n lesson = \"\"\n lesson += self.__title + \"\\n\\n\"\n lesson += self.__description + \"\\n\"\n lesson += self.__lesson_text + \"\\n\"\n lesson += self.getLink()\n return lesson",
"def get_lessons(start_time, end_time, iscsv):\n lessons_query = Lesson.objects.filter(start_time__range=[start_time, end_time],\n end_time__range=[start_time, end_time])\n lessons = [(q.start_time.strftime(\"%d-%m-%y\" if iscsv else \"%Y-%m-%d\"),\n q.start_time.strftime(\"%H:%M\" if iscsv else \"%H:%M:%S\"),\n q.end_time.strftime(\"%H:%M\" if iscsv else \"%H:%M:%S\"),\n q.name,\n (q.professor.name + \" \" + q.professor.surname),\n Group.objects.filter(id=q.group_id)[:1].get().name,\n Room.objects.filter(id=q.room_id)[:1].get().number,)\n for q in lessons_query]\n return pd.DataFrame(lessons,\n columns=['Date', 'Start time', 'End time', 'Lesson', 'Professor', 'Group',\n 'Room'])",
"def _parse_lesson(self, lesson_link: str):\n\n lesson_page: Soup = self._get_soup(lesson_link)\n lesson_content: NavigableString = lesson_page.find(\"div\", \n {\"class\": \"moduleContent\"})\n course_divs: ResultSet = lesson_content.find_all(\"div\")\n\n data = {}\n module = \"\"\n for div in course_divs:\n if div[\"class\"][0] == \"courseModule\":\n module = div.p.get_text().split(\":\")[-1].strip()\n data[module] = []\n elif div[\"class\"][0] == \"courseLesson\":\n lesson_title = div.find(\"span\", {\"class\": \"courseLessonTitle\"})\n data[module].append(lesson_title.get_text().strip())\n \n return data",
"def _create_lessons(self):\n self.unit = self.course.add_unit()\n self.unit.title = 'Test Unit'\n self.lesson1 = self.course.add_lesson(self.unit)\n self.lesson1.title = 'Test Lesson 1'\n self.lesson2 = self.course.add_lesson(self.unit)\n self.lesson2.title = 'Test Lesson 2'\n self.lesson3 = self.course.add_lesson(self.unit)\n self.lesson3.title = 'Test Lesson 3'\n self.unit2 = self.course.add_unit()\n self.unit.title = 'Test Unit 2'\n self.lesson4 = self.course.add_lesson(self.unit2)\n self.lesson4.title = 'Test Lesson 4'",
"def lesson(lesson, page, solution=None):\n\n lesson_url, subpage_url, static_url = relative_url_functions(request.path, None, lesson)\n\n page = lesson.pages[page]\n\n content = page_content(lesson, page, solution=solution, lesson_url=lesson_url,\n subpage_url=subpage_url,\n static_url=static_url)\n\n content = content[\"content\"]\n allowed_elements_parser.reset_and_feed(content)\n\n kwargs = {}\n if solution is not None:\n kwargs[\"solution_number\"] = int(solution)\n\n return render_template(\n \"lesson.html\",\n content=content,\n page=page,\n lesson=lesson,\n edit_info=get_edit_info(page.edit_path),\n title=page.title,\n **kwargs\n )",
"def lesson(self, day, order, schedule_id=None, eager=True):\n q = Lesson.query_current(schedule_id)\n q = q.filter(Lesson.day == day).\\\n filter(Lesson.order == order).\\\n filter(Lesson.teacher_id == self.id)\n\n if eager:\n q = q.options(eagerload('group'), eagerload('group.year'))\n\n return q.all()",
"def extract(soup):\r\n table = soup.find('div', id='dnn_ctr11396_TimeTableView_PlaceHolder').find('table')\r\n rows = table.findChildren('tr', recursive=False)\r\n return [[col.findAll('div', {'class': 'TTLesson'}) for col in row.findChildren('td', recursive=False)[1:]]\r\n for row in rows[1:]]",
"def schedule(self, schedule_id=None):\n q = Lesson.query_current(schedule_id)\n lessons = q.filter(Lesson.group_id == self.id).all()\n\n if len(lessons) == 0:\n return None\n\n days = {}\n for x in range(0,5):\n days[x] = []\n for lesson in lessons:\n days[lesson.day].append(lesson)\n schedule = []\n for day in days.values():\n schedule.append(self._process_schedule(day))\n\n return schedule",
"def get(self):\n\n if not CourseOutlineRights.can_view(self):\n transforms.send_json_response(self, 401, 'Access denied.', {})\n return\n\n key = self.request.get('key')\n course = courses.Course(self)\n lesson = course.find_lesson_by_id(None, key)\n assert lesson\n\n fs = self.app_context.fs\n path = fs.impl.physical_to_logical(course.get_activity_filename(\n lesson.unit_id, lesson.lesson_id))\n if lesson.has_activity and fs.isfile(path):\n activity = fs.get(path)\n else:\n activity = ''\n\n payload_dict = {\n 'key': key,\n 'title': lesson.title,\n 'unit_id': lesson.unit_id,\n 'objectives': lesson.objectives,\n 'video': lesson.video,\n 'notes': lesson.notes,\n 'activity_title': lesson.activity_title,\n 'activity_listed': lesson.activity_listed,\n 'activity': activity,\n 'is_draft': not lesson.now_available\n }\n\n message = ['Success.']\n if self.request.get('is_newly_created'):\n message.append('New lesson has been created and saved.')\n\n transforms.send_json_response(\n self, 200, '\\n'.join(message),\n payload_dict=payload_dict,\n xsrf_token=XsrfTokenManager.create_xsrf_token('lesson-edit'))",
"async def get_homework(self, group_id: int, lesson: str) -> Optional[str]:\n pass",
"def get_lesson_url(self, node, state, request, **kwargs):\n course = state.get_data_attr('course')\n unitStatus = state.get_data_attr('unitStatus')\n ul = unitStatus.get_lesson()\n return ul.get_study_url(course.pk)",
"def get_all_enrolled(self, discipline_id):\n students = MyCollection()\n for item in self.__student_repository.get_all():\n newID = str(discipline_id) + \".\" + str(item.entity_id)\n if not self.__link_repository.find_by_id(newID) is None:\n students.append(item)\n return students",
"def copied_lesson_ids(self,courseevent):\n return self.filter(courseevent=courseevent, original_lesson__isnull=False).\\\n values_list('original_lesson_id', flat=True)",
"def student_problem_list(request, course_id):\r\n assert isinstance(course_id, basestring)\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n # Load the course. Don't catch any errors here, as we want them to be loud.\r\n course = get_course_with_access(request.user, 'load', course_key)\r\n\r\n # The anonymous student id is needed for communication with ORA.\r\n student_id = unique_id_for_user(request.user)\r\n base_course_url = reverse('courses')\r\n error_text = \"\"\r\n\r\n student_problem_list = StudentProblemList(course_key, student_id)\r\n # Get the problem list from ORA.\r\n success = student_problem_list.fetch_from_grading_service()\r\n # If we fetched the problem list properly, add in additional problem data.\r\n if success:\r\n # Add in links to problems.\r\n valid_problems = student_problem_list.add_problem_data(base_course_url)\r\n else:\r\n # Get an error message to show to the student.\r\n valid_problems = []\r\n error_text = student_problem_list.error_text\r\n\r\n ajax_url = _reverse_with_slash('open_ended_problems', course_key)\r\n\r\n context = {\r\n 'course': course,\r\n 'course_id': course_key.to_deprecated_string(),\r\n 'ajax_url': ajax_url,\r\n 'success': success,\r\n 'problem_list': valid_problems,\r\n 'error_text': error_text,\r\n # Checked above\r\n 'staff_access': False,\r\n }\r\n\r\n return render_to_response('open_ended_problems/open_ended_problems.html', context)",
"def get_first_lesson(module):\n try:\n return get_all_lessons(module)[0]\n except IndexError:\n return None",
"def get_stories_by_character_id(self,character_id) -> Optional[list]:\n print(\"Starting Extraction for Character ID {}\".format(character_id))\n\n # Variables\n url = \"https://gateway.marvel.com/v1/public/characters/{character_id}/stories?ts={ts}&apikey={public_key}&hash={hash}&limit={limit}&offset={offset}\"\n offset = 0\n total = 0\n init = True \n results = list()\n \n # Pagination Handler\n while offset < total or init:\n init = False\n\n # URL Preparation\n ts = str(time.time())\n hash = self.get_hash(ts)\n url_formated = url.format(character_id = character_id,\n ts = ts,\n public_key = self.public_key,\n hash = hash,\n limit = PAGE_LIMIT,\n offset = offset\n )\n \n # API Request\n try:\n res = requests.get(url_formated)\n if res.status_code == 200:\n response_data = json.loads(res.text)\n offset += response_data['data']['count']\n total = response_data['data']['total']\n results += response_data['data']['results']\n print(\"Character Id {} -> {}/{} Stories\".format(character_id,offset,total))\n else:\n print(\"Unexpected response from API, code: \",res.status_code)\n print(res.text)\n return None\n # Exceptions Handles\n except requests.exceptions.HTTPError as http_error:\n print(\"HTTP error has occurred: \",http_error)\n return None\n except requests.exceptions.ConnectionError as connection_error:\n print(\"Connection error has occurred: \",connection_error)\n return None\n except requests.exceptions.Timeout as timeout_error:\n print(\"Timeout error has occurred: \",timeout_error)\n return None\n except requests.exceptions.RequestException as general_exception:\n print(\"Some unidentified error has occurred on API request: \",general_exception)\n return None\n\n return results",
"async def get_changed_lessons(\n self,\n last_sync: datetime = None,\n deleted=False,\n date_from=None,\n date_to=None,\n **kwargs,\n ) -> Union[AsyncIterator[ChangedLesson], List[int]]:\n return ChangedLesson.get(\n self._api, last_sync, deleted, date_from, date_to, **kwargs\n )",
"def get_guidelines(skill_id):\n guidelines = []\n level = 1\n while level < 6:\n guideline = Guidelines.query.filter(Guidelines.skill_id == skill_id, Guidelines.level == level).first()\n guidelines.append(guideline.information)\n level = level + 1\n return guidelines"
] | [
"0.7365375",
"0.62064546",
"0.61165404",
"0.6037979",
"0.5811342",
"0.5567336",
"0.53809226",
"0.52577466",
"0.52528167",
"0.5149042",
"0.5014981",
"0.49820638",
"0.4955708",
"0.4920301",
"0.48846543",
"0.48611218",
"0.4850564",
"0.4735443",
"0.4732919",
"0.4681646",
"0.46596086",
"0.46525455",
"0.46317947",
"0.46267986",
"0.461669",
"0.46091437",
"0.4601651",
"0.45615092",
"0.4508456",
"0.44400683"
] | 0.7905289 | 0 |
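Because `get_lessons` above returns either a course's root lessons or the children of a given lesson, a full lesson tree can be walked by recursing on the returned entries. The sketch below is a hedged illustration that assumes the same `Lesson` model and `get_lessons` helper are in scope; the function name and indentation-by-depth printing are invented for the example.

```python
# Illustrative depth-first walk over the lesson tree built from get_lessons().
def walk_lessons(course_id, lesson=None, depth=0):
    for entry in get_lessons(course_id, lesson):
        print('  ' * depth + entry['title'])
        child = Lesson.objects.get(id=entry['id'])
        # get_lessons() returns an empty list for leaf lessons, ending the recursion.
        walk_lessons(course_id, lesson=child, depth=depth + 1)
```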
Get a CSRF token | def get_token(request: http.Request) -> str:
if hasattr(request, '_csrf_hook'):
return request._csrf_hook.get_token() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)",
"def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']",
"def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)",
"def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token",
"def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })",
"def get_token(self, res):\n token = res.xpath('//*[@name=\"_csrf-app\"]')[0].attrs['value']\n return token",
"def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()",
"def generate_csrf_token() -> int:\r\n ...",
"def get_csrf_token_from_response(self, response):\n return re.search(CSRF_REGEX, response.body).group(1)",
"def get_review_token(site):\n return site.get_tokens([\"csrf\"])[\"csrf\"]",
"def get_initial_token():\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(EDX_HOMEPAGE)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''",
"def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']",
"def get_csrf_token(self, opener, cookiejar, login_url):\n opener.open(login_url)\n try:\n token = [x.value for x in cookiejar if x.name == 'csrftoken'][0]\n except Exception:\n token = None\n return token",
"def extract_csrf(self, url):\r\n\r\n with requests.Session() as client:\r\n client.get(url) \r\n csrf = client.cookies['csrftoken']\r\n return csrf",
"def get_csrf_token(url,cookie):\r\n\r\n session = requests.Session()\r\n headers = {\"Origin\":url,\r\n \"Accept\":\"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8\",\r\n \"Upgrade-Insecure-Requests\":\"1\",\r\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\",\r\n \"Connection\":\"close\",\r\n \"Referer\":url + \"/admin/\",\r\n \"Accept-Language\":\"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\",\r\n \"Accept-Encoding\":\"gzip, deflate\"}\r\n cookies = {\"BLUDIT-KEY\":cookie}\r\n response = session.get(url + \"/admin/dashboard\",\r\n headers=headers,\r\n cookies=cookies\r\n )\r\n csrf_token = response.text.split('var tokenCSRF = \"')[1].split('\"')[0]\r\n\r\n print(\"csrf_token: \" + csrf_token)\r\n return csrf_token",
"def getcsrf(session):\n session.get(\"http://anichart.net\")",
"def csrf(request):\n return django_csrf(request)['csrf_token']",
"def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token",
"def get_csrf_secret():\n\tsess = managers.request_manager.get_request().session()\n\tsecret = sess.get(csrf_secret_sess_var_name, None)\n\tif not secret:\n\t\tsecret = gen_csrf_secret()\n\t\tsess[csrf_secret_sess_var_name] = secret\n\treturn secret",
"async def token(request: Request):\n return get_token()",
"def extract_csrf_token(htmlData):\n parsedHTML = HTMLMetaTagCSRFTokenParser()\n parsedHTML.feed(htmlData)\n\n token = parsedHTML.CSRF_Token\n\n parsedHTML.clean()\n\n return token",
"def _get_initial_token(url):\n cj = CookieJar()\n opener = build_opener(HTTPCookieProcessor(cj))\n install_opener(opener)\n opener.open(url)\n\n for cookie in cj:\n if cookie.name == 'csrftoken':\n return cookie.value\n\n return ''",
"def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value",
"def get_csrf_token(self) -> str:\n url_csrf = 'https://www.instagram.com/accounts/login/'\n\n res = self.session.get(url_csrf, headers={\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0\"#'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'\n })\n csrf = re.findall(r\"csrf_token\\\":\\\"(.*?)\\\"\", res.text)[0]\n return csrf",
"def _fetch_csrf(self) -> str:\n login_page = self._session.get(\"https://www.redpocket.com/login\")\n csrf_element = re.search(\n r'<input type=\"hidden\" name=\"csrf\" value=\"([\\w|-]+)\">', login_page.text\n )\n\n if csrf_element:\n csrf = csrf_element.group(1)\n self._logger.debug(\"Using CSRF: %s\", csrf)\n return csrf\n\n raise RedPocketException(\"Failed to get CSRF token from login page!\")",
"def generate_csrf_token(app_key, app_secret, user_key, user_secret):\n # We authenticate the user using the keys\n auth = OAuth1(app_key, app_secret, user_key, user_secret)\n\n # Get token\n token_request = requests.get('https://commons.wikimedia.org/w/api.php', params={\n 'action': 'query',\n 'meta': 'tokens',\n 'format': 'json',\n }, auth=auth)\n token_request.raise_for_status()\n\n # We get the CSRF token from the result to be used in editing\n CSRF_TOKEN = token_request.json()['query']['tokens']['csrftoken']\n return CSRF_TOKEN, auth",
"def csrf_token(context):\r\n csrf_token = context.get('csrf_token', '')\r\n if csrf_token == 'NOTPROVIDED':\r\n return ''\r\n return (u'<div style=\"display:none\"><input type=\"hidden\"'\r\n ' name=\"csrfmiddlewaretoken\" value=\"%s\" /></div>' % (csrf_token))",
"def get_token(self):\n token = self._session.token\n return token",
"def xsrf_token(request):\n if not request.META.has_key('HTTP_X_REQUESTING_XSRF_TOKEN'):\n return HttpTextResponse(\n 'Please include a header named X-Requesting-XSRF-Token '\n '(its content doesn\\'t matter).',\n status=400)\n return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())",
"def xsrf_token(request):\n if not request.META.has_key('HTTP_X_REQUESTING_XSRF_TOKEN'):\n return HttpTextResponse(\n 'Please include a header named X-Requesting-XSRF-Token '\n '(its content doesn\\'t matter).',\n status=400)\n return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())"
] | [
"0.82945985",
"0.79842544",
"0.7978851",
"0.7901868",
"0.7893529",
"0.7813759",
"0.7686076",
"0.76118195",
"0.7603423",
"0.7520915",
"0.7415232",
"0.73648924",
"0.73442954",
"0.7296101",
"0.72825855",
"0.72396684",
"0.7208785",
"0.71859497",
"0.71636873",
"0.7134267",
"0.7123767",
"0.7032999",
"0.7024787",
"0.6998662",
"0.6975875",
"0.69041735",
"0.6867859",
"0.68047994",
"0.67658585",
"0.67658585"
] | 0.825222 | 1 |
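`get_token` in the record above only forwards to a per-request `_csrf_hook`, so the token lifecycle lives in whatever middleware attaches that hook (and `rotate_token`, in the next record, relies on the same attribute). The sketch below is a toy illustration of that pairing, not the framework's real implementation; the `CsrfHook` class, the `secrets`-based token and the middleware function are all assumptions made for the example.

```python
import secrets

class CsrfHook:
    """Minimal hook object matching the get_token()/rotate_token() delegation above."""

    def __init__(self):
        self._token = secrets.token_urlsafe(32)

    def get_token(self):
        return self._token

    def rotate_token(self):
        # Re-issued on login so a pre-login token cannot be replayed afterwards.
        self._token = secrets.token_urlsafe(32)


def attach_csrf_hook(request):
    # Middleware step: expose the hook where get_token(request) expects to find it.
    request._csrf_hook = CsrfHook()
    return request
```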
Change the CSRF token in use for a request; this should be done on login for security purposes. | def rotate_token(request: http.Request):
if hasattr(request, '_csrf_hook'):
request._csrf_hook.rotate_token() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def retain_csrf_token(req):\n session = req.environ.get('rex.session', {})\n csrf_token = session.get('_csrf_token')\n if not csrf_token:\n csrf_token = session['_csrf_token'] = b2a(os.urandom(16))\n return csrf_token",
"def _request_csrf_token(self, params):\n if params.get(\"action\") == \"query\":\n if params.get(\"meta\"):\n if \"tokens\" not in params[\"meta\"].split(\"|\"):\n params[\"meta\"] += \"|tokens\"\n else:\n params[\"meta\"] = \"tokens\"\n if params.get(\"type\"):\n if \"csrf\" not in params[\"type\"].split(\"|\"):\n params[\"type\"] += \"|csrf\"",
"def get_csrf_token(self):\n return get_csrf_token(self.REQUEST)",
"def on_GET_request_setup_csrf_cookie(ev) -> None:\n request = ev.request\n if request.method != \"GET\":\n # Skip if not GET. If could detect static requests, would skip too\n return\n token = request.session.get_csrf_token()\n # print(request.session.session_id, token)\n if request.cookies.get(\"XSRF-TOKEN\") != token:\n # Set the Secure flag on the cookie only when serving on https.\n secure: bool = request.registry.settings.get(\n \"scheme_domain_port\", \"\"\n ).startswith(\"https\")\n ev.response.set_cookie(\n COOKIE_NAME,\n token,\n overwrite=True,\n secure=secure,\n httponly=False, # The client reads the cookie to send header\n samesite=\"strict\",\n )",
"def __header_update_token(self) -> None:\n cookies = self.session.cookies.get_dict()\n self.session.headers.update({\n 'Referer': 'https://efdsearch.senate.gov/search/',\n 'X-CSRFToken': cookies['csrftoken'],\n })",
"def add_csrf_validation(event):\n if event.request.method == 'POST':\n token = event.request.POST.get('_csrf')\n if token is None or token != event.request.session.get_csrf_token():\n headers = forget(event.request) # force a log out\n raise HTTPForbidden('CSRF token is missing or invalid',\n headers=headers)",
"def csrf_protect():\n if request.method == \"POST\" and request.path[0:5] != \"/api/\":\n token = login_session.pop('_csrf_token', None)\n request_token = request.form.get('_csrf_token')\n print(\"Comparing server token [\" + token + \"]\")\n print(\"with client token [\" + request_token + \"]\")\n if not token or token != request_token:\n print(\"Tokens do not match! Aborting..\")\n abort(403)\n print(\"Tokens match - accepted\")",
"def generate_csrf_token():\n if '_csrf_token' not in login_session:\n login_session['_csrf_token'] = b64encode(urandom(64)).decode() # Cryptographically secure random key\n print(\"_csrf_token:\" + login_session['_csrf_token'])\n return login_session['_csrf_token']",
"def csrf_token():\n return api_util.jsonify({\n 'token': view_helpers.generate_csrf_token()\n })",
"def _get_csrf(self):\n\n csrf_token_header_name = \"X-CsrfToken\"\n if csrf_token_header_name not in self.headers:\n home_head_response = requests.head(self.BASE_URL)\n self.cookies.update(home_head_response.cookies)\n csrf_token = self.cookies[\"csrftoken\"]\n csrf_header = {csrf_token_header_name: csrf_token}\n self.headers.update(csrf_header)",
"def enforce_csrf(self, request):\n return # To not perform the csrf check previously happening",
"def csrf_token(context):\r\n csrf_token = context.get('csrf_token', '')\r\n if csrf_token == 'NOTPROVIDED':\r\n return ''\r\n return (u'<div style=\"display:none\"><input type=\"hidden\"'\r\n ' name=\"csrfmiddlewaretoken\" value=\"%s\" /></div>' % (csrf_token))",
"def generate_csrf_token() -> int:\r\n ...",
"def setup_csrf_protection(app, cookie_name='r3csrfprot'):\n\n middleware = CSRFProtectionMiddleware(app, cookie_name)",
"def check_csrf(f):\n\n @wraps(f)\n @require_login\n def wrapper(*args, **kwds):\n if \"token\" not in session:\n raise PicoException(\n \"Internal server error\",\n data={\"debug\": \"CSRF token not found in session\"},\n )\n submitted_token = request.headers.get(\"X-CSRF-Token\", None)\n if submitted_token is None:\n raise PicoException(\"CSRF token not included in request\", 403)\n if session[\"token\"] != submitted_token:\n raise PicoException(\"CSRF token is not correct\", 403)\n return f(*args, **kwds)\n\n return wrapper",
"def get_csrf_token(self):\n h = hashlib.new('sha256')\n h.update(self.__current_authentication_token())\n return h.hexdigest()",
"def csrf(request):\n return django_csrf(request)['csrf_token']",
"def _update_token(token):\n session.token = token",
"def xsrf_token(request):\n if not request.META.has_key('HTTP_X_REQUESTING_XSRF_TOKEN'):\n return HttpTextResponse(\n 'Please include a header named X-Requesting-XSRF-Token '\n '(its content doesn\\'t matter).',\n status=400)\n return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())",
"def xsrf_token(request):\n if not request.META.has_key('HTTP_X_REQUESTING_XSRF_TOKEN'):\n return HttpTextResponse(\n 'Please include a header named X-Requesting-XSRF-Token '\n '(its content doesn\\'t matter).',\n status=400)\n return HttpTextResponse(models.Account.current_user_account.get_xsrf_token())",
"def get_csrf(session):\n login = session.get(KONFUZIO_HOST)\n csrf_token = login.cookies['csrftoken']\n return csrf_token",
"def checkcsrf(func):\n @functools.wraps(func)\n @sessiondecorator\n def wrapper(*args, session = None, **kw):\n if \"X-CSRF-TOKEN\" not in session.cookies:\n getcsrf(session)\n return func(*args,session = session, **kw)\n return wrapper",
"def get_csrf(self):\n rv = self.app.get('/')\n soup = BeautifulSoup(rv.data, 'html.parser')\n tag = soup.body.find('input', attrs = { 'name' : '_csrf_token'})\n return tag['value']",
"def get_csrf_token():\n\tresponse = session.get('https://www.udemy.com/join/login-popup')\n\tmatch = re.search(\"name=\\'csrfmiddlewaretoken\\' value=\\'(.*)\\'\", response.text)\n\treturn match.group(1)",
"def _get_form_token(self, req):\n if req.incookie.has_key('trac_form_token'):\n return req.incookie['trac_form_token'].value\n else:\n req.outcookie['trac_form_token'] = hex_entropy(24)\n req.outcookie['trac_form_token']['path'] = req.base_path or '/'\n if self.env.secure_cookies:\n req.outcookie['trac_form_token']['secure'] = True\n if sys.version_info >= (2, 6):\n req.outcookie['trac_form_token']['httponly'] = True\n return req.outcookie['trac_form_token'].value",
"def csrf_protect_app(app):\n\n @app.before_request\n def csrf_protect():\n if request.path == \"/api/login\" or session.get('bypass_csrf', False):\n # Bypass csrf protection for trusted api sessions (see /api/login_for_apps):\n return\n if request.method == \"POST\":\n token = session.get('_csrf_token', None)\n header = request.headers.get('X-csrf', None)\n if not token or not header or token != header:\n abort(make_response(\"Invalid x-csrf token\", 403))\n\n def generate_csrf_token():\n if '_csrf_token' not in session:\n session['_csrf_token'] = random_token()\n return session['_csrf_token']\n\n app.jinja_env.globals['csrf_token'] = generate_csrf_token",
"def test_csrf_token_request_rotation(self):\n\n csrf_client = Client(enforce_csrf_checks=True)\n csrf_client.login(username='archen', password='mytestpassword')\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token1 = \"{0}\".format(response.context['csrf_token'])\n\n # todo: add settings for test URL\n response = csrf_client.get(reverse('hackme:vote', kwargs={'question_id': 1}))\n token2 = \"{0}\".format(response.context['csrf_token'])\n\n self.assertNotEqual(token1, token2, msg='CSRF Token is not rotated per request')",
"def validate_csrf_token(event):\n request = event.request\n if request.is_xhr or request.method.upper() in ('POST', 'PUT', 'DELETE'):\n pyramid.session.check_csrf_token(request, token='XSRF_TOKEN',\n header='X-XSRF-TOKEN', raises=True)",
"def get_token(request: http.Request) -> str:\n if hasattr(request, '_csrf_hook'):\n return request._csrf_hook.get_token()",
"def getcsrf(session):\n session.get(\"http://anichart.net\")"
] | [
"0.6995657",
"0.6735495",
"0.66707176",
"0.66646796",
"0.6627224",
"0.66243565",
"0.6588753",
"0.6564125",
"0.64893675",
"0.64175296",
"0.6354996",
"0.6351722",
"0.6298024",
"0.6286042",
"0.626467",
"0.62383574",
"0.62116677",
"0.61504555",
"0.6089741",
"0.6089741",
"0.6064327",
"0.6041716",
"0.6007499",
"0.5963494",
"0.5902616",
"0.59004545",
"0.58880657",
"0.58748376",
"0.5830292",
"0.5829245"
] | 0.7636964 | 0 |
decorator to coerce a generator to a list | def listify(gen: Callable[..., Union[Generator[T, None, None], AsyncGenerator[T, None]]]) -> Callable[..., List[T]]:
if inspect.isasyncgenfunction(gen):
@wraps(gen)
async def list_func(*args, **kwargs) -> List[Any]:
return [v async for v in gen(*args, **kwargs)]
elif inspect.isgeneratorfunction(gen):
@wraps(gen)
def list_func(*args, **kwargs) -> List[Any]:
return list(gen(*args, **kwargs))
else:
raise TypeError(f'{gen} is not a generator or async-generator')
return list_func | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_list(gen):\n return list(gen())",
"def to_list():\n\n @sinks\n def _dagpype_internal_fn_act(target):\n l = []\n try:\n while True:\n l.append((yield))\n except GeneratorExit:\n target.send(l) \n target.close()\n\n return _dagpype_internal_fn_act",
"def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper",
"def test_listiter_op_1():\n\n @ops.listiter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def decorator(arg):\n return lambda: list(arg)",
"def test_iterlist_op_1():\n\n @ops.iterlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def _for_each_generator(self,\n func: Callable[..., Any],\n *args: Iterable[Any]) -> List[Any]:\n return [func(gen, *args_for_func) for gen, args_for_func in zip(\n self._generators, zip(*args))]",
"def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen",
"def toiter(x):\n if iterable(x):\n return x\n else:\n return [x]",
"def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]",
"def builtin_iterable(func):\n if sys.version_info[:1] < (3,):\n @wraps(func)\n def inner(*args, **kwargs):\n return list(func(*args, **kwargs))\n return inner\n return func",
"def __iter__(self):\n return iter(self.to_list())",
"def concrete(seq):\n if isinstance(seq, Iterator):\n seq = list(seq)\n if isinstance(seq, (tuple, list)):\n seq = list(map(concrete, seq))\n return seq",
"def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)",
"def ensure_list(iterable: Iterable[A]) -> List[A]:\n if isinstance(iterable, list):\n return iterable\n else:\n return list(iterable)",
"def makeiter(obj):\n if not obj:\n return []\n if not isiterable(obj):\n return [obj]\n return obj",
"def listify(val):\n if isinstance(val, basestring):\n return [val]\n if hasattr(val, '__iter__'):\n return list(val)\n return [val]",
"def acc_gens(generators: iter) -> list:\n from_iter = itertools.chain.from_iterable(generators) # [[], [], []] -> flatten\n return functools.reduce(operator.add, from_iter)",
"def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside",
"def flatmap(iterable, function_to_list):\n for element in iterable:\n list_block = function_to_list(element)\n for result_value in list_block:\n yield result_value",
"def list_generator(size: int) -> list:\n libros = size * [None]\n\n return libros",
"def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])",
"def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped",
"def aslist(something):\n return something if isinstance(something, list) else [something]",
"def chek_iter_obj(func):\n\n def wrapper(self, lst):\n try:\n iter(lst)\n return func(self, lst)\n except TypeError:\n raise TypeError(f\"{lst} isn't iterable object\")\n\n return wrapper",
"def make_iterable(arg):\n return arg if is_iterable(arg) else (arg,)",
"def convert_to_list(pattern):\n assert is_iterable(pattern), \"%s is not iterable\" % type(pattern).__name__\n\n def _convert(obj):\n if hasattr(obj, \"__iter__\"):\n return [\n _convert(x)\n for x in obj\n ]\n else:\n return obj\n\n return _convert(pattern)",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def to_list(x):\n if isinstance(x, list):\n return x\n return [x]",
"def tolist(self, flat=0):\n pass"
] | [
"0.7745299",
"0.69110525",
"0.6902051",
"0.6683275",
"0.6664674",
"0.65476835",
"0.639381",
"0.63147944",
"0.6306718",
"0.6297463",
"0.628525",
"0.62602025",
"0.62131625",
"0.6202488",
"0.6202488",
"0.6105673",
"0.60810256",
"0.6071712",
"0.6050292",
"0.60284764",
"0.5994327",
"0.59667027",
"0.5955742",
"0.5954442",
"0.59176093",
"0.5912814",
"0.5876168",
"0.58590835",
"0.58513665",
"0.5844052"
] | 0.70440924 | 1 |
Just lists out the paths in an env variable, one per line | def plist(self):
if self.val == None:
print("No such env variable ", self.val, " exists!")
else:
print("Listing for ", self.name)
for p in self.path_list: print(" ", p) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)",
"def show_envs(path: Optional[Path] = None):\n path = path or WORKON_HOME or Path.cwd()\n for name, path in sorted(find_environment_folders(path=path, verbose=1)):\n terminal.echo(f'Found {terminal.yellow(name)} under: {terminal.yellow(path)}')",
"def pathext_list():\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)",
"def _prepend_env_paths(content, names):\n export_env_vars = ['export %(k)s=%(v)s:${%(k)s}' %dict(\n k=name, v=os.environ.get(name, '')) for name in names]\n return '\\n'.join(export_env_vars + [content])",
"def __get_environ_path(environ_key):\n environ_value = os.environ.get(environ_key)\n result = []\n\n if not environ_value:\n return result\n\n environ_path_list = environ_value.split(';')\n for each_path in environ_path_list:\n each_path = path.Path(each_path)\n\n if not each_path.exists():\n continue\n\n # make sure default directory first in the order\n if 'FrMaya' in each_path:\n result.insert(0, each_path)\n else:\n result.append(each_path)\n\n return result",
"def print_paths():\n print(f\"~: {HOME}\")\n print(f\"PYTHON: {PYTHON}\")\n print(f\"IDEAS: {IDEAS}\")\n print(f\"SITE-PACKAGES {SITE_PACKAGES}\")\n if os.path.exists(TESTS):\n print(f\"TESTS: {TESTS}\")",
"def path_show(args):\n print(header(\"$PATH Components\"))\n loop_fmt = \"{pad}{color}{path}\"\n pad = 4\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n cnt = (cnt + 1) % len(CODES)\n\n print(loop_fmt.format(pad=pad * \" \", color=color, path=part))\n if args.nowarn:\n continue\n\n for warn in check_path_folder(part):\n print(\"{}X {}\".format(pad * 2 * \" \", warn))",
"def env_var_line(key: str) -> str:\n return str(os.environ.get(key) or \"\").strip()",
"def get_formatted_env_vars() -> str:\n res = \"\"\n for k, v in os.environ.items():\n res += '{0}={1}\\n'.format(k, v)\n return res",
"def get_environment_paths(basedir=None):\n basedir = (\n get_default_secrets_basedir() if basedir is None\n else Path(basedir)\n )\n results = list()\n for item in sorted(basedir.iterdir()):\n if is_valid_environment(item):\n results.append(item)\n return results",
"def path(self):\n if self._path:\n return self._path\n path = os.environ[\"PATH\"].split(os.pathsep)\n path = [os.path.expanduser(x) for x in path]\n path = [os.path.abspath(x) for x in path]\n path = [x for x in path if os.path.exists(x)]\n self._path = path\n return self._path",
"def _get_paths():\n paths = [\n '/'\n ]\n return paths",
"def env_var_list(key: str) -> list:\n return list(\n filter(\n None, map(str.strip, env_var_line(key).split(\",\"))\n )\n )",
"def get_envlist(key, *default, **kwargs):\n separator = kwargs.get('separator', ' ')\n return get_env(key, *default, coerce=lambda x: x.split(separator))",
"def _include_paths_from_environ(env_prefix=''):\n paths = os.environ.get(env_prefix + 'WSGI_AUTH_PATHS')\n if not paths:\n return []\n return paths.split(';')",
"def format_path_variable(\n envvar: str, paths: Iterable[str], prepend: str, separator: str\n) -> str:\n\n if not paths:\n raise ValueError(\"Failed to format '${}': no paths supplied\".format(envvar))\n\n return '{envvar}=\"${envvar}{separator}{paths}\"'.format(\n envvar=envvar,\n separator=separator,\n paths=combine_paths(paths, prepend, separator),\n )",
"def expand_paths_vars(paths: List[str]) -> List[str]:\n paths = [expand_path_vars(p) for p in paths]\n return paths",
"def findPath(enviroment: Environment, position: tuple) -> list:",
"def findPath(enviroment: Environment, position: tuple) -> list:",
"def show_env():\n envs = [\"PATH\", \"ORACLE_HOME\", \"TNS_ADMIN\", \"NLS_LANG\"]\n result = {}\n for env in envs:\n if env in os.environ:\n result[env] = os.environ[env]\n return result",
"def tenv(request):\n with open(SETUP_FILE, \"r\") as f:\n tsetup = json.load(f)\n\n return tsetup[\"paths\"]",
"def get_list():\n\n print(f\"Корневой каталог: {config_tools.NAME_PATH}\")\n for dirpath, dirnames, filenames in os.walk(config_tools.NAME_PATH):\n # перебрать каталоги\n for dirname in dirnames:\n print(\"Каталог:\", os.path.join(dirpath, dirname))\n # перебрать файлы\n for filename in filenames:\n print(\"Файл:\", os.path.join(dirpath, filename))",
"def _real_paths(config):\n for key in ('--config', '--ffmpeg-bin', '--log', '--music-source', '--working-dir'):\n if not config[key]:\n continue\n config[key] = os.path.realpath(os.path.expanduser(config[key]))",
"def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path",
"def paths_list(ctx):\n for path in ctx.obj['CLIENT'].paths.list():\n if not path.source.name:\n cidr_blocks = [subnetwork.cidr_block for subnetwork in path.source.subnetworks]\n source_name = \",\".join(cidr_blocks)\n network_name = \"external\"\n else:\n source_name = path.source.name\n network_name = path.source.network.name\n click.echo(\"%s:%s -(%s)-> %s:%s\" % (network_name, source_name, path.port,\n path.network.name, path.destination.name))",
"def env_path(env):\n host = socket.gethostname()\n if host.startswith('maven-iuvs-itf'):\n path = Path('/maven_iuvs/{}/products'.format(env))\n elif host.startswith('test-machine'):\n path = Path('/abc')\n else:\n path = Path(os.environ['HOME']) / 'Dropbox' / 'data' / 'iuvs'\n return path",
"def cfgPathToList( arg ):\n from types import StringTypes\n listPath = []\n if type( arg ) not in StringTypes:\n return listPath\n while arg.find( '/' ) == 0:\n arg = arg[1:]\n return arg.split( '/' )",
"def print_env_vars():\n print(\"Current process environment variables:\")\n for k, v in os.environ.items():\n print('{0}={1}'.format(k, v))",
"def expandvars(self,v):\n if not v: return None\n if isinstance(v,(list)):\n for i in range(len(v)):\n v[i] = os.path.expandvars(os.path.expanduser(str(v[i])))\n return v\n \n return os.path.expandvars(os.path.expanduser(str(v)))",
"def _trim_env_off_path(paths, saltenv, trim_slash=False):\n env_len = None if _is_env_per_bucket() else len(saltenv) + 1\n slash_len = -1 if trim_slash else None\n\n return [d[env_len:slash_len] for d in paths]"
] | [
"0.69197834",
"0.6629572",
"0.6248786",
"0.62064815",
"0.61731195",
"0.61621183",
"0.6069602",
"0.6014116",
"0.5922058",
"0.58610016",
"0.58253425",
"0.58037055",
"0.5796449",
"0.57708263",
"0.576808",
"0.5671742",
"0.56449497",
"0.5635968",
"0.5635968",
"0.5558531",
"0.55343807",
"0.5507948",
"0.5503701",
"0.5498583",
"0.54774106",
"0.5470867",
"0.54680836",
"0.5416894",
"0.5406435",
"0.54016924"
] | 0.6816062 | 1 |
Updates the internal env val to ensure path_list & val are in sync | def pupdate(self):
try:
tmp = self.path_list[0]
except IndexError:
print("Empty value for env variable ", self.name)
return
for p in self.path_list[1:]:
tmp = tmp + ':' + p
self.val = tmp | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self, env_obj):\n if env_obj:\n if isinstance(env_obj, EnvValues):\n for package_name, env_vars in env_obj.data.items():\n for name, value in env_vars.items():\n if isinstance(value, list):\n value = copy.copy(value) # Aware of copying by reference the list\n self.add(name, value, package_name)\n # DepsEnvInfo. the OLD values are always kept, never overwrite,\n elif isinstance(env_obj, DepsEnvInfo):\n for (name, value) in env_obj.vars.items():\n name = name.upper() if name.lower() == \"path\" else name\n self.add(name, value)\n else:\n raise ConanException(\"unknown env type: %s\" % env_obj)",
"def add_to_env(self, path, value):\n name = [MakeEnvironArgs.CONFIG]\n for element in path:\n name.append(MakeEnvironArgs.DOT)\n name.append(element)\n self.env[''.join(name)] = value\n return self.env",
"def update(self, env):\n del env\n return",
"def __setitem__(self, key, item):\n super(EnvironmentVariables, self).__setitem__(key, item)\n os.environ[key] = item",
"def test_set_get_correct_path(self):\n agent_config = self.load_agent_config()\n assert not agent_config.component_configurations\n\n config_value = self.get_component_config_value()\n assert config_value == self.INITIAL_VALUE\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert str(self.INITIAL_VALUE) in result.output\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", self.PATH, str(self.NEW_VALUE)],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n\n config_value = self.get_component_config_value()\n assert config_value == self.INITIAL_VALUE\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert str(self.NEW_VALUE) in result.output\n\n agent_config = self.load_agent_config()\n assert agent_config.component_configurations",
"def extendPath(self, name, value, start=True, forceReplace=False):\n if name in self._env and not forceReplace:\n oldval = self._env[name]\n if start:\n newval = PATH_SEP.join([value, oldval])\n else:\n newval = PATH_SEP.join([oldval, value])\n else:\n newval = value\n self._env[name] = newval\n return newval",
"def update(self, value):\n orig = get_nested_default(self._request.session, self._path)\n orig.update(value)\n set_nested(self._request.session, self._path, orig)\n # self._value = get_nested_default(self._session, self._path)\n self.save()",
"def set(self, path, value):\n pth = self._path[:]\n pth.extend(stringify_keys(path))\n set_nested(self._request.session, pth, value)\n # self._value = get_nested_default(self._dct, self._path)\n self.save()",
"def test_set_value_in_env_file(self) -> None:\n\n self.helper.set_env_file_path(self.temp_env_file.name)\n self.helper.set_name(\"GHOST_FINDER\")\n\n self.assertIsNone(self.helper.get_value())\n\n expected = 'GHOST_FINDER=\"no\"\\n'\n\n self.helper.set_value_in_env_file(\"no\")\n\n with open(self.temp_env_file.name, \"r\", encoding=\"utf-8\") as file_stream:\n self.assertTrue(\"no\" in file_stream.read())\n\n expected = \"no\"\n\n self.assertEqual(expected, self.helper.get_value())",
"def premove(self, path):\n path = os.path.normpath(path) # remove double slashes and stuff\n if path not in self.path_list:\n print(\"Not found in path list! \", path)\n else:\n print(\"Removing \", path, \" from env var \", self.name)\n while path in self.path_list: # needed just in case path is not cleaned first\n self.path_list.remove(path)\n self.pupdate()",
"def test_existing_value(self):\n var_name = \"PICCOLO_TEST_2\"\n initial_value = \"hello\"\n new_value = \"goodbye\"\n\n os.environ[var_name] = initial_value\n\n with set_env_var(var_name=var_name, temp_value=new_value):\n self.assertEqual(os.environ.get(var_name), new_value)\n\n self.assertEqual(os.environ.get(var_name), initial_value)",
"def test_setitem(self, env: yaenv.Env):\n assert 'NEW_VAR' not in env\n env['NEW_VAR'] = 'new_var'\n assert env['NEW_VAR'] == 'new_var'\n env['NEW_VAR'] = 'newer var'\n assert env['NEW_VAR'] == 'newer var'",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def update_from_env(self):\n for key, value in os.environ.items():\n if not key.startswith(self._prefix):\n continue\n\n setting = key[len(self._prefix):]\n if setting not in self._default_settings:\n continue\n\n setting_value = getattr(self, setting)\n if isinstance(setting_value, bool):\n value = (value == 'True')\n elif isinstance(setting_value, (int, float)):\n value = type(setting_value)(value)\n elif isinstance(setting_value, (list, dict)):\n value = json.loads(value)\n\n setattr(self, setting, value)\n self._explicit_settings.add(setting)",
"def _rollback_env_variable(environ, name, subfolders):\n value = environ[name] if name in environ else ''\n env_paths = [path for path in value.split(os.pathsep) if path]\n value_modified = False\n for subfolder in subfolders:\n if subfolder:\n if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):\n subfolder = subfolder[1:]\n if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):\n subfolder = subfolder[:-1]\n for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):\n path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path\n path_to_remove = None\n for env_path in env_paths:\n env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path\n if env_path_clean == path_to_find:\n path_to_remove = env_path\n break\n if path_to_remove:\n env_paths.remove(path_to_remove)\n value_modified = True\n new_value = os.pathsep.join(env_paths)\n return new_value if value_modified else None",
"def test_set_env_file_path_attribute(self) -> None:\n\n given = self.temp_env_file.name\n expected = given\n\n self.helper.env_file_path = given\n\n actual = self.helper.env_file_path\n\n self.assertEqual(expected, actual)",
"def update(self, tree_path, value):\n\t\traise NotImplementedError",
"def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)",
"def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value",
"def _set_environment(self) -> None:\n last_update_time = time.time()\n while True:\n # The 'math' in the next line keeps the refresh intervals more regular since the update takes time to\n # complete.\n time.sleep(REFRESH_INTERVAL - (time.time() - last_update_time)) # REFRESH_INTERVAL - ELAPSED_TIME\n last_update_time = time.time()\n with self.lock:\n if self.desired_environment:\n self._update_environment(self.desired_environment)",
"def _setEnv(self):\n try:\n global_env_prfix = \"/GlobalEnv/\"\n if self.etcd_key_prefix is not None:\n global_env_prfix = self.etcd_key_prefix + \"/GlobalEnv/\"\n value = self.etcd.get(global_env_prfix)\n if value[0] is not None:\n jsonConfig = json.loads(value[0].decode('utf-8'))\n for key in jsonConfig.keys():\n os.environ[key] = jsonConfig[key]\n else:\n raise TypeError(\"config manager key {} must be set as \\\n a prerequisite ...\".format(global_env_prfix))\n except Exception as e:\n self.logger.error(\"Exception raised in _setEnv\\\n with error:{}\".format(e))\n raise e",
"def test_set_artifacts_path__allow_change():\n path_before = copy.copy(ContractHandler.artifacts_path)\n assert path_before is not None\n assert ContractHandler._contracts\n\n ContractHandler.set_artifacts_path(\"new path\")\n\n assert ContractHandler.artifacts_path == \"new path\"\n assert not ContractHandler._contracts # cache should have reset",
"def __set__(self, obj, val):\n try:\n self._resolve(val)\n except IOError, e:\n Parameterized(name=\"%s.%s\"%(obj.name,self._attrib_name)).warning('%s'%(e.args[0]))\n\n super(Path,self).__set__(obj,val)",
"def sync_local_fabric_env(self):\n env.sync_filename = '/tmp/{0}_env.txt'.format(time.time())\n env_copy = self.env\n env_copy.use_ssh_config = False\n env_copy.host = False\n env_copy.host_string = False\n env_copy.local_deployment = True\n # TODO: add context from each need to repopulate\n with self.file.tmpfile(self.to_json(env_copy, cls=SilentEncoder)) as f:\n self.up(f.name, env.sync_filename)",
"def update_val(self, val):\n self.in_val = val",
"def reset(self):\n\n for value in self.__dict__.itervalues():\n if isinstance(value, EnvParm):\n value._value = 'use_env'\n getattr(value, 'value')",
"def set_environment_variables(self, time_info):\n\n for identifier, file_path in self.c_dict['INPUT_LIST_DICT'].items():\n self.add_env_var(f'METPLUS_FILELIST_{identifier.upper()}',\n file_path)\n\n super().set_environment_variables(time_info)",
"def update_path():\n #TODO update path information\n pass",
"def relay_env_setitem(c, env, key, x):\n gv = c.types.get_env_update(x.abstract)\n return relay.Call(gv, [c.ref(env), c.ref(key), c.ref(x)])",
"def test_set_artifacts_path__deny_change_to_same():\n path_before = copy.copy(ContractHandler.artifacts_path)\n assert path_before is not None\n assert ContractHandler._contracts\n\n ContractHandler.set_artifacts_path(path_before)\n\n assert ContractHandler.artifacts_path == path_before\n assert ContractHandler._contracts # cache should *not* have reset"
] | [
"0.62745166",
"0.59792364",
"0.5785462",
"0.5765213",
"0.5754156",
"0.56319785",
"0.5602906",
"0.55717754",
"0.5535034",
"0.5533392",
"0.5532169",
"0.55176127",
"0.5516329",
"0.549734",
"0.5485281",
"0.54776955",
"0.54740787",
"0.54621756",
"0.5461831",
"0.5431891",
"0.54318756",
"0.5422078",
"0.54047",
"0.5355977",
"0.53282285",
"0.53278154",
"0.5321409",
"0.53095776",
"0.53048116",
"0.52880275"
] | 0.7859146 | 0 |
Process options based on legal operations & subcommands; return sanitized cmds and arguments | def process_options(args):
subcmds = dict() # each key(cmd) can take on a val of 0, or 1
subcmds_wo_arg = [ 'clean', 'list' ]
subcmds_with_args = [ 'add', 'remove' ]
for cmd in subcmds_wo_arg:
subcmds[cmd] = 0
for cmd in subcmds_with_args:
subcmds[cmd] = 1
if (len(args) == 0):
usage("ERROR. must have one sub-command available")
cmd = args.pop(0)
argc = len(args)
def bad_args(cmd, argc):
return True if argc < subcmds[cmd] else False
env_var = ''
# determine what kind of cmd was given and arguments
if cmd not in subcmds:
usage("ERROR. Unrecognized cmd " + cmd + "! cmd must be from appropriate list")
elif bad_args(cmd, argc):
usage("Must enter at least one argument for " + cmd)
elif argc > subcmds[cmd]: # determine if it defaults to PATH or anything else
if os.getenv(args[0]) != None:
env_var = args.pop(0)
elif os.getenv(args[0].upper()) != None:
env_var = args.pop(0).upper()
else: # first argument is NOT a known env variable
if (cmd == 'remove'):
env_var = 'PATH'
elif (cmd == 'add') and ('/' not in args[0]) and (len(args) > 1): # not like a path & has at least one other argument
env_var = args.pop(0) # assume new env variable to be created
else:
usage("Unrecognized environment variable " + args[0])
else:
env_var = 'PATH'
return (cmd, env_var, args) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_run_options(self, cmdp, exec_engine=None):\n cmdp.declare_options(\"-v= -e= -w= -u= -p= -i -t -a -P\")\n cmd_options = {\n \"netcoop\": {\n \"fl\": (\"-P\", \"--publish-all\", \"--netcoop\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"portsmap\": {\n \"fl\": (\"-p=\", \"--publish=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"novol\": {\n \"fl\": (\"--novol=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"vol\": {\n \"fl\": (\"-v=\", \"--volume=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"env\": {\n \"fl\": (\"-e=\", \"--env=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"envfile\": {\n \"fl\": (\"--env-file=\",), \"act\": 'E',\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"user\": {\n \"fl\": (\"-u=\", \"--user=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cwd\": {\n \"fl\": (\"-w=\", \"--workdir=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"entryp\": {\n \"fl\": (\"--entrypoint=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cpuset\": {\n \"fl\": (\"--cpuset-cpus=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostauth\": {\n \"fl\": (\"--hostauth\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"containerauth\": {\n \"fl\": (\"--containerauth\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nosysdirs\": {\n \"fl\": (\"--nosysdirs\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostenv\": {\n \"fl\": (\"--hostenv\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"bindhome\": {\n \"fl\": (\"--bindhome\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nometa\": {\n \"fl\": (\"--nometa\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dri\": {\n \"fl\": (\"--dri\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cmd\": {\n \"fl\": (\"P+\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"volfrom\": {\n \"fl\": (\"--volumes-from=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dns\": {\n \"fl\": (\"--dns=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dnssearch\": {\n \"fl\": (\"--dns-search=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"kernel\": {\n \"fl\": (\"--kernel=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"devices\": {\n \"fl\": (\"--device=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"nobanner\": {\n \"fl\": (\"--nobanner\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"platform\": {\n \"fl\": (\"--platform=\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"pull\": {\n \"fl\": (\"--pull=\"), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n }\n }\n for option, cmdp_args in list(cmd_options.items()):\n last_value = None\n for cmdp_fl in cmdp_args[\"fl\"]:\n option_value = cmdp.get(cmdp_fl, cmdp_args[\"p2\"],\n cmdp_args[\"p3\"])\n if not exec_engine:\n continue\n if cmdp_args[\"act\"] == \"R\": # action is replace\n if option_value or last_value is None:\n exec_engine.opt[option] = option_value\n elif cmdp_args[\"act\"] == \"E\": # action is extend\n # if option == \"env\":\n # print (type(option_value))\n # print (option_value)\n exec_engine.opt[option].extend(option_value)\n last_value = option_value",
"def parse_options(self, argv):\n parser = argparse.ArgumentParser(\n prog='rbext',\n usage='%(prog)s <command>',\n formatter_class=HelpFormatter,\n description=(\n 'rbext helps create initial source code trees for extensions '\n 'and helps run extension test suites within a '\n 'pre-established Review Board development environment.\\n'\n '\\n'\n 'To get help on an individual command, run:\\n'\n '\\n'\n ' rbext <command> --help'\n ))\n parser.add_argument(\n '--version',\n action=RBProgVersionAction)\n\n subparsers = parser.add_subparsers(\n title='Commands',\n dest='command',\n description=(\n 'To get additional help for these commands, run: '\n 'rb-site <command> --help'\n ))\n\n commands = sorted(self.COMMANDS, key=lambda cmd: cmd.name)\n command_map = {}\n\n for command in commands:\n command_map[command.name] = command\n\n subparser = subparsers.add_parser(\n command.name,\n formatter_class=HelpFormatter,\n prog='%s %s' % (parser.prog, command.name),\n description=command.description_text,\n help=command.help_summary)\n\n subparser.add_argument(\n '-d',\n '--debug',\n action='store_true',\n dest='debug',\n default=False,\n help='Display debug output.')\n subparser.add_argument(\n '-s',\n '--settings-file',\n dest='settings_file',\n default=None,\n help='test_settings.py file to use for any custom settings.')\n\n command.add_options(subparser)\n\n # Prevent the '--' and anything after it from being parsed, so the\n # command can work with it.\n try:\n i = argv.index('--')\n argv = argv[:i]\n except ValueError:\n # The \"--\" isn't in the list anywhere.\n pass\n\n options = parser.parse_args(argv)\n\n if not options.command:\n parser.print_help()\n sys.exit(1)\n\n return command_map[options.command], options",
"def getComandLineOptions(self,):\n\n import argparse\n import os\n import sys\n import re\n \n indata = None\n \n #if re.search('(\\ -h\\ |$)|(\\ --help\\ |$)',self.commandLine): print man\n \n # commandLine arguments parsing\n if self.command == 'initiateAnalysis': prog = 'SEAseq2 initiateAnalysis <path> <type>'\n if self.command == 'commandLog': prog = 'SEAseq2 commandLog <path>'\n if self.command == 'addData': prog = 'SEAseq2 addData <path>'\n argparser = argparse.ArgumentParser(prog=prog, description='', epilog='Use: \"SEAseq2 help\" to get more detailed help.', formatter_class=argparse.RawTextHelpFormatter)\n \n # All programs\n argparser.add_argument('--debug', dest='debug', action='store_true', required=False, default=False, help='Run the program in debug-mode, single process python script (SLOW).')\n argparser.add_argument(\t'-p', dest='cpus', metavar='N',\ttype=int, required=False, default=1,help='The number of processes to run in parallel (default 1).')\n\n if self.command == 'commandLog':\n try: indata = argparser.parse_args(self.commandLineList[3:])\n except IndexError: pass\n \n if self.command == 'initiateAnalysis':\n try: self.mode = self.commandLineList[3]\n except IndexError:\n print 'ERROR: no analysis mode supplied.'\n sys.exit(1)\n try: indata = argparser.parse_args(self.commandLineList[4:])\n except IndexError: pass\n\n if self.command == 'changeSettings': pass\n \n if self.command == 'startAnalysis':\n if self.onUppmax:\n argparser.add_argument('-prj','-project',dest='project',metavar='<b20xxxxx>',\ttype=str,\trequired=False,\tdefault='b2014005',\thelp='uppmaxproject (default b2011011)')\n #argparser.add_argument('--send',\tdest='send', \taction='store_true', \t\t\trequired=False,\tdefault=False,\thelp='Send sbatch scripts to job-queue.')\n #argparser.add_argument('--sendonly',\tdest='sendonly',action='store_true', \t\t\trequired=False,\tdefault=False,\thelp='Do not generate the files only Send sbatch scripts to job-queue.')\n #argparser.add_argument('--small',\tdest='small', \taction='store_true', \t\t\trequired=False,\tdefault=False,\thelp='make for smaller dataset job-queue.')\n try:\n indata = argparser.parse_args(self.commandLineList[3:])\n SEAseqPipeLine.settings.uppmaxProject = indata.project\n except IndexError: pass\n \n if self.command == 'addData':\n argparser.add_argument('-r1',dest='fastq1',\tmetavar='FILE',type=file,required=True, help='Indata \"fastq\"-file read1.')\n argparser.add_argument('-r2',dest='fastq2',\tmetavar='FILE',type=file,required=True,\thelp='Indata \"fastq\"-file read2.')\n try:\n indata = argparser.parse_args(self.commandLineList[3:])\n self.fastq1 = os.path.abspath(indata.fastq1.name)\n self.fastq2 = os.path.abspath(indata.fastq2.name)\n except IndexError: pass\n \n SEAseqPipeLine.settings.debug = indata.debug\n SEAseqPipeLine.settings.parallelProcesses = indata.p",
"def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging",
"def get_parsed_cmd_args(self, test_case=None):\n\n class BooleanAction(argparse.Action):\n \"\"\"Custom action for storing boolean options\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(BooleanAction, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, value, option_string):\n setattr(namespace, self.dest, value not in [\"False\", \"false\"])\n\n class ArrayAction(argparse.Action):\n \"\"\"Custom action for storing comma seperated arrays\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(ArrayAction, self).__init__(*args, **kwargs)\n\n def __call__(self, parser, namespace, value, option_string):\n setattr(namespace, self.dest, value.split(\",\"))\n\n argument_parser = argparse.ArgumentParser(\n description=\"Encryption identification scanner: \" \\\n + \"scans a set of packages to detect use of encryption algorithms.\",\n epilog=\"For additional information, visit: \" \\\n + \"https://github.com/Wind-River/crypto-detector\")\n\n argument_parser.add_argument(\"--version\", \\\n action='version', version=self.version)\n\n # automatically generate options for methods\n\n for method in Options.available_methods():\n\n method_class = Options.available_methods()[method]\n\n if not hasattr(method_class, \"options\"):\n continue\n\n for option in method_class.options:\n self.options[method + \"_\" + option] = method_class.options[option]\n self.method_options[method + \"_\" + option] = (method, option)\n\n if hasattr(method_class, \"options_help\"):\n self.options_help.update({\n method + \"_\" + option: method_class.options_help[option] \\\n for option in method_class.options_help})\n\n for option in self.options:\n\n if option == \"packages\":\n continue\n\n additional_args = {}\n\n if isinstance(self.options[option], list):\n additional_args[\"action\"] = ArrayAction\n\n elif isinstance(self.options[option], bool):\n additional_args[\"nargs\"] = \"?\"\n additional_args[\"choices\"] = [\"True\", \"true\", \"False\", \"false\"]\n additional_args[\"action\"] = BooleanAction\n\n elif option == \"output_existing\":\n additional_args[\"choices\"] = [\"rename\", \"overwrite\", \"skip\"]\n\n self.parse_cmd_argument(argument_parser, option, additional_args)\n\n argument_parser.add_argument(nargs='*', dest=\"packages\", help=self.options_help[\"packages\"])\n\n if test_case:\n return vars(argument_parser.parse_args(test_case))\n\n return vars(argument_parser.parse_args())",
"def preprocess_options(base_options, cmdargs):\n # ===============================================================================\n\n class OptionValues(object):\n pass\n\n option_values = OptionValues()\n # Create a base option dictionary indexed by short and long options.\n # Add the built-in optparse help and version options so that they can be\n # detected as stand-alone options.\n options = {}\n builtins = [BooleanOption('-h', '--help', 'help', ''),\n BooleanOption(None, '--version', 'version', '')]\n for opt in list(base_options) + builtins:\n setattr(option_values, opt.get_dest(), opt.get_default())\n if opt.short_opt:\n options[opt.short_opt] = opt\n if opt.long_opt:\n options[opt.long_opt] = opt\n # Walk through the options and arguments and set option values as attributes.\n iopt = 0\n while iopt < len(cmdargs):\n if cmdargs[iopt].startswith('-'):\n if cmdargs[iopt] in options:\n opt = options[cmdargs[iopt]]\n if opt.has_value():\n # Option with argument\n setattr(option_values, opt.get_dest(), cmdargs[iopt + 1])\n iopt += 1\n else:\n # Boolean option\n setattr(option_values, opt.get_dest(), True)\n iopt += 1\n return option_values",
"def process_options(self):\n\n argv = sys.argv\n\n # process any optlist_ options\n self.valid_opts.check_special_opts(argv)\n\n # process terminal options without the option_list interface\n # (so that errors are not reported)\n\n # if no arguments are given, do default processing\n if '-help' in argv or len(argv) < 2:\n print(g_help_string)\n return 1\n\n if '-hist' in argv:\n print(g_history)\n return 1\n\n if '-show_valid_opts' in argv:\n self.valid_opts.show('', 1)\n return 1\n\n if '-ver' in argv:\n print(g_version)\n return 1\n\n # ============================================================\n # read options specified by the user\n self.user_opts = OL.read_options(argv, self.valid_opts)\n uopts = self.user_opts # convenience variable\n if not uopts: return -1 # error condition\n\n # ------------------------------------------------------------\n # process verb first\n\n val, err = uopts.get_type_opt(int, '-verb')\n if val != None and not err: self.verb = val\n\n # ------------------------------------------------------------\n # process options sequentially, to make them like a script\n errs = 0\n for opt in self.user_opts.olist:\n # check for anything to skip\n if opt.name == '-verb': pass\n\n elif opt.name == '-infiles':\n self.infiles, err = uopts.get_string_list('', opt=opt)\n if self.infiles == None or err:\n print('** failed to read -infiles list')\n errs +=1\n\n self.parse_infile_names()\n\n elif opt.name == '-overwrite':\n self.overwrite = 1\n\n elif opt.name == '-separator':\n self.separator, err = uopts.get_string_opt('', opt=opt)\n if self.separator == None or err:\n print(\"** bad -tablefile option\")\n errs += 1\n if self.separator == 'tab': self.separator = '\\t'\n elif self.separator == 'whitespace': self.separator = 'ws'\n self.seplen = len(self.separator)\n\n elif opt.name == '-showlabs':\n self.showlabs = 1\n\n elif opt.name == '-show_missing':\n self.show_missing = 1\n\n elif opt.name == '-tablefile':\n self.tablefile, err = uopts.get_string_opt('', opt=opt)\n if self.tablefile == None or err:\n print(\"** bad -tablefile option\")\n errs +=1\n\n else:\n oind = self.user_opts.olist.index(opt)\n print('** unknown option # %d: %s' % (oind+1, opt.name))\n errs += 1\n break\n\n # allow early and late error returns\n if errs: return -1\n\n # ------------------------------------------------------------\n # apply any trailing logic\n\n if len(self.infiles) < 1:\n print('** missing -infiles option')\n errs += 1\n\n if errs: return -1\n\n return 0",
"def _process_commands(self, pwd, cmds):\n if self.func_map.get(cmds[0]):\n func = self.func_map[cmds[0]]\n \n args, kwargs = self._get_args(cmds[1:]) \n err_msg = self._check_input(func, args, kwargs)\n if err_msg: return err_msg\n \n _, return_msg = func(*args, **kwargs)\n\n else:\n return_msg = '[error]: no cmd found.'\n return return_msg",
"def treat_options( opts, arg, n_arg, usage_string ) :\n global sent_handler\n global lower_attr\n global input_filetype_ext\n global output_filetype_ext\n\n ctxinfo = util.CmdlineContextInfo(opts)\n util.treat_options_simplest(opts, arg, n_arg, usage_string)\n sent_handler = LowercaserHandler.handle_sentence_simple # default\n \n for o, a in ctxinfo.iter(opts):\n if o == \"--from\":\n input_filetype_ext = a\n elif o == \"--to\":\n output_filetype_ext = a\n elif o in (\"-l\",\"--lemmas\" ) :\n lower_attr = \"lemma\"\n elif o in (\"-a\", \"--algorithm\"):\n algoname = a.lower()\n if algoname == \"simple\" : # Redundant, kept for clarity\n sent_handler = LowercaserHandler.handle_sentence_simple\n elif algoname == \"complex\" :\n sent_handler = LowercaserHandler.handle_sentence_complex\n elif algoname == \"aggressive\" : # Redundant, kept for clarity\n sent_handler = LowercaserHandler.handle_sentence_aggressive\n else :\n ctxinfo.error(\"Bad algorithm name `{name}`\", name=algoname)\n\n elif o == \"-m\":\n ctxinfo.error(\"Deprecated option. Use --from=Moses instead\" )\n elif o == \"-x\":\n ctxinfo.error(\"Deprecated option. \" \\\n \"Use --from=PlainCorpus instead\")\n else:\n raise Exception(\"Bad arg: \" + o)",
"def read_cmd(self):\n\n parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)\n req_opts = parser.add_argument_group(\"Required Options\")\n req_opts.add_argument(\"--instance_dir\", required=True,\n help=\"directory with instances (not recursive\")\n \n opt_opts = parser.add_argument_group(\"Optional Options\")\n \n opt_opts.add_argument(\"--fn_suffix\", default=\".*\",\n help=\"suffix of instance file names\")\n opt_opts.add_argument(\"--cutoff\", default=10, type=int,\n help=\"running time cutoff [sec]\")\n opt_opts.add_argument(\"--memlimit\", default=2048, type=int,\n help=\"memory limit\")\n opt_opts.add_argument(\"--ac_budget\", default=360,\n help=\"configuration budget [sec]\")\n opt_opts.add_argument(\"--run_obj\", default=\"runtime\",\n choices=[\"runtime\", \"quality\"],\n help=\"run objective\")\n opt_opts.add_argument(\"--par-factor\", default=10,\n help=\"Factor by which to penalize unsolved instances. Usage may differ based on TAE used.\")\n\n opt_opts.add_argument(\"--binary\", default=\"clingo\",\n help=\"target binary\")\n opt_opts.add_argument(\"--pcs_file\", default=\"pcs/all_params.pcs\",\n help=\"parameter configuration file\")\n opt_opts.add_argument(\"--runsolver\", default=\"binaries/runsolver\",\n help=\"runsolver binary\")\n opt_opts.add_argument(\"--tae_class\", default=None,\n help=\"TAE class to individualize clingo calls -- has to inherit from smac.tae.execute_ta_run_aclib.ExecuteTARunAClib\")\n\n\n opt_opts.add_argument(\"--seed\", default=12345, type=int,\n help=\"random seed\")\n opt_opts.add_argument(\"--verbose_level\", default=logging.INFO,\n choices=[\"INFO\", \"DEBUG\"],\n help=\"random seed\")\n opt_opts.add_argument(\"--tae_args\", default=\"{}\",\n help=\"Miscellaneous options for the TAE\")\n \n\n args_, misc = parser.parse_known_args()\n self._check_args(args_)\n args_.tae_args=json.loads(args_.tae_args)\n\n # remove leading '-' in option names\n misc = dict((k.lstrip(\"-\"), v.strip(\"'\"))\n for k, v in zip(misc[::2], misc[1::2]))\n\n misc[\"instances\"] = self._find_files(dir_=args_.instance_dir, suffix_=args_.fn_suffix)\n misc[\"wallclock_limit\"] = args_.ac_budget\n misc[\"cutoff_time\"] = args_.cutoff\n misc[\"paramfile\"] = args_.pcs_file\n misc[\"algo\"] = \"\"\n misc[\"run_obj\"] = args_.run_obj\n\n return args_, misc",
"def option_setup(func):\n option_parser = OptionParser()\n for option in option_list:\n option_parser.add_option(option)\n # Allow reasonable help for commands defined with @options and an empty list of options\n if len(option_list) > 0:\n option_parser.set_usage(\"%s [options] %s\" % (func.__name__[3:], arg_desc))\n else:\n option_parser.set_usage(\"%s %s\" % (func.__name__[3:], arg_desc))\n option_parser._func = func\n\n def new_func(instance, arg):\n \"\"\"For @options commands this replaces the actual do_* methods in the instance __dict__.\n\n First it does all of the option/argument parsing. Then it calls the underlying do_* method.\n\n :param instance: cmd2.Cmd2 derived class application instance\n :param arg: str - command-line arguments provided to the command\n :return: bool - returns whatever the result of calling the underlying do_* method would be\n \"\"\"\n try:\n # Use shlex to split the command line into a list of arguments based on shell rules\n opts, new_arglist = option_parser.parse_args(shlex.split(arg, posix=POSIX_SHLEX))\n\n # If not using POSIX shlex, make sure to strip off outer quotes for convenience\n if not POSIX_SHLEX and STRIP_QUOTES_FOR_NON_POSIX:\n temp_arglist = []\n for arg in new_arglist:\n temp_arglist.append(strip_quotes(arg))\n new_arglist = temp_arglist\n\n # Also strip off outer quotes on string option values\n for key, val in opts.__dict__.items():\n if isinstance(val, str):\n opts.__dict__[key] = strip_quotes(val)\n\n # Must find the remaining args in the original argument list, but\n # mustn't include the command itself\n # if hasattr(arg, 'parsed') and new_arglist[0] == arg.parsed.command:\n # new_arglist = new_arglist[1:]\n if USE_ARG_LIST:\n arg = new_arglist\n else:\n new_args = remaining_args(arg, new_arglist)\n if isinstance(arg, ParsedString):\n arg = arg.with_args_replaced(new_args)\n else:\n arg = new_args\n except optparse.OptParseError as e:\n print(e)\n option_parser.print_help()\n return\n if hasattr(opts, '_exit'):\n return None\n result = func(instance, arg, opts)\n return result\n\n new_func.__doc__ = '%s%s' % (func.__doc__ + '\\n' if func.__doc__ else '', option_parser.format_help())\n return new_func",
"def cli_options():\n\n parser = argparse.ArgumentParser(\n description='c[apirca]grep',\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument('-d', '--def', dest='defs',\n help='Network Definitions directory location. \\n',\n default='./def')\n\n # -i and -t can be used together, but not with any other option.\n ip_group = parser.add_argument_group()\n # take 1 or more IPs\n ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip,\n help='Return list of definitions containing the '\n 'IP(s).\\nMultiple IPs permitted.')\n\n ip_group.add_argument('-t', '--token', dest='token',\n help=('See if an IP is contained within the given '\n 'token.\\nMust be used in conjunction with '\n '-i/--ip [addr].'))\n\n exclusive_group = parser.add_mutually_exclusive_group()\n # the rest of the arguments are mutually exclusive with each other,\n # and -i / -t\n exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2,\n metavar=('OBJ', 'OBJ'),\n help=('Compare the two given network '\n 'definition tokens'))\n\n exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,\n type=is_valid_ip, metavar=('IP', 'IP'),\n help=('Diff the network objects to'\n ' which the given IP(s) belong'))\n\n exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',\n help=('Return list of IP(s) contained within '\n 'the given token(s)'))\n\n exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',\n help=('Return list of port(s) contained '\n 'within given token(s)'))\n\n exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,\n metavar=('PORT', 'PROTO'),\n help=('Returns a list of tokens containing '\n 'the given port and protocol'))\n\n return parser",
"def _process_command_line():\n # pylint: disable=C0103\n global parameters\n # pylint: enable=C0103\n\n # option letters followed by : expect an argument\n # same for option strings followed by =\n if parameters[\"Command flavour\"] in (\"posix\", \"linux\"):\n character_options = \"s?\"\n else: # if parameters[\"Command flavour\"] in (\"PNU\", \"bsd\", \"bsd:freebsd\"):\n character_options = \"qs?\"\n string_options = [\n \"debug\",\n \"help\",\n \"version\",\n ]\n\n try:\n options, remaining_arguments = getopt.getopt(\n sys.argv[1:], character_options, string_options\n )\n except getopt.GetoptError as error:\n logging.critical(\"Syntax error: %s\", error)\n _display_help()\n sys.exit(1) # no match\n\n for option, _ in options:\n\n if option == \"--debug\":\n logging.disable(logging.NOTSET)\n\n elif option in (\"--help\", \"-?\"):\n _display_help()\n sys.exit(1) # no match\n\n elif option == \"-q\":\n parameters[\"No formatting\"] = True\n\n elif option == \"-s\":\n parameters[\"First match only\"] = True\n\n elif option == \"--version\":\n print(ID.replace(\"@(\" + \"#)\" + \" $\" + \"Id\" + \": \", \"\").replace(\" $\", \"\"))\n sys.exit(1) # no match\n\n logging.debug(\"_process_command_line(): parameters:\")\n logging.debug(parameters)\n logging.debug(\"_process_command_line(): remaining_arguments:\")\n logging.debug(remaining_arguments)\n\n return remaining_arguments",
"def cmdline(self, executable, options, task, rlimits):\n data_model_param = get_data_model_from_task(task, {ILP32: \"-m32\", LP64: \"-m64\"})\n print(options)\n if data_model_param and not any(\n option.startswith(\"--clang-options=\") for option in options\n ):\n options += [\"--clang-options=\" + data_model_param]\n\n if task.property_file:\n options += [\"--svcomp-property\", task.property_file]\n else:\n raise UnsupportedFeatureException(\n \"SMACK can't execute without a property file.\"\n )\n\n options += [task.single_input_file]\n\n return [executable] + options",
"def processOptions_(self, opts):\n\n for opt in opts.keys():\n val = opts[opt]\n\n # Skip actions, they are processed later in initializeActions_()\n if opt in self.main_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n if opt in self.aux_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n \n\n elif ( opt == '-cfg' ):\n pass\n\n elif ( opt in ('-continue', '-c') ):\n # Already processed in processContinueOption_()\n pass\n\n elif ( opt == '-Q' ):\n self.flag_quiet = 1\n pass\n\n elif ( opt == '-debug' ):\n if val: self.debug_level = int(val)\n else: self.debug_level = 1\n pass\n\n elif string.find(opt,'.') == -1:\n print common.prog_name+'. Unrecognized option '+opt\n usage()\n pass\n\n # Override config parameters from INI-file with cmd-line params\n if string.find(opt,'.') == -1 :\n self.cfg_params['SKIM.'+opt[1:]] = val\n pass\n else:\n # Command line parameters in the form -SECTION.ENTRY=VALUE\n self.cfg_params[opt[1:]] = val\n pass\n pass\n return",
"def _ProcessOptions(argv):\n try:\n opts, args = getopt.getopt(argv, '', [x + '=' for x in _ClOptions])\n except getopt.GetoptError, err:\n print(str(err)) # will print something like 'option -a not recognized'\n sys.exit(-1)\n\n for o, a in opts:\n # strip the leading '--'\n option = o[2:]\n assert option in _ClOptions\n if type(_ClOptions[option]) == int:\n _ClOptions[option] = int(a)\n else:\n _ClOptions[option] = a\n # return the unprocessed options, i.e. the command\n return args",
"def parseOptions(self, options=None):\r\n if options is None:\r\n options = sys.argv[1:]\r\n\r\n try:\r\n opts, args = getopt.getopt(options, self.shortOpt, self.longOpt)\r\n except getopt.error as e:\r\n raise usage.UsageError(str(e))\r\n\r\n for opt, arg in opts:\r\n if opt[1] == '-':\r\n opt = opt[2:]\r\n else:\r\n opt = opt[1:]\r\n\r\n optMangled = opt\r\n if optMangled not in self.synonyms:\r\n optMangled = opt.replace(\"-\", \"_\")\r\n if optMangled not in self.synonyms:\r\n raise usage.UsageError(\"No such option '{0}'\".format(opt))\r\n\r\n optMangled = self.synonyms[optMangled]\r\n if isinstance(self._dispatch[optMangled], usage.CoerceParameter):\r\n self._dispatch[optMangled].dispatch(optMangled, arg)\r\n else:\r\n self._dispatch[optMangled](optMangled, arg)\r\n\r\n if (getattr(self, 'subCommands', None)\r\n and (args or self.defaultSubCommand is not None)):\r\n if not args:\r\n args = [self.defaultSubCommand]\r\n sub, rest = args[0], args[1:]\r\n for (cmd, short, parser, _) in self.subCommands:\r\n if sub == cmd or sub == short:\r\n self.subCommand = cmd\r\n self.subOptions = parser(self.terminal)\r\n self.subOptions.parent = self\r\n self.subOptions.parseOptions(rest)\r\n break\r\n else:\r\n raise usage.UsageError(\"Unknown command: {0}\".format(sub))\r\n else:\r\n try:\r\n self.parseArgs(*args)\r\n except TypeError:\r\n raise usage.UsageError(\"Wrong number of arguments.\")\r\n\r\n self.postOptions()",
"def _parse_options(self):\n parser = argparse.ArgumentParser(prog=self._program,\n formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=30, width=132))\n parser.add_argument(\"--debug\", action='store_true', default=self._debug, help=\"The debug flag. (Default: {0})\".format(self._debug))\n parser.add_argument(\"--drives\", default=None, help=\"The drives to display. (Default: {0})\".format(self._drives))\n parser.add_argument(\"--exclude\", default=None, help=\"The drives to exclude. (Default: {0})\".format(self._exclude))\n parser.add_argument(\"--force_spt\", action='store_true', help=\"Force using spt (debug). (Default: {0})\".format(self._force_spt))\n parser.add_argument(\"--json\", action='store_true', default=self._json_format, help=\"Enable JSON format. (Default: {0})\".format(self._json_format))\n parser.add_argument(\"--long\", action='store_true', default=self._long_format, help=\"Enable long format. (Default: {0})\".format(self._long_format))\n parser.add_argument(\"--noencs\", action='store_false', default=self._include_enclosures, help=\"Exclude enclosures. (Default: {0})\".format(not self._include_enclosures))\n parser.add_argument(\"--noheader\", action='store_false', default=self._report_header, help=\"Exclude headers. (Default: {0})\".format(not self._report_header))\n parser.add_argument(\"--power_on_hours\", action='store_true', default=self._power_on_hours, help=\"Include power on hours. (Default: {0})\".format(not self._power_on_hours))\n # Filters for spt:\n parser.add_argument(\"--firmware_version\", default=None, help=\"The firmware version. (Default: {0})\".format(self.firmware_version))\n parser.add_argument(\"--product_name\", default=None, help=\"The product name. (Default: {0})\".format(self.product_name))\n parser.add_argument(\"--vendor_name\", default=None, help=\"The vendor name. (Default: {0})\".format(self.vendor_name))\n parser.add_argument(\"--serial_number\", default=None, help=\"The serial number. (Default: {0})\".format(self.serial_number))\n parser.add_argument(\"--sas_address\", default=None, help=\"The SAS address. (Default: {0})\".format(self.target_port))\n parser.add_argument(\"--target_port\", default=None, help=\"The target port. (Default: {0})\".format(self.target_port))\n parser.add_argument(\"--use_lsscsi\", action='store_true', help=\"Find devices via lsscsi. (Default: {0})\".format(self._use_lsscsi))\n parser.add_argument(\"--spt_path\", default=None, help=\"The spt tool path. (Default: {0})\".format(self.tool))\n\n args = parser.parse_args()\n\n self._debug = args.debug\n if self._debug:\n self.log_level = logging.DEBUG\n self._json_format = args.json\n self._long_format = args.long\n if args.drives:\n self._drives = args.drives.split(',')\n if args.exclude:\n self._exclude = args.exclude.split(',')\n if not args.noencs:\n self._include_enclosures = False\n if not args.noheader:\n self._report_header = False\n if args.power_on_hours:\n self._power_on_hours = True\n if args.firmware_version:\n self.firmware_version = args.firmware_version\n if args.product_name:\n self.product_name = args.product_name\n if args.vendor_name:\n self.vendor_name = args.vendor_name\n if args.serial_number:\n self.serial_number = args.serial_number\n if args.sas_address:\n self.target_port = args.sas_address\n if args.target_port:\n self.target_port = args.target_port\n if args.force_spt:\n self._force_spt = args.force_spt\n if args.use_lsscsi:\n self._use_lsscsi = args.use_lsscsi\n if args.spt_path:\n self.tool = args.spt_path",
"def getCmdOptions():\n #print( \"getCmdOptions() entered...\\n )\"\n my_opts = {}\n err = None\n required_opts = { 'number': True, 'host': True,'port': True, 'help': True, 'debug': True, 'stdout': True, 'logfile': True }\n rc = 1\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hdsn:H:p:l:\", [\"help\", \"debug\", \"stdout\", \"number=\", \"host=\", \"port=\", \"logfile=\"]) #@UnusedVariable\n except(getopt.GetoptError, err):\n # print help information and exit:\n print(str(err)) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif o in (\"-n\", \"--number\"):\n my_opts['number'] = a\n elif o in (\"-H\", \"--host\"):\n my_opts['host'] = a\n elif o in (\"-p\", \"--port\"):\n my_opts['port'] = a\n required_opts['port'] = True\n elif o in (\"-l\", \"--logfile\"):\n my_opts['logfile'] = a\n elif o in (\"-s\", \"--stdout\"):\n my_opts['stdout'] = True\n elif o in (\"-d\", \"--debug\"):\n my_opts['debug'] = True\n else:\n rc = 0\n assert False, \"unhandled option\"\n #Endif\n #Endfor\n\n if(rc == 0):\n usage()\n\n #for k, v in required_opts.iteritem():\n for k, v in required_opts.items(): #@UnusedVariable\n if(required_opts[k] == False):\n msg = sys.argv[0] + \" Must provide: \" + \"--\" + str(k)\n print(msg)\n rc = 0\n #Endif\n #Endfor\n\n if(rc == 0):\n usage()\n sys.exit(2)\n #Endif\n\n resetInit(my_opts)",
"def usr_args():\n\n # initialize parser\n parser = argparse.ArgumentParser()\n\n # set usages options\n parser = argparse.ArgumentParser(\n prog='bco',\n usage='%(prog)s [options]')\n\n # version\n parser.add_argument(\n '-v', '--version',\n action='version',\n version='%(prog)s ' + __version__)\n\n # create subparser objects\n subparsers = parser.add_subparsers()\n\n # Create parent subparser. Note `add_help=False` & creation via `argparse.`\n parent_parser = argparse.ArgumentParser(add_help=False)\n parent_parser.add_argument('-b', '--bco',\n required=True,\n help=\"BioCompute JSON to process.\")\n\n parent_parser.add_argument('-s', '--schema',\n # type = argparse.FileType('r'),\n help=\"Root json schema to validate against.\")\n\n parent_parser.add_argument('-m', '--mappingFile',\n # type = argparse.FileType('r'),\n help=\"Mapping file to convert BioCompute json with.\")\n\n # Create a functions subcommand\n parser_listapps = subparsers.add_parser('functions',\n help='List all available functions.')\n parser_listapps.set_defaults(func=listapps)\n\n # Create the bco_license\n parser_license = subparsers.add_parser('license',\n parents=[parent_parser],\n help='Saves HTML version of BCO License.')\n parser_license.set_defaults(func=bco_license)\n\n # Create a validate subcommand\n parser_validate = subparsers.add_parser('validate',\n parents=[parent_parser],\n help=\"Validation options. \"\n \"Used to test a BCO against a JSON schema. \"\n \"If no schema is supplied the ieee-2791-schema \"\n \"is used as the \"\n \"default.\")\n parser_validate.set_defaults(func=validate_bco)\n\n parser_validate = subparsers.add_parser('convert',\n parents=[parent_parser],\n help=\"Converting options \"\n \"Used to convert a JSON into a schema (default \"\n \"is ieee-2791-schema). If no mapping file is \"\n \"provided, performs default conversions.\")\n parser_validate.set_defaults(func=map_bcos)\n\n\n parser_validate = subparsers.add_parser('update',\n parents=[parent_parser],\n help=\"Update option\"\n \"Updates last modified and etag on BCO. Updates modified time to current time.\")\n parser_validate.set_defaults(func=update_bco)\n\n parser_validate = subparsers.add_parser('map',\n parents=[parent_parser],\n help=\"Mapping options \"\n \"Used to generate a mapping file for a bco/bcos.\")\n parser_validate.set_defaults(func=map_bcos)\n\n # Create a run_cwl subcommand\n parser_run_cwl = subparsers.add_parser('run_cwl',\n parents=[parent_parser],\n help='Run a CWL described in a BCO.')\n parser_run_cwl.set_defaults(func=run_cwl)\n\n # Print usage message if no args are supplied.\n if len(sys.argv) <= 1:\n sys.argv.append('--help')\n\n # Run the appropriate function\n options = parser.parse_args()\n if parser.parse_args().func is listapps:\n options.func(parser)\n else:\n options.func(options)",
"def makecmd(self, options):",
"def processCmdlineOpts(cmdOpts):\n global opts\n opts = {}\n for i in range(1,len(cmdOpts)):\n if re.match('-i', cmdOpts[i]):\n opts['i'] = cmdOpts[i+1]\n if i not in opts: \n opts['i']='awn.xml'\n return opts",
"def extend_cli(self, subparser):",
"def getOptions():\n usage = ('usage: %prog -c CMD -d DIR [-o OPT]\\nThe multicrab command'\n ' executes \"crab CMD OPT\" for each task contained in DIR\\nUse'\n ' multicrab -h for help\"')\n\n parser = OptionParser(usage=usage)\n parser.add_option(\"-c\", \"--crabCmd\", dest=\"crabCmd\",\n help=(\"The crab command you want to execute for each task in \"\n \"the DIR\"), metavar=\"CMD\")\n parser.add_option(\"-d\", \"--projDir\", dest=\"projDir\",\n help=\"The directory where the tasks are located\", metavar=\"DIR\")\n parser.add_option(\"-o\", \"--crabCmdOptions\", dest=\"crabCmdOptions\",\n help=(\"The options you want to pass to the crab command CMD\"\n \"tasklistFile\"), metavar=\"OPT\", default=\"\")\n parser.add_option(\"-r\", \"--noAutoResubmit\", dest=\"noAutoResubmit\",\n help=(\"don't automatically run the resub commands\"),\n metavar=\"noAutoResub\",default=False,action=\"store_true\")\n parser.add_option(\"-i\", \"--ignoreCache\", dest=\"ignoreMulticrabCache\",\n help=(\"don't use cache file to skip checking status of jobs already done\"),\n metavar=\"ignoreCache\",default=False,action=\"store_true\")\n\n (options, args) = parser.parse_args()\n\n if args:\n parser.error(\"Found positional argument(s) %s.\" % args)\n if not options.crabCmd:\n parser.error(\"(-c CMD, --crabCmd=CMD) option not provided\")\n if not options.projDir:\n parser.error(\"(-d DIR, --projDir=DIR) option not provided\")\n if not os.path.isdir(options.projDir):\n parser.error(\"Directory %s does not exist\" % options.projDir)\n\n return options",
"def parse_commands(args):\n\n # command flags, mapped to their function.\n commands = {\n '-l': {\n 'aliases': ['--list'],\n 'func': do_list,\n 'kwargs': None,\n },\n '-h': {\n 'aliases': ['--help'],\n 'func': print_usage,\n 'kwargs': {'retcode': 0},\n },\n }\n\n # Setup list of known flags...\n commandflags = list(commands.keys())\n aliasesgen = (commands[cmdflag]['aliases'] for cmdflag in commands.keys())\n for cmdaliases in aliasesgen:\n commandflags += cmdaliases\n commandflags.sort()\n\n # Function to retrive a flag by its name, or alias.\n def get_flag(flagname):\n \"\"\" Retrieve a flag function by name, or alias. \"\"\"\n if flagname in commands.keys():\n return commands[flagname]\n else:\n for cmdflag in commands.keys():\n if flagname in commands[cmdflag]['aliases']:\n return commands[cmdflag]\n\n # wrun commands must come before any script file.\n commandargs = []\n while ((args) and (args[0].startswith('-'))):\n commandargs.append(args.pop(0))\n\n # Retrieve functions for flags, and call them..\n for cmdarg in commandargs:\n if cmdarg in commandflags:\n # known flag, retrieve the function for it and call it.\n command = get_flag(cmdarg)\n commandkw = command['kwargs']\n commandfunc = command['func']\n if commandkw:\n print_debug('Running command option function with args...')\n commandfunc(**commandkw)\n else:\n print_debug('Running command option function...')\n commandfunc()\n else:\n # unknown flag!\n print_fail((\n 'Unknown flag given!: {}\\n'\n 'Run with --help for usage instructions.'\n ).format(cmdarg))\n\n # Return args without any 'wrun command flags'.\n return args",
"def _main(Options=None):\n cli = CLI()\n shortOptions = \"aAiIeEfFgGyYxXDCVbBNOPlmMrRzZuq\"\n shortArgOptions = \"d:c:v:n:o:p:\"\n if Options:\n if type(Options) == bytes:\n Options = Options.split(\" \", 1)\n Options = list([s.strip() for s in Options])\n else:\n Options = sys.argv[1:]\n\n try:\n options, args = getopt.getopt(Options, shortOptions+shortArgOptions)\n except getopt.GetoptError:\n print(('invalid option: %s'% repr(Options)))\n cli.usage()\n return\n\n if args:\n print(('should not have extraneous arguments: %s'% repr(args)))\n for o, v in options:\n o = o.lstrip('-')\n funcName = 'do_%s'% o\n func = getattr(cli, funcName, None)\n if not func:\n print(('option %s not found in cli functions: %s'% (o, funcName)))\n cli.usage()\n continue\n if o in shortOptions:\n func(None) # dummy arg\n elif o in shortArgOptions:\n func(v)\n else:\n print('options should not come here')\n cli.usage()",
"def ProcessOptions():\n \n MiscUtil.PrintInfo(\"Processing options...\")\n \n # Validate options...\n ValidateOptions()\n \n OptionsInfo[\"CalcRMSD\"] = Options[\"--calcRMSD\"]\n OptionsInfo[\"UseBestRMSD\"] = False\n if re.match(\"^BestRMSD$\", OptionsInfo[\"CalcRMSD\"], re.I):\n OptionsInfo[\"UseBestRMSD\"] = True\n \n OptionsInfo[\"MaxIters\"] = int(Options[\"--maxIters\"])\n \n OptionsInfo[\"Mode\"] = Options[\"--mode\"]\n \n OptionsInfo[\"RefFile\"] = Options[\"--reffile\"]\n OptionsInfo[\"ProbeFile\"] = Options[\"--probefile\"]\n\n # No need for any RDKit specific --outfileParams....\n OptionsInfo[\"InfileParams\"] = MiscUtil.ProcessOptionInfileParameters(\"--infileParams\", Options[\"--infileParams\"])\n \n OptionsInfo[\"Outfile\"] = Options[\"--outfile\"]\n \n OptionsInfo[\"Overwrite\"] = Options[\"--overwrite\"]\n \n OptionsInfo[\"OutDelim\"] = \" \"\n if MiscUtil.CheckFileExt(OptionsInfo[\"Outfile\"], \"csv\"):\n OptionsInfo[\"OutDelim\"] = \",\"\n elif MiscUtil.CheckFileExt(OptionsInfo[\"Outfile\"], \"tsv txt\"):\n OptionsInfo[\"OutDelim\"] = \"\\t\"\n else:\n MiscUtil.PrintError(\"The file name specified , %s, for option \\\"--outfile\\\" is not valid. Supported file formats: csv tsv txt\\n\" % (OptionsInfo[\"Outfile\"]))",
"def parse_commands(self, argsin=None):\r\n\r\n parser=OptionParser() # command line options\r\n\r\n parser.add_option(\"-t\", \"--test\", dest=\"test\", action='store_true',\r\n\r\n help=\"Test mode to check calculations\", default=False)\r\n\r\n parser.add_option(\"--dF/F\", dest=\"mode\", action='store_true',\r\n\r\n help=\"analysis mode set to fft (default) vs. dF/F\", default=False)\r\n\r\n parser.add_option(\"-u\", \"--upfile\", dest=\"upfile\", metavar='FILE',\r\n\r\n help=\"load the up-file\")\r\n\r\n parser.add_option(\"-d\", \"--downfile\", dest=\"downfile\", metavar='FILE',\r\n\r\n help=\"load the down-file\")\r\n\r\n parser.add_option(\"-D\", \"--directory\", dest=\"directory\", metavar='FILE',\r\n\r\n help=\"Use directory for data\")\r\n\r\n parser.add_option(\"-T\", \"--tiff\", dest=\"tifffile\", default=None, type=\"str\",\r\n\r\n help=\"load a tiff file\")\r\n\r\n parser.add_option(\"-p\", '--period', dest = \"period\", default=self.period, type=\"float\",\r\n\r\n help = \"Stimulus cycle period\")\r\n\r\n parser.add_option(\"-c\", '--cycles', dest = \"cycles\", default=self.nrepetitions, type=\"int\",\r\n\r\n help = \"# cycles to analyze\")\r\n\r\n parser.add_option(\"-b\", '--binning', dest = \"binsize\", default=self.binsize, type=\"int\",\r\n\r\n help = \"bin reduction x,y\")\r\n\r\n parser.add_option(\"-z\", '--zbinning', dest = \"zbinsize\", default=self.zbinsize, type=\"int\",\r\n\r\n help = \"bin reduction z\")\r\n\r\n parser.add_option(\"-g\", '--gfilter', dest = \"gfilt\", default=self.gfilter, type=\"float\",\r\n\r\n help = \"gaussian filter width\")\r\n\r\n parser.add_option(\"-f\", '--fdict', dest = \"fdict\", default=None, type=\"int\",\r\n\r\n help = \"Use dictionary entry\")\r\n\r\n parser.add_option(\"-P\", '--freqperiod', dest = \"freqperiod\", default=1.0, type=\"float\",\r\n\r\n help = \"Set Frequency period (seconds)\")\r\n\r\n parser.add_option(\"-F\", '--frequencies', dest = \"freqinfo\", default='[4,32,8]', type=\"str\",\r\n\r\n help = \"Set Frequency settings as string. 
Example: '[4,32,8]' start=4, end=32, nfreqs = 8\")\r\n\r\n parser.add_option('--threshold', dest = \"threshold\", default=self.threshold, type=\"float\",\r\n\r\n help = \"dFF threshold for map\")\r\n\r\n if argsin is not None:\r\n\r\n (options, args) = parser.parse_args(argsin)\r\n\r\n else:\r\n\r\n (options, args) = parser.parse_args()\r\n\r\n\r\n\r\n if options.mode is not None:\r\n\r\n self.mode = options.mode\r\n\r\n if options.period is not None:\r\n\r\n self.period = options.period\r\n\r\n if options.freqperiod is not None:\r\n\r\n self.freqperiod = options.freqperiod\r\n\r\n if options.freqinfo is not None:\r\n\r\n self.freqinfo = options.freqinfo\r\n\r\n if options.cycles is not None:\r\n\r\n self.nrepetitions = options.cycles\r\n\r\n if options.binsize is not None:\r\n\r\n self.binsize = options.binsize\r\n\r\n if options.zbinsize is not None:\r\n\r\n self.zbinsize = options.zbinsize\r\n\r\n if options.gfilt is not None:\r\n\r\n self.gfilter = options.gfilt\r\n\r\n if options.tifffile is not None:\r\n\r\n self.tifffile = options.tifffile\r\n\r\n if options.threshold is not None:\r\n\r\n self.threshold = options.threshold\r\n\r\n \r\n\r\n print ('Freqperiod: ', self.freqperiod )\r\n\r\n print ('Mode: ', self.mode)\r\n\r\n \r\n\r\n if self.freqinfo is not None:\r\n\r\n f0, octave, nfreq = eval(self.freqinfo)\r\n\r\n self.frequencies = self.compute_freqs(f0, octave, nfreq)\r\n\r\n\r\n\r\n if options.upfile is not None:\r\n\r\n self.dir = 'up'\r\n\r\n\r\n\r\n if options.downfile is not None:\r\n\r\n self.dir = 'down'\r\n\r\n\r\n\r\n if options.directory is not None:\r\n\r\n self.directory = options.directory\r\n\r\n\r\n\r\n # if options.upfile is not None:\r\n\r\n # self.upfile = options.upfile\r\n\r\n # target = 1\r\n\r\n #\r\n\r\n # if options.downfile is not None:\r\n\r\n # self.downfile = options.downfile\r\n\r\n # target = 2\r\n\r\n \r\n\r\n if options.test is True:\r\n\r\n self.make_test_data()\r\n\r\n return\r\n\r\n \r\n\r\n if options.tifffile is not None:\r\n\r\n n2 = self.tifffile + '_MMStack_Pos0.ome.tif'\r\n\r\n self.read_tiff_stack(filename=os.path.join(basepath, self.tifffile, n2))\r\n\r\n return\r\n\r\n\r\n\r\n if options.fdict is not None:\r\n\r\n if options.fdict in DB.keys(): # populate options \r\n\r\n options.upfile = DB[options.fdict][0]\r\n\r\n options.downfile = DB[options.fdict][1]\r\n\r\n options.period = DB[options.fdict][4]\r\n\r\n self.period = options.period\r\n\r\n else:\r\n\r\n print (\"File %d NOT in DBase\\n\" % options.fdict)\r\n\r\n return\r\n\r\n \r\n\r\n self.read_meta_stack(os.path.join(basepath, 'video_' + options.upfile + '.ma'))",
"def get_cmd_args():\n\n\n\t#Creates the Argument Parser\n\tparser = ArgumentParser(description = \"ID Lab qPCR Analysis v\" + VERSION + \" \" + QUALITY)\n\n\t#Adds the input file argument\n\tparser.add_argument('-f', '--file',\n\t\t\t\tnargs = '+',\n\t\t\t\ttype = FileType('r'),\n\t\t\t\trequired = True)\n\n\t#Adds the output directory\n\tparser.add_argument('-o', '--output',\n\t\t\t\trequired = True)\n\n\t#Adds the model argument, to select between the three models\n\tparser.add_argument('-m', '--mod', '--model',\n\t\t\t\tnargs = '?',\n\t\t\t\tchoices = ['relative', 'absolute', 'stability'],\n\t\t\t\trequired = True)\n\n\t#Adds the control genes argument, taking a list of gene names\n\tparser.add_argument('-cg', '--cgenes', '--controlgenes',\n\t\t\t\tnargs = '+',\n\t\t\t\trequired = True)\n\n\t#Adds the optional control sample argument for the stability model, taking a list of sample names\n\tparser.add_argument('-cs', '--csample', '--controlsamples',\n\t\t\t\tnargs = '*')\n\n\t#Adds optional outlier cutoff\n\tparser.add_argument('-oc', '--ocutoff',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.3)\n\n\t#Adds optional max outliers\n\tparser.add_argument('-om', '--omax',\n\t\t\t\ttype = float,\n\t\t\t\tdefault = 0.5)\n\n\t#Adds optional encoding \n\tparser.add_argument('-e', '--encoding',\n\t\t\t\tdefault = 'ISO-8859-1')\n\n\t#Adds optional header size\n\tparser.add_argument('-hd', '--header',\n\t\t\t\tdefault = 47)\n\n\treturn vars(parser.parse_args())",
"def parse_command_line(com_string):\r\n\t#\tREMINDER: command line format is $ python relabel_clean.py [options] module (exp_file) file/dir/\r\n\topt_dict = {}\r\n\topts, args = getopt.getopt(com_string, \"c:df:hl:o:\", [\"col=\", \"dict\", \"form=\", \"help\", \"lang=\", \"old=\"])\r\n\t\r\n\t# display help screen if present\r\n\tfor option, value in opts:\r\n\t\tif option == \"-h\" or option == \"--help\":\r\n\t\t\tdisplay_help_screen()\r\n\t\r\n\t# determine module to be used\r\n\tif args[0] == \"relabel\":\r\n\t\topt_dict[\"module\"] = \"1\"\r\n\telif args[0] == \"clean\":\r\n\t\topt_dict[\"module\"] = \"2\"\r\n\telif args[0] == \"dictionary\":\r\n\t\topt_dict[\"module\"] = \"3\"\r\n\telse:\r\n\t\tsys.exit(\"Unrecognized module.\")\r\n\t\r\n\t# populate option dictionary for each module with defaults and arguments\r\n\tif opt_dict[\"module\"] == \"1\":\r\n\t\tif len(args) == 3:\r\n\t\t\topt_dict[\"text file\"] = args[1]\r\n\t\t\topt_dict[\"file dir\"] = name_check(args[2])\r\n\t\telse:\r\n\t\t\topt_dict[\"text file\"] = None\r\n\t\t\topt_dict[\"file dir\"] = None\r\n\t\t\t\r\n\t\topt_dict[\"columns\"] = \"experiment_item_condition\"\r\n\t\topt_dict[\"dict\"] = False\r\n\t\topt_dict[\"format\"] = \"experiment_participant_item_condition\"\r\n\t\topt_dict[\"lang\"] = None\r\n\t\topt_dict[\"old dir\"] = \"0_old_labfile_relabel/\"\r\n\telif opt_dict[\"module\"] == \"2\":\r\n\t\tif len(args) == 2:\r\n\t\t\topt_dict[\"file dir\"] = name_check(args[1])\r\n\t\telse:\r\n\t\t\topt_dict[\"file dir\"] = None\r\n\t\t\r\n\t\topt_dict[\"dict\"] = False\r\n\t\topt_dict[\"lang\"] = None\r\n\t\topt_dict[\"old dir\"] = \"0_old_labfile_clean/\"\r\n\telif opt_dict[\"module\"] == \"3\":\r\n\t\tif len(args) == 2:\r\n\t\t\topt_dict[\"file dir\"] = name_check(args[1])\r\n\t\telse:\r\n\t\t\topt_dict[\"file dir\"] = None\r\n\t\r\n\t# override defaults with options, if necessary\r\n\tfor option, value in opts:\r\n\t\tif option == \"-c\" or option == \"--col\":\r\n\t\t\topt_dict[\"columns\"] = value\r\n\t\telif option == \"-d\" or option == \"--dict\":\r\n\t\t\topt_dict[\"dict\"] = True\r\n\t\telif option == \"-f\" or option == \"--form\":\r\n\t\t\topt_dict[\"format\"] = value\r\n\t\telif option == \"-l\" or option == \"--lang\":\r\n\t\t\topt_dict[\"lang\"] = value\r\n\t\telif option == \"-o\" or option == \"--old\":\r\n\t\t\topt_dict[\"old dir\"] = name_check(value)\r\n\t\r\n\treturn opt_dict"
] | [
"0.68020016",
"0.6262761",
"0.6206423",
"0.6184919",
"0.6182061",
"0.6162515",
"0.6088291",
"0.6074992",
"0.6049868",
"0.60402685",
"0.60306376",
"0.6025602",
"0.60253805",
"0.6003726",
"0.59915304",
"0.59439254",
"0.5930587",
"0.5928856",
"0.59242874",
"0.59233814",
"0.5922619",
"0.59064114",
"0.5893374",
"0.58872",
"0.586611",
"0.5840183",
"0.5840096",
"0.58261186",
"0.58169806",
"0.58018756"
] | 0.71814495 | 0 |
Handle multiple requests, each expected to be a 4-byte length, followed by the LogRecord in pickle format. Logs the record according to whatever policy is configured locally. | def handle(self):
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack('>L', chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle(self):\n while True:\n try:\n chunk = self.connection.recv(4)\n if len(chunk) < 4:\n break\n slen = struct.unpack(\">L\", chunk)[0]\n chunk = self.connection.recv(slen)\n while len(chunk) < slen:\n chunk = chunk + self.connection.recv(slen - len(chunk))\n obj = self.unPickle(chunk)\n msg = obj['msg']\n if type(msg) is str:\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)\n else:\n self.statsThread.addRecord(msg)\n timeDict = msg['time'] \n if timeDict['total'] > LOG_THRESHOLD: \n #obj['msg'] = 'Processed ' + msg['request'] + ' on ' + msg['file'] + ' in ' + ('%.3f' % msg['time']['total']) + ' seconds'\n logMsg = 'Processed ' + msg['request'] + ' on ' + msg['file'] + '. Timing entries in seconds: '\n addComma=False\n for SECTION in self.SECTION_KEYS:\n timeKey=SECTION.strip()\n if timeDict.has_key(timeKey):\n if addComma:\n logMsg += ','\n else:\n addComma = True\n logMsg += ' ' + timeKey + ' ' + ('%.3f' % timeDict[timeKey])\n \n obj['msg'] = logMsg\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)\n except Exception, e:\n import sys, traceback, string\n t, v, tb = sys.exc_info()\n print string.join(traceback.format_exception(t, v, tb))",
"def handle(self):\n while True:\n chunk = self.connection.recv(4)\n if len(chunk) < 4:\n break\n slen = struct.unpack('>L', chunk)[0]\n chunk = self.connection.recv(slen)\n while len(chunk) < slen:\n chunk = chunk + self.connection.recv(slen - len(chunk))\n obj = self.unPickle(chunk)\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)",
"def _log_process(self, log_req):\n rq_size = log_req.multipart_size\n with self._lock:\n if self._payload_size + rq_size >= self.max_payload_size:\n if len(self._batch) > 0:\n self._send_batch()\n self._batch.append(log_req)\n self._payload_size += rq_size\n if len(self._batch) >= self.max_entry_number:\n self._send_batch()",
"def handle(self) -> None:\n while True:\n chunk = self.connection.recv(4) # type: ignore[attr-defined]\n if len(chunk) < 4:\n break\n slen = struct.unpack(\">L\", chunk)[0]\n chunk = self.connection.recv(slen) # type: ignore[attr-defined]\n while len(chunk) < slen:\n chunk = chunk + self.connection.recv(slen - len(chunk)) # type: ignore[attr-defined] # noqa: E501\n obj = self.unPickle(chunk)\n record = logging.makeLogRecord(obj)\n self.handleLogRecord(record)",
"def logRecordHandler(self, logrecord):\n logrecords = self._logrecords\n logrecords.append(logrecord)\n if len(logrecords) > self._queuesize:\n logrecords.pop(0)\n self._logRecordsTotal += 1",
"def _send_batch(self):\n batch = RPLogBatch(self._batch)\n http_request = HttpRequest(\n self.session.post, self._log_endpoint, files=batch.payload,\n verify_ssl=self.verify_ssl)\n batch.http_request = http_request\n self._worker.send(batch)\n self._batch = []\n self._payload_size = helpers.TYPICAL_MULTIPART_FOOTER_LENGTH",
"def emit(self, record):\n standard_log_info = ['name', 'msg', 'args', 'levelname', 'levelno', 'pathname', 'filename', 'module', 'exc_info', 'exc_text', 'stack_info', 'lineno',\n 'funcName', 'created', 'msecs', 'relativeCreated', 'thread', 'threadName', 'processName', 'process']\n for t in range(self.retries):\n try:\n info = {k: str(v) for k, v in record.__dict__.items() if not k.startswith('__') and k not in standard_log_info}\n bod = 'log={}'.format(json.dumps(info))\n self.http_client.fetch(self.addr, method='POST', body=bod, request_timeout=self.request_timeout)\n except Exception as e:\n # Other errors are possible, such as IOError.\n logger.error(str(e))\n time.sleep(self.on_fail_sleep_duration)\n else:\n break\n # http_client.close()\n return",
"def log_request(self, code='-', size='-'):\n if self.server.log_requests:\n BaseHTTPServer.BaseHTTPRequestHandler.log_request(self, code, size)",
"def log_request(self, code='-', size='-'):\n pass",
"def _Dynamic_Flush(self, request, unused_response, request_id):\n rl = self._pending_requests.get(request_id, None)\n if rl is None:\n return\n group = log_service_pb.UserAppLogGroup(request.logs())\n logs = group.log_line_list()\n for log in logs:\n al = self._pending_requests_applogs[request_id].add()\n al.time = log.timestamp_usec()\n al.level = log.level()\n al.message = log.message()",
"def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)",
"def _handle_requests(self, _req_list):\n\n for req in _req_list:\n req_id_elements = req[\"request_id\"].split(\"-\", 1)\n opt = req_id_elements[0]\n req_id = req_id_elements[1]\n Logger.set_req_id(req_id)\n begin_time = datetime.now()\n\n req_body = json.loads(req[\"request\"])\n\n self.logger.debug(\"input request_type = \" + opt)\n self.logger.debug(\"request = \" + json.dumps(req_body, indent=4))\n\n # Check if the same request with prior request.\n (status, result) = self.ahandler.check_history(req[\"request_id\"])\n\n if result is None:\n if opt in (\"create\", \"delete\", \"update\", \"confirm\", \"rollback\"):\n app = self._handle_app(opt, req_id, req_body)\n\n if app is None:\n errstr = \"valet-engine exits due to \" + opt + \" error\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.error(errstr)\n return False\n\n if app.status == \"locked\":\n errstr = \"datacenter is being serviced by another valet\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.info(errstr)\n continue\n\n (status, result) = self._get_json_result(app)\n\n elif opt in (\"group_query\", \"group_create\"):\n # TODO(Gueyoung): group_delete and group_update\n\n (status, result) = self._handle_rule(opt, req_body)\n\n if result is None:\n errstr = \"valet-engine exits due to \" + opt + \" error\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.info(errstr)\n return False\n\n if status[\"status\"] == \"locked\":\n errstr = \"datacenter is locked by the other valet\"\n Logger.get_logger('audit').error(errstr, beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time, statusCode=False)\n self.logger.info(errstr)\n continue\n\n elif opt == \"ping\":\n # To check if the local valet-engine is alive.\n\n if req_body[\"id\"] == self.valet_id:\n self.logger.debug(\"got ping\")\n\n status = {\"status\": \"ok\", \"message\": \"\"}\n result = {}\n else:\n continue\n\n else:\n status = {\"status\": \"failed\", \"message\": \"unknown operation = \" + opt}\n result = {}\n\n self.logger.error(status[\"message\"])\n\n else:\n self.logger.info(\"decision already made\")\n\n # Store final result in memory cache.\n if status[\"message\"] != \"timeout\":\n self.ahandler.record_history(req[\"request_id\"], status, result)\n\n # Return result\n if not self.dbh.return_request(req[\"request_id\"], status, result):\n return False\n\n self.logger.debug(\"output status = \" + json.dumps(status, indent=4))\n self.logger.debug(\" result = \" + json.dumps(result, indent=4))\n\n Logger.get_logger('audit').info(\"done request = \" + req[\"request_id\"], beginTimestamp=begin_time, elapsedTime=datetime.now() - begin_time)\n self.logger.info(\"done request = \" + req[\"request_id\"] + ' ----')\n\n # this should be handled by exceptions so we can log the audit correctly\n if self.lock.done_with_my_turn() is None:\n return False\n\n return True",
"def __call__(self, request):\n request.start_time = time.time()\n\n response = self.get_response(request)\n\n log_data = self.extract_log_info(request=request, response=response)\n logger.info(log_data)\n\n return response",
"def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )",
"def send_logs(self):\n for i in range(30):\n with open('{}-{}.log'.format(self._log_file_path, i), 'a') as log_file:\n for _ in range(self._log_rate):\n log_file.write(self._log_record + '\\n')",
"def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:\n LOGGER.info(f\"logged request: {response.url}\")\n with log_path.open(mode=\"a\", encoding=\"utf-8\") as f:\n all_responses = [response]\n\n # Poll and wait for operations, if applicable\n is_operation_request = bool(\n re.match(re.compile(\".*/api/versioned/v1/operations/.*\"), response.url)\n )\n is_get_request = response.request.method == \"GET\"\n if is_get_request and is_operation_request:\n wait_resp = _collect_operation_calls(response=response)\n all_responses.extend(wait_resp)\n\n all_json = [_response_to_json(r, ip_dict) for r in all_responses]\n f.writelines([f\"{j}\\n\" for j in all_json])",
"def handle(self):\n self.request.recv(1024)\n self.request.sendall(pickle.dumps(self.server.lymphocytes_getter()))",
"def _log_handler(logger_queue):\n while True:\n record = logger_queue.get()\n if record is None:\n break\n logger_ = logging.getLogger(record.name)\n logger_.handle(record)",
"def _generate_logs(self, count, date, remote_host='127.0.0.1', auth_user='paul', request_verb='GET',\n resource='/book/1', protocol='HTTP/1.0', status=200, bytes=20):\n for _ in range(count):\n self.log_queue.append(Log(remote_host, auth_user, date, request_verb, resource, protocol, status, bytes))",
"async def _handle_new_logs(self) -> None:\n async for block in self._new_blocks():\n self._handle_block_data(block)\n logs = self._get_logs_from_block(block.number)\n self.logger.info(\n \"Eth1 Monitor got new eth1 block: %s, number of logs contained in the block: %s\",\n block,\n len(logs),\n )\n self._process_logs(logs, block.number)",
"def dispatch(self):\n handler_start_time = time.time()\n\n logging.info('\\n\\n\\nRequest handler: %r', self)\n count0, count1, count2 = gc.get_count()\n logging.info('gc counts: %d %d %d', count0, count1, count2)\n GC_COUNT.add(count0, {'generation': 0})\n GC_COUNT.add(count1, {'generation': 1})\n GC_COUNT.add(count2, {'generation': 2})\n\n self.mr = monorailrequest.MonorailRequest(self.services)\n\n self.ratelimiter.CheckStart(self.request)\n self.response.headers.add('Strict-Transport-Security',\n 'max-age=31536000; includeSubDomains')\n\n if 'X-Cloud-Trace-Context' in self.request.headers:\n self.mr.profiler.trace_context = (\n self.request.headers.get('X-Cloud-Trace-Context'))\n if trace_service is not None:\n self.mr.profiler.trace_service = trace_service\n\n if self.services.cache_manager:\n # TODO(jrobbins): don't do this step if invalidation_timestep was\n # passed via the request and matches our last timestep\n try:\n with self.mr.profiler.Phase('distributed invalidation'):\n self.services.cache_manager.DoDistributedInvalidation(self.mr.cnxn)\n\n except MySQLdb.OperationalError as e:\n logging.exception(e)\n page_data = {\n 'http_response_code': httplib.SERVICE_UNAVAILABLE,\n 'requested_url': self.request.url,\n }\n self.template = template_helpers.GetTemplate(\n 'templates/framework/database-maintenance.ezt',\n eliminate_blank_lines=self._ELIMINATE_BLANK_LINES)\n self.template.WriteResponse(\n self.response, page_data, content_type='text/html')\n return\n\n try:\n with self.mr.profiler.Phase('parsing request and doing lookups'):\n self.mr.ParseRequest(self.request, self.services)\n\n self.response.headers['X-Frame-Options'] = 'SAMEORIGIN'\n webapp2.RequestHandler.dispatch(self)\n\n except exceptions.NoSuchUserException as e:\n logging.warning('Trapped NoSuchUserException %s', e)\n self.abort(404, 'user not found')\n\n except exceptions.NoSuchGroupException as e:\n logging.warning('Trapped NoSuchGroupException %s', e)\n self.abort(404, 'user group not found')\n\n except exceptions.InputException as e:\n logging.info('Rejecting invalid input: %r', e)\n self.response.status = httplib.BAD_REQUEST\n\n except exceptions.NoSuchProjectException as e:\n logging.info('Rejecting invalid request: %r', e)\n self.response.status = httplib.NOT_FOUND\n\n except xsrf.TokenIncorrect as e:\n logging.info('Bad XSRF token: %r', e.message)\n self.response.status = httplib.BAD_REQUEST\n\n except permissions.BannedUserException as e:\n logging.warning('The user has been banned')\n url = framework_helpers.FormatAbsoluteURL(\n self.mr, urls.BANNED, include_project=False, copy_params=False)\n self.redirect(url, abort=True)\n\n except ratelimiter.RateLimitExceeded as e:\n logging.info('RateLimitExceeded Exception %s', e)\n self.response.status = httplib.BAD_REQUEST\n self.response.body = 'Slow your roll.'\n\n finally:\n self.mr.CleanUp()\n self.ratelimiter.CheckEnd(self.request, time.time(), handler_start_time)\n\n total_processing_time = time.time() - handler_start_time\n logging.warn('Processed request in %d ms',\n int(total_processing_time * 1000))\n\n end_count0, end_count1, end_count2 = gc.get_count()\n logging.info('gc counts: %d %d %d', end_count0, end_count1, end_count2)\n if (end_count0 < count0) or (end_count1 < count1) or (end_count2 < count2):\n GC_EVENT_REQUEST.increment()\n\n if settings.enable_profiler_logging:\n self.mr.profiler.LogStats()\n\n if (self.mr.profiler.trace_context is not None and\n random.random() < settings.trace_fraction):\n self.mr.profiler.ReportTrace()",
"def record(self, response):\n self.get_recorder().record(self.request, response)",
"def process(self, request, **kwargs):\n\n if len(request):\n\n REST_header = \"\"\n REST_verb = \"\"\n str_path = \"\"\n json_payload = \"\"\n\n self.dp.qprint(\"Listener ID - %s: process() - handling request\" % (self.worker_id))\n\n now = datetime.datetime.today()\n str_timeStamp = now.strftime('%Y-%m-%d %H:%M:%S.%f')\n self.dp.qprint(Colors.YELLOW)\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(\"%s incoming data stream\" % (str_timeStamp) )\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(\"len = %d\" % len(request))\n self.dp.qprint(\"***********************************************\")\n self.dp.qprint(Colors.CYAN + \"%s\\n\" % (request.decode()) + Colors.YELLOW)\n self.dp.qprint(\"***********************************************\" + Colors.NO_COLOUR)\n l_raw = request.decode().split('\\n')\n FORMtype = l_raw[0].split('/')[0]\n\n self.dp.qprint('Request = ...')\n self.dp.qprint(l_raw)\n REST_header = l_raw[0]\n REST_verb = REST_header.split()[0]\n str_path = REST_header.split()[1]\n json_payload = l_raw[-1]\n\n # remove trailing '/' if any on path\n if str_path[-1] == '/': str_path = str_path[0:-1]\n\n d_ret = {}\n d_ret['status'] = False\n d_ret['RESTheader'] = REST_header\n d_ret['RESTverb'] = REST_verb\n d_ret['action'] = \"\"\n d_ret['path'] = str_path\n d_ret['receivedByServer'] = l_raw\n\n if REST_verb == 'GET':\n d_ret['GET'] = self.DB_get(path = str_path)\n d_ret['status'] = True\n\n self.dp.qprint('json_payload = %s' % json_payload)\n d_ret['client_json_payload'] = json_payload\n d_ret['client_json_len'] = len(json_payload)\n if len(json_payload):\n d_payload = json.loads(json_payload)\n d_request = d_payload['payload']\n payload_verb = d_request['action']\n if 'meta' in d_request.keys():\n d_meta = d_request['meta']\n d_ret['payloadsize']= len(json_payload)\n\n if payload_verb == 'quit':\n self.dp.qprint('Shutting down server...')\n d_ret['status'] = True\n\n if payload_verb == 'run' and REST_verb == 'PUT':\n d_ret['action'] = payload_verb\n self.processPUT( request = d_request)\n d_ret['status'] = True\n\n if REST_verb == 'POST':\n self.processPOST( request = d_request,\n ret = d_ret)\n return d_ret\n else:\n return False",
"def analysis(N):\n\n http_log_paths = get_http_logs()\n\n httplogs = []\n\n for path in http_log_paths:\n file = path+'/http.log'\n if os.path.isfile(file):\n httplogs.append(file)\n else:\n pass #print(path)\n\n fields = []\n\n for log in httplogs:\n with open(log) as f:\n lines = f.readlines()\n rows = len(lines)\n filesize = sum([len(line) for line in lines])\n\n tss = [] # time series\n methods = []\n uris = []\n uas = []\n request_body_lens = []\n response_body_lens = []\n status_codes = []\n filenames = []\n\n tmp = []\n\n for line in lines[8:len(lines)-1]:\n fs = line.strip().split('\\t')\n\n \"\"\"\n ts = fileds[0]\n uid = fileds[1]\n orig_h = fileds[2]\n orig_p = fileds[3]\n resp_h = fileds[4]\n resp_p = fileds[5]\n trans_depth = fileds[6]\n method = fileds[7]\n host = fileds[8]\n uri = fileds[9]\n referrer = fileds[10]\n user_agent = fileds[11]\n request_body_len = fileds[12]\n response_body_len = fileds[13]\n status_code = fileds[14]\n status_msg = fileds[15]\n info_code = fileds[16]\n info_msg = fileds[17]\n filename = fileds[18]\n tags = fileds[19]\n username = fileds[20]\n password = fileds[21]\n proxied = fileds[22]\n orig_fuids = fileds[23]\n orig_mime_types = fileds[24]\n resp_fuids = fileds[25]\n resp_mime_types = fileds[26]\n\n tss.append(ts)\n methods.append(method)\n uris.append(uri)\n uas.append(user_agent)\n request_body_lens.append(request_body_len)\n response_body_lens.append(response_body_len)\n status_codes.append(status_code)\n filenames.append(filename)\n \"\"\"\n\n tmp.append(fs[N])\n\n #print(log, rows, ','.join(methods))\n\n # time intervals\n #tss_sorted = sorted(map(float,tmp))\n #tss_sorted = map(float, tmp)\n #intervals = map(int,[tss_sorted[i+1]-tss_sorted[i] for i in range(len(tss_sorted)-1)])\n #print('%s %s' % (log, ' '.join(map(str,intervals))))\n #file = urlparse(fs[N]).path.split('/')[-1].split('.')\n #if len(file)>1:\n # tmp.append(file[-1])\n #tmp.append(urlparse(fs[N]).path.split('/')[-1])\n #tmp.append(urlparse(fs[N]).path)\n\n #fields.append(set(tmp))\n #fields.append(intervals)\n fields.append(tmp)\n\n\n dic = {}\n for i in fields:\n for j in i:\n if j in dic:\n dic[j] += 1\n else:\n dic[j] = 1\n ls = sorted(dic.items(), lambda x,y: cmp(x[1], y[1]), reverse = True)\n for i in range(len(ls)):\n print('%s\\t%s' %(ls[i][0], ls[i][1]))\n #print('%s' % join(ls[i][1]))\n\n\n \"\"\"\n col = []\n for i in fields:\n for j in i:\n col.append(j)\n print('%s' % ' '.join(map(str,col)))\n \"\"\"\n\n\n \"\"\"\n dic = {}\n for i in fields:\n for j in i:\n sub = j.split('.')\n if sub[0] in dic:\n dic[sub[0]] += 1\n else:\n dic[sub[0]] = 1\n\n\n if len(sub) > 1:\n if sub[-2]+'.'+sub[-1] in dic:\n dic[sub[-2]+'.'+sub[-1]] += 1\n else:\n dic[sub[-2]+'.'+sub[-1]] = 1\n\n\n ls = sorted(dic.items(), lambda x, y: cmp(x[1], y[1]), reverse=True)\n for i in range(len(ls)):\n print('%s\\t%s' % (ls[i][0], ls[i][1]))\n # print('%s' % join(ls[i][1]))\n\n \"\"\"",
"def fetchLogRecords(self):\n return self.handler.buffer",
"def handle_predictions(predict_fn, request_queue, response_queue):\n loop_times = []\n queue_get_times = []\n handle_times = []\n handle_start_times = []\n # trial_start = datetime.now()\n pred_count = 0\n loop_count = 0\n\n # last_loop_start = datetime.now()\n # loop_dur_file = \"/logs/loop_duration.log\"\n # handle_dur_file = \"/logs/handle_duration.log\"\n # handle_dur_file = \"/logs/handle_duration.log\"\n\n # Field order: clock_time, user time, sys time\n # kernel_measures = False\n # if not os.path.exists(\"/logs\"):\n # os.makedirs(\"/logs\")\n #\n # kernel_instr_file = \"/logs/kernel_measures.csv\"\n\n # with open(loop_dur_file, \"w\") as ld, open(handle_dur_file, \"w\") as hd:\n\n # with open(kernel_instr_file, \"w\") as kd:\n # kd.write(\"wall_clock_secs, user_clock_ticks, kernel_clock_ticks\\n\")\n while True:\n # cur_loop_start = datetime.now()\n # loop_duration = (cur_loop_start - last_loop_start).microseconds\n # loop_times.append(loop_duration)\n # ld.write(\"{}\\n\".format(loop_duration))\n # last_loop_start = cur_loop_start\n\n # t1 = datetime.now()\n prediction_request, recv_time = request_queue.get(block=True)\n # t2 = datetime.now()\n # queue_get_times.append((t2 - t1).microseconds)\n\n # handle_start_times.append(time.time()*1000)\n before_predict_lineage_point = datetime.now()\n # proc_stat_before = check_output([\"cat\", \"/proc/1/stat\"]).strip().split()\n # user_before = int(proc_stat_before[13])\n # sys_before = int(proc_stat_before[14])\n\n\n outputs = predict_fn(prediction_request.inputs)\n # proc_stat_after = check_output([\"cat\", \"/proc/1/stat\"]).strip().split()\n # user_after = int(proc_stat_after[13])\n # sys_after = int(proc_stat_after[14])\n\n after_predict_lineage_point = datetime.now()\n # clock_time = (after_predict_lineage_point - before_predict_lineage_point).total_seconds()\n # user_time = user_after - user_before\n # sys_time = sys_after - sys_before\n # user_time = 0\n # sys_time = 0\n # kd.write(\"{clock},{user},{kernel}\\n\".format(clock=clock_time, user=user_time, kernel=sys_time))\n\n if loop_count <= 50 and loop_count % 10 == 0:\n print((after_predict_lineage_point - before_predict_lineage_point).total_seconds())\n\n pred_count += len(prediction_request.inputs)\n # t3 = datetime.now()\n # handle_times.append((t3 - t2).microseconds)\n # hd.write(\"{}\\n\".format((t3 - t2).microseconds))\n # Type check the outputs:\n if not type(outputs) == list:\n raise PredictionError(\"Model did not return a list\")\n if len(outputs) != len(prediction_request.inputs):\n raise PredictionError(\n \"Expected model to return %d outputs, found %d outputs\" %\n (len(prediction_request.inputs), len(outputs)))\n\n outputs_type = type(outputs[0])\n if outputs_type == np.ndarray:\n outputs_type = outputs[0].dtype\n if outputs_type not in SUPPORTED_OUTPUT_TYPES_MAPPING.keys():\n raise PredictionError(\n \"Model outputs list contains outputs of invalid type: {}!\".\n format(outputs_type))\n\n if outputs_type == str:\n for i in range(0, len(outputs)):\n outputs[i] = unicode(outputs[i], \"utf-8\").encode(\"utf-8\")\n else:\n for i in range(0, len(outputs)):\n outputs[i] = outputs[i].tobytes()\n\n total_length_elements = sum(len(o) for o in outputs)\n\n response = PredictionResponse(prediction_request.msg_id,\n len(outputs), total_length_elements,\n outputs_type)\n for output in outputs:\n response.add_output(output)\n\n response_queue.put((response, recv_time,\n before_predict_lineage_point,\n after_predict_lineage_point))\n # response_queue.put((response, recv_time,\n 
# None,\n # None))\n\n # if len(loop_times) > 1000:\n # print(\"\\nLoop duration: {} +- {}\".format(np.mean(loop_times), np.std(loop_times)))\n # print(\"Request dequeue duration: {} +- {}\".format(np.mean(queue_get_times), np.std(queue_get_times)))\n # print(\"Handle duration: {} +- {}\".format(np.mean(handle_times), np.std(handle_times)))\n # # throughput = float(pred_count) / (datetime.now() - trial_start).total_seconds()\n # # print(\"Throughput: {}\".format(throughput))\n # # ld.flush()\n # # hd.flush()\n # # kd.flush()\n #\n # loop_times = []\n # queue_get_times = []\n # handle_times = []\n # pred_count = 0\n # trial_start = datetime.now()\n\n # if len(handle_start_times) % 200 == 0:\n # print(json.dumps(handle_start_times))\n loop_count += 1\n sys.stdout.flush()\n sys.stderr.flush()",
"def _log_request(self):\n log = self.server.log\n if log:\n if hasattr(log, \"info\"):\n log.info(self.format_request() + '\\n')\n else:\n log.write(self.format_request() + '\\n')",
"def WriteMessageHandlerRequests(self, requests):\n now = rdfvalue.RDFDatetime.Now()\n for r in requests:\n flow_dict = self.message_handler_requests.setdefault(r.handler_name, {})\n cloned_request = r.Copy()\n cloned_request.timestamp = now\n flow_dict[cloned_request.request_id] = cloned_request",
"def too_many_requests_handler(self) -> Callable[[Response], float]:\n if not self._too_many_requests_handler:\n self._too_many_requests_handler = default_too_many_requests_handler\n return self._too_many_requests_handler",
"def log_requests(response):\n ts = strftime('[%Y-%b-%d %H:%M-%S]')\n\n logger.info('Flask: {0} {1} {2} {3} {4} {5}'.\n format(ts, request.remote_addr, request.method, request.scheme, request.full_path, response.status))\n\n return response"
] | [
"0.642383",
"0.6383582",
"0.6285302",
"0.62784356",
"0.6121849",
"0.6010063",
"0.5838576",
"0.55765617",
"0.55357414",
"0.55147916",
"0.54627067",
"0.5431377",
"0.5374189",
"0.53549516",
"0.5341674",
"0.5333902",
"0.53018004",
"0.52858895",
"0.52804816",
"0.52658737",
"0.52444816",
"0.5237724",
"0.5224631",
"0.5218597",
"0.5211417",
"0.5184595",
"0.5169475",
"0.5144879",
"0.51443726",
"0.5126045"
] | 0.6399498 | 1 |
Plot mesh triangles on a given surface | def plotMesh(verts,tris):
x = verts[:,0]
y = verts[:,1]
plt.figure()
plt.gca().set_aspect('equal')
plt.triplot(x, y, tris, 'k-')
plt.title('Unstructured Mesh')
plt.xlabel('distance (m)')
plt.ylabel('distance (m)') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()",
"def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()",
"def plot_mesh(corners):\r\n triangle = tri.Triangulation(corners[:, 0], corners[:, 1])\r\n\r\n refiner = tri.UniformTriRefiner(triangle)\r\n trimesh = refiner.refine_triangulation(subdiv=4)\r\n \r\n plt.figure(figsize=(6, 4))\r\n for i, mesh in enumerate((triangle, trimesh)):\r\n plt.subplot(1, 2, i+1)\r\n plt.triplot(mesh)\r\n plt.axis('off')\r\n plt.axis('equal')",
"def plotSurface(X):\n from mpl_toolkits.mplot3d import Axes3D\n from mpl_toolkits.mplot3d import proj3d\n f=plt.figure()\n ax=f.add_subplot(111,projection='3d')\n xi=np.arange(10,14,0.05)\n yi=np.arange(12,16,0.05)\n z = matplotlib.mlab.griddata(X[:,0], X[:,1], X[:,2], xi, yi, interp='nn')\n x, y = np.meshgrid(xi, yi)\n ax.plot_surface(x, y, z)\n return f",
"def mplot_mesh(meshtriang: df.Mesh) -> Tuple[plt.Figure, Any]:\n fig, ax = plt.subplots(1)\n ax.triplot(meshtriang, 'ko-', lw=1)\n return fig, ax",
"def surfaceRender(nodal_mesh, focus, ax=None):\n\t# If no axes were passed, generate new set of axes\n\tif not ax:\n\t\tfig = mplt.figure()\n\t\tax = fig.add_subplot(111, projection='3d')\n\n\t# Sort the mesh by first 3 columns\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 0].argsort()]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 1].argsort(kind='mergesort')]\n\tnodal_mesh = nodal_mesh[nodal_mesh[:, 2].argsort(kind='mergesort')]\n\t\n\t# Set up number of divisions and calculate e for each division (as a ratio)\n\tnum_div = 20\n\te = [i/num_div for i in range(num_div + 1)]\n\t# Convert angular values from degrees to radians\n\trads = math.pi/180\n\tnodal_mesh[:, 1:3] *= rads\n\t# Store the shapes and sizes of the mesh values\n\tm = nodal_mesh.shape[0]\n\tsize_nodal_nu = np.where(nodal_mesh[:, 2] == 0)[0].size\n\tsize_nodal_phi = m/size_nodal_nu\n\t# Get the mu and theta values from the mesh\n\tnodal_nu = nodal_mesh[:size_nodal_nu, 1]\n\tnodal_phi = nodal_mesh[::size_nodal_nu, 2]\n\t# Convert apex node from prolate to cartesian, then plot with scatter\n\tif min(nodal_nu) == 0:\n\t\tx, y, z = mathhelper.prolate2cart(nodal_mesh[0, 0], nodal_mesh[0, 1], nodal_mesh[0, 2], focus)\n\t\tax.scatter(z, y, -x)\n\t\tstart_nu = 1\n\telse:\n\t\tstart_nu = 0\n\t# Plot circumferential element boundaries\n\tfor i in range(start_nu, size_nodal_nu):\n\t\tfor j in range(int(size_nodal_phi)):\n\t\t\t# Define nodal values for interpolation\n\t\t\tif j == size_nodal_phi-1:\n\t\t\t\tind0 = i\n\t\t\t\tp0 = 2*math.pi\n\t\t\telse:\n\t\t\t\tind0 = (j+1)*size_nodal_nu + i\n\t\t\t\tp0 = nodal_phi[j+1]\n\t\t\tind1 = (j)*size_nodal_nu + i\n\t\t\tp1 = nodal_phi[j]\n\t\t\t# Get mu and dM/dm1\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 3]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 3]\n\t\t\t# Convert to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot the node\n\t\t\tax.scatter(n0z, n0y, -n0x)\n\t\t\t# Plot the arc segments\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine starting point to use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get theta\n\t\t\t\tp_here = p0 - e[k]*(p0 - p1)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, nodal_nu[i], p_here, focus)\n\t\t\t\t# Create vectors\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot segments\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t# Plot longitudinal element boundaries\n\tfor i in range(int(size_nodal_phi)):\n\t\tfor j in range(size_nodal_nu-1):\n\t\t\t# Define nodal values needeed for interpolation\n\t\t\tind0 = i*size_nodal_nu + j\n\t\t\tind1 = ind0 + 1\n\t\t\tn0 = nodal_nu[j]\n\t\t\tn1 = nodal_nu[j+1]\n\t\t\t# Get lambda and dL/de2\n\t\t\tm0 = nodal_mesh[ind0, 0]\n\t\t\tdm0 = nodal_mesh[ind0, 4]\n\t\t\tm1 = nodal_mesh[ind1, 0]\n\t\t\tdm1 = nodal_mesh[ind1, 4]\n\t\t\t# Convert nodal points to cartesian\n\t\t\tn0x, n0y, n0z = mathhelper.prolate2cart(nodal_mesh[ind0, 0], nodal_mesh[ind0, 1], nodal_mesh[ind0, 2], focus)\n\t\t\t# Plot arc\n\t\t\tfor k in range(2, len(e)):\n\t\t\t\t# Determine point to 
use\n\t\t\t\tif k == 2:\n\t\t\t\t\tpt_x, pt_y, pt_z = n0x, n0y, n0z\n\t\t\t\telse:\n\t\t\t\t\tpt_x, pt_y, pt_z = x_here, y_here, z_here\n\t\t\t\t# Get lambda\n\t\t\t\thm0 = 1 - 3*(e[k]**2) + 2*(e[k]**3)\n\t\t\t\thdm0 = e[k]*(e[k] - 1)**2\n\t\t\t\thm1 = (e[k]**2)*(3 - 2*e[k])\n\t\t\t\thdm1 = (e[k]**2)*(e[k] - 1)\n\t\t\t\tm = hm0 * m0 + hdm0 * dm0 + hm1 * m1 + hdm1 * dm1\n\t\t\t\t# Get nu\n\t\t\t\tn_here = n0 + e[k]*(n1-n0)\n\t\t\t\t# Convert to cartesian\n\t\t\t\tx_here, y_here, z_here = mathhelper.prolate2cart(m, n_here, nodal_phi[i], focus)\n\t\t\t\t# Append the vectors for plotting\n\t\t\t\tx = np.append(pt_x, x_here)\n\t\t\t\ty = np.append(pt_y, y_here)\n\t\t\t\tz = np.append(pt_z, z_here)\n\t\t\t\t# Plot the segment\n\t\t\t\tax.plot(z, y, -x, 'k-.')\n\t\t\t\t\n\treturn(ax)",
"def draw_stl_from_mesh(m):\n plt.ion()\n # Create a new plot\n figure = plt.figure()\n axes = mplot3d.Axes3D(figure)\n\n # Render the cube faces\n #for m in meshes:\n axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))\n\n # Auto scale to the mesh size\n scale = m.points.flatten(-1)\n axes.auto_scale_xyz(scale, scale, scale)",
"def create_mesh(self):\n print(\"create_mesh\")\n faces = self.get_faces()\n print(\"num faces: {}\".format(len(faces)))\n\n # TODO: perform face filtering to remove long edges in Z direction\n # filtered_faces = self.get_filtered_faces(faces)\n # print(\"num filtered faces: {}\".format(len(filtered_faces)))\n\n vertices = self.xyz_points.T\n\n # handle texture mappings\n vertex_index_to_texture = []\n for j in range(0, self.height):\n for i in range(0, self.width):\n # vertex_index = (j * self.width) + ij\n w = i / self.width\n h = (self.height - j - 1) / self.height\n vertex_index_to_texture.append(\n (w, h)\n )\n\n # Create material.\n # TODO: make the string/filename randomly generated and unique\n file0 = open(os.path.join(self.args.path, \"triangle_mesh.obj.mtl\"), \"w\") # write mode\n file0.write(\"newmtl material_0\\n\")\n # Save image here.\n cv2.imwrite(os.path.join(self.args.path, \"triangle_mesh.png\"), self.bgr)\n file0.write(\"map_Kd triangle_mesh.png\\n\")\n file0.close()\n\n # https://en.wikipedia.org/wiki/Wavefront_.obj_file\n # https://github.com/mmatl/pyrender/blob/master/examples/models/fuze.obj\n obj_path = os.path.join(self.args.path, \"triangle_mesh.obj\")\n file1 = open(obj_path, \"w\") # write mode\n file1.write(\"mtllib ./triangle_mesh.obj.mtl\\n\")\n for vertex in vertices:\n x, y, z = vertex\n file1.write(\"v {} {} {}\\n\".format(x, y, z))\n file1.write(\"usemtl material_0\\n\")\n for w, h in vertex_index_to_texture:\n file1.write(\"vt {} {}\\n\".format(w, h))\n for face in faces:\n a, b, c = face\n a += 1\n b += 1\n c += 1\n file1.write(\"f {}/{} {}/{} {}/{}\\n\".format(\n a, a, b, b, c, c\n )\n )\n file1.close()\n\n # Load the trimesh from OBJ file.\n trimesh_mesh = trimesh.load(obj_path)\n # trimesh_mesh.show()\n\n mesh = pyrender.Mesh.from_trimesh(trimesh_mesh, smooth=False)\n self.scene = pyrender.Scene(ambient_light=[3.0, 3.0, 3.0])\n\n camera = pyrender.IntrinsicsCamera(\n self.focal_length, self.focal_length, self.width / 2, self.height / 2\n )\n self.camera_pose = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # https://pyrender.readthedocs.io/en/latest/examples/cameras.html#creating-cameras\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html\n r = R.from_rotvec(np.array([0, np.pi, 0]))\n r = R.from_rotvec(np.array([0.0, 0, np.pi])) * r\n matrix = r.as_matrix()\n self.camera_pose[:3, :3] = matrix\n\n light = pyrender.PointLight(\n color=[1.0, 1.0, 1.0],\n intensity=0.0\n )\n\n self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))\n self.nl = pyrender.Node(light=light, matrix=np.eye(4))\n self.nc = pyrender.Node(camera=camera, matrix=np.eye(4))\n self.scene.add_node(self.nm)\n self.scene.add_node(self.nl)\n self.scene.add_node(self.nc)\n\n # Set the pose and show the image.\n temppose = self.extrinsics @ self.camera_pose\n self.scene.set_pose(self.nl, pose=temppose)\n self.scene.set_pose(self.nc, pose=temppose)\n pyrender.Viewer(self.scene, use_raymond_lighting=True,\n viewport_size=(self.width, self.height))",
"def visualize_in_3d(self,**kwargs):\n fig = plt.figure(figsize=(7,7))\n ax = fig.add_subplot(111, projection='3d')\n\n points = np.vstack([\n c.to_matrix() for c in self.contours if c.inclusion\n ])\n points[:,:2] = points[:,:2] * self.scan.pixel_spacing\n\n # Center the points at the origin for \n # spherical coordinates conversion.\n points = points - points.mean(axis=0)\n\n # Triangulate the azimuth and zenith transformation.\n azimuth = np.arctan2(points[:,1],points[:,0])\n zenith = np.arccos(points[:,2] / np.linalg.norm(points,axis=1))\n azi_zen = np.c_[azimuth.flatten(),zenith.flatten()]\n triangles = Delaunay(azi_zen).simplices\n\n # Start the points at 0 on every axis.\n # This lets the axis ticks to be interpreted as length in mm.\n points = points - points.min(axis=0)\n\n ax.set_xlabel('length (mm)')\n ax.set_ylabel('length (mm)')\n ax.set_zlabel('length (mm)')\n\n # Plot the points.\n ax.plot_trisurf(points[:,0], points[:,1], points[:,2],\n triangles=triangles, **kwargs)\n plt.show()",
"def draw_f():\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x_matrix = np.arange(-10, 11, 0.1)\n y_matrix = np.arange(-10, 11, 0.1)\n x_matrix, y_matrix = np.meshgrid(x_matrix, y_matrix)\n # print(x_matrix)\n u_matrix = x_matrix.copy()\n for i in range(x_matrix.shape[0]):\n for j in range(x_matrix.shape[0]):\n u_matrix[i][j] = f(x_matrix[i][j], y_matrix[i][j])\n surf = ax.plot_surface(x_matrix, y_matrix, u_matrix)\n\n plt.show()\n return surf",
"def plot(mesh):\n from scipy.spatial import delaunay_plot_2d\n fig = delaunay_plot_2d(SimpleMesh(mesh))\n ax = fig.gca()\n ax.set_aspect(\"equal\")\n return fig, ax",
"def plot_surface_3D(self, length = 30, fps = 30, **kwargs):\n fig = utils.get_figure(scale = 3)\n ax = fig.add_subplot(111, projection = '3d')\n\n # surface_x = self.xi_1_mesh\n # surface_y = self.xi_2_mesh\n # surface_x, surface_y, surface_z = self.surface()\n xyz = self.surface()\n\n # surface_x, surface_y = np.meshgrid(surface_x, surface_y)\n\n # print(np.shape(surface_x))\n # print(np.shape(surface_y))\n # print(np.shape(surface_z))\n\n control_points_x = np.array([control_point[0] for control_point in self.control_net.values()])\n control_points_y = np.array([control_point[1] for control_point in self.control_net.values()])\n control_points_z = np.array([control_point[2] for control_point in self.control_net.values()])\n\n # x_min = min(np.min(surface_x), np.min(control_points_x))\n # x_max = max(np.max(surface_x), np.max(control_points_x))\n # x_range = np.abs(x_max - x_min)\n #\n # y_min = min(np.min(surface_y), np.min(control_points_y))\n # y_max = max(np.max(surface_y), np.max(control_points_y))\n # y_range = np.abs(y_max - y_min)\n #\n # z_min = min(np.min(surface_z), np.min(control_points_z))\n # z_max = max(np.max(surface_z), np.max(control_points_z))\n # z_range = np.abs(z_max - z_min)\n #\n # ax.set_xlim(x_min - 0.05 * x_range, x_max + 0.05 * x_range)\n # ax.set_ylim(y_min - 0.05 * y_range, y_max + 0.05 * y_range)\n # ax.set_zlim(z_min - 0.05 * z_range, z_max + 0.05 * z_range)\n\n ax.scatter(control_points_x, control_points_y, control_points_z, depthshade = False, **CONTROL_POLYGON_KWARGS)\n\n # print(np.max(surface_x), np.max(surface_y), np.max(surface_z))\n # print(np.min(surface_x), np.min(surface_y), np.min(surface_z))\n # print(surface_x)\n # print(surface_y)\n # print(surface_z)\n xyz = np.reshape(xyz, (-1, 3))\n print(xyz.shape)\n x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]\n ax.scatter(x, y, z)\n # ax.plot_trisurf(\n # x, y, z,\n # cmap = plt.get_cmap('viridis'),\n # linewidth = 0,\n # antialiased = True,\n # )\n # ax.plot_surface(surface_x, surface_y, surface_z, rstride = 1, cstride = 1)\n # ax.plot_trisurf(surface_x, surface_y, surface_z)\n # ax.plot_trisurf(surface_x, surface_y, surface_z, **CURVE_KWARGS)\n\n ax.axis('off')\n\n ax.view_init(elev = 45, azim = 0) # note that this resets ax.dist to 10, so we can't use it below\n ax.dist = 7.5 # default is 10, so zoom in a little because there's no axis to take up the rest of the space\n\n plt.show()\n utils.save_current_figure(**kwargs)\n\n ### ANIMATION ###\n\n frames = length * fps\n\n writer = anim.writers['ffmpeg'](fps = fps, bitrate = 2000) # don't need a very high bitrate\n\n def animate(frame):\n print(frame, frames, frame / frames)\n ax.azim = 360 * frame / frames # one full rotation\n return [] # must return the list of artists we modified (i.e., nothing, since all we did is rotate the view)\n\n ani = anim.FuncAnimation(fig, animate, frames = frames, blit = True)\n ani.save(f\"{os.path.join(kwargs['target_dir'], kwargs['name'])}.mp4\", writer = writer)\n\n plt.close()",
"def split_triangles(mesh):\n triangles = np.asarray(mesh.triangles).copy()\n vertices = np.asarray(mesh.vertices).copy()\n\n triangles_3 = np.zeros_like(triangles)\n vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)\n\n for index_triangle, t in enumerate(triangles):\n index_vertex = index_triangle * 3\n vertices_3[index_vertex] = vertices[t[0]]\n vertices_3[index_vertex + 1] = vertices[t[1]]\n vertices_3[index_vertex + 2] = vertices[t[2]]\n\n triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)\n\n mesh_return = deepcopy(mesh)\n mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)\n mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)\n mesh_return.triangle_normals = mesh.triangle_normals\n mesh_return.paint_uniform_color([0.5, 0.5, 0.5])\n return mesh_return",
"def plotWholeRoom(mesh):\r\n fig = plt.figure()\r\n ax = fig.gca(projection='3d')\r\n X = np.arange(0, mesh.xLength+mesh.meshsize, mesh.meshsize)\r\n Y = np.arange(0, mesh.yLength+mesh.meshsize, mesh.meshsize)\r\n X, Y = np.meshgrid(X,Y)\r\n numberOfXNodes = mesh.x_res#round(mesh.xLength/mesh.meshsize)+1\r\n numberOfYNodes = mesh.y_res#round(mesh.yLength/mesh.meshsize)+1\r\n Z = np.array([[mesh.grid[i,j].funcVal for i in range(numberOfYNodes)] for j in range(numberOfXNodes)])\r\n if mesh.y_res==2:\r\n print()\r\n surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,\r\n linewidth=0, antialiased=False)\r\n # add vmin=4, vmax=41, to define lower and upper value for the color-scheme\r\n # set limits for z-axis\r\n ax.set_zlim(np.amin(Z)-mesh.meshsize, np.amax(Z)+mesh.meshsize)\r\n # don't know what these two lines are for\r\n # x.zaxis.set_major_locator(LinearLocator(10))\r\n # ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))\r\n # don't know what these two lines are for\r\n fig.colorbar(surf, shrink=0.5, aspect=5)\r\n plt.show() \r\n return fig",
"def surf_plot(x, y, z, filename, title = None, xlabel = None, ylabel = None, zlabel = None, elev = 0, azim = 0, **surf_kwargs):\n # Checking that the x- and y- and z- inputs are equal in length \n if len(x) != len(y) != len(z):\n raise LengthError()\n\n fig = plt.figure() # Creates blank figure\n ax = fig.gca(projection='3d') # Creating 3-dimensional axes\n fig.set_size_inches(18, 10) # Sets figure size\n\n # Plotting the surface - specifying the colormap, and setting the surface to opaque (with antialiased = False)\n ax.plot_trisurf(x, y, z, cmap = cm.coolwarm, linewidth=0, antialiased=False, **surf_kwargs) \n\n # Setting plot parameters\n ax.set_title(title, fontsize = 24, pad = 15)\n ax.set_xlabel(xlabel, fontsize=18, labelpad = 15)\n ax.set_ylabel(ylabel, fontsize=18, labelpad = 15)\n ax.set_zlabel(zlabel, fontsize=18, labelpad = 15)\n ax.tick_params(axis='both', which='major', pad=10)\n ax.set_zlim(0, 1.0) # z-axis limits set to [0,1] as the z-axis refers to probability in our case.\n\n ax.view_init(elev=elev, azim=azim) # Sets 'camera angle' of surface plot, for saving\n # f-string allows save filepath to be set inside the plt.savefig() function\n plt.savefig(f'{os.path.join(plot_path,filename)}.pdf', dpi = 200) # Saving the plot in the 'plots' folder",
"def plot_3d_heads(ax, vertices, faces):\n # extract vertices coordinates\n x_V = vertices[:, 2]\n y_V = vertices[:, 0]\n z_V = vertices[:, 1]\n\n # plot link between vertices\n for F in range(len(faces)):\n V0 = faces[F, 0]\n V1 = faces[F, 1]\n V2 = faces[F, 2]\n V3 = faces[F, 3]\n ax.plot([x_V[V0], x_V[V1]],\n [y_V[V0], y_V[V1]],\n [z_V[V0], z_V[V1]],\n '-', color= 'grey', linewidth=0.3)\n ax.plot([x_V[V1], x_V[V2]],\n [y_V[V1], y_V[V2]],\n [z_V[V1], z_V[V2]],\n '-', color= 'grey', linewidth=0.3)\n ax.plot([x_V[V2], x_V[V3]],\n [y_V[V2], y_V[V3]],\n [z_V[V2], z_V[V3]],\n '-', color= 'grey', linewidth=0.3)\n ax.plot([x_V[V3], x_V[V1]],\n [y_V[V3], y_V[V1]],\n [z_V[V3], z_V[V1]],\n '-', color= 'grey', linewidth=0.3)",
"def MeshPyTri(points,facets,*args,**kwargs):\n info = triangle.MeshInfo()\n info.set_points(points)\n info.set_facets(facets)\n\n return triangle.build(info,*args,**kwargs)",
"def display_facet(model_name, vertices, faces, plot_type, display_normals=False, scale=0.2):\n # Separate the coordinates of the vertices\n x = vertices[:, 0]\n y = vertices[:, 1]\n z = vertices[:, 2]\n\n # Display the model\n ax = Axes3D(plt.figure())\n if plot_type == 'Facet':\n ax.plot_trisurf(x, y, z, triangles=faces, color=(1, 1, 1, 1), edgecolor='gray')\n elif plot_type == 'Wireframe':\n ax.plot_trisurf(x, y, z, triangles=faces, color='none', edgecolor='black')\n ax.grid(True)\n set_equal(ax)\n\n ax.set_title(model_name, size='14')\n ax.set_xlabel('X', size='12')\n ax.set_ylabel('Y', size='12')\n ax.set_zlabel('Z', size='12')\n\n # Set the tick label size\n ax.tick_params(labelsize=12)\n\n if display_normals:\n\n # Vector from origin to vertices\n r = zeros([vertices.shape[0], 3])\n\n for i in range(vertices.shape[0]):\n r[i] = [vertices[i][0], vertices[i][1], vertices[i][2]]\n\n for i in range(faces.shape[0]):\n a = r[faces[i][1]] - r[faces[i][0]]\n b = r[faces[i][2]] - r[faces[i][1]]\n\n # Outward normal\n normal = cross(a, b) + 0.\n\n # Scale the size of the arrow to be displayed\n normal *= scale\n\n # Put the arrow at the center of the facet\n mean_r = (r[faces[i][0]] + r[faces[i][1]] + r[faces[i][2]]) / 3.0\n\n # Get the arrow for the normal\n arrow = Arrow3D([mean_r[0], mean_r[0] + normal[0]], [mean_r[1], mean_r[1] + normal[1]],\n [mean_r[2], mean_r[2] + normal[2]], mutation_scale=10, lw=1, arrowstyle=\"-|>\", color=\"r\")\n ax.add_artist(arrow)\n\n plt.show()",
"def plot(vert, triangles):\n print \"plotting using mayavi...\"\n print \"unpickling....\"\n vert = cPickle.loads(vert)\n triangles = cPickle.loads(triangles)\n print \" done.\"\n print \"converting data...\"\n x = vert[:, 0]\n y = vert[:, 1]\n z = zeros(len(y))\n t = vert[:, 2]\n print \" done.\"\n def doit():\n global s\n global iter\n if iter == 0:\n print \"plotting the triangular mesh...\"\n s = mlab.triangular_mesh(x, y, z, triangles, scalars=t)\n print \" done.\"\n print \"adjusting view...\"\n mlab.view(0, 0)\n print \" done.\"\n else:\n print \"changing the source...\"\n # This doesn't work due to a bug in mayavi/VTK:\n # http://github.com/certik/mhd-hermes/issues#issue/1\n #s.mlab_source.reset(x=x, y=y, z=z, triangles=triangles, scalars=t)\n # so until this is fixed, let's call triangular_mesh and delete the\n # old mesh (this is slow but works):\n scene = mlab.gcf().scene\n scene.disable_render = True\n s = mlab.triangular_mesh(x, y, z, triangles, scalars=t)\n mlab.get_engine().scenes[0].children[:1] = []\n scene.disable_render = False\n print \" done.\"\n iter += 1\n gui_lock.acquire()\n doit()\n gui_lock.release()",
"def plot3surface( pot, **kwargs ): \n \n fig = plt.figure( figsize = (8., 8.) ) \n gs = matplotlib.gridspec.GridSpec( 3,2, wspace=0.2) \n \n # Make a list with three perpendicular directions which \n # will define the three surface cuts \n perp = [(np.pi/2., 0.), (np.pi/2., -np.pi/2.), (0., -1.*np.pi/2.) ]\n \n # Iterate to plot the three surface cuts\n yMin = 1e16\n yMax = -1e16 \n Ims = []\n for i in range(3):\n ax0 = fig.add_subplot( gs[i,0], projection='3d')\n ax1 = fig.add_subplot( gs[i,1]) \n \n T0, T1, X, Y, Z = surfcut_points( normal = perp[i], \\\n ax0=ax0, **kwargs ) \n \n EVAL = pot.evalpotential(X,Y,Z)\n im = ax1.pcolormesh( T0, T1, EVAL, \\\n cmap=plt.get_cmap('jet') ) \n plt.axes( ax1 ) \n cbar = plt.colorbar(im)\n cbar.set_label( pot.unitlabel, rotation=0) \n \n ymin = EVAL.min()\n ymax = EVAL.max()\n \n Ims.append(im) \n if ymin < yMin : yMin = ymin\n if ymax > yMax : yMax = ymax \n \n for im in Ims:\n im.set_clim( vmin=yMin, vmax=yMax)",
"def plot_surface(\n trj: TrajaDataFrame,\n bins: Optional[Union[int, tuple]] = None,\n cmap: str = \"viridis\",\n **surfaceplot_kws: dict,\n) -> Figure:\n\n after_plot_args, surfaceplot_kws = _get_after_plot_args(**surfaceplot_kws)\n\n X, Y, U, V = coords_to_flow(trj, bins)\n Z = np.sqrt(U * U + V * V)\n\n fig = plt.figure()\n ax = fig.gca(projection=\"3d\")\n ax.plot_surface(\n X, Y, Z, cmap= cmap, linewidth=0, **surfaceplot_kws\n )\n\n ax = _label_axes(trj, ax)\n try:\n ax.set_aspect(\"equal\")\n except NotImplementedError:\n # 3D\n pass\n\n _process_after_plot_args(**after_plot_args)\n return ax",
"def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)",
"def qp(F, V):\n import matplotlib.pyplot\n from mpl_toolkits.mplot3d import Axes3D\n #\n # Plot the surface\n fig = matplotlib.pyplot.figure()\n axs = fig.add_subplot(1,1,1, projection=\"3d\")\n axs.plot_trisurf(V[:,0], V[:,1], V[:,2], triangles=F)\n #\n # Label the axes and set them equal\n axs.set_xlabel(\"x\")\n axs.set_ylabel(\"y\")\n axs.set_zlabel(\"z\")\n axs.axis(\"equal\")\n #\n # And show the figure\n matplotlib.pyplot.show()\n return fig",
"def plotSurface(surfaceFile, comp=2, points=False, tris=False,\n profile=False, ax=None, annotate=True, norm=None,xscale=1, yscale=1):\n verts,data,tris = load_h5(surfaceFile)\n\n if comp==3: #radial displacements\n z = np.hypot(data[:,:,0], data[:,:,1]).flatten()\n else:\n z = data[:,:,comp].flatten()\n #z = data[:,:,comp].flatten()\n x = verts[:,0] / xscale\n y = verts[:,1] / yscale\n\n #NOTE: need to change grid for linear spacing to work properly\n xi = np.linspace(x.min(), x.max(), x.size)\n yi = np.linspace(y.min(), y.max(), y.size)\n zi = griddata(x,y,z, xi,yi, interp='nn') #'nn'\n\n #NOTE: getting error message here...\n # linear interpolation requires exactly the same limits\n #xi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #yi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)\n #zi = griddata(x,y,z, xi,yi, interp='linear') #'nn'\n #ValueError: output grid must have constant spacing when using interp='linear'\n\n if ax==None:\n plt.figure()\n else:\n ax = plt.axes(ax)\n\n #plt.pcolor(xi, yi, zi, cmap=plt.cm.jet) #Very slow...\n x1, x2, y1, y2 = [x.min(), x.max(), y.min(), y.max()]\n im = plt.imshow(zi, cmap=plt.cm.jet, norm=norm, extent=[x1, x2, y1, y2])\n\n if annotate:\n compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}\n plt.title('{} Displacement'.format(compdict[comp]))\n plt.xlabel('Distance [m]')\n plt.ylabel('Distance [m]')\n cb = plt.colorbar()\n cb.set_label('[m]')\n\n if points:\n plt.plot(x,y,'k.')\n\n if type(tris) is np.ndarray:\n plt.triplot(x, y, tris, 'k-')\n\n # EW profile line through the x-axis\n if profile:\n plt.axhline(linewidth=2, color='r')\n Zi = zi[x.size/2,:]\n plt.figure()\n plt.plot(xi, Zi, 'b.-')\n plt.title('Profile')\n plt.xlabel('Distance [m]')\n plt.ylabel('{} Displacement [m]'.format(compdict[comp]))\n\n return im",
"def plot_mesh_function(mesh, f, title=\"\", colormap = \"hot\", edges = False, mybounds = [], myticks = []) :\n if mesh.dimension() == 1 :\n # get the mesh points\n x = mesh_axes(mesh)\n # plot the map\n plt.plot(x, f)\n \n elif mesh.dimension() == 2 :\n\n # Get the mesh axes and then make a grid of them for plotting.\n x, y = mesh_axes(mesh)\n X, Y = np.meshgrid(x, y)\n # Reshape the function\n f = f.reshape(mesh.number_cells_x(), mesh.number_cells_y())\n if edges :\n plt.pcolor(X, Y, f, cmap=colormap, edgecolors='k')\n else :\n plt.pcolor(X, Y, f, cmap=colormap)\n plt.axis(\"scaled\") \n plt.xlabel(\"x [cm]\")\n plt.ylabel(\"y [cm]\")\n if len(myticks) :\n cbar = plt.colorbar(boundaries=mybounds,ticks=myticks)\n else : \n cbar = plt.colorbar()\n else :\n print \"not ready for 3d\"\n return\n plt.title(title)\n # show the plot\n plt.show()",
"def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img",
"def show(self):\n from matplotlib import pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n\n fig = plt.figure()\n ax = Axes3D(fig)\n pos = self.cluster.get_positions()\n from itertools import combinations\n for tri in self.mesh.simplices:\n for comb in combinations(tri, 2):\n x1 = pos[comb[0], 0]\n x2 = pos[comb[1], 0]\n y1 = pos[comb[0], 1]\n y2 = pos[comb[1], 1]\n z1 = pos[comb[0], 2]\n z2 = pos[comb[1], 2]\n ax.plot([x1, x2], [y1, y2], zs=[z1, z2], color=\"black\")\n plt.show()",
"def trisurf(\n x,\n y,\n z,\n simplices,\n show_colorbar,\n edges_color,\n scale,\n colormap=None,\n color_func=None,\n plot_edges=False,\n x_edge=None,\n y_edge=None,\n z_edge=None,\n facecolor=None,\n):\n # numpy import check\n if not np:\n raise ImportError(\"FigureFactory._trisurf() requires \" \"numpy imported.\")\n points3D = np.vstack((x, y, z)).T\n simplices = np.atleast_2d(simplices)\n\n # vertices of the surface triangles\n tri_vertices = points3D[simplices]\n\n # Define colors for the triangle faces\n if color_func is None:\n # mean values of z-coordinates of triangle vertices\n mean_dists = tri_vertices[:, :, 2].mean(-1)\n elif isinstance(color_func, (list, np.ndarray)):\n # Pre-computed list / array of values to map onto color\n if len(color_func) != len(simplices):\n raise ValueError(\n \"If color_func is a list/array, it must \"\n \"be the same length as simplices.\"\n )\n\n # convert all colors in color_func to rgb\n for index in range(len(color_func)):\n if isinstance(color_func[index], str):\n if \"#\" in color_func[index]:\n foo = clrs.hex_to_rgb(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n if isinstance(color_func[index], tuple):\n foo = clrs.convert_to_RGB_255(color_func[index])\n color_func[index] = clrs.label_rgb(foo)\n\n mean_dists = np.asarray(color_func)\n else:\n # apply user inputted function to calculate\n # custom coloring for triangle vertices\n mean_dists = []\n for triangle in tri_vertices:\n dists = []\n for vertex in triangle:\n dist = color_func(vertex[0], vertex[1], vertex[2])\n dists.append(dist)\n mean_dists.append(np.mean(dists))\n mean_dists = np.asarray(mean_dists)\n\n # Check if facecolors are already strings and can be skipped\n if isinstance(mean_dists[0], str):\n facecolor = mean_dists\n else:\n min_mean_dists = np.min(mean_dists)\n max_mean_dists = np.max(mean_dists)\n\n if facecolor is None:\n facecolor = []\n for index in range(len(mean_dists)):\n color = map_face2color(\n mean_dists[index], colormap, scale, min_mean_dists, max_mean_dists\n )\n facecolor.append(color)\n\n # Make sure facecolor is a list so output is consistent across Pythons\n facecolor = np.asarray(facecolor)\n ii, jj, kk = simplices.T\n\n triangles = graph_objs.Mesh3d(\n x=x, y=y, z=z, facecolor=facecolor, i=ii, j=jj, k=kk, name=\"\"\n )\n\n mean_dists_are_numbers = not isinstance(mean_dists[0], str)\n\n if mean_dists_are_numbers and show_colorbar is True:\n # make a colorscale from the colors\n colorscale = clrs.make_colorscale(colormap, scale)\n colorscale = clrs.convert_colorscale_to_rgb(colorscale)\n\n colorbar = graph_objs.Scatter3d(\n x=x[:1],\n y=y[:1],\n z=z[:1],\n mode=\"markers\",\n marker=dict(\n size=0.1,\n color=[min_mean_dists, max_mean_dists],\n colorscale=colorscale,\n showscale=True,\n ),\n hoverinfo=\"none\",\n showlegend=False,\n )\n\n # the triangle sides are not plotted\n if plot_edges is False:\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, colorbar]\n else:\n return [triangles]\n\n # define the lists x_edge, y_edge and z_edge, of x, y, resp z\n # coordinates of edge end points for each triangle\n # None separates data corresponding to two consecutive triangles\n is_none = [ii is None for ii in [x_edge, y_edge, z_edge]]\n if any(is_none):\n if not all(is_none):\n raise ValueError(\n \"If any (x_edge, y_edge, z_edge) is None, \" \"all must be None\"\n )\n else:\n x_edge = []\n y_edge = []\n z_edge = []\n\n # Pull indices we care about, then add a None column to separate tris\n ixs_triangles = [0, 1, 2, 
0]\n pull_edges = tri_vertices[:, ixs_triangles, :]\n x_edge_pull = np.hstack(\n [pull_edges[:, :, 0], np.tile(None, [pull_edges.shape[0], 1])]\n )\n y_edge_pull = np.hstack(\n [pull_edges[:, :, 1], np.tile(None, [pull_edges.shape[0], 1])]\n )\n z_edge_pull = np.hstack(\n [pull_edges[:, :, 2], np.tile(None, [pull_edges.shape[0], 1])]\n )\n\n # Now unravel the edges into a 1-d vector for plotting\n x_edge = np.hstack([x_edge, x_edge_pull.reshape([1, -1])[0]])\n y_edge = np.hstack([y_edge, y_edge_pull.reshape([1, -1])[0]])\n z_edge = np.hstack([z_edge, z_edge_pull.reshape([1, -1])[0]])\n\n if not (len(x_edge) == len(y_edge) == len(z_edge)):\n raise exceptions.PlotlyError(\n \"The lengths of x_edge, y_edge and \" \"z_edge are not the same.\"\n )\n\n # define the lines for plotting\n lines = graph_objs.Scatter3d(\n x=x_edge,\n y=y_edge,\n z=z_edge,\n mode=\"lines\",\n line=graph_objs.scatter3d.Line(color=edges_color, width=1.5),\n showlegend=False,\n )\n\n if mean_dists_are_numbers and show_colorbar is True:\n return [triangles, lines, colorbar]\n else:\n return [triangles, lines]",
"def plot3d(self):\n plot_rupture_wire3d(self)",
"def extract_triangles(mesh, materials_list):\n tri_list = []\n do_uv = bool(mesh.tessface_uv_textures)\n\n for mat in materials_list:\n for i, face in enumerate(mesh.tessfaces):\n f_v = face.vertices\n if mesh.materials[face.material_index].name != mat: continue\n\n uf = mesh.tessface_uv_textures.active.data[i] if do_uv else None\n\n fmt = 0\n if(do_uv): fmt = face.material_index\n\n if do_uv:\n f_uv = uf.uv\n\n if len(f_v) == 3:\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n else: new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n tri_list.append(new_tri)\n\n else: # it's a quad\n new_tri = tri_wrapper((f_v[0], f_v[1], f_v[2]), fmt)\n new_tri_2 = tri_wrapper((f_v[0], f_v[2], f_v[3]), fmt)\n\n if (do_uv):\n new_tri.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[1]), uv_key(f_uv[2])\n new_tri_2.faceuvs = uv_key(f_uv[0]), uv_key(f_uv[2]), uv_key(f_uv[3])\n else:\n new_tri.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n new_tri_2.faceuvs = uv_key((0.0,0.0)), uv_key((1.0,0.0)), uv_key((0.0,1.0))\n\n tri_list.append(new_tri)\n tri_list.append(new_tri_2)\n\n return tri_list"
] | [
"0.7586179",
"0.73376596",
"0.7131",
"0.68556476",
"0.67790115",
"0.670109",
"0.66954625",
"0.643024",
"0.63897085",
"0.6355384",
"0.6306112",
"0.62642723",
"0.62621355",
"0.62500685",
"0.6235236",
"0.62293386",
"0.6221899",
"0.62194383",
"0.6191307",
"0.6178185",
"0.6175373",
"0.61661905",
"0.6157867",
"0.6137796",
"0.6130584",
"0.61196977",
"0.60764545",
"0.6067864",
"0.6058667",
"0.6008698"
] | 0.75149196 | 1 |
Plot interpolated image of surface displacements, optionally show original points | def plotSurface(surfaceFile, comp=2, points=False, tris=False,
                profile=False, ax=None, annotate=True, norm=None,xscale=1, yscale=1):
    verts,data,tris = load_h5(surfaceFile)

    if comp==3: #radial displacements
        z = np.hypot(data[:,:,0], data[:,:,1]).flatten()
    else:
        z = data[:,:,comp].flatten()
    #z = data[:,:,comp].flatten()
    x = verts[:,0] / xscale
    y = verts[:,1] / yscale

    #NOTE: need to change grid for linear spacing to work properly
    xi = np.linspace(x.min(), x.max(), x.size)
    yi = np.linspace(y.min(), y.max(), y.size)
    zi = griddata(x,y,z, xi,yi, interp='nn') #'nn'

    #NOTE: getting error message here...
    # linear interpolation requires exactly the same limits
    #xi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)
    #yi=np.arange(-15000.0,15000.0+1e-14,30000.0/x.size)
    #zi = griddata(x,y,z, xi,yi, interp='linear') #'nn'
    #ValueError: output grid must have constant spacing when using interp='linear'

    if ax==None:
        plt.figure()
    else:
        ax = plt.axes(ax)

    #plt.pcolor(xi, yi, zi, cmap=plt.cm.jet) #Very slow...
    x1, x2, y1, y2 = [x.min(), x.max(), y.min(), y.max()]
    im = plt.imshow(zi, cmap=plt.cm.jet, norm=norm, extent=[x1, x2, y1, y2])

    if annotate:
        compdict = {0:'Ux',1:'Uy',2:'Uz',3:'Ur'}
        plt.title('{} Displacement'.format(compdict[comp]))
        plt.xlabel('Distance [m]')
        plt.ylabel('Distance [m]')
        cb = plt.colorbar()
        cb.set_label('[m]')

    if points:
        plt.plot(x,y,'k.')

    if type(tris) is np.ndarray:
        plt.triplot(x, y, tris, 'k-')

    # EW profile line through the x-axis
    if profile:
        plt.axhline(linewidth=2, color='r')
        Zi = zi[x.size/2,:]
        plt.figure()
        plt.plot(xi, Zi, 'b.-')
        plt.title('Profile')
        plt.xlabel('Distance [m]')
        plt.ylabel('{} Displacement [m]'.format(compdict[comp]))

    return im | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def imshow_surface(self):\n plt.imshow(self.z)\n plt.colorbar()\n plt.show()",
"def plot(model, center, extent, outname):\n # define model grid\n xg = np.linspace(-extent, extent, model.shape[0])\n yg = xg.copy()\n interp_func = RectBivariateSpline(xg, yg, model)\n\n x = np.array([-2, -1, 0, 1, 2]) + center[0]\n y = np.array([-2, -1, 0, 1, 2]) + center[1]\n psf = interp_func(x, y)\n\n x, y = np.meshgrid(x, y)\n f = pl.figure(figsize=(10, 5))\n\n pl.gray()\n ax1 = pl.subplot(121)\n ax1.imshow(model, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n ax1.plot(x, y, 's', mec='r', mfc='none', mew=2)\n\n pl.xlim(-2.5, 2.5)\n pl.ylim(-2.5, 2.5)\n ax2 = pl.subplot(122)\n ax2.imshow(psf, interpolation='nearest', origin='lower',\n extent=(-extent, extent, -extent, extent),\n norm=LogNorm(vmin=model.min(), vmax=model.max()))\n\n ax2.set_xticks([-2, -1, 0, 1, 2])\n ax2.set_yticks([-2, -1, 0, 1, 2])\n ax2.set_xticklabels(['%0.3f' % v for v in x[0]])\n ax2.set_yticklabels(['%0.3f' % v for v in y[:, 0]])\n\n coordsA, coordsB = \"data\", \"data\"\n pixels = np.array([[0.0, 0.0], [2., 2.], [-1., -1.]])\n locs = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, -0.5]])\n rads = [0.15, 0.25, -0.25]\n for i, p in enumerate(pixels):\n xy1 = p + center\n xy2 = p + locs[i]\n con = ConnectionPatch(xyA=xy2, xyB=xy1, coordsA=coordsA,\n coordsB=coordsB, axesA=ax2, axesB=ax1,\n arrowstyle=\"<-, head_length=1.2, head_width=0.8\", \n shrinkB=5,\n connectionstyle='arc3, rad=%s' % rads[i],\n color='r', lw=2)\n ax2.add_artist(con)\n ax2.plot(p[0], p[1], 's', mfc='none', mec='r', mew=2, ms=50)\n\n #pl.xlim(-2.5, 2.5)\n #pl.ylim(-2.5, 2.5)\n f.savefig(outname)",
"def plot_img():\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)",
"def show_env(self, img):\n plt.figure(1)\n plt.subplot(111)\n plt.imshow(img, interpolation=\"nearest\")\n plt.show()",
"def plot_img(X: np.ndarray, **kwargs):\n kwargs.setdefault('origin', 'lower') # Sane default\n plt.imshow(X, **kwargs)",
"def visualizeObs():\n fcontourf(fObs, [-2, 2], [-1, 1], [0, 10])",
"def show(image):\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def plot_slice(self,res):\n x = np.linspace(0,1,res)\n y = np.linspace(0,1,res)\n X,Y = np.meshgrid(x,y)\n plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(X,Y,abs(self.psi)[:,:,math.floor(res/2)])\n plt.show()",
"def interpPlot(self):\n self.expInt, a = self.interpData(None, self.expData)\n self.simInt, b = self.interpData(self.optimSim)\n self.residual = abs(self.expInt.data - self.simInt.data)\n\n plt.figure()\n self.sasPlot(self.expInt, sim=self.simInt.data, resid=self.residual)\n\n return",
"def P_AI_Rocky2(in_dict):\n # START\n fs = 16\n plt.rc('font', size=fs)\n fig = plt.figure(figsize=(8,12))\n ds = nc.Dataset(in_dict['fn'])\n\n # PLOT CODE\n aa = [-122.8, -122.54, 47.92, 48.22]\n # import cmocean\n # cmap = cmocean.cm.speed\n # cmap = 'RdYlBu_r'\n cmap = 'Spectral_r'\n\n from warnings import filterwarnings\n filterwarnings('ignore') # skip some warning messages\n \n # plot Code\n \n # calculate speed\n uu = ds['u'][0, -1, :, :]\n vv = ds['v'][0, -1, :, :]\n u = zfun.fillit(uu)\n v = zfun.fillit(vv)\n u[np.isnan(u)] = 0\n v[np.isnan(v)] = 0\n # interpolate to the clipped rho grid\n ur = (u[1:-1,1:] + u[1:-1,:-1])/2\n vr = (v[1:,1:-1] + v[:-1,1:-1])/2\n spd = np.sqrt(ur**2 + vr**2)\n spd[spd==0] = np.nan\n \n G = zrfun.get_basic_info(in_dict['fn'], only_G=True)\n \n # panel 1\n ax = fig.add_subplot(111)\n cs = plt.pcolormesh(G['lon_psi'], G['lat_psi'], spd, cmap=cmap,\n vmin=0, vmax=1.5)\n tstr = (r'Admiralty Inlet Surface Speed ($m\\ s^{-1}$)')\n #pfun.add_bathy_contours(ax, ds, txt=True)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_ylabel('Latitude')\n ax.set_title(tstr)\n pfun.add_info(ax, in_dict['fn'])\n ax.set_xticks([-122.8, -122.7, -122.6])\n ax.set_yticks([48, 48.1, 48.2])\n \n # Inset colorbar\n from mpl_toolkits.axes_grid1.inset_locator import inset_axes\n cbaxes = inset_axes(ax, width=\"4%\", height=\"30%\", loc='upper right', borderpad=3)\n fig.colorbar(cs, cax=cbaxes, orientation='vertical')\n \n #pfun.add_bathy_contours(ax, ds)\n pfun.add_coast(ax)\n ax.axis(aa)\n pfun.dar(ax)\n ax.set_xlabel('Longitude')\n ax.set_title(tstr)\n \n pfun.add_velocity_vectors(ax, ds, in_dict['fn'], v_scl=100, v_leglen=1,\n nngrid=80, zlev='top', center=(.1,.05))\n \n fig.tight_layout()\n # FINISH\n ds.close()\n if len(in_dict['fn_out']) > 0:\n plt.savefig(in_dict['fn_out'])\n plt.close()\n else:\n plt.show()\n plt.rcdefaults()",
"def InterpolateSurfaceVectorsWithLine():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Centre Line...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertCentreLine(Centroids1,Vectors1,50)\r\n print('Centre Line Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Line\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfaceLineVectorInterpolation.dat\",Vectors2,header = header,comments='')",
"def plot_interpolation(self):\r\n self.plot_all_logcalls(True)\r\n print_log('info', 'Interpolation was finished.')",
"def show(image):\n fig = pyplot.figure()\n axis = fig.add_subplot(1, 1, 1)\n imgplot = axis.imshow(image)\n imgplot.set_interpolation('nearest')\n axis.xaxis.set_ticks_position('top')\n axis.yaxis.set_ticks_position('left')\n pyplot.show()",
"def InterpolateSurfaceVectorsWithPlane():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Plane...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertPlane(Centroids1,Vectors1,50,8)\r\n print('Plane Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.5)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.5)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Plane\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfacePlaneVectorInterpolation.dat\",Vectors2,header = header,comments='')",
"def _plot_interpolation(x, y, x_new, y_new, title=\"\"):\n f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)\n axes = (ax1, ax2, ax3)\n coord = [\"X\", \"Y\", \"Z\"]\n\n for idx, ax in enumerate(axes):\n ax.set_title(title + \" (\" + coord[idx] + \" coordinate)\", fontsize=12)\n ax.set_ylabel(\"m\")\n ax.plot(x, y[:, idx], \"bo\", label=\"Original data\")\n ax.plot(x_new, y_new[:, idx], \"ro\", label=\"Interpolated data\")\n\n ax3.set_xlabel(\"Time\")\n ax1.legend(fontsize=8, loc=1)\n f.subplots_adjust(hspace=0.3)\n plt.setp([a.get_xticklabels() for a in f.axes[:-1]], visible=False)\n plt.show()",
"def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))",
"def update_surface(frame):\n \n #fig.suptitle(time[frame])\n im.set_array(surf[frame])\n im.set_extent([np.nanmin(xx[frame]), np.nanmax(xx[frame]), np.nanmin(yy[frame]), np.nanmax(yy[frame])])\n \n line.set_data([(times[:-1] + utc_to_east).plot_date[frame]]*2, ylim)",
"def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis",
"def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()",
"def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')",
"def show(im,fig= None): #X\n im = im.copy()\n if len(im.shape)==1 or im.shape[1]==1:\n im = X2patch(im)\n im[im<=DEAD]=-0.5\n if fig is None:\n plt.figure()\n fig = plt.imshow(hsv_to_rgb(im+0.5))\n fig.set_data(hsv_to_rgb(im+0.5))\n plt.draw()\n plt.pause(0.001)\n return fig",
"def plotArt(self):\n self.isArt=True\n warr=self.ws.value(self.xarr)\n asfarr=st.interpolate(warr, self.swarr, self.sfarr, left=0.0, right=0.0)\n asfarr=asfarr*self.farr.max()/asfarr.max()\n self.fpcurve,=self.axes.plot(self.xarr,asfarr,linewidth=0.5,linestyle='-',\n marker='None',color='r')",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def show(image):\n from matplotlib import pyplot\n import matplotlib as mpl\n fig = pyplot.figure()\n ax = fig.add_subplot(1,1,1)\n imgplot = ax.imshow(image, cmap=mpl.cm.Greys)\n imgplot.set_interpolation('nearest')\n ax.xaxis.set_ticks_position('top')\n ax.yaxis.set_ticks_position('left')\n pyplot.show()",
"def drawRegularSurface(matrix, nx, ny, xinterp, yinterp):\n dislin.surmat(matrix, nx, ny, xinterp, yinterp)",
"def plot(data, interactive=False):\n if interactive:\n plt.ion()\n fig = plt.figure()\n fig.canvas.draw()\n image = call_imshow(data)\n else:\n fig = plt.figure()\n image = call_imshow(data)\n plt.show()\n return fig, image",
"def drawIrregularSurface(xlist, ylist, zmatrix):\n dislin.surfce(xlist, len(xlist), ylist, len(ylist), zmatrix)",
"def visualize(z_in, azimuth=25., elevation=30.,\n thresholds=[0.95, .9, .75, .5, .25, .125], opacities=[1, .9, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75, .5, .25, .1], opacities=[.9, .8, .7, .5, .2, .1],\n# thresholds=[0.94, .89, .75], opacities=[.99, .7, .2],\n# thresholds=[0.7, .5, .2], opacities=[.95, .5, .2],\n fourier_label = {'f_x':'f_x', 'f_y':'f_y', 'f_t':'f_t'},\n filename=None, do_axis=True, do_grids=False, draw_projections=True,\n colorbar=False, f_N=2., f_tN=2., figsize=figsize, dpi=300, figpath=figpath, **kwargs):\n z = z_in.copy()\n N_X, N_Y, N_frame = z.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n\n # Normalize the amplitude.\n z /= z.max()\n\n from vispy import app, scene, use\n try:\n AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n #from vispy.util.transforms import perspective, translate, rotate\n from vispy.color import Color\n transparent = Color(color='black', alpha=0.)\n import colorsys\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n vol_data = np.rollaxis(np.rollaxis(z, 1), 2)\n# volume = scene.visuals.Volume(vol_data, parent=view.scene)#frame)\n center = scene.transforms.STTransform(translate=( -N_X/2, -N_Y/2, -N_frame/2))\n# volume.transform = center\n# volume.cmap = 'blues'\n\n if draw_projections:\n from vispy.color import Colormap\n cm = Colormap([(1.0, 1.0, 1.0, 1.0), 'k'])\n opts = {'parent':view.scene, 'cmap':cm, 'clim':(0., 1.)}\n\n energy_xy = np.rot90(np.max(z, axis=2)[:, ::-1], 3)#[:, ::-1]\n fourier_xy = scene.visuals.Image(np.rot90(energy_xy), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (0, 0, 1))\n tr_xy.translate((N_X/2, -N_Y/2, -N_frame/2))\n fourier_xy.transform = tr_xy\n\n energy_xt = np.rot90(np.max(z, axis=1)[:, ::-1], 3)[::-1, ::-1]\n fourier_xt = scene.visuals.Image(energy_xt, **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (1, 0, 0))\n tr_xt.translate((-N_X/2, N_Y/2, -N_frame/2))\n fourier_xt.transform = tr_xt\n\n energy_yt = np.max(z, axis=0)[:, ::-1]\n fourier_yt = scene.visuals.Image(energy_yt, **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((-N_X/2, -N_Y/2, N_frame/2))\n fourier_yt.transform = tr_yt\n\n # Generate iso-surfaces at different energy levels\n surfaces = []\n for i_, (threshold, opacity) in enumerate(zip(thresholds, opacities)):\n surfaces.append(scene.visuals.Isosurface(z, level=threshold,\n# color=Color(np.array(colorsys.hsv_to_rgb(1.*i_/len(thresholds), 1., 1.)), alpha=opacity),\n color=Color(np.array(colorsys.hsv_to_rgb(.66, 1., 1.)), alpha=opacity),\n shading='smooth', parent=view.scene)\n )\n surfaces[-1].transform = center\n\n # Draw a sphere at the origin\n axis = scene.visuals.XYZAxis(parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_Y/2, p[2]*N_frame/2], [p[3]*N_X/2, p[4]*N_Y/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n\n axisX = scene.visuals.Line(pos=np.array([[0, -N_Y/2, 0], [0, N_Y/2, 0]]), color='red', parent=view.scene)\n axisY = scene.visuals.Line(pos=np.array([[-N_X/2, 0, 0], [N_X/2, 0, 0]]), color='green', parent=view.scene)\n axisZ = 
scene.visuals.Line(pos=np.array([[0, 0, -N_frame/2], [0, 0, N_frame/2]]), color='blue', parent=view.scene)\n\n if do_axis:\n t = {}\n for text in ['f_x', 'f_y', 'f_t']:\n t[text] = scene.visuals.Text(fourier_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['f_x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['f_y'].pos = canvas.size[0] - canvas.size[0] // 8, canvas.size[1] - canvas.size[1] // 6\n t['f_t'].pos = canvas.size[0] // 8, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=elevation, azimuth=azimuth, up='z')\n cam.fov = 48\n cam.scale_factor = N_X * 1.8\n if do_axis: margin = 1.35\n else: margin = 1\n cam.set_range((-N_X/2*margin, N_X/2/margin), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2*margin, N_frame/2/margin))\n view.camera = cam\n\n render_im = canvas.render()\n app.quit()\n if not(filename is None):\n import vispy.io as io\n io.write_png(filename, render_im)\n else:\n return render_im",
"def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()"
] | [
"0.6518163",
"0.6399373",
"0.62921655",
"0.6148502",
"0.6133245",
"0.6123275",
"0.60479116",
"0.60328573",
"0.6023449",
"0.59941006",
"0.59833103",
"0.5982813",
"0.5928283",
"0.5919153",
"0.5898158",
"0.5881833",
"0.58795625",
"0.5879294",
"0.58704317",
"0.5856613",
"0.58554226",
"0.58548635",
"0.5851073",
"0.5851073",
"0.5851073",
"0.58405167",
"0.58248615",
"0.580644",
"0.5800243",
"0.578047"
] | 0.69940144 | 0 |