query            stringlengths    9 – 9.05k
document         stringlengths    10 – 222k
metadata         dict
negatives        sequencelengths  30 – 30
negative_scores  sequencelengths  30 – 30
document_score   stringlengths    4 – 10
document_rank    stringclasses    2 values
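The schema above describes a triplet-style retrieval dataset: each row pairs a natural-language query with one positive document, thirty mined negative documents, and their similarity scores, and the metadata field marks the training objective as a (query, document, negatives) triplet. As a minimal sketch, assuming each row is available as a plain Python dict with exactly these field names (an assumption about how the dump is loaded, not something the dump itself specifies), a row can be expanded into per-negative triplets like this:

def iter_triplets(row):
    """Yield (query, positive_document, negative_document, negative_score) tuples from one row."""
    query = row["query"]
    positive = row["document"]
    # negatives and negative_scores are parallel, fixed-length (30) sequences.
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        yield query, positive, negative, float(score)

Under that assumption each row yields thirty triplets, matching the fixed length of the negatives and negative_scores columns listed above.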
Returns all numbers below N that are multiples of M.
def findMultiples(M, N):
    numbers = []
    for i in range(N):
        if(i + 1 == N):
            break
        if(((i + 1) % M) == 0):
            numbers.append(i+1)
    return numbers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sixn(m):\n if m <= 2:\n return ()\n if m > 2:\n yield 2\n if m > 3:\n yield 3\n for n in count(1):\n x = 6 * n - 1\n y = x + 2\n if x < m:\n yield x\n else:\n break\n if y < m:\n yield y\n else:\n break", "def islice(n, m):\n npiece = int(math.ceil(1.0*n/m))\n for i in range(npiece):\n if (i+1)*m > n:\n yield i, i*m, n\n else:\n yield i, i*m, (i+1)*m", "def choose(n, m):\n assert n >= m, \"Cannot choose {0} elements from {1}\".format(m, n)\n result = 1\n for n_i, m_i in zip(range(n, m, -1), range(1, m+1)):\n result *= n_i\n result /= m_i\n return result", "def evenly_select(N, M):\n if N == M:\n return np.ones(N, dtype=int)\n assert N > M\n if M > N/2:\n cut = np.ones(N, dtype=int)\n q, r = divmod(N, N-M)\n indices = [q*i + min(i, r) for i in range(N-M)]\n cut[indices] = False\n else:\n cut = np.zeros(N, dtype=int)\n q, r = divmod(N, M)\n indices = [q*i + min(i, r) for i in range(M)]\n cut[indices] = True\n\n return cut", "def cashflow_times(n, m):\n return [i for i in range(m * n+1) if i != 0 ]", "def find_multiple(self, num):\n result = dict()\n for n in range(1, num+1):\n temp = self.find_prime_divisors(n)\n result.update({k:v for k,v in temp.items() if k not in result or result[k] < v})\n return reduce(operator.mul, (pow(k, v) for k,v in result.items()))", "def mult(n,m):\n result = 0\n\n if m == 0 or n == 0:\n result = 0\n\n elif n > 0:\n for x in range(n):\n result = result + m\n else:\n for x in range(-n):\n result = result - m\n return result", "def get_multiples(ratio, n):\n ls = [ratio ** i for i in range(n)]\n return ls", "def hart(N):\n m = 2\n i = 1\n while not is_square(m):\n s = isqrt(N * i) + 1\n m = pow(s, 2, N)\n i += 1\n t = isqrt(m)\n g = gcd(s - t, N)\n return g, N // g", "def partition(n, m, discard= False):\n steps = range(0, 1 + n, m)\n yield from zip(steps, steps[1:])\n if n % m and not discard:\n yield n - (n % m), n", "def slice(n, m):\n chunks = []\n for piece in islice(n, m):\n chunks.append(piece)\n return chunks", "def find_powers(n):\n # find_powers(6) --> [1, 2, 3, 4]\n return list(takewhile(lambda x: len(str(n**x)) == x, count(1)))", "def wrap(x, m, M):\n diff = M - m\n while x > M:\n x = x - diff\n while x < m:\n x = x + diff\n return x", "def algorithm_h(n, m):\n partition = [1]*m\n partition[0] = n - m + 1\n\n while True:\n yield partition[:]\n if partition[1] < partition[0] - 1:\n partition[0] -= 1\n partition[1] += 1\n else:\n j = 2\n s = partition[0] + partition[1] - 1\n while j < m and partition[j] >= partition[0] - 1:\n s += partition[j]\n j += 1\n if j >= m:\n return\n replacement = partition[j] + 1\n partition[j] = replacement\n j -= 1\n while j > 0:\n partition[j] = replacement\n s -= replacement\n j -= 1\n partition[0] = s", "def evenly_select(n, m):\n if n == m:\n return np.ones(n, dtype=int)\n assert n > m\n if m > n/2:\n cut = np.ones(n, dtype=int)\n q, r = divmod(n, n - m)\n indices = [q * i + min(i, r) for i in range(n - m)]\n cut[indices] = False\n else:\n cut = np.zeros(n, dtype=int)\n q, r = divmod(n, m)\n indices = [q * i + min(i, r) for i in range(m)]\n cut[indices] = True\n\n return cut", "def is_multiple(n,m):\n return n % m == 0", "def subdim(number):\n res = []\n for i in [2, 3, 4, 5, 6, 7, 8, 9, 10]:\n res.append(number % i)\n if number % i == 0:\n n = i\n m = number // i\n return n, m\n if not 0 in res:\n return subdim(number + 1)", "def ideal_fanin(k, m):\n fanin = 0\n needed = k\n for select in range(3, m + 1, 2):\n combinations = list(itertools.combinations(range(m), select))\n if len(combinations) <= needed:\n fanin 
+= int(math.ceil(float(len(combinations) * select) / m))\n needed -= len(combinations)\n else:\n fanin += int(math.ceil(float(needed * select) / m))\n needed = 0\n if not needed:\n break\n return fanin", "def bruteForce_MC(N,M):\n hewlist = np.zeros(M)\n for i in range(M):\n x = createDist(N)\n x = np.abs(x-np.mean(x))\n x.sort()\n hewlist[i] = np.median(x)*2.\n return np.mean(hewlist), np.std(hewlist)", "def pick_chosen_points(m, n):\r\n return [i * n // m + n // (2 * m) for i in range(m)]", "def get_k(self, n, m):\n k = m/n * log(2)\n return int(k)", "def solution1(n):\n res = []\n while n > 0:\n m = int(math.floor(math.sqrt(n))**2)\n res.append(m)\n n -= m\n return res", "def maiores(lista, n):\n numeros = [lista for lista in lista if lista > n]\n return numeros", "def uniform_sample(n, m):\n interval = m / n\n indices = [0]\n index = 0.0\n while True:\n index += interval\n if index >= m - 1:\n indices.append(int(m - 1))\n break\n else:\n indices.append(int(index))\n\n return np.array(indices)", "def non_mcnugget():\n nugget = [0, 6, 9, 20]\n mcnugget = set([6, 9, 20])\n\n while True:\n mcnugget = set([m+n for m in mcnugget for n in nugget])\n\n for m in mcnugget:\n found = all([m+j in mcnugget for j in range(6)])\n if found:\n return [k for k in range(1, m) if k not in mcnugget]", "def countm(m):\n nfound=0\n\n for i in range(1,m+1):\n for jpk in range(2,(2*i)+1):\n d1=i*i+(jpk)*(jpk) \n if(checkpsq(d1)): \n if(jpk<=i):\n factor=jpk/2 \n else:\n factor=((2*i-jpk)+2)/2 \n nfound=nfound+factor\n\n return nfound", "def sum_n_m(n, m):\n total = 0\n for i in range(n, m+1):\n total += i\n return total", "def power(x, m, n):\n a = 1\n while m > 0:\n if m % 2 == 1:\n a=(a*x)%n\n x=(x*x)%n\n m//=2\n return a", "def tri(N, M=None, k=0, dtype=float):\r\n if M is None: M = N\r\n m = greater_equal(subtract.outer(arange(N), arange(M)),-k)\r\n return m.astype(dtype)", "def bartlett(M):\n if M < 1:\n return array([])\n if M == 1:\n return ones(1, float)\n n = arange(0,M)\n return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))" ]
[ "0.67994684", "0.67914283", "0.6521023", "0.64013743", "0.632049", "0.62614083", "0.6100065", "0.6055581", "0.6036513", "0.6017747", "0.60122454", "0.60112625", "0.59835833", "0.59610224", "0.59551555", "0.59433913", "0.5900685", "0.587324", "0.5860821", "0.5820013", "0.58147126", "0.58118325", "0.58117676", "0.58067805", "0.5791375", "0.5788349", "0.5780253", "0.5773223", "0.5770476", "0.5767918" ]
0.7943605
0
auth_state enabled and available
async def test_auth_state(app, auth_state_enabled):
    name = 'kiwi'
    user = add_user(app.db, app, name=name)
    assert user.encrypted_auth_state is None
    cookies = await app.login_user(name)
    auth_state = await user.get_auth_state()
    assert auth_state == app.authenticator.auth_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth_state(self):\n raise NotImplementedError()", "def check_auth():", "def requires_auth(self):\n return True", "def get_authorization():\n return True", "def is_authenticated(self):\n return True #self.authenticated", "def is_authenticated(self):\n return True", "def auth_enabled(self):\n\n return self._api_manager.auth_enabled()", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n result = self.lpass(\"lpass status\")\n\n if \"Logged in as\" in result.output:\n return True\n\n return False", "def set_auth_state(self, data):\n raise NotImplementedError()", "def auth():\n pass", "def auth():\n pass", "def auth_token_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"auth_token_enabled\")", "def get_authenticated_granted(self):", "def is_authenticated(self):\n return False", "def authorization():\n pass", "def is_authenticated(self):\n return self.ping() is not None", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def ready(self):\n if self._wait_auth:\n return False\n return True", "def _can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])", "def is_authenticated(self, request, **kwargs):\r\n return True", "def is_authenticated(self):\r\n return self.authenticated", "def authentication_hook(self):\n pass" ]
[ "0.74882525", "0.71273947", "0.6910631", "0.6909217", "0.670079", "0.6680911", "0.6628001", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.65065384", "0.6505308", "0.6504012", "0.64769554", "0.64769554", "0.64572227", "0.64142615", "0.6381516", "0.63628924", "0.6335662", "0.6330429", "0.63162214", "0.6268234", "0.6267411", "0.6244223", "0.6192191", "0.618395" ]
0.7185515
1
auth_state enabled at the Authenticator level, but unavailable due to no crypto keys.
def auth_state_unavailable(auth_state_enabled):
    crypto.CryptKeeper.instance().keys = []
    yield
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_auth_state(self):\n raise NotImplementedError()", "async def test_auth_state(app, auth_state_enabled):\n name = 'kiwi'\n user = add_user(app.db, app, name=name)\n assert user.encrypted_auth_state is None\n cookies = await app.login_user(name)\n auth_state = await user.get_auth_state()\n assert auth_state == app.authenticator.auth_state", "def check_auth():", "def set_auth_state(self, data):\n raise NotImplementedError()", "def auth_active(hass):\n hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )", "def auth_token_enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"auth_token_enabled\")", "def auth_enabled(self):\n\n return self._api_manager.auth_enabled()", "def ready(self):\n if self._wait_auth:\n return False\n return True", "def auth(self):\n ok = False\n if self.private_token:\n ok = self.token_auth()\n if not ok:\n self.credentials_auth()", "def requires_auth(self):\n return True", "def sr_auth_state(self, **kwargs):\n from pykern import pkunit\n from pykern import pkcollections\n\n m = re.search(\n r\"(\\{.*\\})\",\n pkcompat.from_bytes(self.sr_get(\"authState\").data),\n )\n s = pkcollections.json_load_any(m.group(1))\n for k, v in kwargs.items():\n pkunit.pkeq(\n v,\n s[k],\n \"key={} expected={} != actual={}: auth_state={}\",\n k,\n v,\n s[k],\n s,\n )\n return s", "def _set_authenticator(self):\n pass", "def auth_token_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auth_token_enabled\")", "def is_stateless():\n return AceQLHttpApi.is_stateless()", "def authenticator():", "def enable_authentication(self) -> bool:\n return pulumi.get(self, \"enable_authentication\")", "def _auth_plugin_available(ext):\n return ext.obj.available", "def is_enabled(self):", "def isEnabled(state):\n return (isActive(state) or state == State.preEnabled)", "def check_auth(self):\n if self.type_of_auth == BboxConstant.AUTHENTICATION_TYPE_LOCAL:\n access_level_required = self.get_auth_access_needed_for_local()\n else:\n access_level_required = self.get_auth_access_needed_for_remote()\n\n if access_level_required == BboxConstant.AUTHENTICATION_LEVEL_NONE:\n return False\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PRIVATE:\n return self.is_authentified()\n elif access_level_required == BboxConstant.AUTHENTICATION_LEVEL_PUBLIC:\n return True", "def is_frozensand_auth_available(self):\n cvar = self.getCvar('auth')\n if cvar:\n auth = cvar.getInt()\n return auth != 0\n else:\n return False", "def check_auth(self):\n if self.enterprise_url is not None:\n return True\n try:\n if self.api is not None:\n # Throws AuthenticationFailed if invalid credentials but\n # does not deduct from the rate limit.\n self.api.ratelimit_remaining\n return True\n else:\n self.print_auth_error()\n except AuthenticationFailed:\n self.print_auth_error()\n return False", "def get_authorization():\n return True", "def authorized(self):\n\n # Here we explicitly start because the usage of alembic may be out\n # of our running context.\n return PyFunceble.cli.facility.CredentialLoader.is_already_loaded()", "def enable_auth_gssapi(self):\n UseGSSAPI = False\n GSSAPICleanupCredentials = False\n return UseGSSAPI", "def _check_auth(self):\n if self.authToken:\n return True\n else:\n msg = \"you need to login\"\n self.raise_error(msg)", "def auth_isok(self):\n # pylint: disable=W0603\n global KEY\n return_value = False\n if KEY is None:\n return_value = True\n elif self.headers.get('Authorization') == 'Basic ' + KEY:\n 
return_value = True\n return return_value", "def check_state(self):\n pass", "def is_authenticated(self):\n return self.ping() is not None", "def _check_authentication(self) -> NoReturn:\n if not self.heartbeat():\n self.authenticate()" ]
[ "0.70946044", "0.6600317", "0.5862487", "0.58282715", "0.577362", "0.5701615", "0.5661563", "0.5567852", "0.55177164", "0.55092674", "0.55005485", "0.54370195", "0.5430303", "0.53735715", "0.5352972", "0.53514665", "0.5348289", "0.5340672", "0.5339704", "0.5333827", "0.53211564", "0.5317445", "0.53099155", "0.5294105", "0.5293451", "0.5264012", "0.5261133", "0.5248625", "0.5242969", "0.52391094" ]
0.6792563
1
Tests whether ``SoundboardSound.__repr__`` works as intended.
def test__SoundboardSound__repr():
    available = False
    emoji = BUILTIN_EMOJIS['heart']
    name = 'rember'
    user_id = 202305240032
    volume = 0.69

    sound_id = 202305240033
    guild_id = 202305240034

    sound = SoundboardSound.precreate(
        sound_id,
        guild_id = guild_id,
        available = available,
        emoji = emoji,
        name = name,
        user_id = user_id,
        volume = volume,
    )

    vampytest.assert_instance(repr(sound), str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_repr(self):\n self.assertEqual(repr(self.deck), \"Deck of 52 cards.\")", "def test_repr(self):\n self.assertEqual(repr(self.card), \"A of Spades\")", "def test_repr(self):\n dummy = DummyCryptographicObject()\n repr(dummy)", "def test_repr_show(self):\n self.assertEquals(\n repr(self.t['CNNNN']),\n \"<Show Chaser Non-Stop News Network (CNNNN) (containing 2 seasons)>\"\n )", "def test_repr(self, r, rep):\n assert repr(r) == rep", "def test_repr():\n c = Circle(4) \n assert c.__repr__() == 'Circle(4)'", "def allow_repr(cls) -> bool:\n raise NotImplementedError", "def test_notification_repr(self) -> None:\n self.assertEqual(repr(self.notification1), \"<Notification 1>\")\n\n # pylint: disable=unnecessary-dunder-call\n self.assertEqual(self.notification1.__repr__(), \"<Notification 1>\")", "def test_repr(self):\n self.assertTrue(repr(self.obj1))\n self.assertTrue(repr(self.obj2))\n self.assertTrue(repr(self.obj3))\n self.assertTrue(repr(self.obj4))\n self.assertTrue(repr(self.obj5))", "def test_repr(self):\n\n char = Character.query.get(1111)\n expected = \"<Character Instance | ID: 1111 | Name: Mario | Game: Super Mario 64>\"\n\n self.assertEqual(expected, str(char))", "def test_repr() -> None:\n attrs = {\"this_attr\": True}\n fixed_time = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC, microsecond=432432)\n state = ha.State(\n \"sensor.temperature\",\n \"18\",\n attrs,\n last_changed=fixed_time,\n last_updated=fixed_time,\n )\n event = ha.Event(\n EVENT_STATE_CHANGED,\n {\"entity_id\": \"sensor.temperature\", \"old_state\": None, \"new_state\": state},\n context=state.context,\n time_fired=fixed_time,\n )\n assert \"2016-07-09 11:00:00+00:00\" in repr(States.from_event(event))\n assert \"2016-07-09 11:00:00+00:00\" in repr(Events.from_event(event))", "def test_repr(self):\n \n # Create a Resource object\n book = Book(\"Penguin Group\", \"New York\", \"fiction\", 1, \"White Noise\", \n Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\",\n [\"culture\", \"survival\", \"life\", \"society\"])\n \n \n # Assert expected result of the repr function\n self.assertEqual(repr(book), (\"Book(1, 'White Noise', \"\\\n \"Name('Don', '', 'DeLillo'), \"\\\n \"'Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. 
Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.', 'sci-fi', 'English', \"\\\n \"1985, 'US', 326, 'book', \"\\\n \"'['culture', 'survival', 'life', 'society']', \"\\\n \"'Penguin Group', 'New York', 'fiction')\"))", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def __repr__(self) -> str:\n ...", "def test_repr(self):\n obj = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n args = \"value={0}, opaque_type={1}\".format(\n binascii.hexlify(self.bytes_a), enums.OpaqueDataType.NONE)\n expected = \"OpaqueObject({0})\".format(args)\n observed = repr(obj)\n self.assertEqual(expected, observed)", "def test_repr_episode(self):\n self.assertEquals(\n repr(self.t['CNNNN'][1][1]),\n \"<Episode 01x01 - September 19, 2002 (20:30 - 21:00)>\"\n )", "def test_repr(self):\n for duration, repr_, _ in self.test_cases:\n self.assertEqual(repr(Rest(duration)), repr_)", "def test_repr(self):\n \n # Create a Resource object\n resource = Resource(1, \"White Noise\", Name(\"Don\", \"\", \"DeLillo\"), \n \"Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.\",\n \"sci-fi\", \"English\", 1985, \"US\", 326, \"book\", \n [\"culture\", \"survival\", \"life\", \"society\"])\n \n \n # Assert expected result of the repr function\n self.assertEqual(repr(resource), (\"Resource(1, 'White Noise', \"\\\n \"Name('Don', '', 'DeLillo'), \"\\\n \"'Delillo's White Noise follows narrator Jack \"\\\n \"Gladney, a professor at a small Liberal Arts \"\\\n \"college and describes an academic year. Jack \"\\\n \"teaches at a school called the \"\\\n \"College-on-the-Hill, where he serves as the \"\\\n \"department chair of Hitler studies. He lives in \"\\\n \"Blacksmith, a quiet college town, with his wife, \"\\\n \"Babette, and four of their children from earlier \"\\\n \"marriages: Heinrich, Steffie, Denise, and \"\\\n \"Wilder. Throughout the novel, various \"\\\n \"half-siblings and ex-spouses drift in and out \"\\\n \"of the family’s home.', 'sci-fi', 'English', \"\\\n \"1985, 'US', 326, 'book', \"\\\n \"'['culture', 'survival', 'life', 'society']')\"))", "def _repr_(self):\n return repr(self.element())", "def __repr__(self):", "def __repr__(self):", "def __repr__(self):", "def test_reprMethod(self):\n self.assertEqual(\n repr(task.LoopingCall(TestableLoopingCall.__init__)),\n \"LoopingCall<None>(TestableLoopingCall.__init__, *(), **{})\")", "def test_reprSanity(self):\n repr(MessageSet(1, 2))", "def __repr__(self):\n\n return self._repr__base(rich_output=False)", "def __repr__(self):\n\n return self._repr__base(rich_output=False)", "def test_glass_repr__returns_expected_value():\n glass = moet.create_glass(\"A\")\n assert \"moet.glass.Glass(uid=A, pos=None)\" in repr(glass)" ]
[ "0.6914809", "0.6808723", "0.66963106", "0.6534174", "0.6505421", "0.64757746", "0.6391462", "0.6254702", "0.6244902", "0.6209125", "0.6178", "0.6166957", "0.6156544", "0.6156544", "0.6156544", "0.6156544", "0.6156544", "0.6116914", "0.6116562", "0.6096828", "0.60628986", "0.60588056", "0.6048957", "0.6048957", "0.6048957", "0.60472554", "0.6041102", "0.5974292", "0.5974292", "0.5968777" ]
0.77653193
0
Tests whether ``SoundboardSound.__hash__`` works as intended.
def test__SoundboardSound__hash():
    available = False
    emoji = BUILTIN_EMOJIS['heart']
    name = 'rember'
    user_id = 202305240035
    volume = 0.69

    sound_id = 202305240036
    guild_id = 202305240037

    keyword_parameters = {
        'available': available,
        'emoji': emoji,
        'name': name,
        'user_id': user_id,
        'volume': volume,
    }

    sound = SoundboardSound.precreate(
        sound_id,
        guild_id = guild_id,
        **keyword_parameters,
    )

    vampytest.assert_instance(repr(sound), str)

    sound = SoundboardSound(**keyword_parameters)

    vampytest.assert_instance(repr(sound), str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__SoundboardSound__eq():\n available = False\n emoji = BUILTIN_EMOJIS['heart']\n name = 'rember'\n user_id = 202305240038\n volume = 0.69\n \n sound_id = 202305240039\n guild_id = 202305240040\n \n keyword_parameters = {\n 'available': available,\n 'emoji': emoji,\n 'name': name,\n 'user_id': user_id,\n 'volume': volume,\n }\n \n sound = SoundboardSound.precreate(\n sound_id,\n guild_id = guild_id,\n **keyword_parameters,\n )\n \n vampytest.assert_eq(sound, sound)\n vampytest.assert_ne(sound, object())\n \n test_sound = SoundboardSound(**keyword_parameters,)\n \n vampytest.assert_eq(sound, test_sound)\n \n for field_name, field_value in (\n ('available', True),\n ('emoji', BUILTIN_EMOJIS['x']),\n ('name', 'happy day'),\n ('user_id', 202305240041),\n ('volume', 0.70),\n ):\n test_sound = SoundboardSound(**{**keyword_parameters, field_name: field_value})\n vampytest.assert_ne(test_sound, sound)", "def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))", "def __hash__(self) -> int:", "def __hash__(self):\n return 0", "def check_hashable(self, setup):\n try:\n hash(setup)\n except TypeError as e:\n raise AssertionError(f\"setup object is not hashable:\\n{setup}\") from e", "def __hash__(self):\n\n return hash(str(self.board))", "def test_channel_hash(self):\n acq_channel_1 = AcquireChannel(123)\n acq_channel_2 = AcquireChannel(123)\n\n hash_1 = hash(acq_channel_1)\n hash_2 = hash(acq_channel_2)\n\n self.assertEqual(hash_1, hash_2)", "def __hash__(self) -> int:\n ...", "def __hash__(self) -> int:\n ...", "def __hash__(self):\n\t\treturn 1", "def __hash__(self):\n return hash(self.hash)", "def __hash__(self):\n return super().__hash__()", "def __hash__(self):\n return hash((self.SYMBOL, self._.hash_parameters))", "def __hash__(self):\n return hash((self._im_func, self._im_self_ref, self._im_class))", "def test__ActivityMetadataBase__hash():\n activity_metadata = ActivityMetadataBase()\n \n vampytest.assert_instance(hash(activity_metadata), int)", "def __hash__(self):\r\n return hash(type(self)) ^ hash(self.broadcastable)", "def __hash__(self):\n return hash(self.returnBoard())", "def __hash__(self):\n return self.word.__hash__()", "def test_random_matgame_hash_eq(strats):\n payoffs = rand.random(tuple(strats) + (len(strats),))\n matg = matgame.matgame(payoffs)\n\n copy = matgame.matgame_copy(matg)\n assert hash(copy) == hash(matg)\n assert copy == matg\n\n game = paygame.game_copy(matg)\n copy = matgame.matgame_copy(game)\n assert hash(copy) == hash(matg)\n assert copy == matg", "def __hash__(self):\n raise NotImplementedError", "def __hash__(self):\n return hash(tuple([self.get_rank(), self.get_suit()]))", "def __hash__(self):\n return hash(self.get_canonical_identifier())", "def fixed(o):\n try:\n hash(o)\n except TypeError:\n return False\n return True", "def __hash__(self):\r\n return hash(self.__key())", "def __hash__(self):\n return hash(self.__uuid)", "def __hash__(self):\n return hash(self.__uuid)", "def __hash__(self):\n return hash(tuple(self.sig))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))", "def __hash__(self):\n return hash(str(self))" ]
[ "0.64707255", "0.63552177", "0.6180331", "0.61213714", "0.6096645", "0.60493755", "0.60439104", "0.603657", "0.603657", "0.60337245", "0.6027019", "0.6023845", "0.60236806", "0.59823734", "0.5976516", "0.5973415", "0.59712046", "0.59241724", "0.58853847", "0.5853793", "0.5851253", "0.58267725", "0.5819978", "0.5814218", "0.5810652", "0.5810652", "0.5806252", "0.5798702", "0.5798702", "0.5798702" ]
0.78902173
0
Tests whether ``SoundboardSound.__eq__`` works as intended.
def test__SoundboardSound__eq():
    available = False
    emoji = BUILTIN_EMOJIS['heart']
    name = 'rember'
    user_id = 202305240038
    volume = 0.69

    sound_id = 202305240039
    guild_id = 202305240040

    keyword_parameters = {
        'available': available,
        'emoji': emoji,
        'name': name,
        'user_id': user_id,
        'volume': volume,
    }

    sound = SoundboardSound.precreate(
        sound_id,
        guild_id = guild_id,
        **keyword_parameters,
    )

    vampytest.assert_eq(sound, sound)
    vampytest.assert_ne(sound, object())

    test_sound = SoundboardSound(**keyword_parameters,)

    vampytest.assert_eq(sound, test_sound)

    for field_name, field_value in (
        ('available', True),
        ('emoji', BUILTIN_EMOJIS['x']),
        ('name', 'happy day'),
        ('user_id', 202305240041),
        ('volume', 0.70),
    ):
        test_sound = SoundboardSound(**{**keyword_parameters, field_name: field_value})
        vampytest.assert_ne(test_sound, sound)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n if not isinstance(other, NhlOddsScoringPlay):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, DiarizeAudio):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n if not isinstance(other, AudioFrame):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self, other):\n return isinstance(other, type(self)) and set(self.channels) == set(other.channels)", "def __eq__(self, other) -> bool:\r\n if isinstance(other, Square):\r\n if (self.board, self.file, self.rank) == (\r\n other.board, other.file, other.rank):\r\n return True\r\n \r\n return False", "def __eq__(self, other : dumbEmoji) -> bool:\n return type(other) == dumbEmoji and self.sendable == other.sendable", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self,*args):\r\n pass", "def __eq__(self, other):\n if type(other) is not type(self):\n return False\n if self._sample_rate != other._sample_rate:\n return False\n if self._samples.shape != other._samples.shape:\n return False\n if np.any(self.samples != other._samples):\n return False\n return True", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass", "def __eq__(self, *args):\r\n pass" ]
[ "0.7053136", "0.69082266", "0.68528724", "0.6663054", "0.6607504", "0.65843195", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.65642565", "0.6562754", "0.6549555", "0.6549555", "0.6549555" ]
0.7741757
0
Getter method for usr_ping_count, mapped from YANG variable /mpls_state/statistics_oam/usr_ping_count (uint32)
def _get_usr_ping_count(self):
    return self.__usr_ping_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n 
return int(api_response['data']['count'])", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def get_online_users_count(khoros_object, anonymous=None, registered=None):\n if anonymous and not registered:\n response = api.make_v1_request(khoros_object, '/users/online/anonymous/count')\n elif registered and not anonymous:\n response = api.make_v1_request(khoros_object, '/users/online/registered/count')\n else:\n response = api.make_v1_request(khoros_object, '/users/online/count')\n return response['response']['value']['$']", "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def get_num_psus(self):\n return len(self._psu_list)", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def count():\r\n return User.query.count()", "def get_user_count(self):\n done = self.cur.execute(\"SELECT username FROM users\")\n return done", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def get_users_count(khoros_object, registered=False, online=False):\n if all((registered, online)):\n raise errors.exceptions.InvalidParameterError('You can only select registered or online users but not both.')\n if registered:\n user_count = get_registered_users_count(khoros_object)\n elif online:\n user_count = get_online_user_count(khoros_object)\n else:\n user_count = get_all_users_count(khoros_object)\n return user_count", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def get_users_count():\n # return User.objects.all().count()\n return User.objects.filter(is_active=True,\n last_login__isnull=False).count()", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def get_session_count(self):\n\t\treturn 
call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def GetCount(self, label: 'unsigned char') -> \"unsigned long long\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIUS3IUC3_GetCount(self, label)", "def count_user_push(username):\n public_activities = json.loads(query_user_activities(username))\n push_count = 0\n for activity in public_activities:\n if activity['type'] == 'PushEvent':\n push_count += 1\n return 'Total push count: ' + str(push_count)" ]
[ "0.7919465", "0.62490904", "0.5974101", "0.5872466", "0.5848729", "0.5829324", "0.58061343", "0.57633376", "0.5698802", "0.5670618", "0.55348474", "0.54909444", "0.54774773", "0.54751647", "0.54595673", "0.54140943", "0.5395396", "0.5368198", "0.5319455", "0.531552", "0.52972794", "0.52741754", "0.5245219", "0.5238117", "0.522497", "0.52011055", "0.51978695", "0.5192744", "0.5141922", "0.51149833" ]
0.6955739
1
Setter method for usr_ping_count, mapped from YANG variable /mpls_state/statistics_oam/usr_ping_count (uint32)
def _set_usr_ping_count(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-ping-count", rest_name="usr-ping-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """usr_ping_count must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-ping-count", rest_name="usr-ping-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__usr_ping_count = t
    if hasattr(self, '_set'):
        self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def get_num_psus(self):\n return len(self._psu_list)", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "def unseen_count_for(self, user):\r\n return 
self.filter(user=user, unseen=True).count()", "def get_online_users_count(khoros_object, anonymous=None, registered=None):\n if anonymous and not registered:\n response = api.make_v1_request(khoros_object, '/users/online/anonymous/count')\n elif registered and not anonymous:\n response = api.make_v1_request(khoros_object, '/users/online/registered/count')\n else:\n response = api.make_v1_request(khoros_object, '/users/online/count')\n return response['response']['value']['$']", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def count():\r\n return User.query.count()", "async def connected_users_count(self, event):\n print(\"PublicChatConsumer\", \"connected_users_count\",\n event[\"connected_users_count\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_CONNECTED_USERS_COUNT,\n \"connected_users_count\": event[\"connected_users_count\"]\n })", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def add_user(self, u: \"Node\") -> None:\n\n if u not in self.users_:\n self.users_[u] = 0\n self.users_[u] += 1", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def get_user_count(self):\n done = self.cur.execute(\"SELECT username FROM users\")\n return done", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def get_number_of_likes_for_user(self, user):\n\t\tfrom pins.models import Pin\n\t\tpin_ctype = ContentType.objects.get_for_model(Pin)\n\t\tpin_list = Pin.objects.active_pins().filter(board__user=user).values_list('pk', flat=True)\n\t\treturn self.filter(content_type=pin_ctype, object_id__in=pin_list).count()", "def get_users_count(khoros_object, registered=False, online=False):\n if all((registered, online)):\n raise errors.exceptions.InvalidParameterError('You can only select registered or online users but not both.')\n if registered:\n user_count = get_registered_users_count(khoros_object)\n elif online:\n user_count = get_online_user_count(khoros_object)\n else:\n user_count = get_all_users_count(khoros_object)\n return user_count", "def count_total_each_user():\r\n trans = transaction.begin()\r\n user_list = UserMgr.get_list(active=True)\r\n for user in user_list:\r\n StatBookmarkMgr.count_user_bookmarks(user.username)\r\n trans.commit()", "def get_users_count():\n # return User.objects.all().count()\n return User.objects.filter(is_active=True,\n last_login__isnull=False).count()" ]
[ "0.66030264", "0.6556253", "0.5894535", "0.568632", "0.56651974", "0.56644946", "0.5605921", "0.5584418", "0.55039364", "0.55039364", "0.5503759", "0.5493639", "0.54372156", "0.53541523", "0.5247895", "0.5217682", "0.5213357", "0.5187056", "0.516838", "0.5158767", "0.51152784", "0.5085967", "0.5063331", "0.50588334", "0.50533867", "0.5000247", "0.49808678", "0.49688262", "0.49194285", "0.49020582" ]
0.8726256
0
Getter method for usr_traceroute_count, mapped from YANG variable /mpls_state/statistics_oam/usr_traceroute_count (uint32)
def _get_usr_traceroute_count(self):
    return self.__usr_traceroute_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_usr_traceroute_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_traceroute_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-traceroute-count\", rest_name=\"usr-traceroute-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_traceroute_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return int(_api_response['data']['items'][0][_object_type]['count'])", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def get_session_count(self):\n\t\treturn 
call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def NoOfSRTunnels(self):\r\n\t\treturn self._get_attribute('noOfSRTunnels')", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def voterContactCount(self, user):\n return self.votercontact_set.filter(user=user).count()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def count():\r\n return User.query.count()", "async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val", "def get_users_count(self):\n try:\n roles = self.db_handler.get_roles_list()\n reply = ''\n\n for role_id, role_name in roles:\n reply += f'{role_name}ів - {self.db_handler.get_staff_count_by_role(role_id)[0]}\\n'\n\n return reply\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get_roles_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_count(khoros_object, user_settings['id'], 'roles')", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "def getViewPortAppCount(self):\n logger.debug('Getting map view port app count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.dstCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def trace_region_count(self):\n cmd = enums.JLinkTraceCommand.GET_NUM_REGIONS\n data = ctypes.c_uint32(0)\n res = self._dll.JLINKARM_TRACE_Control(cmd, ctypes.byref(data))\n if (res == 1):\n raise errors.JLinkException('Failed to get trace region count.')\n return 
data.value", "def get_total_session_count(self) -> int:\n return self.streams_count", "def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))", "def get_connected_users_count(room: PublicChatRoom) -> int:\n return room.users.count()", "def get_otu_counts(fpath):\r\n\r\n try:\r\n otu_table = parse_biom_table(open(fpath, 'U'))\r\n except (TypeError, IOError):\r\n raise MissingFileError('OTU table file required for this analysis')\r\n\r\n if (otu_table.ObservationMetadata is None or\r\n otu_table.ObservationMetadata[0]['taxonomy'] is None):\r\n raise ValueError(\r\n '\\n\\nThe lineages are missing from the OTU table. Make sure you included the lineages for the OTUs in your OTU table. \\n')\r\n\r\n return otu_table" ]
[ "0.8096988", "0.58348656", "0.5540107", "0.5337788", "0.5088764", "0.50769204", "0.5072222", "0.50714403", "0.5059296", "0.50581366", "0.50281113", "0.49717057", "0.4894099", "0.48607355", "0.48384598", "0.48260984", "0.48260832", "0.48071045", "0.47291836", "0.47040775", "0.46823198", "0.46797007", "0.46164805", "0.45977813", "0.45746386", "0.4571215", "0.45613363", "0.45551383", "0.4542036", "0.4527165" ]
0.7333012
1
Setter method for usr_traceroute_count, mapped from YANG variable /mpls_state/statistics_oam/usr_traceroute_count (uint32)
def _set_usr_traceroute_count(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-traceroute-count", rest_name="usr-traceroute-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """usr_traceroute_count must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="usr-traceroute-count", rest_name="usr-traceroute-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
        })

    self.__usr_traceroute_count = t
    if hasattr(self, '_set'):
        self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_usr_traceroute_count(self):\n return self.__usr_traceroute_count", "def _set_usr_ping_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"usr_ping_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"usr-ping-count\", rest_name=\"usr-ping-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__usr_ping_count = t\n if hasattr(self, '_set'):\n self._set()", "def getViewPortUserCount(self):\n logger.debug('Getting map view port user count...')\n elements = get_elements_by_css(\".leaflet-marker-icon.srcCluster\")\n users = 0\n for element in elements:\n users += int(get_text(element))\n return users", "def n_users(self):\n if self._n_users is None:\n self._n_users = len(self.user_unique_vals)\n return self._n_users", "def getUserCount(self):\n logger.debug('Getting the number of users discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='usersdiscovered']\"))", "def count_users(self, session) -> int:\n\n users_quantity = session.query(User).count()\n return users_quantity", "def count_users(self):\n return self.get_session.query(func.count(self.user_model.id)).scalar()", "def user_count(self, user_count):\n\n self._user_count = user_count", "def user_count(self, user_count):\n\n self._user_count = user_count", "def count_user():\r\n session = tables.get_session()\r\n if session is None:\r\n return 0\r\n count = 0\r\n try:\r\n user_account = UserAccount()\r\n uid = user_account.get_max_uid(session)\r\n if uid is None:\r\n return 0\r\n return uid + 1\r\n except SQLAlchemyError as err:\r\n LOGGER.error('Count user number failed: %s', err)\r\n return count\r\n finally:\r\n session.close()\r\n return count", "def unseen_count_for(self, user):\r\n return self.filter(user=user, unseen=True).count()", "def _get_usr_ping_count(self):\n return self.__usr_ping_count", "def get_number_of_pins_for_user(self, user):\n\t\treturn self.active_pins().filter(board__user=user).count()", "def guests_counter(window, n_guests):\r\n window.write_event_value('-COUNT-', n_guests)", "def voterContactCount(self, user):\n return self.votercontact_set.filter(user=user).count()", "def get_registered_users_count(khoros_object):\n response = api.make_v1_request(khoros_object, '/users/registered/count')\n return response['response']['value']['$']", "def headcount(self):\n self.cleanup()\n return len([True for u in self.users if not u.name.startswith('System/')])", "def _get_count(_khoros_object, _user_id, _object_type):\n _api_response = query_users_table_by_id(_khoros_object, f'{_object_type}.count(*)', _user_id)\n return 
int(_api_response['data']['items'][0][_object_type]['count'])", "def get_session_count(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetSessionCount', self.handle)", "def NoOfSRTunnels(self):\r\n\t\treturn self._get_attribute('noOfSRTunnels')", "def count():\r\n return User.query.count()", "def get_all_users_count(khoros_object):\n liql_query = 'SELECT count(*) FROM users'\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def get_num_psus(self):\n return len(self._psu_list)", "def add_user(self, u: \"Node\") -> None:\n\n if u not in self.users_:\n self.users_[u] = 0\n self.users_[u] += 1", "def get_online_user_count(khoros_object):\n liql_query = \"select count(*) from users where online_status = 'online'\"\n api_response = liql.perform_query(khoros_object, liql_query=liql_query, verify_success=True)\n return int(api_response['data']['count'])", "def total_users(user):\n user_count = User.objects.filter(is_active=True).count()\n\n return NumberResponse(user_count, 'Total number of users')", "def get_total_session_count(self) -> int:\n return self.streams_count", "def number_users_active(self) -> int:\r\n unique_users = {\r\n row['user']\r\n for row in self.rows\r\n }\r\n\r\n return len(unique_users)", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def target_lun_in_use_count(self):\n return self._target_lun_in_use_count" ]
[ "0.69384295", "0.67749554", "0.55161387", "0.5156745", "0.5149116", "0.51029897", "0.506374", "0.5044057", "0.5044057", "0.4962718", "0.49115133", "0.49114674", "0.48995262", "0.48913693", "0.48689324", "0.4850962", "0.48443764", "0.4837305", "0.48038968", "0.47544253", "0.4691356", "0.4687675", "0.46174464", "0.45912525", "0.45411113", "0.44826928", "0.44712198", "0.44477135", "0.4445319", "0.44297427" ]
0.88829786
0
Getter method for echo_req_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_sent_count (uint32)
def _get_echo_req_sent_count(self):
  return self.__echo_req_sent_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def sent_count(comment):\n return comment.__len__()", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def TriggeredVendorMessageLength(self):\n\t\treturn 
self._get_attribute('triggeredVendorMessageLength')", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n return self._message_count", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def message_count(self):\n return len(self.messages)", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def message_count(self):\n pass", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def num_requests_sent(self):\n return dict(self._requests_count)", "def response_count(self):\n return self.responses.count()", "def CountFlowLogEntries(self, client_id, flow_id):\n return len(self.ReadFlowLogEntries(client_id, flow_id, 0, sys.maxsize))", "def CountFlowResults(self, client_id, flow_id, with_tag=None, with_type=None):\n return len(\n self.ReadFlowResults(\n client_id,\n flow_id,\n 0,\n sys.maxsize,\n with_tag=with_tag,\n with_type=with_type))", "def orders_count(self):\n return Order.objects.filter(email=self.email).count()" ]
[ "0.7922508", "0.6860566", "0.67645717", "0.65473276", "0.64664423", "0.5968414", "0.5861087", "0.5787943", "0.5689521", "0.56807286", "0.5430021", "0.54220355", "0.5201689", "0.5130782", "0.49798772", "0.4948916", "0.48980132", "0.48834348", "0.4872083", "0.48253825", "0.48244604", "0.47584635", "0.47438982", "0.47157636", "0.47141638", "0.47054735", "0.46999666", "0.4631265", "0.4605633", "0.45964065" ]
0.7223332
1
Setter method for echo_req_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_sent_count (uint32)
def _set_echo_req_sent_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-sent-count", rest_name="echo-req-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_req_sent_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-sent-count", rest_name="echo-req-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })

  self.__echo_req_sent_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, 
yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def sent_count(comment):\n return comment.__len__()", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def n_sents(doc: Doc) -> int:\n if not doc.has_annotation(\"SENT_START\"):\n LOGGER.warning(\n \"`doc` has not been segmented into sentences; applying spaCy's rule-based, \"\n \"`Sentencizer` pipeline component to `doc` before counting...\"\n )\n doc = _SENTENCIZER(doc)\n return itertoolz.count(doc.sents)", "def set_number_of_sentences(self):\n self.number_of_sentences = int(self.num_sentences.get())", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n return self._message_count", "def message_count(self):\n pass", "def count_likes(self):\n likes = self.event_likes\n num_likes = len(likes)\n return num_likes", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def num_requests_sent(self):\n return dict(self._requests_count)", "def _get_num_sentences(doc: Doc, min_sen_length=5):\n return 
len([sent for sent in list(doc.sents) if len(sent.text.strip())>min_sen_length])", "def message_count(self):\n return len(self.messages)", "def response_count(self):\n return self.responses.count()", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def count(self, value):\n \n self._count = int(value)", "def agent_count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"agent_count\")", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)" ]
[ "0.7546443", "0.74266785", "0.69616437", "0.66970843", "0.6288053", "0.6279342", "0.6064423", "0.57884806", "0.5556439", "0.5305215", "0.52710307", "0.5181049", "0.50654066", "0.50303006", "0.50086975", "0.49919927", "0.48444322", "0.4777001", "0.4770685", "0.4731099", "0.46893677", "0.46591547", "0.4655248", "0.46507818", "0.46184546", "0.4587416", "0.45654553", "0.4543196", "0.4541121", "0.4517093" ]
0.86451477
0
Getter method for echo_req_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_received_count (uint32)
def _get_echo_req_received_count(self):
  return self.__echo_req_received_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, 
int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def get_message_count(self):\n return self.buffer.count()", "def getNumOfMsgRec(self):\n return self.MsgReceiveCount", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, 
email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def message_count(self):\n return self._message_count", "def get_message_length(self):\n return len(self._payload)", "def message_count(self):\n return len(self.messages)", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def message_count(self):\n pass", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def message_len(self):\n # expect F, use zero\n return len(self.message) if self.message else 0", "def sent_count(comment):\n return comment.__len__()", "def message_length(self):\n return self._message_length", "def getLength(msg):\n return len(msg)", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def read_count(self):\n return self._read_count" ]
[ "0.77231425", "0.7023055", "0.6542562", "0.62707675", "0.5976226", "0.5851045", "0.58174163", "0.54856044", "0.54173255", "0.5317395", "0.50623125", "0.5022893", "0.5022014", "0.4883163", "0.4860375", "0.48226205", "0.48192063", "0.4816786", "0.4814243", "0.4797339", "0.46779853", "0.46332717", "0.4602284", "0.4594219", "0.45920545", "0.45881212", "0.4575078", "0.4533078", "0.45307335", "0.44945496" ]
0.70718306
1
Setter method for echo_req_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_received_count (uint32)
def _set_echo_req_received_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-received-count", rest_name="echo-req-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_req_received_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-received-count", rest_name="echo-req-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })

  self.__echo_req_received_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", 
parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n return self._message_count", "def getNumOfMsgRec(self):\n return self.MsgReceiveCount", "def message_count(self):\n pass", "def get_kudos_received_count(khoros_object, user_settings=None, user_id=None, login=None, email=None):\n user_settings = _process_settings_and_user_id(khoros_object, user_settings, user_id, login, email)\n return _get_sum_weight(khoros_object, user_settings['id'], 'kudos_received')", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def message_count(self):\n return len(self.messages)", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def sent_count(comment):\n return comment.__len__()", "def amount_of_receivers(self) -> int:\n return sum([1 for _ in self.receivers])", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def get_message_length(self):\n return len(self._payload)", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def set_Count(self, value):\n super(MoneyReceivedInputSet, self)._set_input('Count', value)", "def 
count(self):\n\n return self._get(\"count\", rtype=UInt)", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def message_count(self) -> int:\n return len(self._leased_messages)", "def message_count(self, message_count):\r\n\r\n self._message_count = message_count", "def VendorMessageLength(self):\n\t\treturn self._get_attribute('vendorMessageLength')" ]
[ "0.7757586", "0.7065394", "0.68073034", "0.67364514", "0.66612166", "0.62458676", "0.56309354", "0.5338627", "0.52505404", "0.50096846", "0.48576525", "0.48448968", "0.48185655", "0.47561345", "0.4739262", "0.46860245", "0.46509308", "0.46177593", "0.45707104", "0.45225346", "0.45101976", "0.4484876", "0.44768032", "0.44697633", "0.44539776", "0.4451774", "0.4446846", "0.4445863", "0.44211182", "0.4417924" ]
0.84776396
0
Getter method for echo_req_timeout_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_timeout_count (uint32)
def _get_echo_req_timeout_count(self):
  return self.__echo_req_timeout_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return 
int(a['ApproximateNumberOfMessages'])", "def get_event_history_count(self, event_label, timeout=10.0):\n\n self.verify_event_labels(\n [event_label],\n error_message=\"%s get_event_history_count failed.\" % self._device_name)\n\n try:\n count, timedout = _get_event_history_count(\n self.event_file_path, event_label, timeout=timeout)\n return ParserResult(timedout=timedout, results_list=[], count=count)\n except Exception as err:\n raise errors.ParserError(\n \"Retrieving event {} history from {} failed. Error {!r}\".format(\n event_label, self.event_file_path, err))", "def tcp_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tcp_timeout_seconds\")", "def _get_event_history_count(device_event_file_path, event_label, timeout=10.0):\n result = 0\n timedout = False\n\n file_exists, remaining_timeout = _wait_for_event_file(device_event_file_path,\n timeout)\n if not file_exists:\n timedout = True\n return result, timedout\n\n timeout_str = \"{:f}\".format(remaining_timeout)\n\n grep_cmd = [\n \"timeout\", timeout_str, \"grep\", \"-c\", \"-w\", event_label,\n device_event_file_path\n ]\n grep_proc = subprocess.Popen(grep_cmd, stdout=subprocess.PIPE)\n out, _ = grep_proc.communicate()\n if grep_proc.returncode == 124:\n timedout = True\n\n if out:\n result = int(out.strip())\n\n return result, timedout", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def get_message_count(self):\n return self.buffer.count()", "def response_count(self):\n return self.responses.count()", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def message_count(self):\n return self._message_count", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def message_count(self):\n return len(self.messages)", "def timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"timeout_seconds\")", "def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout", "def timeout_seconds(self):\n return self._timeout_seconds", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def message_count(self):\n pass", "def GetTotalQueueCount(handler, query):\n # pylint: disable=unused-argument\n\n json_config = {}\n json_config['count'] = 0\n\n with active_tivos_lock:\n for tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['count'] += len(active_tivos[tivoIP]['queue'])\n\n handler.send_json(json.dumps(json_config))", "def count(self):\n return len(self._request_sessions)" ]
[ "0.80206865", "0.6734563", "0.6540391", "0.6376731", "0.61047983", "0.60804176", "0.60245043", "0.5819295", "0.55777395", "0.49859354", "0.49369183", "0.4863983", "0.48448384", "0.48247913", "0.4788822", "0.4729328", "0.47008264", "0.4682831", "0.46340755", "0.46337748", "0.46146536", "0.45855942", "0.45836985", "0.4581994", "0.45731935", "0.4570734", "0.45420828", "0.45405638", "0.45283872", "0.45199665" ]
0.7347358
1
Setter method for echo_req_timeout_count, mapped from YANG variable /mpls_state/statistics_oam/echo_req_timeout_count (uint32)
def _set_echo_req_timeout_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-timeout-count", rest_name="echo-req-timeout-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
      'error-string': """echo_req_timeout_count must be of a type compatible with uint32""",
      'defined-type': "uint32",
      'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-req-timeout-count", rest_name="echo-req-timeout-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
    })

  self.__echo_req_timeout_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def message_count_limit(self) -> ConfigNodePropertyInteger:\n return self._message_count_limit", "def message_count_limit(self, message_count_limit: ConfigNodePropertyInteger):\n\n self._message_count_limit = message_count_limit", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def tcp_timeout_seconds(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"tcp_timeout_seconds\")", "def count_update_pool_size(self, count_update_pool_size: ConfigNodePropertyInteger):\n\n self._count_update_pool_size = count_update_pool_size", "def org_apache_felix_http_session_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_session_timeout", "def response_count(self):\n return self.responses.count()", "def count_update_pool_size(self) -> ConfigNodePropertyInteger:\n return self._count_update_pool_size", "def count_chat_with(self, actor_label):\n query = read_query('trust/count_chat_with') % actor_label\n response = self._submit_query(query)\n\n return response[0]['num_chats']['value'].split('/')[-1] if response != [] else ''", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def message_count(self):\n pass", "async def count(self, **kw):\n\n pass", "def 
alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_event_history_count(self, event_label, timeout=10.0):\n\n self.verify_event_labels(\n [event_label],\n error_message=\"%s get_event_history_count failed.\" % self._device_name)\n\n try:\n count, timedout = _get_event_history_count(\n self.event_file_path, event_label, timeout=timeout)\n return ParserResult(timedout=timedout, results_list=[], count=count)\n except Exception as err:\n raise errors.ParserError(\n \"Retrieving event {} history from {} failed. Error {!r}\".format(\n event_label, self.event_file_path, err))", "def test_nmap_icmp_echo_request(self):\n assert_equal(self.test_nmap.ICMP_ECHO_REQUEST, 8)", "def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")", "def count(self, value):\n \n self._count = int(value)", "def org_apache_felix_http_timeout(self) -> ConfigNodePropertyInteger:\n return self._org_apache_felix_http_timeout", "def timeout_seconds(self):\n return self._timeout_seconds" ]
[ "0.7362202", "0.7095978", "0.6999127", "0.6642217", "0.64998084", "0.60558325", "0.57168615", "0.5617509", "0.52749455", "0.50046265", "0.4905859", "0.49030608", "0.48003668", "0.4683387", "0.4644038", "0.45746693", "0.45659736", "0.4561842", "0.45596886", "0.4557894", "0.44879726", "0.44478822", "0.44476786", "0.4437888", "0.44238985", "0.4407464", "0.4398701", "0.4393611", "0.4392381", "0.43914813" ]
0.87411803
0
Getter method for echo_resp_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_sent_count (uint32)
def _get_echo_resp_sent_count(self):
  return self.__echo_resp_sent_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, 
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def response_count(self):\n return self.responses.count()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def sent_count(comment):\n return comment.__len__()", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def 
sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def get_message_count(self):\n return self.buffer.count()", "def count_simsimi_msg(db):\n try:\n count = db.get('simsimi_info')['qty_answed_message']\n except:\n count = 1\n return count", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def message_count(self):\n return self._message_count", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def TriggeredVendorMessageLength(self):\n\t\treturn self._get_attribute('triggeredVendorMessageLength')", "def message_count(self):\n return len(self.messages)" ]
[ "0.7885906", "0.71892625", "0.7101065", "0.69950885", "0.66274446", "0.6617462", "0.635762", "0.5800941", "0.577533", "0.55431056", "0.5397536", "0.5257677", "0.5200311", "0.5128361", "0.5111158", "0.51002544", "0.50733143", "0.50320685", "0.50222456", "0.4920297", "0.4917206", "0.4911336", "0.48959243", "0.48711067", "0.48487678", "0.4768916", "0.47582203", "0.47199044", "0.47185415", "0.4690841" ]
0.7451523
1
Setter method for echo_resp_sent_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_sent_count (uint32)
def _set_echo_resp_sent_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-sent-count", rest_name="echo-resp-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """echo_resp_sent_count must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-sent-count", rest_name="echo-resp-sent-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
      })

  self.__echo_resp_sent_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def response_count(self):\n return self.responses.count()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def sent_count(comment):\n return comment.__len__()", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def sent_count(self):\n count = []\n for i in tqdm(self.text):\n count.append(len(sent_tokenize(i)))\n return count", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def send_resp(self):\n self.n_send_resp += 1", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def recv_resp(self):\n self.n_recv_resp += 1", "def getNumOfMsgSend(self):\n return self.MsgSendCount", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def sentence_count(self, doc):\n\n return len(sent_tokenize(doc))", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)", "def message_count(self):\n return self._message_count", "def get_response_pdu_size(self):\n count = self.count // 8\n if 
self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def get_message_count(self):\n return self.buffer.count()", "def message_count(self):\n pass", "async def sqs_count(self, _) -> Response:\n message = {\n \"message_count\": self.sqs.count()\n }\n return Response(text=json.dumps(message))", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def count(self):\n\n return self._get(\"count\", rtype=UInt)" ]
[ "0.7872074", "0.7738624", "0.7278106", "0.7268996", "0.6715367", "0.65274954", "0.6379835", "0.60264504", "0.5693287", "0.55500114", "0.5237852", "0.515169", "0.50731426", "0.5025889", "0.49551892", "0.4954932", "0.4935209", "0.49100864", "0.4884356", "0.48487535", "0.48274243", "0.47079432", "0.46981698", "0.46899626", "0.46785292", "0.46620303", "0.46530867", "0.46495768", "0.4625437", "0.46002287" ]
0.8611856
0
Getter method for echo_resp_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_received_count (uint32)
def _get_echo_resp_received_count(self):
  return self.__echo_resp_received_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_resp_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-received-count\", rest_name=\"echo-resp-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", 
rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def response_count(self):\n return self.responses.count()", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def get_message_count(self):\n return self.buffer.count()", "def recv_resp(self):\n self.n_recv_resp += 1", "def count_response_codes():\n code = 
request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def message_count(self):\n return self._message_count", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def get_count(self):\n return unpack(os.read(self.fd, 8))", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def get_order_count(self):\n resp = self.app.get('/orders')\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = json.loads(resp.data)\n return len(data)", "def receive_response(self):\n return self.socket.receive()", "def message_count(self):\n return len(self.messages)", "def message_count(self):\n pass", "def get_message_length(self):\n return len(self._payload)", "def get_response_record_count(self):\n if self.record_count is None:\n raise QueryNotExecuted(\"No query has been executed. Use the Execute Query keyword to retrieve records.\")\n else:\n return self.record_count", "def compute_mean_response_length(self):\n mean_response_length = 0\n for row in self.responses:\n mean_response_length += len(row.response)\n return round(mean_response_length / len(self.responses), 2)" ]
[ "0.785285", "0.70278424", "0.6552268", "0.64812785", "0.6124184", "0.55265635", "0.5396158", "0.5115494", "0.5112692", "0.50826174", "0.49409315", "0.4853131", "0.4822724", "0.47961304", "0.4737998", "0.47299495", "0.47204226", "0.46952853", "0.46684587", "0.46589735", "0.46527314", "0.46346018", "0.4572006", "0.45142123", "0.4505391", "0.44645974", "0.44538108", "0.4426214", "0.4396354", "0.43819296" ]
0.731385
1
Setter method for echo_resp_received_count, mapped from YANG variable /mpls_state/statistics_oam/echo_resp_received_count (uint32)
def _set_echo_resp_received_count(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-received-count", rest_name="echo-resp-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """echo_resp_received_count must be of a type compatible with uint32""",
        'defined-type': "uint32",
        'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="echo-resp-received-count", rest_name="echo-resp-received-count", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)""",
      })

  self.__echo_resp_received_count = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_echo_req_received_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_received_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-received-count\", rest_name=\"echo-req-received-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_received_count = t\n if hasattr(self, '_set'):\n self._set()", "def _set_echo_resp_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_resp_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-resp-sent-count\", rest_name=\"echo-resp-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_resp_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_received_count(self):\n return self.__echo_resp_received_count", "def _set_echo_req_timeout_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_timeout_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), 
is_leaf=True, yang_name=\"echo-req-timeout-count\", rest_name=\"echo-req-timeout-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_timeout_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_req_received_count(self):\n return self.__echo_req_received_count", "def _set_echo_req_sent_count(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"echo_req_sent_count must be of a type compatible with uint32\"\"\",\n 'defined-type': \"uint32\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name=\"echo-req-sent-count\", rest_name=\"echo-req-sent-count\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='uint32', is_config=False)\"\"\",\n })\n\n self.__echo_req_sent_count = t\n if hasattr(self, '_set'):\n self._set()", "def _get_echo_resp_sent_count(self):\n return self.__echo_resp_sent_count", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def recv_resp(self):\n self.n_recv_resp += 1", "def response_count(self):\n return self.responses.count()", "def _get_echo_req_sent_count(self):\n return self.__echo_req_sent_count", "def _get_echo_req_timeout_count(self):\n return self.__echo_req_timeout_count", "def count_response_codes():\n code = request.args.get('code', 200)\n log_lines = request.args.get('log_lines')\n\n if log_lines:\n lines_list = json.loads(log_lines)\n count = count_by_code(lines_list, code)\n else:\n count = 0\n\n response = str(count)\n return response", "def vsce_uokms_server_decrypt_response_len(self, ctx):\n vsce_uokms_server_decrypt_response_len = self._lib.vsce_uokms_server_decrypt_response_len\n vsce_uokms_server_decrypt_response_len.argtypes = [POINTER(vsce_uokms_server_t)]\n vsce_uokms_server_decrypt_response_len.restype = c_size_t\n return vsce_uokms_server_decrypt_response_len(ctx)", "def echo_reply_handler(self, ev):\n now_timestamp = time.time()\n try:\n latency = now_timestamp - eval(ev.msg.data)\n self.echo_latency[ev.msg.datapath.id] = latency\n except:\n return", "def get_num_displayed_responses(self):\r\n return len(self._find_within(\".discussion-response\"))", "def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count", "def get_message_count(self):\n return self.buffer.count()", "def count(self):\n\n return self._get(\"count\", rtype=UInt)", "def message_count(self):\n return self._message_count", "def message_count(self):\n pass", "def get_response_pdu_size(self):\n count = self.count // 8\n if self.count % 8:\n count += 1\n\n return 1 + 1 + count", "def response_received(self, 
ignored):\n self._received += 1", "def __len__(self):\n response = self._rpc(self._declare(True))\n return response.message_count", "def _parse_release_count(self, resp: Dict[str, Any]) -> str:\n return f\"{len(resp.get('releases', []))}\"", "def count_messages(self, statuses=DEFAULT_MESSAGE_STATUSES):\n return self.request(\"count:Message\", [{\"status\": statuses}])", "def get_response_total_text(self):\r\n return self._get_element_text(\".response-count\")", "def message_count(self):\n return len(self.messages)", "def get_shown_responses_text(self):\r\n return self._get_element_text(\".response-display-count\")", "def count(self, page_size=10, vtimeout=10):\r\n a = self.get_attributes('ApproximateNumberOfMessages')\r\n return int(a['ApproximateNumberOfMessages'])" ]
[ "0.76704717", "0.73230594", "0.70944583", "0.6276271", "0.618727", "0.6063231", "0.60443354", "0.51781255", "0.5142111", "0.50479865", "0.49492952", "0.48183542", "0.47569573", "0.47515997", "0.47491166", "0.4727505", "0.46277964", "0.45390615", "0.45219365", "0.44867164", "0.44473416", "0.44427106", "0.44093305", "0.4398009", "0.43944886", "0.43344253", "0.4282784", "0.42712533", "0.42634273", "0.42538837" ]
0.8556093
0
Getter method for return_codes, mapped from YANG variable /mpls_state/statistics_oam/return_codes (list)
def _get_return_codes(self):
  return self.__return_codes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_return_codes(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGListType(\"number\",return_codes.return_codes, yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"return_codes must be of a type compatible with list\"\"\",\n 'defined-type': \"list\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGListType(\"number\",return_codes.return_codes, yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name=\"return-codes\", rest_name=\"return-codes\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)\"\"\",\n })\n\n self.__return_codes = t\n if hasattr(self, '_set'):\n self._set()", "def traffic_statuscodes_cachecodes(self, **kwargs):\n url_path = 'traffic/statuscodes/cachecodes'\n self.logger.debug(f\"Get list of cache codes\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')", "def returncode(self: \"ShellOutput\") -> Artefact[int]:\n self.__check_len()\n return self.returncodes[0]", "def _build_return_code_enum():\n prefix = 'XTT_RETURN_'\n codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)}\n return IntEnum('ReturnCode', codes)", "def returncodes(self):\n for p in self.processes:\n p.wait()\n codes = [p.poll() for p in self.processes]\n if set(codes) == set([0]):\n return []\n return codes", "def get_observatory_codes_async(self, get_raw_response=False, cache=True):\n\n self.query_type = 'observatory_code'\n response = self._request('GET', self.OBSERVATORY_CODES_URL,\n timeout=self.TIMEOUT, cache=cache)\n\n return response", "def codes(self):\n return [card.code for card in self.cards]", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def is_return_code_mode(self):\n return self.bisect_config.get('test_type') == 'return_code'", "def get_lock_codes(device: Device) -> Sequence[str]:\n try:\n codes_str = cast(str, device.attributes[ATTR_LOCK_CODES].value)\n codes = loads(codes_str)\n return [codes[id][\"name\"] for id in codes]\n except Exception as e:\n _LOGGER.warn(\"Error getting lock codes for %s: %s\", device, e)\n return []", 
"def _get_module_return_code(self, status, module):\n\n # initialize return code array\n arr = []\n check_failed = False\n\n if module not in status.data:\n # assume running\n arr = [1]\n else:\n for job_name in status.data[module].keys():\n if job_name != 'pipeline_index':\n\n # update the job status and get the status string\n status._update_job_status(module, job_name)\n js = status.data[module][job_name]['job_status']\n\n if js == 'successful':\n arr.append(0)\n elif js == 'failed':\n arr.append(2)\n check_failed = True\n elif js is None:\n arr.append(3)\n else:\n arr.append(1)\n\n status._dump()\n\n return_code = self._parse_code_array(arr)\n\n status = self.RETURN_CODES[return_code]\n fail_str = ''\n if check_failed and status != 'failed':\n fail_str = ', but some jobs have failed'\n logger.info('Module \"{}\" for job \"{}\" is {}{}.'\n .format(module, self._config.name, status, fail_str))\n\n return return_code", "def get_code(self, obj):\n return [], []", "def code_types(self):\n return self.codes.keys()", "def http_return_code(res_data) -> (int, str):\n\n start = re.search(\"[0-9]{3}\", res_data).start()\n end_of_line = res_data.find(\"\\r\\n\")\n code = int(res_data[start:start+3])\n if end_of_line == -1:\n end_of_line = len(res_data)\n meaning = res_data[start+4:end_of_line]\n return code, meaning", "def get_registry_codes( ):\n return _theRegistry.get_codes( )", "def return_code(self) -> int:\n raise NotImplementedError(\"Base method not implemented\")", "def pin_code(self) -> List[PinCodeSummary]:\n return self._pin_code", "def geneSymbols(self, returnType=\"list\"):\n\t\treturn self._dataframe['GeneSymbol'].to_dict() if returnType==\"dict\" else self._dataframe['GeneSymbol'].tolist()", "def set_retcodes(self,rc1,rc2) :\n\t\tif self.save_trace : \n\t\t\tself.rc1.append(rc1)\n\t\t\tself.rc2.append(rc2)\n\t\telse : \n\t\t\t# Save the return codes from the last iter\n\t\t\tself.rc1 = [rc1]\n\t\t\tself.rc2 = [rc2]", "def ReturnCodeObject(self,code):\n\n if code in self.setOfUnusableCodes:\n return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]]\n else:\n return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]]", "def health_check_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"health_check_codes\")", "def get_opcodes(self, script, verify_minimal_data=False, pc=0):\n while pc < len(script):\n opcode, data, new_pc, is_ok = self.scriptStreamer.get_opcode(\n script, pc, verify_minimal_data=verify_minimal_data)\n yield opcode, data, pc, new_pc\n pc = new_pc", "def get_code_mapping( id ):\n returnVal = []\n theCodes = _theRegistry.get_code( id )\n codes = theCodes.get_codes()\n descs = theCodes.get_descriptions()\n for (code, desc) in map(None, codes, descs):\n returnVal.append( { 'code' : code, 'description' : desc } )\n return returnVal", "def get_code():\n return jsonify({\"status\": \"0\", \"code\": code_status})", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def code(self) -> pulumi.Input['CanaryCodeArgs']:\n return pulumi.get(self, \"code\")", "def view_promo_codes():\n\n results = []\n promo_codes = Promo_code.query.filter_by().all()\n for codes in promo_codes:\n result = {\n 'id': codes.id,\n 'code': codes.code,\n 'event': codes.event,\n 'status': codes.status,\n 'price': codes.price\n }\n results.append(result)\n if datetime.utcnow() > codes.expiry_date:\n codes.status = 'expired'\n if len(results) > 0:\n return jsonify({'promo_codes': results,\n 'count': 
str(len(results)),\n 'status': 'pass',\n 'message': 'promo codes found'\n }), 200\n return jsonify({'count': '0','status': 'fail',\n 'message': 'no promo codes found'\n }), 404", "def codelists():\n return CodelistSet()" ]
[ "0.74746984", "0.6001439", "0.5503843", "0.52020866", "0.51311105", "0.5087448", "0.5053257", "0.50515985", "0.49911606", "0.48414174", "0.48189038", "0.46903574", "0.4685551", "0.4676945", "0.46544784", "0.46471697", "0.45722067", "0.45684677", "0.45653287", "0.44813254", "0.44799843", "0.44608784", "0.44549376", "0.4413035", "0.4407787", "0.4407223", "0.43919137", "0.43874228", "0.4381796", "0.4377668" ]
0.6220025
1
Setter method for return_codes, mapped from YANG variable /mpls_state/statistics_oam/return_codes (list)
def _set_return_codes(self, v, load=False):
  if hasattr(v, "_utype"):
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGListType("number",return_codes.return_codes, yang_name="return-codes", rest_name="return-codes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name="return-codes", rest_name="return-codes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)
  except (TypeError, ValueError):
    raise ValueError({
        'error-string': """return_codes must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("number",return_codes.return_codes, yang_name="return-codes", rest_name="return-codes", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='number', extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}), is_container='list', yang_name="return-codes", rest_name="return-codes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-statistics-oam-retcode', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='list', is_config=False)""",
      })

  self.__return_codes = t
  if hasattr(self, '_set'):
    self._set()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_return_codes(self):\n return self.__return_codes", "def traffic_statuscodes_cachecodes(self, **kwargs):\n url_path = 'traffic/statuscodes/cachecodes'\n self.logger.debug(f\"Get list of cache codes\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def codes(self, name):\n return self._get_valuemap(name, non_mapped='codes')", "def set_retcodes(self,rc1,rc2) :\n\t\tif self.save_trace : \n\t\t\tself.rc1.append(rc1)\n\t\t\tself.rc2.append(rc2)\n\t\telse : \n\t\t\t# Save the return codes from the last iter\n\t\t\tself.rc1 = [rc1]\n\t\t\tself.rc2 = [rc2]", "def _build_return_code_enum():\n prefix = 'XTT_RETURN_'\n codes = {k[len(prefix):]:v for (k, v) in vars(_lib).items() if k.startswith(prefix)}\n return IntEnum('ReturnCode', codes)", "def return_ids(self, return_ids):\n\n self._return_ids = return_ids", "def codes(self):\n return [card.code for card in self.cards]", "def returncode(self: \"ShellOutput\") -> Artefact[int]:\n self.__check_len()\n return self.returncodes[0]", "def returncodes(self):\n for p in self.processes:\n p.wait()\n codes = [p.poll() for p in self.processes]\n if set(codes) == set([0]):\n return []\n return codes", "def license_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"license_codes\")", "def get_pcode_list(self) -> List[str]:\n return self.pcodes", "def is_return_code_mode(self):\n return self.bisect_config.get('test_type') == 'return_code'", "def return_code(self) -> int:\n raise NotImplementedError(\"Base method not implemented\")", "def get_observatory_codes_async(self, get_raw_response=False, cache=True):\n\n self.query_type = 'observatory_code'\n response = self._request('GET', self.OBSERVATORY_CODES_URL,\n timeout=self.TIMEOUT, cache=cache)\n\n return response", "def code_types(self):\n return self.codes.keys()", "def codelists():\n return CodelistSet()", "def set_ret(self, ret):\n \n self.ret = [i for i in ret]", "def setExecutionStatus(self, return_code):\n if return_code == 0:\n self.execution_status = 'executed'\n else:\n self.execution_status = 'failed'", "def geneSymbols(self, returnType=\"list\"):\n\t\treturn self._dataframe['GeneSymbol'].to_dict() if returnType==\"dict\" else self._dataframe['GeneSymbol'].tolist()", "def traffic_statuscodes(self, **kwargs):\n self.logger.debug(f\"Get status codes report data,)\")\n url_path = 'traffic/statuscodes'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def get_registry_codes( ):\n return _theRegistry.get_codes( )", "def _add_status_code(runner, return_value):\n if isinstance(return_value, Mapping):\n status_code = return_value.get('statusCode')\n if status_code:\n runner.resource['metadata']['status_code'] = status_code", "def discount_codes(self):\n return [DiscountCode(x) for x in self._dict.get('discount_codes', [])]", "def parse_files_to_codes_mapping( # noqa: C901\n value_: Sequence[str] | str,\n) -> list[tuple[str, list[str]]]:\n if not isinstance(value_, str):\n value = \"\\n\".join(value_)\n else:\n value = value_\n\n ret: list[tuple[str, list[str]]] = []\n if not value.strip():\n return ret\n\n class State:\n seen_sep = True\n seen_colon = False\n filenames: list[str] = []\n codes: list[str] = []\n\n def _reset() -> None:\n if State.codes:\n for filename in State.filenames:\n ret.append((filename, State.codes))\n State.seen_sep = True\n State.seen_colon = False\n State.filenames = []\n State.codes = []\n\n def _unexpected_token() -> exceptions.ExecutionError:\n return 
exceptions.ExecutionError(\n f\"Expected `per-file-ignores` to be a mapping from file exclude \"\n f\"patterns to ignore codes.\\n\\n\"\n f\"Configured `per-file-ignores` setting:\\n\\n\"\n f\"{textwrap.indent(value.strip(), ' ')}\"\n )\n\n for token in _tokenize_files_to_codes_mapping(value):\n # legal in any state: separator sets the sep bit\n if token.tp in {_COMMA, _WS}:\n State.seen_sep = True\n # looking for filenames\n elif not State.seen_colon:\n if token.tp == _COLON:\n State.seen_colon = True\n State.seen_sep = True\n elif State.seen_sep and token.tp == _FILE:\n State.filenames.append(token.src)\n State.seen_sep = False\n else:\n raise _unexpected_token()\n # looking for codes\n else:\n if token.tp == _EOF:\n _reset()\n elif State.seen_sep and token.tp == _CODE:\n State.codes.append(token.src)\n State.seen_sep = False\n elif State.seen_sep and token.tp == _FILE:\n _reset()\n State.filenames.append(token.src)\n State.seen_sep = False\n else:\n raise _unexpected_token()\n\n return ret", "def health_check_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"health_check_codes\")", "def get_code(self, obj):\n return [], []", "def pin_code(self) -> List[PinCodeSummary]:\n return self._pin_code", "def _set_success_codes(self, fname, success_codes):\n func = getattr(self._dll, fname)\n argtypes, func.argtuple_t, restype = self._fundecls[fname]\n argtypes = [argtype\n if not (isinstance(argtype, type(ctypes.POINTER(ctypes.c_int))) and\n argtype._type_.__module__ != \"ctypes\") # remove struct (nested) pointers\n else ctypes.c_voidp for argtype in argtypes]\n func.argtypes = argtypes\n try:\n success_code_type, = set(type(code) for code in success_codes)\n except ValueError:\n raise AssertionError(\"Success code of different types\")\n if success_code_type == restype:\n func.success_codes = success_codes\n func.errcheck = errcheck\n else:\n func.restype = restype\n setattr(self, fname, func)", "def ReturnCodeObject(self,code):\n\n if code in self.setOfUnusableCodes:\n return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]]\n else:\n return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]]", "def fallback_status_codes(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"fallback_status_codes\")" ]
[ "0.6291366", "0.614847", "0.55107534", "0.54416496", "0.5277496", "0.5117377", "0.50821906", "0.50539535", "0.49968654", "0.4987946", "0.49695534", "0.47850552", "0.47627786", "0.4757852", "0.4730072", "0.4728713", "0.47217613", "0.46919942", "0.46385816", "0.4602432", "0.4597837", "0.4594224", "0.45785755", "0.45754677", "0.45607373", "0.45357856", "0.4532626", "0.45313764", "0.45036027", "0.44664925" ]
0.8261662
0
Assumes binary array of 1 and 0 as input. Calculate longest ranges of 1's.
def count_ranges(a):
    ranges = []
    count = 0
    for i, v in enumerate(a):
        if v == 1:  # same as previous value
            count += 1
        else:
            if count > 1:
                ranges.append([i, count])  # [end, length]
            count = 0
    return ranges
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def solution(N):\n # write your code in Python 3.6\n bin_number = str(bin(N))[2:]\n new_bin_gap = False\n longest_bin_gap = 0\n bin_gap_counter = 0\n for char in bin_number:\n if char == '1':\n if bin_gap_counter > longest_bin_gap:\n longest_bin_gap = bin_gap_counter\n new_bin_gap = True\n bin_gap_counter = 0\n elif new_bin_gap:\n bin_gap_counter += 1\n return longest_bin_gap", "def max_ones_seq(self, array, m):\n n = len(array)\n i, j = 0, 0 # start, end of current consecutive 1s sequence\n x, y = 0, 0 # start, end of longest consecutive 1s sequence\n while j < n:\n if array[j]: # current element is 1\n if j - i > y - x: # update start, end of longest 1s sequence\n x, y = i, j\n j += 1 # move the right pointer\n elif not array[j] and m > 0: # current element is 0, we can flip it\n if j - i > y - x: # update start, end of longest 1s sequence\n x, y = i, j\n m -= 1 # deacrese number of allowed flips\n j += 1 # move the right pointer\n else: # current element is zero and we are out of flips\n if not array[i]: # start of current 1s sequence is 0\n m += 1 # increase available flips\n i += 1 # move the left pointer\n return list(range(x, y + 1))", "def find_all_maxima(arr):\n\n checks = np.r_[True, arr[1:] > arr[:-1]] & np.r_[arr[:-1] > arr[1:], True]\n maxima = np.where(checks)[0]\n return maxima", "def find_max_continous_sequence(array, start):\n pos = start\n while pos + 1 < len(array):\n if not array[pos] + 1 == array[pos + 1]:\n break\n pos += 1\n if pos + 1 == len(array):\n return array[start:]\n return array[start:pos + 1]", "def longestCommomSubsequence(self, arrays: List[List[int]]) -> List[int]:\n counts = Counter(val for arr in arrays for val in arr)\n res = []\n for val, count in counts.items():\n if count == len(arrays): res.append(val)\n return res", "def max_ones_length(self, array, m):\n n = len(array)\n i, j = 0, 0 # sliding window indices\n curr_ones = 0\n max_ones = 0\n while j < n:\n if array[j]: # current element is 1, increase 1s count\n curr_ones += 1\n j += 1\n max_ones = max(max_ones, curr_ones) # update max 1s count\n elif not array[j] and m > 0: # current element is 0, we can flip it\n curr_ones += 1\n m -= 1\n j += 1\n max_ones = max(max_ones, curr_ones) # update max 1s count\n else: # current element is zero and we are out of flips\n if not array[i]: # start of current 1s sequence is 0\n m += 1 # increase available flips\n i += 1 # move the left pointer\n curr_ones -= 1 # decrease current 1s count\n return max_ones", "def get_max_width(binary_mask):\n start_px = 0\n end_px = 0\n\n for i, row in enumerate(binary_mask):\n max = np.argmax(row)\n if max > 0:\n start_px = i\n break\n\n for i, row in enumerate(binary_mask[::-1]):\n max = np.argmax(row)\n if max > 0:\n end_px = i\n break\n\n return binary_mask.shape[0] - start_px - end_px", "def longest_seq_of_1s(n, index_to_ignore):\n max_ = 0\n counter = 0\n for i in range(SEQ_LENGTH):\n if i == index_to_ignore or get_bit(n, i):\n counter += 1\n max_ = max(counter, max_)\n else:\n counter = 0\n return max_", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]", "def solution(n: int) -> int:\n binary_gap = 0\n count = 0\n # skip the lowest zeros\n while n and (n & 1) == 0:\n n = n >> 1\n while n:\n while n & 1:\n n = n >> 1\n while n and (n & 1) == 0:\n count += 1\n n = n >> 1\n if n & 1 and binary_gap < count:\n binary_gap = count\n count = 0\n return binary_gap", "def 
get_lengths_from_binary_sequence_mask(\n mask: torch.BoolTensor,\n) -> torch.LongTensor:\n return mask.sum(-1)", "def longest_sequence(start=1, end=1000000):\n\n max_length = 0\n max_start_value = 0\n\n # generate sequence for each value\n for i in range(start, end):\n current = generate_sequence(i)\n\n # if the current sequence is the longest, update values\n if len(current) > max_length:\n max_length = len(current)\n max_start_value = i\n\n return max_length, max_start_value", "def run_length_coding(arr: np.ndarray, max_len=0xF) -> List[RunLength]:\n\n def _break_up_rle(code, max_len):\n l = code[\"zeros\"]\n div = l // max_len\n full = {\n \"zeros\": max_len - 1, # minus 1 because we get another for free from the value\n \"value\": 0\n }\n return ([full] * div) + [{\n \"zeros\": l - (div * max_len),\n \"value\": code[\"value\"]\n }]\n\n def reduction(agg, next):\n if \"value\" in agg[-1]:\n agg.append({\"zeros\": 0})\n\n if next == 0:\n agg[-1][\"zeros\"] += 1\n return agg\n\n if \"value\" not in agg[-1]:\n agg[-1][\"value\"] = next\n\n return agg\n utils.debug_msg(\"Going to determine RLE for %d size array\" % len(arr))\n rl = functools.reduce(reduction, arr, [{\"zeros\": 0}])\n utils.debug_msg(\"%d long RLE created\" % len(rl))\n # If the last element has no value then it was 0! That is a special tuple, (0,0)\n if \"value\" not in rl[-1]:\n rl[-1] = {\"zeros\": 0, \"value\": 0}\n\n # the goal of RLE in the case of compression is to contain the first symbol (length, size) within a byte\n # so if the length is too long, then we need to break it up\n if max_len is not None:\n utils.debug_msg(\"Breaking up RLE lengths that are larger than %d\" % max_len)\n rl = [_break_up_rle(code, max_len) for code in rl]\n rl = utils.flatten(rl)\n\n utils.debug_msg(\"Make RLE objects\")\n return [RunLength.from_dict(r) for r in rl]", "def get_lengths_from_binary_sequence_mask(mask: torch.Tensor):\n return mask.long().sum(-1)", "def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens", "def build_bridge(blocks):\n bridges = []\n for start in [ b for b in blocks if 0 in b ]:\n tmp = blocks[:]\n tmp.remove(start)\n bridges.append(build(tmp, start[1], [start], sum(start)))\n return find_max()", "def get_length_of_longest_sub_array(l):\n if len(l) < 1:\n return 0\n\n longest_seen_sequence = 0\n\n this_sequence_length = 1\n\n previous = l[0]\n\n for _, current in enumerate(l):\n\n if current > previous:\n this_sequence_length = this_sequence_length + 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n else:\n this_sequence_length = 1\n\n if this_sequence_length > longest_seen_sequence:\n longest_seen_sequence = this_sequence_length\n\n previous = current\n\n return longest_seen_sequence", "def array_maximal_adjacent_difference( arr ):\n length = len(arr) - 1\n diffs = [ abs( arr[i] - arr[i+1] ) for i in range( length ) ]\n return max(diffs)", "def intervals(b, min_length=1, forgivingJump=True, removeSmallRel=True, removeSmallFact=0.1, mergeCloseRel=False, mergeCloseFact=0.2):\r\n b = np.asarray(b)\r\n total = np.sum(b)\r\n\r\n min_length=max(min_length,1)\r\n if forgivingJump:\r\n min_jump=min_length\r\n else:\r\n min_jump=1\r\n\r\n if total==0:\r\n IStart = np.array([])\r\n IEnd = np.array([])\r\n Lengths= np.array([])\r\n return IStart, 
IEnd, Lengths\r\n elif total==1:\r\n i = np.where(b)[0][0]\r\n IStart = np.array([i])\r\n IEnd = np.array([i])\r\n Lengths= np.array([1])\r\n else:\r\n n = len(b)\r\n Idx = np.arange(n)[b]\r\n delta_Idx=np.diff(Idx)\r\n jumps =np.where(delta_Idx>min_jump)[0]\r\n if len(jumps)==0:\r\n IStart = np.array([Idx[0]])\r\n IEnd = np.array([Idx[-1]])\r\n else:\r\n istart=Idx[0]\r\n jumps=np.concatenate(([-1],jumps,[len(Idx)-1]))\r\n IStart = Idx[jumps[:-1]+1] # intervals start right after a jump\r\n IEnd = Idx[jumps[1:]] # intervals stops at jump\r\n Lengths = IEnd-IStart+1\r\n\r\n # Removing intervals smaller than min_length\r\n bKeep = Lengths>=min_length\r\n IStart = IStart[bKeep]\r\n IEnd = IEnd[bKeep]\r\n Lengths = Lengths[bKeep]\r\n # Removing intervals smaller than less than a fraction of the max interval\r\n if removeSmallRel:\r\n bKeep = Lengths>=removeSmallFact*np.max(Lengths)\r\n IStart = IStart[bKeep]\r\n IEnd = IEnd[bKeep]\r\n Lengths = Lengths[bKeep]\r\n\r\n # Distances between intervals\r\n if mergeCloseRel:\r\n if len(IStart)<=2:\r\n pass\r\n else:\r\n D = IStart[1:]-IEnd[0:-1]\r\n #print('D',D,np.max(D),int(np.max(D) * mergeCloseFact))\r\n min_length = max(int(np.max(D) * mergeCloseFact), min_length)\r\n if min_length<=1:\r\n pass \r\n else:\r\n #print('Readjusting min_length to {} to accomodate for max interval spacing of {:.0f}'.format(min_length, np.mean(D)))\r\n return intervals(b, min_length=min_length, forgivingJump=True, removeSmallRel=removeSmallRel, removeSmallFact=removeSmallFact, mergeCloseRel=False)\r\n return IStart, IEnd, Lengths", "def get_intervals(l):\n intervals = len(l) * [0]\n # Initalize with 1\n intervals[0] = 1\n for k in range(1, len(l)):\n intervals[k] = (len(l[k]) + 1) * intervals[k - 1]\n\n return intervals", "def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])", "def highest_bin_freq(ary):\n num_true = 0\n num_false = 0\n\n for val in ary:\n num_true += 1 if val == '1' else 0\n num_false += 1 if val == '0' else 0\n\n return '1' if num_true > num_false else '0'", "def solution(N):\n\n # get binary representation of number\n binary_repr = f\"{N:b}\"\n\n # initialise counters\n current_gap, max_gap = 0, 0\n\n for b in binary_repr:\n # end of gap, update max\n if b == '1':\n max_gap = max(current_gap, max_gap)\n current_gap = 0\n # increase gap counter\n else:\n current_gap += 1\n\n return max_gap", "def longestIncreasingSubsequence(nums):\n if not nums:\n return 0\n \n dp = [None] * len(nums)\n dp[0] = 1\n maxans = 1\n \n for i in range(1, len(dp)):\n maxval = 0\n for j in range(0, i):\n if nums[i] > nums[j]:\n maxval = max(maxval, dp[j])\n \n dp[i] = maxval + 1\n maxans = max(maxans, dp[i])\n \n return maxans", "def int_to_max_bit(num, length):\n if num >= 2**length:\n return [None]\n if num == 1:\n return [str(num)]\n a = 2**(length-1)\n if num > a:\n return sorted([str(a)] + int_to_max_bit(num - a, length-1))\n elif num == a:\n return [str(a)]\n else:\n return int_to_max_bit(num, length-1)", "def get_lims(data):\n return 
data[:, 0].min() - 1, data[:, 0].max() + 1, data[:, 1].min() - 1, data[:, 1].max() + 1", "def largest_cc(mask):\n # We use asarray to be able to work with masked arrays.\n mask = np.asarray(mask)\n labels, label_nb = ndimage.label(mask)\n if not label_nb:\n raise ValueError('No non-zero values: no connected components')\n if label_nb == 1:\n return mask.astype(np.bool_)\n label_count = np.bincount(labels.ravel().astype(np.int_))\n # discard 0 the 0 label\n label_count[0] = 0\n return labels == label_count.argmax()", "def max_subarray(sequence=[-5, 20, -10, 30, 15]):\n\n sums = {}\n indices = []\n\n for i in range(len(sequence)):\n for j in range(i+1, len(sequence)):\n sub_seq = sequence[i:j+1]\n sub_seq_sum = sum(sub_seq)\n #print(sub_seq,'=>',sub_seq_sum)\n sums[sum(sub_seq)]=[i,j+1]\n\n i_indice = sums[max(sums)][0]\n j_indice = sums[max(sums)][1]\n return (max(sums), sequence[i_indice:j_indice])", "def recursive_index_decode(int_array, max=32767, min=-32768):\n out_arr = []\n decoded_val = 0\n for item in int_array.tolist():\n if item==max or item==min:\n decoded_val += item\n else:\n decoded_val += item\n out_arr.append(decoded_val)\n decoded_val = 0\n return numpy.asarray(out_arr,dtype=numpy.int32)", "def longincseq(v):\n n=len(v)\n if n==0: return -1\n l = 0\n u = n-1\n max2here=1\n maxsofar=1\n for i in xrange(l+1, u+1):\n if v[i]>v[i-1]: \n max2here+=1\n else:\n max2here=1\n maxsofar = max(maxsofar, max2here)\n return maxsofar" ]
[ "0.6214653", "0.61803985", "0.61673665", "0.6140894", "0.60812706", "0.60509235", "0.60332197", "0.6032968", "0.59139353", "0.5893834", "0.5799357", "0.5777712", "0.57581043", "0.57554233", "0.57196367", "0.56999797", "0.5694365", "0.5683879", "0.56704044", "0.56537473", "0.5638462", "0.5632882", "0.5608549", "0.559905", "0.55871063", "0.55709624", "0.55658615", "0.55631924", "0.5548492", "0.55396307" ]
0.6188714
1
From the ranges returned by count_ranges, return the 'howmany' longest ranges
def find_longest_ranges(range, howmany):
    range.sort(key=lambda x: x[1])  # sort by length
    if howmany > 1:
        range = range[-howmany:]  # get last few
        range.sort(key=lambda x: x[0])  # sorted by starttime
        return range
    else:
        return range[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overlap_len(range1, range2):\n return min(range1[1], range2[1]) - max(range1[0], range2[0])", "def count_ranges(a):\n ranges = []\n count = 0\n for i, v in enumerate(a):\n if v == 1: # same as previous value\n count += 1\n else:\n if count > 1:\n ranges.append([i, count]) # [end, length]\n count = 0\n return ranges", "def find_best_point(self, start_i, end_i, ranges):\n maxLenIdx = 0\n maxLen = 0\n for i in range(ranges):\n if ranges[i] > maxLen:\n maxLen = ranges[i]\n maxLenIdx = i\n return maxLenIdx", "def find_max_gap(self, free_space_ranges):\n # mask the bubble\n masked = np.ma.masked_where(free_space_ranges==0, free_space_ranges)\n # get a slice for each contigous sequence of non-bubble data\n slices = np.ma.notmasked_contiguous(masked)\n max_len = slices[0].stop - slices[0].start\n chosen_slice = slices[0]\n # I think we will only ever have a maximum of 2 slices but will handle an\n # indefinitely sized list for portablility\n for sl in slices[1:]:\n sl_len = sl.stop - sl.start\n if sl_len > max_len:\n max_len = sl_len\n chosen_slice = sl\n return chosen_slice.start, chosen_slice.stop", "def longest_sequence(start=1, end=1000000):\n\n max_length = 0\n max_start_value = 0\n\n # generate sequence for each value\n for i in range(start, end):\n current = generate_sequence(i)\n\n # if the current sequence is the longest, update values\n if len(current) > max_length:\n max_length = len(current)\n max_start_value = i\n\n return max_length, max_start_value", "def get_lengths(auswahl):\n # list() for python3 compat.\n return list(map(max, list(zip(*[map(len, one) for one in auswahl]))))", "def find_max_gap(self, free_space_ranges):\n start = end = 200\n curr_start = 200\n #print(free_space_ranges)\n for i in range(201, 880):\n if free_space_ranges[i] != 0:\n if free_space_ranges[i-1] == 0:\n curr_start = i\n else:\n if (i-curr_start) > end-start:\n start = curr_start\n end = i\n return start, end", "def find_long_runs(num_sequence, l):\n chunked = [(k, list(g)) for k, g in itertools.groupby(num_sequence)]\n retval = [(i, len(g)) for i, (k, g) in enumerate(chunked) if k and len(g) > l]\n return retval", "def findMaxLength(self, nums):\n dict1 = dict()\n count = 0\n maxlen = 0\n for i in range(len(nums)):\n if nums[i] == 1:\n count = count + 1\n else:\n count = count - 1\n\n if count == 0:\n maxlen = max(maxlen, i + 1)\n if count not in dict1:\n dict1[count] = i\n else:\n maxlen = max(maxlen, i - (dict1.get(count)))\n return maxlen", "def count_to_len(X:np.array, Max:int=10, Min:int=2):\n return np.interp(X, (X.min(), X.max()), (Max, Min))", "def get_sequence_lengths( widths ): \n seq_len = (widths - 2) / 8\n return seq_len", "def _getLongestLength(self, listOfLists):\n\t\tmax = -1\n\t\tfor list in listOfLists:\n\t\t\tif len(list) > max:\n\t\t\t\tmax = len(list)\n\t\treturn max", "def stab_the_num(intervals):\r\n n = len(intervals)\r\n points = []\r\n\r\n left_points = []\r\n right_points = []\r\n for i, j in intervals:\r\n left_points.append(i)\r\n right_points.append(j)\r\n\r\n count = 0\r\n points.append(right_points[0])\r\n for i in range(1, n):\r\n if left_points[i] > points[count]:\r\n count += 1\r\n points.append(right_points[i])\r\n\r\n return points", "def total_range_size(self) -> int:\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()", "def part_2(ranges: 'RangeSet', total_ips_count: int = 1 << 32) -> int:\n\n allowed_count = total_ips_count - len(ranges)\n print(f\"part 2: there are total {allowed_count} 
allowed IPs\")\n return allowed_count", "def _number_of_intervals(self):\n return self._number_of_levels - 1", "def lenRange(start, stop, step=1):\n return (stop - start + step - 1 + 2 * (step < 0)) // step", "def bu(lengths: List[int], L: int) -> int:\n N = len(lengths)\n dp = [0] + [-1]*L\n for l in lengths:\n for j in range(l, L+1):\n dp[j] = max(dp[j], dp[j-l]+1 if dp[j-l] != -1 else -1)\n return dp[-1]", "def max_total_length(murals):\n if not murals:\n return 0\n\n no_overlap = []\n for mural in murals:\n if mural[1] <= murals[0][0] or mural[0] >= murals[0][1]:\n no_overlap.append(mural)\n\n value = murals[0][1] - murals[0][0]\n del murals[0]\n return max(value + max_total_length(no_overlap), max_total_length(murals))", "def time_interval(intervals: List[Tuple[int, int]]) -> int:\n\n start = []\n finish = []\n for elems in intervals:\n start.append(elems[0])\n finish.append(elems[1])\n\n # Sorting the start and end times separately\n start.sort()\n finish.sort()\n\n index1, index2 = 0, 0\n current_rooms = 0\n max_rooms = 0\n # The logic below is, we add each room when current finish time is greater than current start time\n # This informs us whether how many times are collapsing with each other at any time\n while (index1 < len(start)) and (index2 < len(finish)):\n if start[index1] < finish[index2]:\n current_rooms += 1\n index1 += 1\n else:\n index2 += 1\n max_rooms = max(max_rooms, current_rooms)\n current_rooms -= 1\n max_rooms = max(max_rooms, current_rooms)\n return max_rooms", "def brute(limit):\n c_lengths = {s: collatz_length(s) for s in range(1, limit+1)}\n return max(c_lengths, key=lambda x: c_lengths[x])", "def longest_run(L):\n\tlongest_length = 1\n\tincreasing_length = 1\n\tdecreasing_length = 1\n\tfor i in range(len(L) - 1):\n\t\tif L[i] >= L[i+1]:\n\t\t\tdecreasing_length += 1\n\t\telse:\n\t\t\tdecreasing_length = 1\n\t\tif L[i] <= L[i+1]:\n\t\t\tincreasing_length += 1\n\t\telse:\n\t\t\tincreasing_length = 1\n\t\tif increasing_length > longest_length:\n\t\t\tlongest_length = increasing_length\n\t\t\trun_end = i + 1\n\t\telif decreasing_length > longest_length:\n\t\t\tlongest_length = decreasing_length\n\t\t\trun_end = i + 1\n\n\treturn sum(L[run_end - longest_length + 1 : run_end+1])", "def howmany_within_range(row, minimum, maximum):\n count = 0\n for n in row:\n if minimum <= n <= maximum:\n count = count + 1\n return count", "def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])", "def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens", "def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))", "def range_overlap(ranges):\n max_left = 0.0\n min_right = 1.0\n for (left, right) in ranges:\n max_left = max(max_left, left)\n min_right = min(min_right, right)\n return (max_left, min_right)", "def length_range_for_entropy(entropy):\n min_length = 3\n max_length = min_length + int(entropy / 2)\n return min_length, max_length", "def longest(self):\n cps = collections.Counter()\n for crd in self:\n cps += 
collections.Counter( {crd.suit} )\n return sorted(cps.items(), reverse=True, key=lambda x:x[1])", "def maximumGap(self, nums: List[int]) -> int:\r\n n = len(nums)\r\n if n < 2: return 0 \r\n l, r = min(nums), max(nums)\r\n if r - l == 0: return 0 \r\n gap_instance = max(1, (r - l) // n)\r\n gapcnts = math.ceil((r - l + 1) / gap_instance)\r\n buckets = [[-1, -1] for _ in range(gapcnts)] \r\n calpos = lambda num: (num - l) // gap_instance\r\n\r\n for num in nums:\r\n pos = calpos(num)\r\n if num < buckets[pos][0] or buckets[pos][0] == -1:\r\n buckets[pos][0] = num \r\n if num > buckets[pos][1] or buckets[pos][1] == -1:\r\n buckets[pos][1] = num \r\n\r\n ans, pre = 0, l\r\n for small, large in buckets:\r\n if small == -1:\r\n continue \r\n else:\r\n ans = max(small - pre, ans)\r\n pre = large\r\n return ans" ]
[ "0.6553023", "0.63709086", "0.6276035", "0.618169", "0.61786216", "0.60901237", "0.60474753", "0.6026283", "0.6022194", "0.6012892", "0.5978638", "0.58781105", "0.58559895", "0.58521396", "0.5841774", "0.58397764", "0.58287156", "0.58229816", "0.5816759", "0.5807792", "0.57680637", "0.5757407", "0.5746607", "0.5719498", "0.5714347", "0.5701421", "0.5691887", "0.56878656", "0.5683026", "0.5661917" ]
0.74052256
0
Function used to attach form fields to wtforms. Not really a great solution but is approved by wtforms.
def attach_custom_user_fields(form_cls, **kwargs):
    new_fields = UserFields.query.filter_by(**kwargs).all()
    for field in new_fields:
        validators = []
        if field.required:
            validators.append(InputRequired())
        if field.field_type == "text":
            input_field = StringField(
                field.name, description=field.description, validators=validators
            )
        elif field.field_type == "boolean":
            input_field = BooleanField(
                field.name, description=field.description, validators=validators
            )
        setattr(form_cls, f"fields[{field.id}]", input_field)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def form_CustomisedFormLayoutFields(request):\n schema = schemaish.Structure()\n schema.add( 'firstName', schemaish.String())\n schema.add( 'surname', schemaish.String())\n schema.add( 'age', schemaish.Integer())\n schema.add( 'sex', schemaish.String())\n\n form = formish.Form(schema, 'form')\n\n return form", "def individual_formfields():\n # Instantiate Consent Tracker\n consent = s3db.auth_Consent(processing_types = VOL_CONSENT_OPTIONS)\n\n formfields = [utable.first_name,\n utable.last_name,\n Field(\"addr_L3\",\n label = T(\"Location\"),\n requires = IS_IN_SET(districts_and_uk),\n ),\n Field(\"addr_street\",\n label = T(\"Street Address\"),\n ),\n Field(\"addr_postcode\",\n label = T(\"Postcode\"),\n ),\n Field(\"mobile\",\n label = T(\"Contact Number (Preferred)\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (T(\"Contact Number (Preferred)\"),\n T(\"Ideally a Mobile Number, so that we can send you Text Messages.\")),\n ),\n ),\n Field(\"home\",\n label = T(\"Contact Number (Secondary)\"),\n requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),\n ),\n utable.email,\n utable[passfield],\n # Password Verification Field\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n ),\n\n # Skills\n s3db.hrm_multi_skill_id(empty = False,\n label = T(\"Volunteer Offer\"),\n ),\n Field(\"skills_details\",\n label = T(\"Please specify details\"),\n ),\n Field(\"certificates\", \"list:string\",\n label = T(\"Qualifications\"),\n requires = IS_IN_SET(certificates, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"experience\",\n label = T(\"Skills and Experience\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Co-ordination, Event Management, PCV qualified.\")\n ),\n Field(\"resources\",\n label = T(\"Offers of Resources\"),\n widget = lambda f, v: \\\n s3_comments_widget(f, v, _placeholder = \"e.g. Minibus.\")\n ),\n Field(\"where_operate\", \"list:string\",\n label = T(\"Where would you be willing to volunteer?\"),\n requires = IS_IN_SET(districts, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"travel\", \"integer\",\n label = T(\"Willing to Travel?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"slots\", \"list:string\",\n label = T(\"Times\"),\n requires = IS_IN_SET(slots, multiple=True),\n widget = S3MultiSelectWidget(header=\"\",\n selectedList=3),\n ),\n Field(\"significant_physical\", \"integer\",\n label = T(\"That require significant physical activity (including lifting and carrying) and may involve being outdoors (e.g. clean up of affected properties)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"some_physical\", \"integer\",\n label = T(\"That require some physical activity and may involve being outdoors (e.g. door knocking)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"little_physical\", \"integer\",\n label = T(\"That require little physical activity and are based indoors (e.g. 
preparing refreshments)\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"health_details\",\n label = T(\"If you wish, you can give us some further information on any fitness, medical or mobility issues that might limit the kind of activities you are able to volunteer for; this will help us to suggest suitable opportunities for you\"),\n ),\n Field(\"faith_requirements\", \"integer\",\n label = T(\"Do you have any faith requirements that you would like help with if you are coming to Support Cumbria?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"faith_requirements_details\",\n label = T(\"If Yes please outline\"),\n ),\n Field(\"emergency_contact_name\",\n label = T(\"Contact Name\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"emergency_contact_number\",\n label = T(\"Contact Number\"),\n requires = IS_PHONE_NUMBER_MULTI(),\n ),\n Field(\"emergency_contact_relationship\",\n label = T(\"Relationship\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"workplace\", \"integer\",\n label = T(\"Are you volunteering under your workplace volunteering scheme?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n Field(\"workplace_details\",\n label = T(\"If yes please name your employer\"),\n ),\n Field(\"dbs\", \"integer\",\n label = T(\"Are you DBS checked?\"),\n requires = IS_IN_SET({0: T(\"No\"),\n 1: T(\"Yes\"),\n }),\n widget = lambda f, v: \\\n SQLFORM.widgets.radio.widget(f, v,\n style=\"divs\"),\n ),\n #Field(\"convictions\", \"integer\",\n # label = T(\"Do you have any unspent convictions?\"),\n # comment = T(\"Please tick 'Yes' if you have any convictions that are not yet spent under the Rehabilitation of Offenders Act 1974. The term 'convictions' is used to refer to any sentence or disposal issued by a court. If all your convictions are spent, you can tick 'No'. 
If you're not sure if your convictions are unspent or spent, you can use a tool available at www.disclosurecalculator.org.uk and read guidance at hub.unlock.org.uk/roa\"),\n # requires = IS_IN_SET({0: T(\"No\"),\n # 1: T(\"Yes\"),\n # }),\n # widget = lambda f, v: \\\n # SQLFORM.widgets.radio.widget(f, v,\n # style=\"divs\"),\n # ),\n # Consent (GDPR + FOC)\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n required_fields = [\"first_name\",\n \"last_name\",\n \"addr_L3\",\n \"addr_street\",\n \"addr_postcode\",\n \"mobile\",\n \"emergency_contact\",\n \"where_operate\",\n ]\n\n return formfields, required_fields", "def form_tweaks(self):\n pass", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n placeholders = {\n \"first_name\": \"First Name\",\n \"last_name\": \"Last Name\",\n \"default_phone_num\": \"Phone Number\",\n \"default_passport_num\": \"Passport Number\",\n }\n\n self.fields[\"default_phone_num\"].widget.attrs[\"autofocus\"] = True\n for field in self.fields:\n placeholder = placeholders[field]\n self.fields[field].widget.attrs[\"placeholder\"] = placeholder\n self.fields[field].widget.attrs[\n \"class\"\n ] = \"border-black rounded-0 \\\n all-form-input\"\n self.fields[field].label = False\n self.helper = FormHelper()\n self.helper.form_tag = True\n self.helper.layout = Layout(\n Div(\n Field(\n \"first_name\",\n ),\n Field(\n \"last_name\",\n ),\n Field(\n \"default_phone_num\",\n ),\n Field(\n \"default_passport_num\",\n ),\n ),\n ButtonHolder(\n Submit(\"submit\", \"Save\", css_class=\"m-0 btn btn-outline\"),\n ),\n )", "def make_fields(self):\n for name, prop in self.edit:\n instance_value = self.model.get(name)\n post_value = self.data[name] if (self.data and self.data.has_key(name)) else instance_value\n form_field_class = self.get_field_type(prop)\n form_field = form_field_class(model=self.model, property=prop, name=name, instance_value=instance_value, post_value=post_value)\n self.add(form_field)", "def add_field(self, name, value):\n self.form_fields.append((name, value))\n return", "def add_field(self, name, value):\n self.form_fields.append((name, value))\n return", "def add_field(self, name, value):\n self.form_fields.append((name, value))\n return", "def render_custom_fields(form):\n return {\n 'form': form,\n }", "def formfields():\n\n T = current.T\n request = current.request\n\n auth = current.auth\n auth_settings = auth.settings\n auth_messages = auth.messages\n\n utable = auth_settings.table_user\n passfield = auth_settings.password_field\n\n # Last name is required\n utable.last_name.requires = IS_NOT_EMPTY(error_message=T(\"input required\"))\n\n # Don't check for duplicate email (will be done in onvalidation)\n # => user might choose to use the current email address of the account\n # => if registration key or code are invalid, we don't want to give away\n # any existing email addresses\n utable.email.requires = [IS_EMAIL(error_message = auth_messages.invalid_email),\n IS_LOWER(),\n ]\n\n # Instantiate Consent Tracker\n consent = ConsentTracking(processing_types=[\"STORE\", \"RULES_ISS\"])\n\n # Form fields\n formfields = [utable.first_name,\n utable.last_name,\n utable.email,\n utable[passfield],\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % 
(auth_messages.verify_password,\n T(\"Enter the same password again\"),\n ),\n ),\n ),\n Field(\"code\",\n label = T(\"Registration Code\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n\n # Required fields\n required_fields = [\"first_name\",\n \"last_name\",\n ]\n\n return formfields, required_fields", "def test_make_form_field():", "def formfields(cls):\n\n T = current.T\n request = current.request\n\n auth = current.auth\n auth_settings = auth.settings\n auth_messages = auth.messages\n\n utable = auth_settings.table_user\n passfield = auth_settings.password_field\n\n # Instantiate Consent Tracker\n consent = ConsentTracking(processing_types=[\"SHARE\", \"RULES_PRO\", \"TPNDO\"])\n\n # Last name is required\n utable.last_name.requires = IS_NOT_EMPTY(error_message=T(\"input required\"))\n\n #ltable = s3db.gis_location\n\n # Lookup projects with provider self-registration\n projects = cls.selectable_projects()\n\n # Lookup site services\n services = cls.selectable_services()\n\n # Lookup applicable organisation types\n org_types = applicable_org_types(None, group=TESTSTATIONS, represent=True)\n\n # Form fields\n formfields = [# -- User account ---\n utable.first_name,\n utable.last_name,\n utable.email,\n utable[passfield],\n\n # Password Verification Field\n Field(\"password_two\", \"password\",\n label = auth_messages.verify_password,\n requires = IS_EXPR(\"value==%s\" % \\\n repr(request.vars.get(passfield)),\n error_message = auth_messages.mismatched_password,\n ),\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (auth_messages.verify_password,\n T(\"Enter the same password again\"),\n ),\n ),\n ),\n # -- Test Station ---\n Field(\"organisation\",\n label = T(\"Name\"),\n requires = [IS_NOT_EMPTY(), IS_LENGTH(60)],\n comment = DIV(_class = \"tooltip\",\n _title = \"%s|%s\" % (T(\"Test Station Name\"),\n T(\"Specify the name of the test station (max 60 characters)\"),\n ),\n ),\n ),\n Field(\"organisation_type\", \"integer\",\n label = T(\"Organization Type\"),\n requires = IS_IN_SET(org_types),\n ),\n\n # -- Address --\n Field(\"location\", \"json\",\n widget = LocationSelector(\n levels = (\"L1\", \"L2\", \"L3\", \"L4\"),\n required_levels = (\"L1\", \"L2\", \"L3\"),\n show_address = True,\n address_required = True,\n show_postcode = True,\n postcode_required = True,\n show_map = True,\n ),\n ),\n # -- Service Offer --\n Field(\"opening_times\",\n label = T(\"Opening Hours\"),\n requires = IS_NOT_EMPTY(),\n ),\n Field(\"service_mode\", \"integer\",\n label = T(\"Service Mode\"),\n requires = IS_IN_SET(cls.selectable_services_modes()),\n ),\n Field(\"services\", \"list:integer\",\n label = T(\"Services\"),\n requires = IS_IN_SET(services,\n multiple = True,\n zero = None,\n ),\n widget = WithAdvice(S3GroupedOptionsWidget(cols=1),\n # Widget intro from CMS\n text = (\"org\",\n \"facility\",\n \"SiteServiceIntro\",\n ),\n ),\n ),\n # -- Contact and Appointments --\n Field(\"facility_phone\",\n label = T(\"Telephone\"),\n requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI()),\n ),\n Field(\"facility_email\",\n label = T(\"Email\"),\n requires = IS_EMPTY_OR(IS_EMAIL()),\n ),\n Field(\"facility_website\",\n label = T(\"Website\"),\n ),\n Field(\"booking_mode\", \"integer\",\n label = T(\"Appointments via\"),\n requires = IS_EMPTY_OR(IS_IN_SET(\n cls.selectable_booking_modes(),\n )),\n ),\n Field(\"comments\", \"text\",\n label = T(\"Comments\"),\n widget = s3_comments_widget,\n ),\n\n # -- Administrative --\n 
Field(\"projects\", \"list:integer\",\n label = T(\"Programs\"),\n requires = [IS_IN_SET(projects,\n multiple = True,\n zero = None,\n ),\n IS_NOT_EMPTY(),\n ],\n widget = WithAdvice(S3GroupedOptionsWidget(cols=1),\n # Widget intro from CMS\n text = (\"org\",\n \"organisation\",\n \"ProjectParticipationIntro\",\n ),\n ),\n ),\n # -- Privacy and Consent --\n Field(\"consent\",\n label = T(\"Consent\"),\n widget = consent.widget,\n ),\n ]\n\n # Required fields\n required_fields = [\"first_name\",\n \"last_name\",\n ]\n\n # Subheadings\n subheadings = ((0, T(\"User Account\")),\n (5, T(\"Test Station\")),\n (7, T(\"Address\")),\n (8, T(\"Service Offer\")),\n (11, T(\"Contact and Appointments\")),\n (16, T(\"Administrative\")),\n (17, \"%s / %s\" % (T(\"Privacy\"), T(\"Terms of Service\"))),\n )\n\n # Geocoder\n current.response.s3.scripts.append(\"/%s/static/themes/RLP/js/geocoderPlugin.js\" % request.application)\n\n return formfields, required_fields, subheadings", "def __init__(self, *args, **kwargs):\n super(SignupForm, self).__init__(*args, **kwargs)\n self.fields['email'].required = True\n self.fields['first_name'].required = True\n self.fields['password'].widget = forms.PasswordInput() \n\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def add_field(self, name, value):\n if not isinstance(value, str):\n value = json.dumps(value, ensure_ascii=False)\n self.form_fields.append((name, value))\n return", "def make_form(self):", "def __init__(field, form, content):", "def __init__(field, form, content):", "def formfields(form, *fields, **kwargs):\n from bigfoot import elements\n wrapper_class = kwargs.pop('wrapper_class', elements.ElementSet)\n field_class = kwargs.pop('field_class', elements.FormField)\n if not fields:\n fields = form.fields.keys()\n res = [field_class(form, field, **kwargs) for field in fields]\n if wrapper_class:\n res = wrapper_class(*res)\n return res", "def add_field(self, **kwargs):\n field = {\n 'name': kwargs.get('name'),\n 'value': kwargs.get('value'),\n 'inline': kwargs.get('inline', False)\n }\n\n self.fields.append(field)", "def form_RestishExample(request):\n form = formish.Form(SimpleSchema())\n form['comments'].widget = formish.TextArea()\n return form", "def __init__(self, *args, **kwargs):\n super(CustomAuthenticationForm, self).__init__(*args, **kwargs)\n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def _generate_form_fields(self):\n params = list(filter(lambda x: (x.precedence is None or x.precedence >= 0) and not x.constant,\n self.param.params().values()))\n for p in sorted(params, key=lambda p: p.precedence or 9999):\n # TODO: Pass p.__dict__ as second argument instead of arbitrary\n p_name = p.name\n\n # Preserve param tuple type.\n if self.data:\n if isinstance(getattr(self.param, p.name), tuple):\n p.default = tuple(self.data.getlist(p.name))\n\n # Preserve initial options for Selector\n if isinstance(self.param.params()[p_name], (param.FileSelector, param.MultiFileSelector)):\n p.default = \"\"\n\n self.fields[p_name] = self.widget_map[type(p)](self.param, p, p.name)\n self.fields[p_name].label = p.name.replace(\"_\", \" \").title()\n if self.read_only is None:\n widget_attribute = {'class': 'form-control'}\n else:\n # TODO: Should this be readonly instead of disable?\n widget_attribute = {'class': 'form-control', 'disabled': self.read_only}\n self.fields[p_name].widget.attrs.update(widget_attribute)\n 
self.fields[p_name].required = not self.param.params()[p_name].allow_None\n self.fields[p_name].disabled = self.param.params()[p_name].constant\n self.fields[p_name].help_text = self.param.params()[p_name].doc\n # self.fields = self.base_fields", "def addProductFields(form, forCreation=False, restWriter=None, hasOptions=False):\n form.addField('code', formal.String(required=True, strip=True))\n form.addField('title', formal.String(required=True, strip=True))\n\n images = formal.Group('images')\n form.add( images )\n images.add( formal.Field('mainImage', formal.File(required=forCreation), \n widgetFactory=formal.widgetFactory( formal.FileUploadWidget,\n convertibleFactory=contenttypeutil.KeyToFileConverter,\n originalKeyIsURL=True),description='click to change') )\n images.add( formal.Field('ndgrad', formal.File(), \n widgetFactory=formal.widgetFactory( formal.FileUploadWidget,\n convertibleFactory=contenttypeutil.KeyToFileConverter,\n originalKeyIsURL=True),description='click to change') )\n\n\n availability = formal.Group('availability')\n form.add( availability )\n\n availability.add( formal.Field('show', formal.Boolean()))\n availability.add( formal.Field('available', formal.Boolean()) )\n availability.add( formal.Field('availabilityDescription', formal.String()) )\n\n metadata = formal.Group('metadata')\n form.add( metadata )\n\n metadata.add( formal.Field('date', formal.Date(), formal.widgetFactory(formal.DatePartsInput, dayFirst=True)))\n metadata.add( formal.Field('location', formal.String()) )\n \n lensOptions = [\n \"80mm Schneider Super Symmar XL f/4.5\",\n \"110mm Schneider Super Symmar XL f/5.6\",\n \"150mm Rodenstock Sironar S f/5.6\",\n \"240mm Fujinon A f/9\",\n \"360mm Nikkor T*ED f/8\",\n \"360mm Nikkor T*ED f/11\",\n ]\n metadata.add( formal.Field('lens', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=lensOptions) ) )\n \n # this is a redundant field... need to remove if possible\n metadata.add( formal.Field('speedaperture', formal.String()) )\n \n speedOptions = ['1/500', '1/250','1/125','1/60','1/30','1/15','1/8','1/4','1/2','1s','2s','4s','8s','15s','30s','1m','2m']\n metadata.add( formal.Field('speed', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=speedOptions),description='If you enter a text value please use the same format as the existing values e.g. 
6s, 1/3, 2m' ) )\n \n \n apertureOptions = ['f/5.6','f/6.3','f/8','f/8⅓','f/8½','f/8⅔','f/16','f/16⅓','f/16½','f/16⅔','f/22','f/22⅓','f/22½','f/22⅔','f/32','f/32⅓','f/32½','f/32⅔','f/45','f/45⅓','f/45½','f/45⅔']\n metadata.add( formal.Field('aperture', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=apertureOptions) ) ) \n metadata.add( formal.Field('tiltswing', formal.String()) )\n metadata.add( formal.Field('fronttilt', formal.Integer()) )\n metadata.add( formal.Field('reartilt', formal.Integer()) )\n metadata.add( formal.Field('risefall', formal.String()) )\n ndfilters = ['0.3S','0.45S','0.6S','0.75S','0.9S','0.3H','0.45H','0.6H','0.75H','0.9H']\n metadata.add( formal.Field('ndfilters', formal.String(),formal.widgetFactory(formal.SelectOtherChoice, options=ndfilters)) )\n otherfilters=['81A','81B','81C','Polariser']\n metadata.add( formal.Field('otherfilters', formal.String(), formal.widgetFactory(formal.SelectOtherChoice, options=otherfilters)) )\n\n \n \n \n data_strings = [\n (0, '-'),\n (1, '*'),\n (2, '**'),\n (3, '***'),\n (4, '****'),\n (5, '*****'),\n ] \n \n metadata.add( formal.Field('rating', formal.Integer(), formal.widgetFactory(formal.SelectChoice, options=data_strings)) )\n\n\n description = formal.Group('description')\n form.add( description )\n parsers = [('markdown','MarkDown'),('xhtml','XHTML'),('plain','Plain Text')]\n description.add( formal.Field('summary', formal.RichTextType(required=True),\n widgetFactory=formal.widgetFactory(richtextarea.RichTextArea, parsers=parsers),\n cssClass=' '.join(['imagepicker','preview','itemselector']) ) )\n description.add( formal.Field('description', formal.RichTextType(required=True),\n widgetFactory=formal.widgetFactory(richtextarea.RichTextArea, parsers=parsers),\n cssClass=' '.join(['imagepicker','preview','itemselector']) ) )\n description.add( formal.Field('categories', formal.Sequence(formal.String()), \n widgetFactory=categorieswidget.FormalCheckboxTreeMultichoice ) )\n\n\n\n if not hasOptions:\n pricing = formal.Group('pricing')\n form.add( pricing )\n pricing.add( formal.Field('price', formal.Decimal(required=True)) )\n\n\n seo = formal.Group('seo')\n form.add( seo )\n seo.add( formal.Field('titleTag', formal.String()) )\n seo.add( formal.Field('metaDescription', formal.String()) )\n seo.add( formal.Field('metaKeywords', formal.String()) )", "def render_form(form: wtforms.Form) -> Markup:\n # the defaults for checkboxes and submits are weird and the API limited,\n # hence this hacky fix\n checkboxes = [field.name for field in form if isinstance(field.widget, wtforms.widgets.CheckboxInput)]\n submits = [field.name for field in form if isinstance(field.widget, wtforms.widgets.SubmitInput)]\n return (\n wtforms_bootstrap5.RendererContext()\n .form()\n .default_field(\n row_class=\"row mb-3\",\n label_class=\"form-label col-sm-3 col-form-label\",\n field_wrapper_class=\"col-sm-9\",\n field_wrapper_enabled=True,\n )\n .field(\n *checkboxes,\n wrapper_class=\"offset-sm-3 col-sm-9\",\n wrapper_enabled=True,\n field_wrapper_enabled=False,\n )\n .field(\n *submits,\n field_wrapper_class=\"offset-sm-3 col-sm-9\",\n field_wrapper_enabled=True,\n )\n ).render(form)", "def boots_field(field):\n\n field.field.widget.attrs['placeholder'] = field.label\n\n if type(field.field) in BOOTSTRAP_TEMPLATE_SWITCH:\n t = template.loader.get_template(BOOTSTRAP_TEMPLATE_SWITCH[type(field.field)])\n else:\n t = template.loader.get_template(\"bootstrap_tags/form_field.html\")\n\n return t.render(template.Context({\"field\": 
field}))", "def __init__(self, *args, **kwargs):\n super(ProfileForm, self).__init__(*args, **kwargs) \n for field in self.fields:\n self.fields[field].widget.attrs.update(\n {\n 'class': 'form-control',\n }\n )", "def get_form(self, request, obj=None, **kwargs):\n defaults = {}\n if obj is None:\n defaults.update(\n {\"form\": self.add_form, \"fields\": flatten_fieldsets(self.add_fieldsets)}\n )\n defaults.update(kwargs)\n return super().get_form(request, obj, **defaults)", "def render_form():", "def __init__(self, *args, **kwargs):\n kwargs.pop('widget_syntax')\n\n super(TemplateForm, self).__init__( *args, **kwargs)\n print self.fields", "def model_form_factory(base=Form, meta=ModelFormMeta, **defaults):\n\n class ModelForm(six.with_metaclass(meta, base)):\n \"\"\"\n A function that returns SQLAlchemy session. This should be\n assigned if you wish to use Unique validator. If you are using\n Flask-SQLAlchemy along with WTForms-Alchemy you don't need to\n set this.\n \"\"\"\n get_session = None\n\n class Meta:\n model = None\n\n default = None\n\n #: Whether or not to skip unknown types. If this is set to True,\n #: fields with types that are not present in FormGenerator type map\n #: will be silently excluded from the generated form.\n #:\n #: By default this is set to False, meaning unknown types throw\n #: exceptions when encountered.\n skip_unknown_types = defaults.pop('skip_unknown_types', False)\n\n #: Whether or not to assign all fields as optional, useful when\n #: creating update forms for patch requests\n all_fields_optional = defaults.pop('all_fields_optional', False)\n\n validators = defaults.pop('validators', {})\n\n #: A dict with keys as field names and values as field arguments.\n field_args = defaults.pop('field_args', {})\n\n #: A dict with keys as field names and values as widget options.\n widget_options = defaults.pop('widget_options', {})\n\n #: Whether or not to include only indexed fields.\n only_indexed_fields = defaults.pop('only_indexed_fields', False)\n\n #: Whether or not to include primary keys.\n include_primary_keys = defaults.pop('include_primary_keys', False)\n\n #: Whether or not to include foreign keys. By default this is False\n #: indicating that foreign keys are not included in the generated\n #: form.\n include_foreign_keys = defaults.pop('include_foreign_keys', False)\n\n #: Whether or not to strip string fields\n strip_string_fields = defaults.pop('strip_string_fields', False)\n\n #: Whether or not to include datetime columns that have a default\n #: value. A good example is created_at column which has a default\n #: value of datetime.utcnow.\n include_datetimes_with_default = defaults.pop(\n 'include_datetimes_with_default', False\n )\n\n #: The default validator to be used for not nullable columns. 
Set\n #: this to `None` if you wish to disable it.\n not_null_validator = defaults.pop(\n 'not_null_validator',\n InputRequired()\n )\n\n #: A dictionary that overrides not null validation on type level.\n #: Keys should be valid SQLAlchemy types and values should be valid\n #: WTForms validators.\n not_null_validator_type_map = defaults.pop(\n 'not_null_validator_type_map',\n ClassMap(\n [(sa.String, [InputRequired(), DataRequired()])]\n )\n )\n\n #: Default email validator\n email_validator = Email\n\n #: Default length validator\n length_validator = Length\n\n #: Default unique validator\n unique_validator = Unique\n\n #: Default number range validator\n number_range_validator = NumberRange\n\n #: Default date range validator\n date_range_validator = DateRange\n\n #: Default time range validator\n time_range_validator = TimeRange\n\n #: Default optional validator\n optional_validator = Optional\n\n #: Which form generator to use. Only override this if you have a\n #: valid form generator which you want to use instead of the\n #: default one.\n form_generator = defaults.pop(\n 'form_generator', FormGenerator\n )\n\n #: Default date format\n date_format = defaults.pop('date_format', '%Y-%m-%d')\n\n #: Default datetime format\n datetime_format = defaults.pop(\n 'datetime_format', '%Y-%m-%d %H:%M:%S'\n )\n\n #: Dictionary of SQLAlchemy types as keys and WTForms field classes\n #: as values. The key value pairs of this dictionary override\n #: the key value pairs of FormGenerator.TYPE_MAP.\n #:\n #: Using this configuration option one can easily configure the\n #: type conversion in class level.\n type_map = defaults.pop('type_map', ClassMap())\n\n #: Whether or not to raise InvalidAttributExceptions when invalid\n #: attribute names are given for include / exclude or only\n attr_errors = defaults.pop('attr_errors', True)\n\n #: Additional fields to include in the generated form.\n include = defaults.pop('include', [])\n\n #: List of fields to exclude from the generated form.\n exclude = defaults.pop('exclude', [])\n\n #: List of fields to only include in the generated form.\n only = defaults.pop('only', [])\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets object as form attribute.\"\"\"\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)\n\n if defaults:\n raise UnknownConfigurationOption(\n list(defaults.keys())[0]\n )\n\n return ModelForm" ]
[ "0.698078", "0.6600586", "0.659325", "0.6585638", "0.6533927", "0.6412825", "0.6412825", "0.6412825", "0.629907", "0.62634355", "0.62466544", "0.6142649", "0.6102095", "0.60763484", "0.60400486", "0.5991732", "0.5991732", "0.5946256", "0.5897828", "0.5873216", "0.58706456", "0.5866613", "0.5850554", "0.5831659", "0.5808978", "0.5762365", "0.57622373", "0.5726104", "0.5725978", "0.5718044" ]
0.6660687
1
If we have a registration code required, we attach it to the form similar to attach_custom_user_fields
def attach_registration_code_field(form_cls):
    if Configs.registration_code:
        setattr(  # noqa B010
            form_cls,
            "registration_code",
            StringField(
                "Registration Code",
                description="Registration code required to create account",
                validators=[InputRequired()],
            ),
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def registration(request, code=None):\n if request.method == \"POST\":\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(\"confirm.html\")\n else:\n form = RegistrationForm()\n return render(request, 'registration.html', {'form': form, 'code': code})", "def build_registration_code_field(form_cls):\n if Configs.registration_code:\n field = getattr(form_cls, \"registration_code\") # noqa B009\n field.field_type = \"text\"\n return [field]\n else:\n return []", "def handle_register(self, code):\n\n if code in self.factory.users_codes and not self.factory.debug and False:\n self._snd(u\"Codigo Ya Registrado\\nIntroduzca codigo\")\n return\n\n if self.get_odoo_connexion(code):\n self.state = \"tasks\"\n self.menu1_tasks()\n return\n else:\n self._snd(u\"No se pudo establecer\\n la conexion.\\nIntroduzca codigo\")", "def user_register():\n \n data = user_obj.user_register(request.forms) \n return data", "def register(request):\n if request.method == 'GET':\n form = CustomUserCreationForm()\n elif request.method == 'POST':\n form = CustomUserCreationForm( data=request.POST )\n\n if form.is_valid():\n user = form.save( commit=False )\n # we can make any last second changes to the user\n user.save()\n return redirect( '/' )\n\n context = {'form': form}\n return render( request, 'register.html', context )", "def register(request):\n if not settings.BMAT_ALLOW_REGISTER:\n return render(request, \"users/no_register.html\", {})\n \n if request.method == \"GET\":\n return render(request, \"users/register.html\", {\"form\":CustomUserCreationForm()})\n \n elif request.method == \"POST\":\n f = CustomUserCreationForm(data=request.POST)\n \n if not f.is_valid():\n return render(request, \"users/register.html\", {\"form\":f})\n \n u = f.save(commit=False)\n \n u.email = f.cleaned_data.get(\"email\", \"\")\n u.save()\n \n u = authenticate(username=u.username, password=f.cleaned_data[\"password1\"])\n alogin(request, u)\n \n return redirect(\"/\")", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)", "def auto_register(request,backend=None,error_msgs=''):\r\n # Check if a username is provided\r\n username_form = forms.AutoRegisterForm()\r\n if request.method == 'POST' and request.POST.get('username'):\r\n name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')\r\n username_form = forms.AutoRegisterForm(request.POST)\r\n if username_form.is_valid():\r\n username = username_form.cleaned_data['username']\r\n try:\r\n interface.get_user_without_password(username)\r\n error_msgs ='That username is already in use.'\r\n except DoesNotExistError:\r\n request.session['saved_username'] = request.POST['username']\r\n backend = request.session[name]['backend']\r\n return redirect('socialauth_complete', backend=backend)\r\n name = setting('SOCIAL_AUTH_PARTIAL_PIPELINE_KEY', 'partial_pipeline')\r\n backend=request.session[name]['backend']\r\n return render_to_response('accounts/auto_register.html', {'backend' : backend, 'error_msgs' : error_msgs, 'username_form' : username_form}, RequestContext(request))", "def create_register_user(self, data, user_type):\n data.pop('password_confirm')\n data['user_type'] = user_type\n user = User.objects.create_user(**data)\n return user", "def registration_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"registration_code\")", "def register(request, key):\n profile = cpm.UserProfile.objects.filter(\n activation_key=key)\n\n if not profile.exists() or 
profile[0].user.is_active:\n hero_title = 'Hmm... that registration key is invalid.'\n return render_err_msg(request, hero_title)\n\n user = profile[0].user\n\n if request.POST:\n reg_form = RegForm(request.POST)\n if reg_form.is_valid():\n user.is_active = True\n user.first_name = reg_form.cleaned_data['first_name']\n user.last_name = reg_form.cleaned_data['last_name']\n user.set_password(reg_form.cleaned_data['password'])\n\n pic_url = put_profile_pic(\n reg_form.cleaned_data['pic_url'], user.profile)\n if pic_url:\n user.profile.pic_url = pic_url\n\n user.profile.class_year = reg_form.cleaned_data['class_year']\n\n alt_emails = request.POST.getlist('alt_email')\n for alt_email in alt_emails:\n if alt_email:\n user.profile.add_email(alt_email)\n\n user.save()\n user.profile.save()\n\n user = auth.authenticate(username=user.username,\n password=reg_form.cleaned_data['password'])\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n # Redirect to a success page.\n return redirect('/')\n\n else:\n reg_form = RegForm()\n\n template_values = {\n 'page_title': 'register',\n 'form': reg_form,\n 'user': user,\n }\n\n return render_to_response('register.html',\n template_values, request)", "def register(request):\n registered = False\n if request.method == 'POST':\n user_form = UserForm(data=request.POST)\n profile_form = UserProfileInfoForm(data=request.POST)\n if user_form.is_valid() and profile_form.is_valid():\n user = user_form.save()\n user.set_password(user.password)\n user.save()\n profile = profile_form.save(commit=False)\n profile.user = user\n profile.save()\n registered = True\n else:\n print(user_form.errors,profile_form.errors)\n else:\n user_form = UserForm()\n profile_form = UserProfileInfoForm()\n return render(request,'footBallApp/registration.html',\n {'user_form':user_form,\n 'profile_form':profile_form,\n 'registered':registered})", "def register_user(request, extra_context=None):\r\n if request.user.is_authenticated():\r\n return redirect(reverse('dashboard'))\r\n if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP'):\r\n # Redirect to branding to process their certificate if SSL is enabled\r\n # and registration is disabled.\r\n return external_auth.views.redirect_with_get('root', request.GET)\r\n\r\n context = {\r\n 'course_id': request.GET.get('course_id'),\r\n 'email': '',\r\n 'enrollment_action': request.GET.get('enrollment_action'),\r\n 'name': '',\r\n 'running_pipeline': None,\r\n 'platform_name': microsite.get_value(\r\n 'platform_name',\r\n settings.PLATFORM_NAME\r\n ),\r\n 'selected_provider': '',\r\n 'username': '',\r\n }\r\n\r\n if extra_context is not None:\r\n context.update(extra_context)\r\n\r\n if context.get(\"extauth_domain\", '').startswith(external_auth.views.SHIBBOLETH_DOMAIN_PREFIX):\r\n return render_to_response('register-shib.html', context)\r\n\r\n # If third-party auth is enabled, prepopulate the form with data from the\r\n # selected provider.\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request):\r\n running_pipeline = pipeline.get(request)\r\n current_provider = provider.Registry.get_by_backend_name(running_pipeline.get('backend'))\r\n overrides = current_provider.get_register_form_data(running_pipeline.get('kwargs'))\r\n overrides['running_pipeline'] = running_pipeline\r\n overrides['selected_provider'] = current_provider.NAME\r\n context.update(overrides)\r\n\r\n return render_to_response('register.html', context)", "def __init__(self, *args, **kwargs):\n super(RegisterForm, 
self).__init__(*args, **kwargs)\n\n self.fields['nick'].widget.attrs.update({\n 'label': 'Přezdívka',\n 'placeholder': 'Mirek'\n })\n\n self.fields['name'].widget.attrs.update({\n 'label': 'Jméno',\n 'placeholder': 'Mirek'\n })\n\n self.fields['surname'].widget.attrs.update({\n 'label': 'Příjmení',\n 'placeholder': 'Dušín'\n })\n\n self.fields['email'].widget.attrs.update({\n 'label': 'E-mail',\n 'placeholder': '[email protected]'\n })\n\n self.fields['age'].widget.attrs.update({'label': 'Věk'})\n self.fields['age'].initial = 18\n\n self.fields['race'].widget.attrs.update({'label': 'Rasa'})\n self.fields['race'].queryset = Race.objects.filter(\n active=True).only('id', 'name')\n\n self.fields['group'].widget.attrs.update({\n 'label': 'Skupina',\n 'placeholder': 'Rychlé Šípy'\n })\n\n for field in self.fields.keys():\n self.fields[field].widget.attrs.update({\n 'required': self.fields[field].required,\n 'title': '',\n 'class': 'form-control'\n })", "def register(self, form):\n new_user = form.save(commit=False)\n username_field = getattr(new_user, 'USERNAME_FIELD', 'username')\n # Save lowercased email as username.\n setattr(new_user, username_field, form.cleaned_data['email'].lower())\n new_user.first_name = form.cleaned_data['first_name']\n new_user.last_name = form.cleaned_data['last_name']\n new_user.save()\n new_user = authenticate(username=getattr(new_user, username_field), password=form.cleaned_data['password1'])\n login(self.request, new_user)\n user_registered.send(sender=self.__class__, user=new_user, request=self.request)\n profile, _ = Profile.objects.get_or_create(user=new_user)\n self.request.session['signed_up'] = True\n profile.payment_plan = int(form.cleaned_data['payment_plan'])\n profile.company_name = form.cleaned_data['company']\n profile.phone = form.cleaned_data['phone']\n profile.save(update_fields=['payment_plan', 'company_name', 'phone'])\n if profile.payment_plan != Profile.PAYMENT_PLAN_FREE:\n messages.add_message(self.request, messages.INFO,\n 'Congratulations! We won\\'t charge you for this plan for now.')\n return new_user", "def setup(request, template='socialregistration/setup.html',\n form_class=UserForm, extra_context=dict(), claim_form_class=ClaimForm):\n try:\n social_user = request.session['socialregistration_user']\n social_profile = request.session['socialregistration_profile']\n except KeyError:\n return render_to_response(\n template, dict(error=True), context_instance=RequestContext(request))\n\n if not GENERATE_USERNAME:\n # User can pick own username\n if not request.method == \"POST\":\n form = form_class(social_user, social_profile,)\n else:\n form = form_class(social_user, social_profile, request.POST)\n try:\n if form.is_valid():\n form.save()\n user = form.profile.authenticate()\n user.set_unusable_password() # we want something there, but it doesn't need to be anything they can actually use - otherwise a password must be assigned manually before the user can be banned or any other administrative action can be taken\n user.save()\n login(request, user)\n\n if 'socialregistration_user' in request.session: del request.session['socialregistration_user']\n if 'socialregistration_profile' in request.session: del request.session['socialregistration_profile']\n\n return HttpResponseRedirect(_get_next(request))\n except ExistingUser:\n # see what the error is. 
if it's just an existing user, we want to let them claim it.\n if 'submitted' in request.POST:\n form = claim_form_class(\n request.session['socialregistration_user'],\n request.session['socialregistration_profile'],\n request.POST\n )\n else:\n form = claim_form_class(\n request.session['socialregistration_user'],\n request.session['socialregistration_profile'],\n initial=request.POST\n )\n\n if form.is_valid():\n form.save()\n\n user = form.profile.authenticate()\n login(request, user)\n\n if 'socialregistration_user' in request.session: del request.session['socialregistration_user']\n if 'socialregistration_profile' in request.session: del request.session['socialregistration_profile']\n\n return HttpResponseRedirect(_get_next(request))\n\n extra_context['claim_account'] = True\n\n extra_context.update(dict(form=form))\n\n return render_to_response(template, extra_context,\n context_instance=RequestContext(request))\n \n else:\n # Generate user and profile\n social_user.username = str(uuid.uuid4())[:30]\n social_user.save()\n social_user.set_unusable_password() # we want something there, but it doesn't need to be anything they can actually use - otherwise a password must be assigned manually before the user can be banned or any other administrative action can be taken\n social_user.save()\n\n social_profile.content_object = social_user\n social_profile.save()\n\n # Authenticate and login\n user = social_profile.authenticate()\n login(request, user)\n\n # Clear & Redirect\n if 'socialregistration_user' in request.session: del request.session['socialregistration_user']\n if 'socialregistration_profile' in request.session: del request.session['socialregistration_profile']\n return HttpResponseRedirect(_get_next(request))", "def register(request):\n form = RegistrationForm()\n if request.is_ajax():\n # If the request is an AJAX request, then we want to handle\n # the team assignment and return the result as data.\n form = RegistrationForm(request.POST)\n if form.is_valid():\n user_data = form.cleaned_data\n user_data['username'] = user_data['username'].lower()\n user_data['quest_id'] = user_data['username']\n user = None\n users = CustomUser.objects.filter(username__exact=user_data['quest_id'])\n if users.count() > 0:\n user = users[0]\n else:\n user = None\n\n if user is None or user.team is None:\n team_assignment = sorting_hat.find_pink_tie_team_assignment(user_data)\n user_data.pop('quest_id')\n if user is None:\n user = CustomUser(**user_data)\n else:\n user.first_name = user_data['first_name']\n user.last_name = user_data['last_name']\n user.is_active = True\n user.team = team_assignment\n user.save()\n if user.is_first_year:\n return json_response({ 'valid': True, 'team': user.team.id })\n return json_response({ 'valid': False })\n return render(request, 'registration/register.html', context=RequestContext(request, { 'form' : form, 'team': request.user.team }))", "def register_form():\n\n return render_template(\"register.html\")", "def register_form():\n\n # A dictionary of language options with it's keys. Key as html option id\n # dict[key] as language options. 
\n lang_option = {\"en\": \"English\", \"sv\": \"Swedish\", \"zh-CN\": \"Chinese\", \n \"es\": \"Spanish\", \"fr\": \"French\", \"ru\": \"Russian\"}\n\n\n return render_template(\"register.html\", lang_option=lang_option)", "def show_register_form():\n return render_template(\"register-form.html\")", "def registration():\n registration_page = Registration()\n registration_page.registration_main_page()", "def register(email, display_name=None):", "def register_form():\n\n return render_template(\"register-form.html\")", "def validate_registration(registration_code):\n aaa.validate_registration(registration_code)\n return 'Thanks. <a href=\"/login\">Go to login</a>'", "def registration_code(self):\n return self._regcode", "def register_user():\n pass", "def add_user():\n\n return render_template('register-form.html')", "def form_valid(self, form, request):\n data = form.data\n\n # Password hashing\n password = make_password(data.get('password1'))\n\n # Checkbox has value 'on' instead of True\n volunteer = False\n flag = data.get('volunteer')\n if flag is not None and flag != 'false' and flag != 'False':\n volunteer = True\n\n # Break first_name and last_name\n names = data.get('name').strip().split(' ')\n first_name = names[0]\n last_name = ''\n if len(names) > 1:\n last_name = ' '.join(names[1:])\n\n err = self.register(data.get('username'), data.get('email'), data.get(\n 'phone_number'), volunteer, password, first_name, last_name)\n return err", "def register(request):\n register_form = UserCreationForm()\n return render(request, 'metro_app/register.html', {'form': register_form})", "def _register_user(request_form):\n idnr = request_form['idnr']\n\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n\n response = elster_client.send_unlock_code_request_with_elster(request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n\n create_user(idnr, request_form['dob'].strftime(\"%d.%m.%Y\"), request_id)" ]
[ "0.6838169", "0.66669655", "0.6514536", "0.6208826", "0.6137646", "0.6134475", "0.6013371", "0.6006505", "0.6004355", "0.5999936", "0.5989894", "0.5950262", "0.59496284", "0.5925366", "0.59166074", "0.58668554", "0.58395535", "0.5811587", "0.5776874", "0.5757824", "0.57516545", "0.57429975", "0.5727455", "0.5722573", "0.57154155", "0.5713376", "0.57004833", "0.56962436", "0.56723225", "0.5664976" ]
0.78658634
0
Custom init to persist the obj parameter to the rest of the form
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    obj = kwargs.get("obj")
    if obj:
        self.obj = obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, *args, **kwargs):\n\n self._obj = kwargs.get('obj', None)\n super(ModelForm, self).__init__(*args, **kwargs)", "def __init__(self, obj, attribs):\n self.obj = obj\n self.attribs = attribs\n if self.obj:\n self._save()", "def __init__(self, obj, field, value):\n self._object = obj\n self._field = field\n self._value = value", "def __init__(self,obj):\n self.nature_libelle = obj['NatureLibelle']\n self.ins_nom = obj['InsNom']\n self.ins_numero_install = obj['InsNumeroInstall']\n self.equipement_id = obj['EquipementId']", "def __init__(field, form, content):", "def __init__(field, form, content):", "def init(self, obj):\n obj_dict = {'name': obj.get_obj_name(),\n 'properties': obj.get_obj_properties(),\n 'actions': []}\n\n self.log_data[obj.get_obj_id()] = obj_dict", "def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckTools, self)._set_id(kwargs)\n self.name = super(AttckTools, self)._set_attribute(kwargs, 'name')\n self.alias = super(AttckTools, self)._set_attribute(kwargs, 'aliases')\n self.description = super(AttckTools, self)._set_attribute(kwargs, 'description')\n self.reference = super(AttckTools, self)._set_reference(kwargs)\n self.created = super(AttckTools, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckTools, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckTools, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckTools, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckTools, self)._set_wiki(kwargs)\n self.contributor = super(AttckTools, self)._set_attribute(kwargs, 'contributor')", "def __init__(self, **kwargs):\n fields = get_fields(type(self))\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(self, name, value)\n \n # Get the default values\n if kwargs:\n for name, field in fields.items():\n if not field.auto_increment and not name in kwargs:\n default = field.default\n if default is None:\n raise ValueError(\"the field {} of model {} has no \" \\\n \"default value\".format(field.field_name,\n type(self)))\n elif callable(default):\n default = default(self)\n\n object.__setattr__(self, name, default)\n \n # If named parameters were specified, save the object\n if kwargs and Model.data_connector:\n with Model.data_connector.u_lock:\n Model.data_connector.add_object(self)", "def __init__(self, obj, *args, **kwargs):\n self.obj_ = obj\n super(ArtificialRV, self).__init__(*args, **kwargs)", "def __init__(self, handler=None, formdata=None, obj=None, prefix='', **kwargs):\n if handler:\n self._handler = handler\n super(Form, self).__init__(formdata=TornadoInputWrapper(self._handler), obj=obj, prefix=prefix, **kwargs)", "def from_obj(self, obj):\n self.__obj = obj\n self.__obj.swagger_types = self.swagger_types\n self.__obj.swagger_map = self.swagger_map", "def __init__(self, *args, **kwargs):\n\t\t\n\t\tinstance = kwargs.get('instance', None)\n\t\tinitial = kwargs.pop('initial', None)\n\t\tif instance is not None:\n\t\t\tif initial is None:\n\t\t\t\tinitial = {}\n\t\t\t\tinitial['ingredient_name'] = instance.ingredient.name\n\t\t\t\tinitial['unit_name'] = instance.unit.name\n\t\t\tif initial is not None:\n\t\t\t\tkwargs['initial'] = initial\n\t\tsuper(RecipeIngredientForm, self).__init__(*args, **kwargs)", "def __init__(self, **params):\n self.__object = object_param(**params)", "def __init__ (self, *k, **kw):\n self.newobj = True\n self.keyvals = {}\n self.locals = []\n self.reinit 
()\n for i in self.locals:\n fieldobj = object.__getattribute__(self, i)\n if kw.has_key (i):\n self.keyvals[i] = kw[i]\n else:\n if fieldobj.required == True:\n if fieldobj.default is not None:\n self.keyvals[i] = fieldobj.default\n else:\n raise Exception (\"Need a default value for %s\" % (i))", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__(self, **kwargs):\n self.__dict__.update(kwargs)", "def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def __init__(self, instance=None):\n self.instance = instance\n\n for name, field in self.hidden_fields.items():\n self.hidden_fields[name] = getattr(self.instance, name)", "def __init__(self, *args, **kwargs):\n super(HiddenModelObjectInputForm, self).__init__(*args, **kwargs)\n self.fields['model'].choices = get_registered_models(\n ignore=IGNORED_MODELS\n )", "def full_init_self(self, db, field_name, model):\n if not self.db:\n self.__class__.db = db\n\n self.field_name = field_name\n self.model = model # property", "def __init__(self, *args, **kwargs):\n user = None\n if 'user' in kwargs:\n user = kwargs.pop('user')\n super(PersonForm, self).__init__(*args, **kwargs)\n if user:\n self.fields['username'].initial = user.username\n self.fields['first_name'].initial = user.first_name\n self.fields['last_name'].initial = user.last_name\n self.fields['email_address'].initial = user.email\n self.fields.keyOrder = [\n 'id', 'username', 'first_name', 'middle_name', 'last_name',\n 'email_address', 'gender',\n 'new_password', 'confirm_new_password', 'signature',\n 'signature_html', 'time_zone', 'language', 'show_signatures',\n 'avatar', 'autosubscribe', 'comment'\n ]", "def __init__(self, **kwargs):\n\t\t# unparse input\t\t\n\t\tif 'obj' in kwargs: \n\t\t\tself.obj = kwargs.pop('obj')\n\t\t\t# sanity check\n\t\t\tif 'dir_obj' in kwargs:\n\t\t\t\tif self.obj.dir_obj != kwargs.pop('dir_obj'):\n\t\t\t\t\traise Exception(\"[operator] conflicting dir_obj entered\")\n\t\telse: \n\t\t\tself.obj = obsObj(**kwargs)\n\n\t\tself.ra = self.obj.ra\n\t\tself.dec = self.obj.dec\n\t\tself.dir_obj = self.obj.dir_obj\n\n\t\t# sanity check\n\t\tif self.dir_obj is None:\n\t\t\traise TypeError('dir_obj not specified')", "def __init__(self, *args, **kwargs):\n super(AddEventForm, self).__init__(*args)\n\n if kwargs.get('current_user') is not None:\n self.fields['speakers'].initial = kwargs.get('current_user')\n\n self.fields['speakers'].label_from_instance = self.label_from_instance", "def __init__(self, obj=None, key=None):\n d = _get(self, \"__dict__\")\n d[\"_obj\"] = obj\n d[\"__key__\"] = key", "def __init__(self, obj):\n self.obj = obj\n self._pkcache = {}\n self._idcache = obj.__class__.__instance_cache__\n self._typecache = defaultdict(dict)\n self.init()", "def __init__(__self__, *,\n object_id: Optional[pulumi.Input[str]] = None):\n if object_id is not None:\n pulumi.set(__self__, \"object_id\", object_id)", "def __init__(self, initial_params, save_name=\"model_param.joblib\"):\n super().__init__()\n self.initial_params = initial_params\n self.save_name = save_name", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()" ]
[ "0.81270677", "0.7214874", "0.6869673", "0.66981107", "0.66653264", "0.66653264", "0.6648626", "0.65440965", "0.6481102", "0.6445112", "0.6415407", "0.6336561", "0.6326652", "0.63131803", "0.6255758", "0.6211104", "0.6211104", "0.62063265", "0.6182554", "0.613536", "0.6123644", "0.6113366", "0.60831493", "0.6076878", "0.6075449", "0.60648745", "0.6062343", "0.60353625", "0.60159445", "0.5991739" ]
0.7258114
1
Labeled tokens come back from the UI as JSON. This method pulls them from the json and dumps them into a list.
def get_labels():
    json_request = request.json  # get the json from the server
    keys = sort_keys(json_request.keys())  # sort the keys (i.e. the token ids)
    labels = []
    for k in keys:  # get the labels that the user input to the UI
        val = (json_request[k]['text'], json_request[k]['value'])
        labels.append(val)
    return labels
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tokens_json(self):\n token_id, secret = self.decoded_token\n token_row = self.unauthenticated_token_row\n tokens_encoded = Fernet(secret).decrypt(\n token_row.tokens_fernet.encode('ascii'))\n return json.loads(tokens_encoded.decode('ascii'))", "def tokens():\n pass", "def act(self):\n if not self.label_candidates:\n self.label_candidates = True\n for text in self.observation.get('label_candidates', ()):\n if text:\n tokens = self.tokenize(text)\n self.add_to_dict([self.get_template(tokens)])\n\n return {'id': self.getID()}", "def read_json(self):\n utterances, labels = [], []\n for log in self.log_json:\n for turn in log['turns']:\n utterance = turn['output']['transcript']\n label = turn['output']['dialog-acts'][0]['act']\n utterances.append(utterance)\n labels.append(label)\n\n return utterances, labels", "def serialize_tokens(json_obj):\n\t# load into memory\n\tres = json.dumps(json_obj)\n\twith open(config.TOKENPATH, \"w+\") as f:\n\t\tf.write(res)\n\treturn json_obj[\"access_token\"], json_obj[\"refresh_token\"]", "def get_terms(self):\n return json.loads(self.terms)", "def _parse_tokens(self, body):\n\n old_token = self.token\n old_json_token = self.json_token\n\n self.token = self._parse_token(body)\n self.json_token = self._parse_json_token(body)\n\n logger.debug('Token set to: %s (Old: %s)', self.token, old_token)\n logger.debug('JSON token set to: %s (Old: %s)', self.json_token,\n old_json_token)", "def parse(self, tokenizer):\n pass", "def __str__(self):\n return self.token", "def get_payloads(self, text):\n return [json.dumps({\n 'inputs': text,\n 'parameters': {'candidate_labels': self.action_labels,\n 'hypothesis_template': 'The action that the user wants to perform is {}.'}\n }),\n json.dumps({\n 'inputs': text,\n 'parameters': {'candidate_labels': self.location_labels,\n 'hypothesis_template': 'The user wants to work with {}.'}\n })]", "def toString(self):\n return self.tokens.toString()", "def get_labels(self, labels_from_json):\n self.raw_labels = labels_from_json", "def build_expected_user_labels_response(self):\n labels = [\n {\n \"key\": \"key1\",\n \"value\": \"value1\"\n },\n {\n \"key\": \"key2\",\n \"value\": \"value2\"\n }\n ]\n return labels", "def token(self) -> str:", "def _parse_individual_tokens(self, tokens: List[str]) -> List:\r\n objs = []\r\n\r\n for token in tokens:\r\n obj = self._parse_token(token)\r\n objs.append(obj)\r\n\r\n return objs", "def tokenize(self):\n\n self.feats = {\n 'features': [], # Lists of the `InputFeatures` objects.\n 'segments': [], # Segments of the phrase. 
0: Promoun, 1: A-term, 2: B-term \n 'df_ids': [], # DataFrame index.\n 'target_token_ids': [] # Indexes of the target term in the tokens lists.\n }\n unique_id = 0 # Unique ID of the dataset.\n for _, row in tqdm(self.df.iterrows()):\n segment_tokens = self.tokenize_single_row(row)\n for j, segment in enumerate(segment_tokens):\n if segment['target_token_index'] > 0:\n features = self.tokens_to_features(unique_id, segment['tokens'])\n unique_id += 1\n self.feats['features'].append(features)\n self.feats['segments'].append(j)\n self.feats['target_token_ids'].append(segment['target_token_index'] )\n self.feats['df_ids'].append(row.ID)", "def process_data(self, json_dict: dict):\n all_token_ids = []\n all_level_ids = []\n all_synset_ids = []\n all_lemma_ids = []\n all_is_highway = []\n all_targets = []\n\n def tokenize(lemma_):\n return self.tokenizer(\n lemma_,\n add_special_tokens=False,\n truncation=True,\n is_split_into_words=True,\n return_token_type_ids=False,\n ).input_ids\n\n def add_lemma(lemma_, abs_level_, synset_id_, is_highway_):\n lemma_token_ids = tokenize([lemma_])\n n_tokens_ = len(lemma_token_ids)\n token_ids.extend(lemma_token_ids)\n level_ids.extend([self.level_to_id[abs_level_]] * n_tokens_)\n synset_ids.extend([synset_id_] * n_tokens_)\n lemma_ids.extend([lemma_ids[-1] + 1] * n_tokens_)\n is_highway.extend([is_highway_] * n_tokens_)\n\n # Go through all JSON entries\n for synset in tqdm(json_dict.values()):\n token_ids = []\n level_ids = []\n synset_ids = [0]\n lemma_ids = [0]\n is_highway = []\n\n lemmas = [l.replace(\"_\", \" \") for l in synset[\"lemmas\"]]\n abs_level = (\"current\", \"current\")\n\n # Save all lemmas of the current node\n synset_token_ids = self.tokenizer.batch_encode_plus(lemmas,\n add_special_tokens=False,\n return_token_type_ids=False).input_ids\n all_targets.append(synset_token_ids)\n\n for level in (\"hypernyms\", \"hyponyms\"):\n for sub_synset in synset[level].values():\n if \"lemmas\" in sub_synset:\n lemmas = [l.replace(\"_\", \" \") for l in sub_synset[\"lemmas\"]]\n abs_level = (level, \"current\")\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n for sub_level in (\"hypernyms\", \"hyponyms\"):\n for sub_sub_lemmas in sub_synset[sub_level].values():\n lemmas = [l.replace(\"_\", \" \") for l in sub_sub_lemmas]\n abs_level = (level, sub_level)\n synset_id = synset_ids[-1] + 1\n\n # Add the synset's lemma that is on highway\n highway_lemma = lemmas.pop(0)\n add_lemma(highway_lemma, abs_level, synset_id, True)\n\n # Add the synset's other lemmas\n for lemma in lemmas:\n add_lemma(lemma, abs_level, synset_id, False)\n\n # Append the global lists\n all_token_ids.append(token_ids)\n all_level_ids.append(level_ids)\n all_synset_ids.append(synset_ids[1:])\n all_lemma_ids.append(lemma_ids[1:])\n all_is_highway.append(is_highway)\n\n data = (\n all_token_ids,\n all_level_ids,\n all_synset_ids,\n all_lemma_ids,\n all_is_highway,\n all_targets\n )\n\n return data", "def json(self):\n return json.loads(self.text)", "def __init__(self):\n self.tokens = []", "def look_up_a_token():\n try:\n data = request.get_json(force=True)\n except Exception:\n data = None\n if data:\n tok = data['token']\n else:\n tok = request.headers.get('TOK_ID')\n request.data\n\n try:\n creation_time = int(round(datetime.timestamp(tokens[tok]), 0))\n 
issue_time = tokens[tok].isoformat()\n except Exception:\n _now = datetime.now(UTC)\n creation_time = int(round(datetime.timestamp(_now)))\n issue_time = _now.isoformat()\n tokens[tok] = _now\n expire_time = datetime.fromtimestamp(creation_time + 2764790)\n\n return jsonify({\n \"data\": {\n \"accessor\": \"8609694a-cdbc-db9b-d345-e782dbb562ed\",\n \"creation_time\": creation_time,\n \"creation_ttl\": 2764800,\n \"display_name\": \"fooname\",\n \"entity_id\": \"7d2e3179-f69b-450c-7179-ac8ee8bd8ca9\",\n \"expire_time\": expire_time.isoformat(),\n \"explicit_max_ttl\": 0,\n \"id\": tok,\n \"identity_policies\": [\n \"dev-group-policy\"\n ],\n \"issue_time\": issue_time,\n \"meta\": {\n \"username\": \"tesla\"\n },\n \"num_uses\": 0,\n \"orphan\": True,\n \"path\": \"auth/kubernetes/login\",\n \"policies\": [\n \"default\"\n ],\n \"renewable\": True,\n \"ttl\": 2764790\n }\n })", "def _parse_json(model, f_name):\n # get the word index dictionary corresponding to the feature model type\n if model == \"baseline\":\n word_dict = _parse_word_dict(\"baseline_dict.txt\")\n elif model == \"hashing\":\n word_dict = _parse_word_dict(\"hashing_dict.txt\")\n elif model == \"cluster\":\n word_dict = _parse_word_dict(\"cluster_dict.txt\")\n else:\n error(\"Unknown model type %s\" % model)\n\n if os.path.isfile(f_name):\n if _svm:\n model += \"svm\"\n out = open(\"datasets/%s_%s.txt\" % (f_name[f_name.rfind(\"/\") + 1:].split(\".\")[0], model), \"w\")\n with open(f_name) as f:\n for line in f:\n obj = json.loads(line)\n txt = obj[\"text\"]\n rat = obj[\"stars\"] if \"stars\" in obj else 0\n out.write(\"%d \\t\" % rat)\n features = []\n for t in _extract(txt):\n if t in word_dict:\n while len(features) <= word_dict[t]:\n features.append(0)\n features[word_dict[t]] += 1\n for i, c in enumerate(features):\n if c == 0:\n continue\n if _svm:\n i += 1\n out.write(\"%d:%d \" % (i, c))\n out.write(\"\\n\")\n out.close()\n else:\n error(\"parse json - not a file: %s\" % f_name)", "def denormalize_token_data(self, data):\n if not data:\n return\n\n return {\"oauth_token\": data.get(\"token\"),\n \"oauth_token_secret\": data.get(\"extra\")}", "def parse_tokens(self, tokens):\n for token in tokens:\n self.parse_token(token)", "def to_dict(self):\n return {\n 'token': self.token\n }", "def json_to_labels(data):\n labels = []\n for item in data:\n labels.append(Label(item['title'], item['color'], item['desc']))\n return labels", "def __get_token_data__(self):\n raise Exception(\"Implement me!\")", "def listTags(self, authenticationToken):\r\n pass", "def _parse_json_token(self, body):\n\n token_match = re.search('var\\s*jsonToken\\s*=[\\s\\']*([\\w-]+)', body)\n return token_match.group(1)", "def tokens(self):\n return self.__tokens", "def tokens(self):\n data, end = \\\n self.pat.traverse(lambda obj, *args: obj.tokens(*args),\n self.begin, self.data)\n return data" ]
[ "0.5711956", "0.55669457", "0.5513183", "0.54622537", "0.52287966", "0.5219712", "0.5186664", "0.51845807", "0.5176791", "0.5157778", "0.5122742", "0.51104176", "0.5076709", "0.5054146", "0.50533843", "0.5040823", "0.5034359", "0.502462", "0.50213176", "0.49873635", "0.4974976", "0.4970196", "0.49564272", "0.49485597", "0.49437186", "0.4939112", "0.49060458", "0.4903211", "0.49003676", "0.48996854" ]
0.65568817
0
Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware`` or a subclass has been installed. We cache the result on the ``PageMiddleware._installed`` to only run this once.
def installed(cls):
    try:
        return cls._installed
    except AttributeError:
        name = "mezzanine.pages.middleware.PageMiddleware"
        installed = middlewares_or_subclasses_installed([name])
        setattr(cls, "_installed", installed)
        return installed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Install (self):\n if self in sys.meta_path:\n return\n sys.meta_path.insert (0, self)", "def is_installed(self):\n pass", "def pre_installation(self):\n pass", "def autodiscover():\n from django.utils.importlib import import_module\n global LOADED\n if LOADED:\n return\n LOADED = True\n for app in settings.INSTALLED_APPS:\n try:\n import_module(\"%s.page_widgets\" % app)\n except ImportError, e:\n if \"WidgetModel\" in \"%s\" % e:\n traceback.print_exc(file=sys.stdout)\n pass", "def on_load_middleware():\n\n # protect middleware wrapping: only a single thread proceeds\n global load_middleware_lock # lock gets overwritten as None after init\n if not load_middleware_lock: # already initialized? abort\n return\n mwlock = load_middleware_lock\n mwlock.acquire() # acquire global lock\n if not load_middleware_lock: # check again\n mwlock.release() # abort\n return\n load_middleware_lock = None # mark global as \"init done\"\n\n try:\n # middleware hooks\n from django.conf import settings\n for i in settings.MIDDLEWARE_CLASSES:\n if i.startswith('oboe'):\n continue\n dot = i.rfind('.')\n if dot < 0 or dot+1 == len(i):\n continue\n objname = i[dot+1:]\n imports.whenImported(i[:dot],\n functools.partial(middleware_hooks, objname=objname)) # XXX Not Python2.4-friendly\n\n # ORM\n if oboe.config['inst_enabled']['django_orm']:\n from oboeware import inst_django_orm\n imports.whenImported('django.db.backends', inst_django_orm.wrap)\n\n # templates\n if oboe.config['inst_enabled']['django_templates']:\n from oboeware import inst_django_templates\n import django\n if StrictVersion(django.get_version()) >= StrictVersion('1.3'):\n imports.whenImported('django.template.base', inst_django_templates.wrap)\n else:\n imports.whenImported('django.template', inst_django_templates.wrap)\n\n # load pluggaable instrumentation\n from loader import load_inst_modules\n load_inst_modules()\n\n # it's usually a tuple, but sometimes it's a list\n if type(settings.MIDDLEWARE_CLASSES) is tuple:\n settings.MIDDLEWARE_CLASSES = ('oboeware.djangoware.OboeDjangoMiddleware',) + settings.MIDDLEWARE_CLASSES\n elif type(settings.MIDDLEWARE_CLASSES) is list:\n settings.MIDDLEWARE_CLASSES = ['oboeware.djangoware.OboeDjangoMiddleware'] + settings.MIDDLEWARE_CLASSES\n else:\n print >> sys.stderr, \"Oboe error: thought MIDDLEWARE_CLASSES would be either a tuple or a list, got \" + \\\n str(type(settings.MIDDLEWARE_CLASSES))\n\n finally: # release instrumentation lock\n mwlock.release()\n\n try:\n add_rum_template_tags()\n except Exception, e:\n print >> sys.stderr, \"Oboe error: couldn't add RUM template tags: %s\" % (e,)", "def __bool__(self):\n return self.installed", "def page_setup(self):\n return self.container['page_setup']", "def set_installed(self):\n self._installed = True", "def do_post_install(self, context):\n pass", "def is_installed(cls):\n return find_spec_or_loader(cls.module) is not None", "def on_install(self, request, trigger_context):\n raise NotImplementedError", "def setup_page(self):\n raise NotImplementedError", "def __init__(self, get_response):\n if not settings.PRODUCTION_ENVIRONMENT and not settings.TESTING:\n self.get_response = get_response\n else:\n raise MiddlewareNotUsed()", "def installed(self):\n if self._installed is None:\n self._installed = (self.path is not None)\n return self._installed", "def setup_page(self):\r\n raise NotImplementedError", "def process_request(self, request): # pylint: disable=R0201\n\n error = (\"The Django CAS middleware requires authentication \"\n \"middleware to be 
installed. Edit your MIDDLEWARE_CLASSES \"\n \"setting to insert 'django.contrib.auth.middleware.\"\n \"AuthenticationMiddleware'.\")\n assert hasattr(request, 'user'), error", "def load_middleware(*args, **kwargs):\n inject_middleware()\n BaseHandler.load_middleware = original_load_middleware\n return original_load_middleware(*args, **kwargs)", "def _install(self):\n\n pass", "def is_installed(self):\n return not self.dont_install", "def _auto_discover(self):\n if self._initialized:\n return\n\n from django.conf import settings\n from django.utils.importlib import import_module\n from django.utils.module_loading import module_has_submodule\n\n self._initialized = True\n for app in settings.INSTALLED_APPS:\n mod = import_module(app)\n # Attempt to import the app's panels module.\n try:\n import_module('%s.panels' % app)\n except:\n # Decide whether to bubble up this error. If the app just\n # doesn't have an panels module, we can ignore the error\n # attempting to import it, otherwise we want it to bubble up.\n if module_has_submodule(mod, 'panels'):\n raise", "def ready(self):\n from django_sites_extensions import models\n from django_sites_extensions import signals", "def __init__(self):\n if not self.is_installed():\n self.install()\n else:\n self.load_users()", "def inject_middleware():\n if 'appmap.django.Middleware' not in settings.MIDDLEWARE:\n settings.MIDDLEWARE.insert(0, 'appmap.django.Middleware')", "def can_load_page(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n expect_loading = False\n if 'expect_loading' in kwargs:\n expect_loading = kwargs['expect_loading']\n del kwargs['expect_loading']\n if expect_loading:\n self._loaded = False\n result = func(self, *args, **kwargs)\n self.wait_for_page_loaded()\n return result\n return func(self, *args, **kwargs)\n\n return wrapper", "def installed(self) -> bool:\n return self._installed", "def middleware(self, *args, **kwargs):\n return super(Blueprint, self).middleware(*args, **kwargs)", "def _install(self):\n # Default implementation\n for pm_name, package in self._provider_package.items():\n if helpers[pm_name]:\n helpers[pm_name].install_package(package)\n return\n raise self.unsure_how_to_install()", "def post_installation(self, exc_value):\n pass", "def page_setup(self, page_setup):\n\n self.container['page_setup'] = page_setup", "def on_registered(self):\r\n super().on_registered()\r\n\r\n # Register type information\r\n cls = self.__class__\r\n\r\n subclass_cache = cls._of_subclass_cache\r\n type_cache = cls._of_type_cache\r\n\r\n # Cache subtypes\r\n for base_cls in cls.__mro__:\r\n try:\r\n instances = subclass_cache[base_cls]\r\n\r\n except KeyError:\r\n instances = subclass_cache[base_cls] = set()\r\n\r\n instances.add(self)\r\n\r\n # Cache the type\r\n try:\r\n instances = type_cache[cls]\r\n\r\n except KeyError:\r\n instances = type_cache[cls] = set()\r\n\r\n instances.add(self)\r\n\r\n ReplicableRegisteredSignal.invoke(target=self)" ]
[ "0.5708004", "0.5457926", "0.53875947", "0.5337035", "0.53081757", "0.5224973", "0.52124566", "0.5208954", "0.51006395", "0.5072566", "0.5057796", "0.5034346", "0.5021708", "0.50119513", "0.5004418", "0.5003487", "0.4994785", "0.49845058", "0.49206704", "0.49062628", "0.49048826", "0.48873", "0.4885075", "0.48722622", "0.48634034", "0.4862464", "0.4837229", "0.4826915", "0.478637", "0.47698748" ]
0.8444131
0
Checks if all the arguments it receives are numeric (according to ``is_numeric``).
def are_numeric(*values):
    for value in values:
        if not is_numeric(value):
            return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_arguments(arguments):\n quit = False\n for argument, value in vars(arguments).items():\n try:\n float(value)\n except:\n print(\"{} must be numeric\".format(argument))\n quit = True\n if quit:\n exit(1)", "def check_for_float_and_int(check):", "def number_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, numbers.Number):\n name = type(var).__name__\n raise DigitError(\n 'Function {} expected number, {} got instead.'.format(func, name))", "def numeric_check(param, name):\n\tif not isinstance(param, numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def is_float(*args): \n try:\n for i in args:\n float(i)\n return True\n except Exception:\n return False", "def isNumeric(obj):\n return isinstance(obj, (int, float, bool))", "def _check_args(self):\n if not isinstance(self.digits, str):\n raise TypeError('digits must be of type string.')\n if isinstance(self.n_points, float):\n self.n_points = int(self.n_points)\n if not isinstance(self.n_points, int):\n raise TypeError('n_points must be of type integer.')\n if self.n_points < 0:\n raise ValueError('n_points must be positive.')", "def hasCorrectNumberArguments(self, *args):\n return _libsbml.ASTBasePlugin_hasCorrectNumberArguments(self, *args)", "def is_numeric(self) -> bool:\n return False", "def args_is_good(arg_list: list) -> bool:\n usage_msg = (\n \"Usage: python operations.py <number1> <number2>\\n\"\n \"Example:\\n\"\n \" python operations.py 10 3\\n\"\n )\n too_many_msg = \"InputError: too many arguments\\n\"\n only_numbers_msg = \"InputError: only numbers\\n\"\n if len(arg_list) == 1:\n print(usage_msg)\n return False\n if len(arg_list) > 3:\n print(too_many_msg, usage_msg)\n return False\n try:\n a, b = int(arg_list[1]), int(arg_list[2])\n # discarding floats here, even those like 5.0\n # use float.is_integer() if need to keep those\n # keeping only 42 or \"42\" (ints with or without quotes)\n if arg_list[1] == str(a) and arg_list[2] == str(b):\n return True\n except TypeError:\n print(only_numbers_msg, usage_msg)\n return False", "def isnumeric(self):\n return isnumeric(self)", "def is_numeric(value):\n return isinstance(value, int) or isinstance(value, float)", "def numeric(*args):", "def is_numeric(x):\n if isinstance(x, NUMBER_TYPES):\n return True\n elif isinstance(x, np.ndarray):\n return x.dtype.type not in NUMPY_NON_TYPES\n return False", "def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())", "def __check_args(self):\n self.__check_args_type()\n self.__check_args_val()", "def is_numeric(obj):\n return isinstance(obj, (int, float, complex))", "def __verify_numeric(self, action, value):\n if action != \"1\": # if the action is anything other than inserting:\n return True\n try:\n return value.isnumeric()\n except ValueError:\n return False", "def isNumber(x):\n\treturn type(x) in [int, float]", "def check_for_float(check):", "def isNumber(x):\n return isinstance(x, (int, float))", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def isNumeric(self,chain):\n res = True\n try:\n int(chain)\n except:\n res = False\n return res", "def validate_numeric_annots(self):\n valid = True\n for annot_header in self.file.columns[1:]:\n annot_name = annot_header[0]\n annot_type = annot_header[1]\n column_dtype = self.file.dtypes[annot_header]\n if annot_type == \"numeric\" and column_dtype == \"object\":\n valid = 
False\n msg = f\"Numeric annotation, {annot_name}, contains non-numeric data (or unidentified NA values)\"\n self.store_validation_issue(\n \"error\", msg, \"content:invalid-type:not-numeric\"\n )\n return valid", "def is_int(*args): \n try:\n for i in args:\n int(i)\n return True\n except Exception:\n return False", "def is_numeric(number):\n\n if isinstance(number, bool):\n return False\n elif isinstance(number, int) or isinstance(number, float):\n return True\n else:\n return False", "def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n else:\n return False", "def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])", "def _is_number(data):\n return len(data) and np.issubdtype(_to_ndarray(data).dtype, np.number)", "def test_not_int(self):\n invalid_args = [\"random string\", \"123\", 123.5]\n for arg in invalid_args:\n assert meters_to_km(arg) is arg" ]
[ "0.79766375", "0.72621953", "0.7188788", "0.7124984", "0.7036451", "0.7026137", "0.6995436", "0.69920516", "0.6922569", "0.6913737", "0.68856776", "0.68640095", "0.68215996", "0.67944366", "0.6732328", "0.6717718", "0.6717057", "0.6711378", "0.664611", "0.6630572", "0.6597417", "0.6569322", "0.6494181", "0.64835775", "0.6482341", "0.6480279", "0.64764017", "0.64741606", "0.645952", "0.6457773" ]
0.7544063
1
return the unitwise definition corresponding to attrname
def _get_wavelength_attrs_with_units(self, attrname, units='AA'):
    attr = self._lick[attrname]
    if self.wavelength_unit is not None:
        if units is None:
            return attr * unit[self.wavelength_unit]
        else:
            return (attr * unit[self.wavelength_unit]).to(units)
    else:
        return attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_wavelength_attrs_with_units(self, attrname, units='AA'):\n attr = self._lick[attrname]\n if self.wavelength_unit is not None:\n if units is None:\n return attr * Unit(self.wavelength_unit)\n else:\n return (attr * Unit(self.wavelength_unit)).to(units)\n else:\n return attr", "def _parse_unit_attr(attr: str) -> str:\n parts = attr.split('_', maxsplit=1)\n valid_attr = len(parts) == 2 and parts[0] == \"unit\"\n if not valid_attr:\n raise ValueError(\"{0} is not a valid unit attribute.\".format(attr))\n return parts[1]", "def mineral_attr(attribute):\n return attribute[0]", "def get_attr(self, attr_name, ds_name=None):\n if self.science_product:\n return self.__nc_attr(attr_name, ds_name)\n\n return self.__h5_attr(attr_name, ds_name)", "def get(self, attrname):\n return self.__dict__['_'+attrname]", "def create_descr(self, attr_name):", "def getUnitDefinition(self, *args):\n return _libsbml.Model_getUnitDefinition(self, *args)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def __getattribute__(self, attr):\n if attr in ('make_rdm1s', 'spin_square', 'contract_2e',\n 'absorb_h1e'):\n raise AttributeError\n else:\n return object.__getattribute__(self, attr)", "def get_attribute(self, name):\n\n pass", "def get_unit(self,tag):", "def __h5_attr(self, attr_name, ds_name):\n if ds_name is not None:\n dset = self.fid['/PRODUCT/{}'.format(ds_name)]\n if attr_name not in dset.attrs.keys():\n return None\n\n attr = dset.attrs[attr_name]\n else:\n if attr_name not in self.fid.attrs:\n return None\n\n attr = self.fid.attrs[attr_name]\n\n if isinstance(attr, bytes):\n return attr.decode('ascii')\n\n return attr", "def get_attr(self):\n attr = self._bld.FindOrCreateAttribute(self._sobj, self.sname)\n return attr._narrow(self.stype)", "def _desc_op(attr_name):", "def attributeDecl(self, elem, name, type, defi, defaultValue, nameList):\n pass", "def attr(self, name):\r\n return Assert(getattr(self.obj, name))", "def AttributeString(self) -> str:", "def AttributeString(self) -> str:", "def attr(node: md.Document, name: str) -> str:\n return node.getAttribute(name)", "def get_attr(self, name: str):\n return self.call(name)", "def __getattr__(self, name):\n if name == \"mu\":\n self.mu = self.mdp.stationary_distribution(\n seed=1000, iterations=100000, policy=self.target_policy)\n return self.mu\n elif name == \"beh_mu\":\n self.beh_mu = self.mdp.stationary_distribution(\n seed=1000, iterations=100000, policy=self.behavior_policy)\n return self.beh_mu\n elif name == \"V_true\":\n self.V_true = dynamic_prog.estimate_V_discrete(\n self.mdp, policy=self.target_policy, gamma=self.gamma)\n return self.V_true\n else:\n raise AttributeError(name)", "def get_unit(shared, unit_name):\n if (shared.config.get_safe('data', 'use_units') != 'off'):\n unit_val, unit_str = shared.config.get_safe_literal('units', unit_name,\n default=(1.0, ''))\n if unit_str:\n unit_str = ' [' + unit_str + ']'\n else:\n unit_val = 1.0\n unit_str = ''\n \n return unit_val, unit_str", "def about_attribute(self, name):\n for cdef in self.getmro():\n if name in cdef.attrs:\n s_result = cdef.attrs[name].s_value\n if s_result != s_ImpossibleValue:\n return s_result\n else:\n return None\n return None", "def __getattr__( self, attrName ):\r\n if attrName!=attrName.lower() and attrName!=\"caseSensitive\" and not self.caseSensitive and \\\r\n (attrName.startswith(\"start_\") or 
attrName.startswith(\"end_\")):\r\n return getattr(self,attrName.lower())\r\n raise AttributeError, attrName", "def __nc_attr(self, attr_name, ds_name):\n if ds_name is not None:\n for grp_name in ['/target_product', '/side_product']:\n dset = self.fid['{}/{}'.format(grp_name, ds_name)]\n if attr_name in dset.ncattrs():\n return dset.getncattr(attr_name)\n\n return None\n\n if attr_name not in self.fid.ncattrs():\n return None\n\n return self.fid.getncattr(attr_name)", "def _mangle_attr(name):\n return 'm_' + name", "def __getattr__(self, name):\n if not name in self._attrs.iterkeys():\n raise AttributeError(name)\n return self._attrs[name]", "def getAttrs(element, exclude=(), required=()):\n conversionTable = {'lowerBound':PQU.PQU, 'upperBound':PQU.PQU, 'value':PQU.PQU, 'energy':PQU.PQU,\n 'neutronWidth':PQU.PQU, 'captureWidth':PQU.PQU, 'fissionWidthA':PQU.PQU, 'fissionWidthB':PQU.PQU, 'competitiveWidth':PQU.PQU,\n 'levelSpacing':PQU.PQU, 'Q':PQU.PQU, 'radius':PQU.PQU, 'effectiveRadius':PQU.PQU,\n 'reconstructCrossSection':getBool, 'multipleRegions': getBool, 'LdependentScatteringRadii': getBool,\n 'calculateChannelRadius':getBool, 'computeAngularDistribution':getBool, 'forSelfShieldingOnly': getBool,\n 'calculateShift':getBool,'calculatePenetrability':getBool,\n 'LvaluesNeededForConvergence':int, 'ENDF_MT':int, 'index':int, 'L':int,\n 'neutronDOF':floatOrint, 'gammaDOF':floatOrint, 'competitiveDOF':floatOrint, 'fissionDOF':floatOrint,\n 'spin':xParticle.spin, 'parity':xParticle.parity,\n 'scatteringRadius':(lambda foo: scatteringRadius(PQU.PQU(foo)) if foo!='energyDependent' else foo),\n }\n attrs = dict( element.items() )\n for key in attrs.keys():\n if key in exclude: attrs.pop(key)\n elif key in conversionTable: attrs[key] = conversionTable[key]( attrs[key] )\n for val in required:\n if val not in attrs: attrs[val] = False\n return attrs", "def _arg_attr(identifier, attr1, attr2):\n return attr1 if identifier.startswith('t') else attr2", "def createUnitDefinition(self):\n return _libsbml.Model_createUnitDefinition(self)" ]
[ "0.63725454", "0.60422206", "0.5772077", "0.57339555", "0.5650211", "0.5626752", "0.5624979", "0.55762106", "0.55762106", "0.5543511", "0.55427015", "0.55405295", "0.5533721", "0.5516388", "0.54795426", "0.5476638", "0.5459001", "0.5459001", "0.5439841", "0.54318297", "0.5421264", "0.54183096", "0.54182243", "0.5405263", "0.5390103", "0.5385034", "0.537344", "0.5331236", "0.53082806", "0.52970684" ]
0.6441973
0
Scan for independent loops and set up dictionaries.
def main(self, verbose=0):
    indepdict=self.scan_for_loop(self.indeploop)
    pegdict1 = self.scan_for_loop(self.pegloop1)
    pegdict2 = self.scan_for_loop(self.pegloop2)
    if len(indepdict.keys()) == 0 and len(pegdict1.keys()) == 0 and len(pegdict2.keys()) == 0:
        return dict()
    alldict = dict(indepdict)
    alldict.update(pegdict1)
    alldict.update(pegdict2)
    indepcomb=self.get_combo_list(indepdict, 0)
    pegcomb1=self.get_combo_list(pegdict1, 1)
    pegcomb2=self.get_combo_list(pegdict2, 1)
    allcombs = self.combine_three_combo_lists(indepcomb, pegcomb1, pegcomb2)
    datasets = self.prepare_looped_datasets(alldict, allcombs)
    createdfiles = self.create_input_files(datasets)
    if verbose == 1:
        self.print_list(indepcomb)
        self.print_list(pegcomb1)
        self.print_list(pegcomb2)
        self.print_list(allcombs)
        for datakey in datasets:
            self.print_list(datasets[datakey])
    return createdfiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict", "def iterate():\n # States are of the form (coordinates, word so far, used spots)\n # Load the initial states into the stack\n global theStack\n for r,layer in enumerate(honeycomb):\n for e,el in enumerate(layer):\n theStack.append( ((e,r), [el],set([(e,r)])) )\n \n while (len(theStack) != 0):\n #pop the next run\n (e,r),soFar,used=theStack[-1]\n theStack=theStack[:-1]\n #run it!\n step((e,r),soFar,used)", "def find_loop_nest_with_map(kernel: LoopKernel) -> Mapping[str, AbstractSet[str]]:\n result = {}\n\n from loopy.kernel.data import ConcurrentTag, IlpBaseTag\n\n all_nonpar_inames = {\n iname for iname in kernel.all_inames()\n if not kernel.iname_tags_of_type(iname,\n (ConcurrentTag, IlpBaseTag))}\n\n iname_to_insns = kernel.iname_to_insns()\n\n for iname in all_nonpar_inames:\n result[iname] = {other_iname\n for insn in iname_to_insns[iname]\n for other_iname in kernel.insn_inames(insn) & all_nonpar_inames}\n\n return result", "def multiple_eval_for_loops_v2():", "def multiple_eval_for_loops_v1():", "def initialize_sets(self):\n for block in self.blocks:\n # Insert phi nodes from SSA stage into the assignments of the block\n for phi in block.phis:\n block.gen.setdefault(phi, []).insert(0, phi)\n\n # Update the kill set with the variables that are assigned to in\n # the block\n block.kill = set(block.gen)\n block.output = set(block.gen)\n #for entry in block.bound:\n # block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def __calculate_iterations(self):\n iterables = {}\n \n def get_type(type_options):\n key = self._config_dict[type_options].keys()[0]\n data = self._config_dict[type_options][key]\n \n if type(data) == dict:\n iterables[key] = [data]\n else:\n iterables[key] = data\n \n for config_type in self.__iterables:\n get_type(config_type)\n \n self.__iterables = [dict(zip(iterables, v)) for v in product(*iterables.values())] # Calculates the cartesian product of all the lists to iterate to generate permutations.\n self.__iterables_counter = 0", "def find_loop_nest_around_map(kernel: LoopKernel) -> Mapping[str, AbstractSet[str]]:\n result: Dict[str, Set[str]] = {}\n\n all_inames = kernel.all_inames()\n\n iname_to_insns = kernel.iname_to_insns()\n\n # examine pairs of all inames--O(n**2), I know.\n from loopy.kernel.data import IlpBaseTag\n for inner_iname in all_inames:\n result[inner_iname] = set()\n for outer_iname in all_inames:\n if inner_iname == outer_iname:\n continue\n\n if kernel.iname_tags_of_type(outer_iname, IlpBaseTag):\n # ILP tags are special because they are parallel tags\n # and therefore 'in principle' nest around everything.\n # But they're realized by the scheduler as a loop\n # at the innermost level, so we'll cut them some\n # slack here.\n continue\n\n if iname_to_insns[inner_iname] < iname_to_insns[outer_iname]:\n result[inner_iname].add(outer_iname)\n\n for dom in kernel.domains:\n for outer_iname in dom.get_var_names(isl.dim_type.param):\n if outer_iname not in all_inames:\n continue\n\n for inner_iname 
in dom.get_var_names(isl.dim_type.set):\n result[inner_iname].add(outer_iname)\n\n return result", "def NM08_model_loop(root, run_dict, res_dict, dual_list, perm_tup, machine,\n decimate=100, i=1, verbose=False):\n if machine == 'laptop':\n fz_file_pat = '/home/chet/gmt/data/NZ/wells/feedzones/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/home/chet/data/mrp_data/Steve_Sewell_MRP_PhD_Data/' \\\n 'Natural_State_Temperatures/NM08_profile_pyfehm_comma.txt'\n excel_file = '/home/chet/data/mrp_data/well_data/flow_rates/' \\\n 'July_2017_final/Merc_Ngatamariki.xlsx'\n elif machine == 'server':\n fz_file_pat = '/Users/home/hoppche/data/merc_data/wells/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/Users/home/hoppche/data/merc_data/temps/' \\\n 'NM08_profile_pyfehm_comma.txt'\n excel_file = '/Users/home/hoppche/data/merc_data/flows/' \\\n 'Merc_Ngatamariki.xlsx'\n # Make the directory for this object\n print('Making grid')\n # Extract just floats and exponent from perms\n work_dir = '{}/run_{}'.format(root, i)\n dat = make_NM08_grid(work_dir=work_dir, log_base=3, max_range=15)\n print('Assigning reservoir parameters')\n dat = reservoir_params(dat, temp_file=T_file, reservoir_dict=res_dict,\n show=False)\n print('Defining well nodes')\n dat = define_well_nodes(\n dat, well_file_pattern=fz_file_pat,\n well_name='NM08', type='injection', surf_loc=[1500., 1500.])\n print('Running initial condition')\n dat = run_initial_conditions(dat)\n dat = set_well_boundary(\n dat, excel_file=excel_file, sheet_name='NM08 Stimulation',\n well_name='NM08', dates=[datetime(2012, 6, 7), datetime(2012, 7, 12)],\n t_step='day', decimate=decimate, debug=0)\n dat = set_stress(dat)\n dat = set_dual(dat, zonelist=['tahorakuri'], dual_list=dual_list)\n if perm_tup:\n dat = set_permmodel(dat, zonelist=['tahorakuri'], index=perm_tup[0],\n permmodel_dict=perm_tup[1])\n model_run(dat, run_dict, verbose=verbose)\n return", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def algorithm_loop(self):", "def organise_scans(self):\n self.wh_to_th = {}\n self.th_to_wh = {}\n\n wh_to_th_metrics = []\n th_to_wh_metrics = []\n wh_to_th_params = {}\n th_to_wh_params = {}\n wh_to_th_minim_info = {}\n th_to_wh_minim_info = {}\n wh_to_th_minim_info['time'] = []\n wh_to_th_minim_info['iterations'] = []\n wh_to_th_minim_info['funcevals'] = []\n wh_to_th_minim_info['status'] = []\n th_to_wh_minim_info['time'] = []\n th_to_wh_minim_info['iterations'] = []\n th_to_wh_minim_info['funcevals'] = []\n th_to_wh_minim_info['status'] = []\n\n for injparam in sorted(self.data_sets.keys()):\n injlabels = self.labels[injparam].dict\n for injkey in self.data_sets[injparam].keys():\n h0_metric_val = self.data_sets[injparam][injkey][\n 'h0_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n h1_metric_val = self.data_sets[injparam][injkey][\n 'h1_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n if h1_metric_val > h0_metric_val:\n bestfit = 'h0'\n altfit = 'h1'\n else:\n bestfit = 'h1'\n altfit = 'h0'\n\n wh_to_th_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)]['fid_asimov']\n th_to_wh_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)]['fid_asimov']\n\n wh_to_th_metrics.append(wh_to_th_fit['metric_val'])\n th_to_wh_metrics.append(th_to_wh_fit['metric_val'])\n\n for systkey in wh_to_th_fit['params'].keys():\n if systkey not in wh_to_th_params.keys():\n 
wh_to_th_params[systkey] = []\n wh_to_th_params[systkey].append(\n wh_to_th_fit['params'][systkey]\n )\n for systkey in th_to_wh_fit['params'].keys():\n if systkey not in th_to_wh_params.keys():\n th_to_wh_params[systkey] = []\n th_to_wh_params[systkey].append(\n th_to_wh_fit['params'][systkey]\n )\n\n wh_to_th_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_time'])\n wh_to_th_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n wh_to_th_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n wh_to_th_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n \n th_to_wh_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_time'])\n th_to_wh_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n th_to_wh_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n th_to_wh_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n\n wh_to_th_params['bestfit'] = bestfit\n wh_to_th_params['altfit'] = altfit\n th_to_wh_params['bestfit'] = bestfit\n th_to_wh_params['altfit'] = altfit\n\n self.wh_to_th['metrics'] = wh_to_th_metrics\n self.th_to_wh['metrics'] = th_to_wh_metrics\n self.wh_to_th['params'] = wh_to_th_params\n self.th_to_wh['params'] = th_to_wh_params\n self.wh_to_th['minim_info'] = wh_to_th_minim_info\n self.th_to_wh['minim_info'] = th_to_wh_minim_info", "def initialize(self): \n \n \n fixed_counts = {}\n partial_counts = {}\n \n self.ctx.static = []\n self.ctx.partial = {}\n self.ctx.select = []\n \n self.ctx.compositions = []\n self.ctx.sites_refactored = {}\n \n # We collect the sites into their composition and calculate the theoretical number of occupied sites\n for site in self.ctx.structure:\n if self.__is_partial(site):\n self.ctx.sites_refactored.setdefault(site.species_and_occu, [])\n if site.species_and_occu not in self.ctx.compositions:\n self.ctx.compositions.append(site.species_and_occu)\n \n self.ctx.partial.setdefault(site.species_and_occu, [[]])\n self.ctx.partial.get(site.species_and_occu)[0].append(site)\n \n partial_counts.setdefault(site.species_and_occu, [[0, 0] for s in site.species_and_occu])\n \n for i, element in enumerate(site.species_and_occu):\n partial_counts[site.species_and_occu][i][0] += site.species_and_occu.get(element)\n partial_counts[site.species_and_occu][i][1] += site.species_and_occu.get(element)\n else:\n self.ctx.static.append(PeriodicSite(site.specie, site.coords, site.lattice, True, True))\n fixed_counts.setdefault(site.specie, 0)\n fixed_counts[site.specie] += 1\n \n # If all sites are static, then no need to do anything.\n if len(self.ctx.static) == len(self.ctx.structure):\n self.ctx.do_break = 0\n self.out('structures.%s' % self.inputs.structure.uuid, self.inputs.structure)\n return\n \n # We compile the number of occupied site for each partial 
composition while not going over the theoretical number\n for comp in partial_counts:\n self.ctx.rs.shuffle(self.ctx.partial.get(comp)[0])\n for i, sp in enumerate(comp):\n partial_counts[comp][i][0] = np.floor(partial_counts[comp][i][0])\n \n # Calculation of the departure from the composition. \n error = {\n el: self.ctx.structure.composition.get(el) - fixed_counts.get(el, 0)\n for el in self.ctx.structure.composition\n }\n\n for comp in partial_counts:\n for i, sp in enumerate(comp):\n error[sp] -= partial_counts.get(comp)[i][0]\n\n # Adding ions to sites with the highest departure from theoretical number as long as the error\n # is greater than 0.5.\n for element in error:\n while error[element] > 0.5:\n if error[element] > 0:\n max_error = (None, 0)\n for i, comp in enumerate(partial_counts):\n if element in comp:\n for j, sp in enumerate(comp):\n if sp == element:\n err = (partial_counts.get(comp)[j][1] - partial_counts.get(comp)[j][0]) ** 2\n if err > max_error[1]:\n max_error = ((comp, j), err)\n partial_counts.get(max_error[0][0])[max_error[0][1]][0] += 1\n error[element] -= 1\n \n self.ctx.configurations = tuple()\n self.ctx.configuration_hashes = tuple()\n self.ctx.configuration_steps = tuple()\n self.ctx.configuration_energies = tuple()\n \n for comp in partial_counts:\n # For each site, calculate log10 of the multinomial factor,\n # it will be used to scale the probability of each site to \n # be used for a swap.\n n = 0\n for i in range(len(self.ctx.partial.get(comp)[-1])):\n n += np.log10(i + 1)\n \n for i, sp in enumerate(comp):\n for j in range(int(partial_counts.get(comp)[i][0])):\n n -= np.log10(j + 1)\n \n for _ in range(int(partial_counts.get(comp)[i][0])):\n site = self.ctx.partial.get(comp)[-1].pop(0)\n self.ctx.partial.get(comp).insert(0, PeriodicSite(Specie(sp, self.ctx.charges.get(sp.value, 0)), \n site.coords, site.lattice, True, True))\n self.ctx.sites_refactored.get(comp).append(sp)\n leftovers = self.ctx.partial.get(comp).pop()\n \n for j in range(len(leftovers)):\n n -= np.log10(j + 1)\n \n for site in leftovers:\n self.ctx.partial.get(comp).insert(0, PeriodicSite(self.ctx.vacancy, \n site.coords, site.lattice, True, True))\n self.ctx.sites_refactored.get(comp).append(self.ctx.vacancy.element)\n\n for _ in range(np.ceil(n).astype(int)):\n self.ctx.select.append(comp)\n \n for sites_refactored in self.ctx.sites_refactored.values():\n self.ctx.rs.shuffle(sites_refactored)\n \n self.ctx.idxes = [idx for idx in range(len(self.ctx.select))]\n self.ctx.sites = self.ctx.partial\n del self.ctx.partial\n \n self.ctx.partial_refactored = []\n # (site #, element) -> particle #\n self.ctx.indices = {}\n i = 0\n \n for site in self.ctx.structure:\n if self.__is_partial(site):\n for element in site.species_and_occu.keys():\n self.ctx.indices[(i, element)] = len(self.ctx.partial_refactored)\n self.ctx.partial_refactored.append(PeriodicSite(Specie(element, self.ctx.charges.get(element.value)), site.coords, site.lattice, True, True))\n i += 1\n \n self.ctx.all_indices = set(range(len(self.ctx.partial_refactored)))\n structure = Structure.from_sites(self.ctx.partial_refactored)\n self.ctx.ewald = EwaldSummation(structure)\n\n self.ctx.energy = self.__ewald(self.ctx.sites_refactored) * np.ones(1)\n self.ctx.tested = np.empty(0, dtype=np.float)\n self.ctx.accepted = np.empty(0, dtype=np.float)\n\n if self.inputs.verbose:\n self.report('Starting structure: E = %f' % self.ctx.energy[-1])", "def interpret_specs(self,details,return_stubs=False):\n\n\t\t#---this loop interpreter 
allows for a loop key at any point over specs in list or dict\n\t\t#---trim a copy of the specs so all loop keys are terminal\n\t\tdetails_trim = deepcopy(details)\n\t\t#---get all paths to a loop\n\t\tnonterm_paths = list([tuple(j) for j in set([tuple(i[:i.index('loop')+1]) \n\t\t\tfor i,j in catalog(details_trim) if 'loop' in i[:-1]])])\n\t\t#---some loops end in a list instead of a sub-dictionary\n\t\tnonterm_paths_list = list([tuple(j) for j in set([tuple(i[:i.index('loop')+1]) \n\t\t\tfor i,j in catalog(details_trim) if i[-1]=='loop'])])\n\t\t#---for each non-terminal path we save everything below and replace it with a key\n\t\tnonterms = []\n\t\tfor path in nonterm_paths:\n\t\t\tbase = deepcopy(delve(details_trim,*path[:-1]))\n\t\t\tnonterms.append(base['loop'])\n\t\t\tpivot = delve(details_trim,*path[:-1])\n\t\t\tpivot['loop'] = base['loop'].keys()\n\t\t#---hypothesize over the reduced specifications dictionary\n\t\tsweeps = [{'route':i[:-1],'values':j} for i,j in catalog(details_trim) if 'loop' in i]\n\t\t#---! note that you cannot have loops within loops (yet?) but this would be the right place for it\n\t\tif sweeps == []: new_calcs = [deepcopy(details)]\n\t\telse: new_calcs = hypothesis(sweeps,default=details_trim)\n\t\tnew_calcs_stubs = deepcopy(new_calcs)\n\t\t#---replace non-terminal loop paths with their downstream dictionaries\n\t\tfor ii,i in enumerate(nonterms):\n\t\t\tfor nc in new_calcs:\n\t\t\t\tdownkey = delve(nc,*nonterm_paths[ii][:-1])\n\t\t\t\tupkey = nonterm_paths[ii][-2]\n\t\t\t\tpoint = delve(nc,*nonterm_paths[ii][:-2])\n\t\t\t\tpoint[upkey] = nonterms[ii][downkey]\n\t\t#---loops over lists (instead of dictionaries) carry along the entire loop which most be removed\n\t\tfor ii,i in enumerate(nonterm_paths_list):\n\t\t\tfor nc in new_calcs: \n\t\t\t\t#---! this section is supposed to excise the redundant \"loop\" list if it still exists\n\t\t\t\t#---! 
however the PPI project had calculation metadata that didn't require it so we just try\n\t\t\t\ttry:\n\t\t\t\t\tpivot = delve(nc,*i[:-2]) if len(i)>2 else nc\n\t\t\t\t\tval = delve(nc,*i[:-1])[i[-2]]\n\t\t\t\t\tpivot[i[-2]] = val\n\t\t\t\texcept: pass\n\t\treturn new_calcs if not return_stubs else (new_calcs,new_calcs_stubs)", "def iterate_results(results, extract_fn):\n outputs = {}\n for environment, environment_results in results.items():\n if environment not in outputs:\n outputs[environment] = {}\n for experimental_setting, setting_results in environment_results.items():\n outputs[environment][experimental_setting] = []\n for config, seeds_results in setting_results.items():\n for seed, actual_results in seeds_results.items():\n output = extract_fn(actual_results)\n outputs[environment][experimental_setting].append(output)\n outputs[environment][experimental_setting] = np.array(outputs[environment][experimental_setting])\n return outputs", "def __staticLoopBoundScanning(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # generate the lower and upper values of each inter-tile loop\n val_table = {}\n for iname in outer_loop_inames:\n _, _, _, st_exp, _ = loop_info_table[iname]\n lval = ast.IdentExp(self.__getTileIterName(iname, tile_level))\n t = ast.BinOpExp(\n ast.IdentExp(self.__getTileSizeName(iname, tile_level)),\n ast.ParenthExp(st_exp.replicate()),\n ast.BinOpExp.SUB,\n )\n uval = ast.BinOpExp(lval.replicate(), ast.ParenthExp(t), ast.BinOpExp.ADD)\n val_table[iname] = (lval, uval)\n\n # iterate over each statement to determine loop bounds that are affine functions\n # of outer loop iterators\n lb_exps_table = {}\n ub_exps_table = {}\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # determine the value of the new lower loop bound\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n 
)\n else:\n if need_prolog:\n t = self.__findMinMaxVal(\n \"max\", lb_exp.replicate(), lb_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # determine the value of the new upper loop bound\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_epilog:\n t = self.__findMinMaxVal(\n \"min\", ub_exp.replicate(), ub_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def initialize(self):\n self.assmts = {}\n\n offset = 0\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.bit = 1 << offset\n assmts.mask = assmts.bit\n self.assmts[entry] = assmts\n offset += 1\n\n for block in self.blocks:\n block.stats = block.phis.values() + block.stats\n for stat in block.stats:\n if isinstance(stat, (PhiNode, NameAssignment)):\n stat.bit = 1 << offset\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= stat.bit\n offset += 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n 
block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bound:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.itervalues():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def _init_dictionaries(self):\n\t\t# Dictionary contatining all actionPotential\n\t\tself.actionPotentials = {}\n\t\t# Dictionary containing all cells id.\n\t\t# Cells id are used by neuron to communicate synapses between different cells in different hosts. Ids (gids) can be any integer, they just need to be unique.\n\t\tself.cellsId = {}\n\t\t# Dictionary containing all cells\n\t\tself.cells = {}\n\n\t\tself._nMuscles = len(self._infoMuscles)\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\t# Create sub-dictionaries for all DoF\n\t\t\tself.actionPotentials[muscle]={}\n\t\t\tself.cellsId[muscle]={}\n\t\t\tself.cells[muscle]={}\n\t\t\tfor cellInfo in self._infoCommonCellsInMuscles:\n\t\t\t\t# add lists containing cell ids/cells/ap\n\t\t\t\tcellClass = cellInfo[0]\n\t\t\t\tcellName = cellInfo[1]\n\t\t\t\tself.cellsId[muscle][cellName]=[]\n\t\t\t\tself.cells[muscle][cellName]=[]\n\t\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\t\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\t\tself.actionPotentials[muscle][cellName]=[]\n\n\t\t# Add special cells (specifc for some muscles or not muscle related)\n\t\tfor cellInfo in self._infoSpecialCells:\n\t\t\tgroupOrMuscle = cellInfo[0]\n\t\t\tcellClass = cellInfo[1]\n\t\t\tcellName = cellInfo[2]\n\t\t\tif not groupOrMuscle in self.cellsId.keys():\n\t\t\t\tself.actionPotentials[groupOrMuscle]={}\n\t\t\t\tself.cellsId[groupOrMuscle]={}\n\t\t\t\tself.cells[groupOrMuscle]={}\n\n\t\t\tself.cellsId[groupOrMuscle][cellName]=[]\n\t\t\tself.cells[groupOrMuscle][cellName]=[]\n\t\t\tif (cellClass==\"Motoneuron\" or cellClass==\"IntFireMn\") and self.recordMotoneurons:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"AfferentFiber\" and self.recordAfferents:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]\n\t\t\telif cellClass==\"IntFire\" and self.recordIntFire:\n\t\t\t\tself.actionPotentials[groupOrMuscle][cellName]=[]", "def _project_loops(self):\n\n self._create_projection_datasets()\n self._get_sho_chunk_sizes(10)\n\n '''\n Loop over the FORCs\n '''\n for forc_chunk_index in range(self._num_forcs):\n pos_chunk_index = 0\n\n self._current_sho_spec_slice = slice(self.sho_spec_inds_per_forc * self._current_forc,\n self.sho_spec_inds_per_forc * (self._current_forc + 1))\n self._current_met_spec_slice = slice(self.metrics_spec_inds_per_forc * self._current_forc,\n self.metrics_spec_inds_per_forc * (self._current_forc + 1))\n dc_vec = self._get_dc_offset()\n '''\n Loop over positions\n '''\n while self._current_pos_slice.stop < self._end_pos:\n loops_2d, nd_mat_shape_dc_first, order_dc_offset_reverse = 
self._get_projection_data(pos_chunk_index)\n\n # step 8: perform loop unfolding\n projected_loops_2d, loop_metrics_1d = self._project_loop_batch(dc_vec, np.transpose(loops_2d))\n\n # test the reshapes back\n projected_loops_2d = self._reshape_projected_loops_for_h5(projected_loops_2d,\n order_dc_offset_reverse,\n nd_mat_shape_dc_first)\n self.h5_projected_loops[self._current_pos_slice, self._current_sho_spec_slice] = projected_loops_2d\n\n metrics_2d = self._reshape_results_for_h5(loop_metrics_1d, nd_mat_shape_dc_first)\n\n self.h5_loop_metrics[self._current_pos_slice, self._current_met_spec_slice] = metrics_2d\n\n # Reset the position slice\n self._current_pos_slice = slice(None)\n\n pass", "def init_globals():\n global cycles, used_edges, split\n cycles = []\n used_edges = []\n split = []", "def loops(graph = None):\n\tunknown_structs = []\n\tcompound_structs = []\n\tloops_dict = create_components_dict()\n\tfor subgraph in nx.connected_component_subgraphs(graph):\n\t\tif subgraph.number_of_nodes() < 3:\n\t\t\tunknown_structs.append(subgraph)\n\t\telse:\n\t\t\tif connectivity_threshold(graph = subgraph) > 2 or loop_type(graph= subgraph) == 'NA':\n\t\t\t\tcompound_structs.append(subgraph)\n\t\t\telse:\n\t\t\t\tloops_dict[loop_type(graph= subgraph)].append(subgraph)\n\treturn loops_dict", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def initialize(self):\n self.assmts = {}\n\n bit = 1\n for entry in self.entries:\n assmts = AssignmentList()\n assmts.mask = assmts.bit = bit\n self.assmts[entry] = assmts\n bit <<= 1\n\n for block in self.blocks:\n for stat in block.stats:\n if isinstance(stat, NameAssignment):\n stat.bit = bit\n assmts = self.assmts[stat.entry]\n assmts.stats.append(stat)\n assmts.mask |= bit\n bit <<= 1\n\n for block in self.blocks:\n for entry, stat in block.gen.items():\n assmts = self.assmts[entry]\n if stat is Uninitialized:\n block.i_gen |= assmts.bit\n else:\n block.i_gen |= stat.bit\n block.i_kill |= assmts.mask\n block.i_output = block.i_gen\n for entry in block.bounded:\n block.i_kill |= self.assmts[entry].bit\n\n for assmts in self.assmts.values():\n self.entry_point.i_gen |= assmts.bit\n self.entry_point.i_output = self.entry_point.i_gen", "def make_loop(loop_orders, dtypes, loop_tasks, sub, openmp=None):\r\n def loop_over(preloop, code, indices, i):\r\n iterv = 'ITER_%i' % i\r\n update = \"\"\r\n suitable_n = \"1\"\r\n for j, index in enumerate(indices):\r\n var = sub['lv%i' % j]\r\n dtype = dtypes[j]\r\n update += \"%(dtype)s &%(var)s_i = * ( %(var)s_iter + %(iterv)s * %(var)s_jump%(index)s_%(i)s );\\n\" % locals()\r\n if index != 'x':\r\n suitable_n = \"%(var)s_n%(index)s\" % locals()\r\n if openmp:\r\n openmp_elemwise_minsize = theano.config.openmp_elemwise_minsize\r\n forloop = \"\"\"#pragma omp parallel for if( %(suitable_n)s >=%(openmp_elemwise_minsize)s)\\n\"\"\" % locals()\r\n else:\r\n forloop = \"\"\r\n forloop += \"\"\"for (int %(iterv)s = 0; %(iterv)s<%(suitable_n)s; %(iterv)s++)\"\"\" % locals()\r\n return\"\"\"\r\n %(preloop)s\r\n %(forloop)s {\r\n %(update)s\r\n %(code)s\r\n }\r\n \"\"\" % locals()\r\n\r\n preloops = {}\r\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\r\n for j, index in enumerate(loop_order):\r\n if index != 'x':\r\n preloops.setdefault(j, \"\")\r\n preloops[j] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n break\r\n else: # all broadcastable\r\n preloops.setdefault(0, \"\")\r\n preloops[0] += (\"%%(lv%(i)s)s_iter = 
(%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n\r\n s = \"\"\r\n\r\n for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):\r\n s = loop_over(preloops.get(i, \"\") + pre_task, s + task, indices, i)\r\n\r\n s += loop_tasks[-1]\r\n return \"{%s}\" % s", "def tunes_non_cyclic():\n A = Tune(\"A\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\",\n next_in_set=[\"B\", \"D\"])\n B = Tune(\"B\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\",\n next_in_set=[\"C\"])\n C = Tune(\"C\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\")\n D = Tune(\"D\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\")\n E = Tune(\"E\", key=None, tunetype=\"Reel\", played=None, source_code=\"TC\",\n next_in_set=[\"A\"])\n tunes = {\n \"A\": A,\n \"B\": B,\n \"C\": C,\n \"D\": D,\n \"E\": E\n }\n return tunes", "def make_sol_dict():\n file_names = [\"FORMAT3_Copy of KommuneMTPLforTriangle.xls\",\n \"C Triangulations analysis R2017 GC20161109.xls\",\n \"EVOLUTION 2017 _ M+F - Triangles cat nat brut net.xls\",\n \"Bsp8 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"Analysis MTPL MOD.xls\",\n \"Bsp6 _ Dreiecke aus GCNA für CU1.4.1.xls\",\n \"FORMAT6_sinistres.xls\",\n \"FORMAT1_LOSSES-MTPL-OVER-500-GROUP-2005_modified.xls\"]\n solutions_dict = dict()\n raw_dict = dict()\n for file_name in file_names:\n sr_list, file_name = ExcelLoader.load_excel(pdir.RESOURCES_DIR + \"/raw_test_files/\" + file_name)\n dh = DataHolder()\n for sr in sr_list:\n dh.add_sheet(sr.sheet_name, pd.DataFrame(columns=sr.headers, data=sr.row_vals),\n pd.DataFrame(columns=sr.headers, data=sr.xls_types), orig_sheet_name=sr.sheet_name)\n\n dh = SheetPreProcessor.separate_components(dh)\n raw_dict[file_name] = dh.encode()\n dh = HorizontalMerger.horizontal_merge(dh)\n #temp_path = pdir.RESOURCES_DIR + \"/temp/\"\n #dh.write_excel(temp_path + file_name)\n solutions_dict[file_name] = dh\n solutions_dict = MergePararametersOptimizer.make_ind_col_dict(solutions_dict)\n with open(pdir.RESOURCES_DIR + \"/test/merge_solutions.obj\", \"wb\") as temp_file:\n pickle.dump(solutions_dict, temp_file)\n with open(pdir.RESOURCES_DIR + \"/test/raw_test.obj\", \"wb\") as temp_file:\n pickle.dump(raw_dict, temp_file)", "def loop_nonThreaded():\n nonlocal index, total\n nonlocal d_tree\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n b_analyzeStatusHist: bool = False\n b_inputStatusHist: bool = False\n b_outputStatusHist: bool = False\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n for path, data in iterator:\n dret_inputSet = {}\n dret_analyze = {}\n dret_outputSet = {}\n # Read (is sometimes skipped) / Analyze / Write (also sometimes skipped)\n if fn_inputReadCallback:\n dret_inputSet = inputSet_read(path, data)\n try:\n b_inputStatusHist = b_inputStatusHist or dret_inputSet['status']\n except:\n pass\n if fn_analysisCallback:\n try:\n dret_analyze = analysis_do(path, d_tree[path], index)\n except:\n dret_analyze['status'] = False\n self.dp.qprint(\"Analysis failed\", comms = 'error')\n try:\n b_analyzeStatusHist = b_analyzeStatusHist or dret_analyze['status']\n except:\n pass\n if fn_outputWriteCallback:\n if 'status' in dret_analyze.keys():\n if dret_analyze['status']:\n 
dret_outputSet = outputSet_write(path, d_tree[path])\n try:\n b_outputStatusHist = b_outputStatusHist or dret_outputSet['status']\n except:\n pass\n index += 1\n dret_inputSet['status'] = b_inputStatusHist\n dret_analyze['status'] = b_analyzeStatusHist\n dret_outputSet['status'] = b_outputStatusHist\n tree_removeDeadBranches()", "def _extract_loops(self, pdb, loop_type, mapping, normalize):\n try:\n mlab = matlab.Matlab(self.config['locations']['fr3d_root'])\n [loops, count, err_msg] = mlab.extractLoops(pdb, loop_type, nout=3)\n except Exception as err:\n self.logger.exception(err)\n raise err\n\n if err_msg != '':\n raise core.MatlabFailed(err_msg)\n\n if loops == 0:\n self.logger.warning('No %s in %s', loop_type, pdb)\n loop_id = self._get_fake_loop_id(pdb, loop_type)\n return [mod.LoopInfo(loop_id=loop_id,\n type = 'NA',\n pdb_id=pdb,\n sequential_id='000',\n length=0,\n seq='',\n r_seq='',\n nwc_seq='',\n r_nwc_seq='',\n unit_ids='',\n loop_name='')]\n\n self.logger.info('Found %i %s loops', count, loop_type)\n\n data = []\n for index in xrange(count):\n loop = loops[index].AllLoops_table\n full_id = normalize(loop.full_id)\n loop_id = self._get_loop_id(full_id, pdb, loop_type, mapping)\n loops[index].Filename = loop_id\n\n data.append(mod.LoopInfo(\n loop_id=loop_id,\n type=loop_type,\n pdb_id=pdb,\n sequential_id=loop_id.split(\"_\")[-1],\n length=int(loops[index].NumNT[0][0]),\n seq=loop.seq,\n r_seq=loop.r_seq,\n nwc_seq=loop.nwc,\n r_nwc_seq=loop.r_nwc,\n unit_ids=','.join(full_id),\n loop_name=loop.loop_name))\n\n if self.save_loops:\n self.__save__(loops, self.config['locations']['loops_mat_files'])\n\n return data" ]
[ "0.60553885", "0.5839073", "0.5820277", "0.5816718", "0.5751431", "0.5579861", "0.5559903", "0.55534524", "0.5551435", "0.55467737", "0.5465791", "0.5446664", "0.54339755", "0.54255664", "0.53983235", "0.5374087", "0.53734773", "0.53571624", "0.53484", "0.53369623", "0.5333001", "0.5323921", "0.5288923", "0.5288657", "0.52676123", "0.52584344", "0.52571315", "0.52535546", "0.52184725", "0.5215378" ]
0.59351087
1
Prepare looped lines from looping dictionary.
def prepare_looped_lines(self, alldict, comblist):
    loopline_dict=dict()
    for stridx in comblist:
        lidx = int(stridx.split('-')[0])
        loopidx = int(stridx.split('-')[1])
        loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\n'
    return loopline_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_looped_datasets(self, alldict, allcombs):\n datasets_dict=dict()\n numcombs = len(allcombs)\n combct = 0\n while combct < numcombs:\n newdata = list(self.baseinput.data)\n loopedlines = dict()\n loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])\n for lvalidx in loopedlines.keys():\n newdata[lvalidx] = loopedlines[lvalidx]\n datasets_dict[combct] = newdata\n combct = combct + 1\n return datasets_dict", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict", "def _clean_up_loop_dict(loop_dict):\n \n # Remove the 'data_header' tag if it exists\n # since it is a list of dataframes\n # Then re-attach each of them one at a time\n if u'data_header' in loop_dict.keys():\n header_df_list = loop_dict.pop(u'data_header')\n \n if isinstance(header_df_list, list):\n for df in enumerate(header_df_list):\n loop_dict[u'data_header_'+str(df[0]+1)] = df[1]\n else:\n loop_dict[u'data_header_1'] = header_df_list\n \n return loop_dict", "def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))", "def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))", "def fill_line(self, dct):\n return self._line_format % self.list_values(dct)", "def process_lines(self):\n\n for line in self.all_lines:\n container_re = re.compile(r'(.*?) 
bags')\n bags_re = re.compile(r'(?:(\\d+)|no other) (.*?) bags*')\n container_name = re.match(container_re, line).group(1)\n bags = re.findall(bags_re, line)\n self.all_bags[container_name] = bags", "def process(self):\n first_line = self.setup[\"first_line\"]\n last_line = self.setup[\"last_line\"]\n\n self.logger.info(\"Using lines %s - %s\", first_line, last_line)\n\n path_temp = \"{}_\".format(self.path)\n\n with open(self.path, \"r\") as src, open(path_temp, \"w\") as dest:\n lines = src.r..\n copy_lines = lines[first_line-1:last_line]\n dest.write(\"\".join(copy_lines))\n\n os.rename(path_temp, self.path)", "def refresh_lines(self):\n for line_data in self._data_lines:\n line = BasketLine.from_dict(self, line_data)\n pricing_context = PricingContext(shop=self.shop, customer=self.customer, supplier=line.supplier)\n line.cache_info(pricing_context)\n self._add_or_replace_line(line)", "def get_unidecode_lines(self, lines_dict):\n for line in lines_dict:\n line['parnter_name'] = unicode(line['partner_name'])\n return lines_dict", "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _loopPreparation(self, stimNumber):\n self.nbFrames=10000 #TO DO --> better place for this line of code\n\n self.stimName= self.experimentName+'_S%(number)03d' % {\"number\": stimNumber} #%02d return a 2 char string : 1-->01\n (self.tiffWriterList, self.textFile) = filesInit( self.savePath,\n self.stimName,\n self.nbFrames,\n self.maxFrames)\n if self.seqMode == \"rgbMode\":\n self._rgbSequenceInit()\n elif self.seqMode == 'rbMode':\n self._rbSequenceInit()\n self.arduinoSync()", "def __init__(self, lines):\n self.tiles = {}\n self.parse(lines)\n self.find_neighbors()\n self.find_corners()\n self.build_grid_top()\n self.build_grid_left()\n self.fill_grid()\n self.stitch_image()", "def modify_body(lines, PE_dims, var_map): \n loop_bodies = []\n # Locate the user statements\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('hls_pipeline') != -1:\n # extract the loop body\n body_start = line_id\n r_minus_l = -1\n nxt_line_id = line_id + 1 \n while nxt_line_id < len(lines):\n nxt_line = lines[nxt_line_id]\n if nxt_line.find('}') != -1:\n r_minus_l += 1\n if nxt_line.find('{') != -1:\n r_minus_l -= 1\n if r_minus_l == 0:\n body_end = nxt_line_id - 1\n break\n nxt_line_id += 1\n loop_body = lines[body_start : body_end + 1]\n #print(loop_body)\n loop_bodies.append({'pos': [body_start, body_end], 'lines': loop_body})\n \n # Modidy the loop bodies\n #for body in loop_bodies:\n body_offset = 0\n for idx in range(len(loop_bodies)):\n body = loop_bodies[idx]\n body_lines = body['lines'] \n group_names = []\n has_data_trans = True\n data_trans_info = extract_data_trans_info(body_lines, PE_dims)\n # Remove the in transfer\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('read_channel_intel') != -1:\n has_data_trans = True\n # Locate the read block and the write block\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n m = re.search(r'\\((.+?)\\)', line) \n fifo_name = m.group(1)\n group_name = fifo_name.split('_')[1]\n group_names.append(group_name)\n break\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n # Remove the out transfer\n 
has_data_trans = True\n while has_data_trans:\n has_data_trans = False\n for line_id in range(len(body_lines)):\n line = body_lines[line_id]\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in group_names:\n has_data_trans = True\n block_start, block_end = locate_data_trans_block(line_id, body_lines)\n if has_data_trans:\n body_lines = body_lines[:block_start] + body_lines[block_end + 1:]\n #print(body_lines)\n # Wrap the body with space loops\n for dim_idx in range(len(PE_dims)):\n dim = PE_dims[dim_idx] \n line = f'#pragma unroll\\nfor (int s{dim_idx} = 0; s{dim_idx} < {dim}; s{dim_idx}++) {{\\n'\n body_lines.insert(dim_idx, line) \n for dim in PE_dims:\n body_lines.append('}\\n')\n\n # Modify the index\n body_lines = modify_index(body_lines, var_map, PE_dims)\n #print(body_lines)\n\n # Insert the data transfer stmts\n body_lines = insert_data_trans(body_lines, data_trans_info, PE_dims)\n #loop_bodies[idx]['lines'] = body_lines\n\n # Replace the loop bodies\n body_pos = body['pos'] \n lines = lines[: body_offset + body_pos[0]] \\\n + body_lines \\\n + lines[body_offset + body_pos[1] + 1 :] \n body_offset += len(body_lines) - (body_pos[1] - body_pos[0] + 1)\n\n return lines", "def update_lines(self):\n self._checkfigure()\n for ld in self.lines:\n line = ld['line']\n\n color = ld['color']\n line.set_color(color)\n\n lw = ld['linewidth']\n hlf = ld['highlight factor']\n highlight = hlf if ld['highlighted'] else 1.0\n lw = lw*highlight\n line.set_linewidth(lw)\n\n for vline in ld['vlines']:\n vline.set_color(color)\n vline.set_linestyle('--')\n vline.set_linewidth(lw)\n\n for hline in ld['vlines']:\n hline.set_color(color)\n hline.set_linestyle('--')\n hline.set_linewidth(lw)", "def reset_lines(chat_lines):\n for line in xrange(24):\n chat_lines[line].setText(chat_lines[line+1].getText())\n chat_lines[line].setTextColor(chat_lines[line+1].getTextColor())\n chat_lines[24].setText(\"\")", "def _preprocess(self, stream):\n unfinished = ' <unfinished ...>'\n resumed = '<... [^ ]+ resumed> (.*)$'\n in_progressed = {}\n\n for line in stream:\n pid, timestamp, rest = line.rstrip().split(None, 2)\n\n # Save any lines that are unfinished.\n # Line must *end* with the string unfinished.\n i = rest.rfind(unfinished)\n if i != -1 and i == len(rest) - len(unfinished):\n partial_line = rest[:i]\n in_progressed[pid] = (timestamp, partial_line)\n continue\n\n # Resume lines. 
Line must *start* with resumed string.\n match = re.search(resumed, line)\n if match:\n resumed_line = match.groups()[0]\n timestamp, partial_line = in_progressed.pop(pid)\n line = '{} {} {}{}'.format(\n pid, timestamp, partial_line, resumed_line)\n\n yield line", "def clean_line_generator_v2(df_pkl=None, fn='untitled'):", "def preprocess(args, id2info, mapping):\n polyline_spans = []\n keys = list(id2info.keys())\n assert 'AV' in keys\n assert 'AGENT' in keys\n keys.remove('AV')\n keys.remove('AGENT')\n keys = ['AGENT', 'AV'] + keys\n vectors = []\n two_seconds = mapping['two_seconds']\n mapping['trajs'] = []\n mapping['agents'] = []\n for id in keys:\n polyline = {}\n\n info = id2info[id]\n start = len(vectors)\n if args.no_agents:\n if id != 'AV' and id != 'AGENT':\n break\n\n agent = []\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n agent.append((line[X], line[Y]))\n\n if args.visualize:\n traj = np.zeros([args.hidden_size])\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n traj = traj[:i * 2].copy()\n break\n traj[i * 2], traj[i * 2 + 1] = line[X], line[Y]\n if i == len(info) - 1:\n traj = traj[:(i + 1) * 2].copy()\n traj = traj.reshape((-1, 2))\n mapping['trajs'].append(traj)\n\n for i, line in enumerate(info):\n if larger(line[TIMESTAMP], two_seconds):\n break\n x, y = line[X], line[Y]\n if i > 0:\n # print(x-line_pre[X], y-line_pre[Y])\n vector = [line_pre[X], line_pre[Y], x, y, line[TIMESTAMP], line[OBJECT_TYPE] == 'AV',\n line[OBJECT_TYPE] == 'AGENT', line[OBJECT_TYPE] == 'OTHERS', len(polyline_spans), i]\n vectors.append(get_pad_vector(vector))\n line_pre = line\n\n end = len(vectors)\n if end - start == 0:\n assert id != 'AV' and id != 'AGENT'\n else:\n mapping['agents'].append(np.array(agent))\n\n polyline_spans.append([start, end])\n\n assert_(len(mapping['agents']) == len(polyline_spans))\n\n assert len(vectors) <= max_vector_num\n\n t = len(vectors)\n mapping['map_start_polyline_idx'] = len(polyline_spans)\n if args.use_map:\n vectors, polyline_spans = get_sub_map(args, mapping['cent_x'], mapping['cent_y'], mapping['city_name'],\n vectors=vectors,\n polyline_spans=polyline_spans, mapping=mapping)\n\n # logging('len(vectors)', t, len(vectors), prob=0.01)\n\n matrix = np.array(vectors)\n # matrix = np.array(vectors, dtype=float)\n # del vectors\n\n # matrix = torch.zeros([len(vectors), args.hidden_size])\n # for i, vector in enumerate(vectors):\n # for j, each in enumerate(vector):\n # matrix[i][j].fill_(each)\n\n labels = []\n info = id2info['AGENT']\n info = info[mapping['agent_pred_index']:]\n if not args.do_test:\n if 'set_predict' in args.other_params:\n pass\n else:\n assert len(info) == 30\n for line in info:\n labels.append(line[X])\n labels.append(line[Y])\n\n if 'set_predict' in args.other_params:\n if 'test' in args.data_dir[0]:\n labels = [0.0 for _ in range(60)]\n\n if 'goals_2D' in args.other_params:\n point_label = np.array(labels[-2:])\n mapping['goals_2D_labels'] = np.argmin(get_dis(mapping['goals_2D'], point_label))\n\n if 'lane_scoring' in args.other_params:\n stage_one_label = 0\n polygons = mapping['polygons']\n min_dis = 10000.0\n for i, polygon in enumerate(polygons):\n temp = np.min(get_dis(polygon, point_label))\n if temp < min_dis:\n min_dis = temp\n stage_one_label = i\n\n mapping['stage_one_label'] = stage_one_label\n\n mapping.update(dict(\n matrix=matrix,\n labels=np.array(labels).reshape([30, 2]),\n polyline_spans=[slice(each[0], each[1]) for each in polyline_spans],\n 
labels_is_valid=np.ones(args.future_frame_num, dtype=np.int64),\n eval_time=30,\n ))\n\n return mapping", "def _line_wrapper( self, diffs ):\n\n\t\t# pull from/to data and flags from mdiff iterator\n\t\tfor fromdata, todata, flag in diffs:\n\t\t\t# check for context separators and pass them through\n\t\t\tif flag is None:\n\t\t\t\tyield fromdata, todata, flag\n\t\t\t\tcontinue\n\t\t\t( fromline, fromtext ), ( toline, totext ) = fromdata, todata\n\t\t\t# for each from/to line split it at the wrap column to form\n\t\t\t# list of text lines.\n\t\t\tfromlist, tolist = [], []\n\t\t\tself._split_line( fromlist, fromline, fromtext )\n\t\t\tself._split_line( tolist, toline, totext )\n\t\t\t# yield from/to line in pairs inserting blank lines as\n\t\t\t# necessary when one side has more wrapped lines\n\t\t\twhile fromlist or tolist:\n\t\t\t\tif fromlist:\n\t\t\t\t\tfromdata = fromlist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\tfromdata = ( '', ' ' )\n\t\t\t\tif tolist:\n\t\t\t\t\ttodata = tolist.pop( 0 )\n\t\t\t\telse:\n\t\t\t\t\ttodata = ( '', ' ' )\n\t\t\t\tyield fromdata, todata, flag", "def set_initial_values(self):\n #Stores each line of the text file in a list\n self.text = []\n \n #Scrolling distance\n self.scroll = 0\n\n #Zooming level (font size) \n self.zoom = 12\n\n #Factor by which is decrement self.zoom\n self.factor = 0\n\n #Number of tabs spaces before a line\n self.indent = 0\n\n #Flag to only set up pango descriptions only once \n self.set_pc = 1\n\n #list of indetation level of all lines\n self.tab_index = []\n\n #Total line count\n self.line_count = 0\n\n #line number of line rendered off top of window \n self.min_text = 0\n #line number of line rendered off bottom of window \n self.max_text = 50\n\n #y position for cairo for the text at the top\n self.min_cairo = 20\n\n #y position for text at bottom\n self.max_cairo = 20\n\n #x positiong for indented text\n self.tab_cairo = 20", "def initialize(lines, dim):\n start_gen = defaultdict(int)\n for i, line in enumerate(lines):\n for j, letter in enumerate(line):\n if letter == \"#\":\n start_gen[(i, j) + (0,) * (dim - 2)] = 1\n return start_gen", "def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. 
\"\n \"\".format(template, e.args[0]))\n return template", "def postprocess_ini_section_items(items: Union[Mapping, Iterable]) -> Generator:\n splitter_re = re.compile('[\\n\\r\\t]+')\n if isinstance(items, Mapping):\n items = items.items()\n for k, v in items:\n if v.startswith('\\n'):\n v = splitter_re.split(v[1:])\n v = [vv.strip() for vv in v if vv.strip()]\n v = [vv for vv in v if not vv.startswith('#')] # remove commented lines\n yield k, v", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def _finalize_strokes(self, strokes, lines=None):\n for i, offsets in tqdm(enumerate(strokes)):\n if lines and not lines[i]:\n print(\"Empty line? Stroke:\")\n print(offsets[:10])\n continue\n\n offsets[:, :2] *= 1.5\n curr_strokes = drawing.offsets_to_coords(offsets)\n curr_strokes = drawing.denoise(curr_strokes)\n curr_strokes[:, :2] = drawing.align(curr_strokes[:, :2])\n\n # Normalize\n curr_strokes[:, 1] -= np.min(curr_strokes[:, 1])\n max_y = np.max(curr_strokes[:, 1])\n if max_y:\n curr_strokes[:, :2] /= max_y\n else:\n warnings.warn(f\"max y is zero {curr_strokes}\")\n\n # Convert end points to start points\n #curr_strokes = eos_to_sos(curr_strokes)\n\n yield curr_strokes", "def make_loop_careduce(loop_orders, dtypes, loop_tasks, sub):\r\n\r\n def loop_over(preloop, code, indices, i):\r\n iterv = 'ITER_%i' % i\r\n update = \"\"\r\n suitable_n = \"1\"\r\n for j, index in enumerate(indices):\r\n var = sub['lv%i' % j]\r\n update += \"%(var)s_iter += %(var)s_jump%(index)s_%(i)s;\\n\" % locals()\r\n if index != 'x':\r\n suitable_n = \"%(var)s_n%(index)s\" % locals()\r\n return \"\"\"\r\n %(preloop)s\r\n for (int %(iterv)s = %(suitable_n)s; %(iterv)s; %(iterv)s--) {\r\n %(code)s\r\n %(update)s\r\n }\r\n \"\"\" % locals()\r\n\r\n preloops = {}\r\n for i, (loop_order, dtype) in enumerate(zip(loop_orders, dtypes)):\r\n for j, index in enumerate(loop_order):\r\n if index != 'x':\r\n preloops.setdefault(j, \"\")\r\n preloops[j] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n break\r\n else: # all broadcastable\r\n preloops.setdefault(0, \"\")\r\n preloops[0] += (\"%%(lv%(i)s)s_iter = (%(dtype)s*)(PyArray_DATA(%%(lv%(i)s)s));\\n\" % locals()) % sub\r\n\r\n if len(loop_tasks) == 1:\r\n s = preloops.get(0, \"\")\r\n else:\r\n s = \"\"\r\n for i, (pre_task, task), indices in reversed(zip(xrange(len(loop_tasks) - 1), loop_tasks, zip(*loop_orders))):\r\n s = loop_over(preloops.get(i, \"\") + pre_task, s + task, indices, i)\r\n\r\n s += loop_tasks[-1]\r\n return \"{%s}\" % s", "def _prepare_invoice_lines(self, exchange_line, order_line):\n invoice_type = {\n 'sale.order.line': {\n 'higher': 'out_invoice', 'lower': 'out_refund',\n 'type': 'sale', 'field': 'exchange_sale_line_id'\n },\n 'purchase.order.line': {\n 'higher': 'in_invoice', 'lower': 'in_refund',\n 'type': 'purchase', 'field': 'exchange_purchase_line_id'\n },\n }\n product = exchange_line.exchange_product_id or exchange_line.product_id\n data = {\n 'invoice_type': False,\n 'values': {\n 'product_id': 
product.id,\n 'quantity': exchange_line.quantity,\n 'name': 'Exchange for [%s]' % exchange_line.product_id.display_name,\n }\n }\n if exchange_line.exchange_product_id or \\\n exchange_line.price_subtotal > order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['higher']\n elif exchange_line.price_subtotal < order_line.price_subtotal:\n data['invoice_type'] = invoice_type[order_line._name]['lower']\n else:\n return {}\n data[invoice_type[order_line._name]['type']] = order_line.order_id\n data['values'][invoice_type[order_line._name]['field']] = order_line.id\n data['values']['price_unit'] = exchange_line.price_unit\n # TODO i think we should take the different between prices NOT the all price\n # abs(exchange_line.price_unit - order_line.price_unit)\n return data", "def set_all_lines_to_initial_positions(self):\n self.lines[1] = [None for _ in range(self.lines[0].__len__())]\n for line_no in range(0, self.lines[0].__len__()):\n self[line_no].move_to(\n self.get_center() + self.lines_initial_positions[line_no]\n )\n return self" ]
[ "0.62352186", "0.5535244", "0.540953", "0.538531", "0.53788745", "0.5370846", "0.53634304", "0.5115916", "0.510933", "0.50691956", "0.5020476", "0.5014774", "0.50135577", "0.50128126", "0.5011278", "0.5003016", "0.49997228", "0.49952468", "0.49902463", "0.49880245", "0.49821338", "0.49678752", "0.4942249", "0.493176", "0.49271667", "0.4912079", "0.49110472", "0.49006313", "0.48940745", "0.48726735" ]
0.7311217
0
Prepare looped datasets from looping lines.
def prepare_looped_datasets(self, alldict, allcombs):
    datasets_dict=dict()
    numcombs = len(allcombs)
    combct = 0
    while combct < numcombs:
        newdata = list(self.baseinput.data)
        loopedlines = dict()
        loopedlines = self.prepare_looped_lines(alldict, allcombs[combct])
        for lvalidx in loopedlines.keys():
            newdata[lvalidx] = loopedlines[lvalidx]
        datasets_dict[combct] = newdata
        combct = combct + 1
    return datasets_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)", "def _prepare_sets(self):\n\n ds_images, ds_labels = self._load_images_labels()\n\n ds_images_2 = ds_images.take(self.val_count)\n ds_labels_2 = ds_labels.take(self.val_count)\n ds_images_1 = ds_images.skip(self.val_count)\n ds_labels_1 = ds_labels.skip(self.val_count)\n\n ds_1 = (ds_images_1, ds_labels_1)\n ds_2 = (ds_images_2, ds_labels_2)\n\n return ds_1, ds_2", "def create_data_generators(shuffle=True, novelty_type='normal', item_to_include='None',\n scale_level=1):\n\n total_noi_i = 10 # Number of processed images from one environemnt i\n noe = 1 # Numer of environments\n n_p = 32 # Patch size, patch --> n_p x n_p\n\n novelty = novelty_type\n datasets = []\n\n for i in range(noe):\n\n # Load only images of the environment which includes images of the stated novel item.\n if item_to_include is not None and novelty == 'novel_item':\n dataset_env_i = PolycraftDatasetWithSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level,\n item_name=item_to_include)\n datasets.append(dataset_env_i)\n # We only process the one environment with the item (maybe change this\n # if we have more than one environement per novel_item!?)\n break\n\n # No specific item given which should be included.\n else:\n dataset_env_i = PolycraftDatasetNoSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level)\n datasets.append(dataset_env_i)\n\n final_dataset = ConcatDataset(datasets)\n\n total_noi = len(final_dataset) # Total number of processed images from all datasets\n\n if(total_noi < 7):\n print('Number of samples too small for splitting dataset in training-/valid-/test set.')\n\n train_noi = int(0.7 * total_noi) # Number of images used for training (70 %)\n valid_noi = int(0.15 * total_noi) # Number of images used for validation (15 %)\n test_noi = total_noi - train_noi - valid_noi # Number of images used for testing (15 %)\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n final_dataset, [train_noi, valid_noi, test_noi])\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n return train_loader, valid_loader, test_loader", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n 
sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def prepare_dataset(self, xs: List[str], ys: List[str], batch_size: int = None):\n\n if batch_size is None:\n batch_size = self.cM.batch_size\n\n examples = [data.Example.fromlist([x, y], self.data_fields) for x, y in zip(xs, ys)]\n\n dataset = data.Dataset(examples, fields=self.data_fields)\n\n iterator = data.BucketIterator(dataset, batch_size=batch_size, shuffle=False)\n\n return iterator", "def prepare_lines_data(self):\n for l_hd in self.hour_data:\n if not self.node_from or not self.node_to:\n print('ERROR! line %i-%i has no node(s)' % (self.node_from_code, self.node_to_code))\n if l_hd.state and self.node_from.get_node_hour_state(l_hd.hour) \\\n and self.node_to.get_node_hour_state(l_hd.hour):\n if not self.type:\n node_start = self.node_from_code\n node_finish = self.node_to_code\n base_coeff = 0\n k_pu = 0\n else:\n node_start = self.node_to_code\n node_finish = self.node_from_code\n base_coeff = self.node_to.voltage_class / self.node_from.voltage_class\n k_pu = math.sqrt(math.pow(self.kt_re, 2) + math.pow(self.kt_im, 2))\n lag = math.atan(self.kt_im / self.kt_re) if self.kt_re else 0\n\n self.eq_db_lines_data.append((\n l_hd.hour, node_start, node_finish, self.parallel_num, self.type,\n max(self.node_from.voltage_class, self.node_to.voltage_class), base_coeff,\n l_hd.r, l_hd.x, l_hd.g, -l_hd.b, k_pu, lag, -l_hd.b_from, -l_hd.b_to\n ))", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def set_batch_data():\r\n if not os.path.exists(filepath):\r\n download_data()\r\n for n in range(0,6):\r\n d = read(filepath + flist[n])\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, trts = {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n trts['x'], trts['y'] = d['data'], d['labels']\r\n trtsflag = ['train', 'train', 'train', 'train', 'train', 'test']\r\n\r\n data['flag'] = trtsflag[n]\r\n data[trtsflag[n]] = trts\r\n save_pkl(data, savename=flist[n]+'.pkl')", "def _build_datasets(self):\n self._build_datasets_sis3302()\n self._build_datasets_sis3305()", "def separate(self):\n print(\"start dataset separating\")\n sum = 0\n for i in tqdm(range(len(self.itemlen))):\n il = self.itemlen[i]\n if il < 3:\n sum += il\n continue\n rarr = list(range(sum, sum+il))\n random.shuffle(rarr)\n self.train.append({\n 'input': self.input[rarr[0]],\n 'label': self.label[i]\n })\n self.val.append({\n 'input': self.input[rarr[1]],\n 'label': self.label[i]\n })\n for j in range(2, len(rarr)):\n self.test.append({\n 
'input': self.input[rarr[j]],\n 'label': self.label[i]\n })\n sum += il", "def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... 
done with preparing the dataset.\")", "def dataset_read(self):\n # while self.running:\n # grab current data_list and own it locally per cycle\n # to avoid mid-parse changes\n self.local_data_list = self.data_list\n\n # set a random duration for reading from random line\n # before choosing another from current set\n dataset_read_dur = (random.randrange(3000, 13000) / 1000) * self.glob_speed\n\n # prepare start line to read\n starting_line = self.line_to_read()\n\n # sorts out durations\n if self.debug_choose:\n print('B1 dataset line read duration = ', dataset_read_dur)\n end_time = self.end_time_calc(dataset_read_dur)\n\n # determine if read is to be looped or sequential\n looped = self.is_loop()\n\n while time.time() < end_time:\n # calc baudrate and cycle clock for speed of line read\n baudrate = self.baudrate()\n\n # if looped\n if looped > 0:\n loop_end = time.time() + looped\n\n # reset the start read point\n line_to_read = starting_line\n\n # for each loop\n while time.time() < loop_end:\n active_line = self.local_data_list[line_to_read]\n self.parse_active_line(active_line)\n line_to_read += 1\n if self.debug_read:\n print(f'******** line to read LOOPING {line_to_read}')\n # print(f'config data = {config.x_ds}, {config.y_ds}, {config.z_ds}')\n\n # pause for 10th of baudrate, while parse_active_line slides\n time.sleep(baudrate/10)\n else:\n # if no loop\n active_line = self.local_data_list[starting_line]\n self.parse_active_line(active_line)\n starting_line += 1\n if self.debug_read:\n print(f'******** line to read NO LOOP {starting_line}')\n # print(f'config data = {config.x_ds}, {config.y_ds}, {config.z_ds}')\n\n # pause for 10th of baudrate, while parse_active_line slides\n time.sleep(baudrate/10)", "def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)", "def prepareDataBatches(self, traindata, trainlabel):\n index = np.random.permutation(len(traindata))\n traindata = traindata[index]\n trainlabel = trainlabel[index]\n split_no = int(len(traindata) / self.batchSize)\n return zip(np.split(traindata[:split_no*self.batchSize], split_no), np.split(trainlabel[:split_no*self.batchSize], split_no))", "def prepare_dataset(fpath):\n raise NotImplementedError", "def prepare_nfold_datasets(self): # i.e. 
split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def _fillBatches(self):\n\n batchRE = r\"\"\"\n B\n (?P<observebatch>\\d+?)\n (?P<startend>[SE])\n (?P<sequence>\\d+?)\n _SR\n (?:_(?P<extraInjections>\\d+?|\\w+?))?\n $\n \"\"\"\n batchRE = re.compile(batchRE, re.VERBOSE)\n # We canot infer batches unless we have runorder\n if 'Run Order' in self.sampleMetadata.keys():\n currentBatch = 0\n # Loop over samples in run order\n for index, row in self.sampleMetadata.sort_values(by='Run Order').iterrows():\n nameComponents = batchRE.search(row['Sample File Name'])\n if nameComponents:\n # Batch start\n if nameComponents.group('startend') == 'S':\n # New batch - increment batch no\n if nameComponents.group('sequence') == '1':\n currentBatch = currentBatch + 1\n\n # Don't include the dilution series or blanks\n if not ((row['AssayRole'] == AssayRole.LinearityReference) or (row['SampleType'] == SampleType.ProceduralBlank)):\n self.sampleMetadata.loc[index, 'Batch'] = currentBatch\n self.sampleMetadata.loc[index, 'Correction Batch'] = currentBatch\n\n else:\n warnings.warn('Unable to infer batches without run order, skipping.')\n return", "def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = 
tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set", "def prepare_typerec_dataset(self, data_raw):\n\n self._logger.info(f'Preparing Wikidata-TypeRec dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n\n try:\n sample = self.prepare_typerec_sample(line)\n data.append(sample)\n sample_count += 1\n except Exception as e:\n self._logger.info(str(e))\n sample_count_failed += 1\n\n self._logger.info(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def recreate_and_prepare_datasets_for_training(self, datasets: List[str], width: int, height: int,\n use_fixed_canvas: bool,\n stroke_thicknesses_for_generated_symbols: List[int],\n staff_line_spacing: int,\n staff_line_vertical_offsets: List[int],\n random_position_on_canvas: bool) -> None:\n self.__delete_dataset_directory()\n self.__download_and_extract_datasets(datasets, width, height, use_fixed_canvas, staff_line_spacing,\n staff_line_vertical_offsets, stroke_thicknesses_for_generated_symbols,\n random_position_on_canvas)", "def __data_generation(self, rows):\n samples = np.zeros((rows, self.image_width, self.image_height, self.image_depth))\n targets = np.zeros((rows, self.image_width, self.image_height, self.num_classes))\n for j in range(rows):\n for row1, row2 in zip(self.reader1, self.reader2):\n array_row1 = np.array(row1, dtype=np.float)\n samples[j,:,:,:] = preprocess_feature(array_row1,\n self.image_width, self.image_height, self.image_depth)\n try:\n next(self.reader1)\n except StopIteration:\n print(\"CSV iteration end for feature. Calling 'break'.\")\n break\n\n array_row2 = np.array(row2, dtype=np.int)\n targets[j,:,:,:] = preprocess_label(array_row2,\n self.image_width, self.image_height, self.num_classes)\n try:\n next(self.reader2)\n except StopIteration:\n print(\"CSV iteration end for label. 
Calling 'break'.\")\n break\n\n return samples, targets", "def InitDataset(self):\n train_txt = 'ImageSets/Main/train.txt'\n val_txt = 'ImageSets/Main/val.txt'\n annotations = \"Annotations\"\n jpegimages = \"JPEGImages\"\n images_path = train_txt if (self.is_train) else val_txt \n images_path = readTxt(os.path.join(self.path, images_path))\n images_path.pop(-1)\n # rawdata format: [path_2_image, path_2_xml]\n rawData = list()\n for each in images_path:\n xml = os.path.join(self.path, annotations, each + '.xml')\n jpeg = os.path.join(self.path, jpegimages, each + '.jpg')\n rawData.append([jpeg, xml])\n return rawData", "def _create_examples(self, lines, set_type):\n # Parallelizing a bit batch computation because it is quite slow...\n #lines = lines[:500]\n step = 18 # 17 sentences per input sequence\n #encoded_dict = self.tokenizer.encode('[CLS] ' + ' [SEP] [CLS] '.join(lines) + ' [SEP]')\n #tokens = np.array(encoded_dict.tokens)\n #ids = np.array(encoded_dict.ids)\n \n n = len(lines)\n \n def f(i, sequence):\n guid = \"%s-%s\" % (set_type, i)\n text_a = self.pad_to_max_length([2] + self.mask_tokens(sequence) + [3])\n text_b = [0 if item==0 else 1 for item in text_a]\n label = self.pad_to_max_length([2] + sequence + [3])\n label = [label[i] if item==4 else -100 for i, item in enumerate(text_a)] # for loss computation, only taking into account MASK tokens with id==4\n example = InputExample(guid=guid,text_a=text_a,text_b=text_b,label=label)\n return example\n \n def g(i, line):\n sequence = self.tokenizer.encode(' '.join(line)).ids\n return f(i, sequence)\n \n # Splitting data for memory issues...\n indexes = list(range(0, n, step))\n m = len(indexes)\n n_splits = self.n_splits\n splits = [indexes[i*m//n_splits: m*(i+1)//n_splits] for i in range(n_splits)]\n for index_split, split in enumerate(splits):\n print(f\"Computing split {index_split+1} / {n_splits}... 
Split size: {len(split)}\")\n examples = Parallel(n_jobs=-1)(delayed(g)(index+split[0], lines[i:i + step]) for index, i in tqdm(enumerate(split)))\n self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl'), examples)\n # Merging\n #examples = [self.load_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl')) for index_split in range(n_splits)]\n #examples = [item for l in examples for item in l]\n #self.save_object(os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples.pkl'), examples)\n \n examples_paths = [os.path.join(self.dataset_dir, f'{self.dataset_name}{set_type}_examples_split-{index_split}.pkl') for index_split in range(n_splits)]\n \n return examples_paths", "def _proc_dataset(d):\n # merge 2dseq complex frame group if present\n if d.is_complex and d.type == '2dseq':\n d = FrameGroupMerger().merge(d, 'FG_COMPLEX')\n\n # prepare the data array\n if d.is_svs:\n data = _prep_data_svs(d)\n elif d.is_mrsi:\n data = _prep_data_mrsi(d)\n else:\n data = d.data\n\n # get properties\n properties = d.to_dict()\n\n # some Bruker datasets do not have affine property\n if d.type == 'fid': if not 'affine' in properties: properties.update({'affine':np.identity(4)})\n \n yield data, properties", "def _build_datasets_sis3302(self):\n bc_arr = np.where(self._active_brdch[\"SIS 3302\"])\n\n for board, channel in zip(bc_arr[0], bc_arr[1]):\n brd = board + 1\n ch = channel + 1\n slot = self.get_slot(brd, \"SIS 3302\")\n\n for cname in self._active_config:\n # create main dataset\n dset_name = f\"{cname} [Slot {slot}: SIS 3302 ch {ch}]\"\n shape = (self._sn_size, self._nt)\n data = np.empty(shape=shape, dtype=np.int16)\n self.create_dataset(dset_name, data=data)\n\n # create header dataset\n hdset_name = f\"{dset_name} headers\"\n shape = (self._sn_size,)\n dtype = np.dtype(\n [\n (\"Shot number\", np.int32),\n (\"Scale\", np.float32),\n (\"Offset\", np.float32),\n (\"Min\", np.uint16),\n (\"Max\", np.uint16),\n (\"Clipped\", np.int8),\n ]\n )\n dheader = np.empty(shape=shape, dtype=dtype)\n dheader[\"Shot number\"] = np.arange(\n 1, shape[0] + 1, 1, dtype=dheader[\"Shot number\"].dtype\n )\n dheader[\"Scale\"] = 7.7241166e-5\n dheader[\"Offset\"] = -2.531\n dheader[\"Min\"] = data.min(axis=1)\n dheader[\"Max\"] = data.max(axis=1)\n dheader[\"Clipped\"] = 0\n self.create_dataset(hdset_name, data=dheader)", "def _create_projection_datasets(self):\n # First grab the spectroscopic indices and values and position indices\n self._sho_spec_inds = self.h5_main.h5_spec_inds\n self._sho_spec_vals = self.h5_main.h5_spec_vals\n self._sho_pos_inds = self.h5_main.h5_pos_inds\n\n fit_dim_ind = self.h5_main.spec_dim_labels.index(self._fit_dim_name)\n\n self._fit_spec_index = fit_dim_ind\n self._fit_offset_index = 1 + fit_dim_ind\n\n # Calculate the number of loops per position\n cycle_start_inds = np.argwhere(self._sho_spec_inds[fit_dim_ind, :] == 0).flatten()\n tot_cycles = cycle_start_inds.size\n\n # Make the results group\n self._h5_group = create_results_group(self.h5_main, 'Loop_Fit')\n write_simple_attrs(self._h5_group, {'projection_method': 'pycroscopy BE loop model'})\n\n # Write datasets\n self.h5_projected_loops = create_empty_dataset(self.h5_main, np.float32, 'Projected_Loops',\n h5_group=self._h5_group)\n\n h5_loop_met_spec_inds, h5_loop_met_spec_vals = write_reduced_spec_dsets(self._h5_group, self._sho_spec_inds,\n self._sho_spec_vals, self._fit_dim_name,\n 
basename='Loop_Metrics')\n\n self.h5_loop_metrics = write_main_dataset(self._h5_group, (self.h5_main.shape[0], tot_cycles), 'Loop_Metrics',\n 'Metrics', 'compound', None, None, dtype=loop_metrics32,\n h5_pos_inds=self.h5_main.h5_pos_inds,\n h5_pos_vals=self.h5_main.h5_pos_vals,\n h5_spec_inds=h5_loop_met_spec_inds,\n h5_spec_vals=h5_loop_met_spec_vals)\n\n # Copy region reference:\n copy_region_refs(self.h5_main, self.h5_projected_loops)\n copy_region_refs(self.h5_main, self.h5_loop_metrics)\n\n self.h5_main.file.flush()\n self._met_spec_inds = self.h5_loop_metrics.h5_spec_inds\n\n return", "def data_process(self):\n logging.info('Processing the data and split files')\n lines = Utility.file_len(self.fname)\n self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,\n cpu_count().real)", "def preprocess(self):\n \n file_name_list = os.listdir(self.image_dir)\n random.seed(1234)\n random.shuffle(file_name_list)\n \n for i,d in enumerate(self.domains):\n self.attr2idx[d]=i \n\n for i, file_name in enumerate(file_name_list):\n if (file_name.startswith('X_')):\n continue\n \n parts = file_name.split(\"-\")\n label = int(parts[0])\n if label not in self.domains:\n continue\n img_name = file_name\n\n count=self.get_sample_count(label)\n if count<self.valid_set_size:\n # create holdout set on the fly\n utils.copy_file(self.image_dir,self.valid_set_dir,img_name)\n else:\n self.dataset.append([img_name, self.attr2idx[label]])\n \n self.increment_sample_count(label)\n\n print(\"Sample count per domain: \"+str(self.sample_count)+\" (including holdout set, holdout size per domain is: \"+str(self.valid_set_size)+\")\")\n print('Finished preprocessing the dataset...')" ]
[ "0.6232418", "0.6178191", "0.61240345", "0.6034664", "0.5999491", "0.59856147", "0.5969796", "0.5958377", "0.5950267", "0.5871935", "0.58671767", "0.5818414", "0.5809877", "0.5804624", "0.5801337", "0.5794393", "0.5745656", "0.5720152", "0.5712723", "0.5677284", "0.56412107", "0.5601151", "0.5599085", "0.559302", "0.5569282", "0.55593264", "0.5534732", "0.55347306", "0.55164444", "0.54838455" ]
0.6497661
0
Create independently looped input files.
def create_input_files(self, datasets_dict):
    ifname = self.keywords['inputfile']
    dirstem = os.path.dirname(ifname)
    basename = os.path.basename(ifname).split('.')[0]
    createdfiles=list()
    if dirstem == "":
        dirstem = os.getcwd()
    dkeys = datasets_dict.keys()
    dkeys.sort()
    dct=1
    for didx in dkeys:
        newfile = MASTFile()
        newfile.data = list(datasets_dict[didx])
        newname="%s/loop_%s_%s.inp" % (dirstem, basename, str(dct).zfill(2))
        newfile.to_file(newname)
        #createdfiles.append(os.path.basename(newname))
        createdfiles.append(newname)
        dct=dct+1
    return createdfiles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _make_files(self, dir, num_files=10):\n for i in range(num_files):\n self._make_random_file(dir)", "def create_input_files(in_dir, R, I):\n def get_filepath(in_volume, infiles_partition):\n _3d_pos = numeric_to_3d_pos(in_volume.index, infiles_partition, order='F')\n i, j, k = _3d_pos\n out_filename = f'{i}_{j}_{k}.hdf5'\n return os.path.join(in_dir, out_filename)\n\n infiles_partition = get_blocks_shape(R, I)\n infiles_volumes = get_named_volumes(infiles_partition, I)\n for in_volume in infiles_volumes:\n filepath = get_filepath(in_volume, infiles_partition)\n arr = create_random_dask_array(I, distrib='normal', dtype=np.float16)\n save_to_hdf5(arr, filepath, physik_cs=None, key='/data', compression=None)", "def split_start(infiles, outfiles):\n\n # split always runs exactly one job (unlike @subdivide)\n # So it implicitly combines all its inputs before running and generating multiple output\n # @originate generates multiple output so the input for @split is a list...\n infile = infiles[0]\n\n # clean up previous\n for f in outfiles:\n os.unlink(f)\n\n\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n #\n # Create more files than the previous invocation\n #\n #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n n_to_produce = len(outfiles) + 1\n for i in range(n_to_produce):\n f = '{}{}.split'.format(tempdir, i)\n open(f, 'a').close()", "def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()", "def create_inputs_recipe():\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = 
download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))", "def make_dummy_files(paths):\n for p in paths:\n make_dummy_file(p)", "def convert_files_parallel(self) -> None:\n file_paths = []\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n file_paths.append(os.path.join(\n self.audios_dir, file))\n with Pool(cpu_count()) as p:\n p.map(self.convert_file, file_paths)", "def _open_files(inputs, mode):\n assert isinstance(inputs, list)\n\n local_open = pf.open\n return [local_open(ffile, mode=mode) for ffile in inputs]", "def stage_input_file(workdir_path, files):\n if not isinstance(files, list):\n files = [files]\n\n for file_dict in files:\n location = urlparse(file_dict['location'])\n if 'basename' in file_dict:\n dest_path = os.path.join(workdir_path, file_dict['basename'])\n else:\n dest_path = os.path.join(workdir_path, os.path.basename(location.path))\n shutil.copy(location.path, dest_path)\n file_dict['path'] = dest_path\n\n for i, secondary_file in enumerate(file_dict.get('secondaryFiles', [])):\n stage_input_file(workdir_path, file_dict['secondaryFiles'][i])", "def prepare_io(filename, input_dataset, output_dataset):\n file_id = filename[1:] if filename.startswith(os.sep) else filename\n file_in = os.path.join(input_dataset.path, 'files', file_id)\n file_out = os.path.join(output_dataset.path, 'files', file_id)\n ensure_path(os.path.dirname(file_out))\n return file_in, file_out", "def io_files(self, iterable, ext=None, func=None):\n for input_path in iterable:\n output_path, temp_file = self.check_output_path(input_path, ext)\n\n try:\n func(input_path, temp_file)\n except Exception as e:\n if self._force_continue is True:\n self.handle_error(e, input_path)\n else:\n raise e\n\n self.overwrite_output_path(input_path, output_path, temp_file)", "def create(self):\n\n for key in self.dirs.keys():\n if type(self.dirs[key]) == dict:\n for dir_type in self.dirs[key].keys():\n create_if_not_exists(self.dirs[key][dir_type])\n else:\n create_if_not_exists(self.dirs[key])\n\n self.inputFileIds = {}\n for sample_name, sample_info in self.samples.items():\n if not sample_info['use_it']:\n continue\n\n process_name = sample_info[\"process_name_specific\"]\n is_mc = (sample_info[\"type\"] == \"mc\")\n\n if not is_mc:\n continue\n\n logging.info(\"Creating configuration files to run '%s' for sample %s\" % (self.executable, process_name))\n\n inputFileList = generateInputFileList(sample_info, self.max_files_per_job)\n key_dir = getKey(process_name)\n\n outputFile = os.path.join(\n self.dirs[key_dir][DKEY_HISTO], \"%s.root\" % process_name\n )\n self.outputFiles[process_name] = {\n 'inputFiles' : [],\n 'outputFile' : outputFile,\n }\n if os.path.isfile(outputFile) and tools_is_file_ok(outputFile, min_file_size = 2000):\n logging.info('File {} already exists --> skipping job'.format(outputFile))\n continue\n\n for jobId in inputFileList.keys():\n\n key_file = getKey(sample_name, jobId)\n\n self.inputFiles[key_file] = inputFileList[jobId]\n if 
len(self.inputFiles[key_file]) == 0:\n logging.warning(\n \"'%s' = %s --> skipping job !!\" % (key_file, self.inputFiles[key_file])\n )\n continue\n\n self.cfgFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.txt\" % (process_name, jobId)\n )\n self.outputFiles_tmp[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_HISTO_TMP], \"histogram_%i.root\" % jobId\n )\n self.logFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_LOGS], \"project_%s_%i.log\" % (process_name, jobId)\n )\n self.scriptFiles_projection[key_file] = os.path.join(\n self.dirs[key_dir][DKEY_CFGS], \"project_%s_%i_cfg.sh\" % (process_name, jobId)\n )\n projection_module = self.projection_module\n if projection_module == \"count\":\n projection_module = \"countHistogramAll\"\n if sample_name.startswith('/TTTo'):\n projection_module += \"CompTopRwgt\"\n elif sample_info['sample_category'].startswith('ttH'):\n projection_module += \"CompHTXS\"\n elif isSplitByNlheJet(process_name):\n projection_module += \"SplitByLHENjet\"\n elif isSplitByNlheHT(process_name):\n projection_module += \"SplitByLHEHT\"\n elif isSplitByNlheJetHT(process_name, sample_name):\n projection_module += \"SplitByLHENjetHT\"\n self.jobOptions_sbatch[key_file] = {\n 'histName' : process_name,\n 'inputFiles' : self.inputFiles[key_file],\n 'cfgFile_path' : self.cfgFiles_projection[key_file],\n 'outputFile' : self.outputFiles_tmp[key_file],\n 'logFile' : self.logFiles_projection[key_file],\n 'scriptFile' : self.scriptFiles_projection[key_file],\n 'projection_module' : projection_module,\n }\n if self.projection_module != 'puHist':\n self.jobOptions_sbatch[key_file]['ref_genWeight'] = self.ref_genWeights[process_name]\n if process_name not in self.ref_genWeights:\n raise RuntimeError(\"Unable to find reference LHE weight for process %s\" % process_name)\n self.createCfg_project(self.jobOptions_sbatch[key_file])\n self.outputFiles[process_name]['inputFiles'].append(self.outputFiles_tmp[key_file])\n\n if self.is_sbatch:\n logging.info(\"Creating script for submitting '%s' jobs to batch system\" % self.executable)\n self.num_jobs['project'] += self.createScript_sbatch(\n self.executable, self.sbatchFile_projection, self.jobOptions_sbatch\n )\n\n logging.info(\"Creating Makefile\")\n lines_makefile = []\n self.addToMakefile_project(lines_makefile)\n self.addToMakefile_hadd(lines_makefile)\n if self.plot:\n self.addToMakefile_plot(lines_makefile)\n self.addToMakefile_finalHadd(lines_makefile)\n self.createMakefile(lines_makefile)\n logging.info(\"Done\")\n\n return self.num_jobs", "def pre_loop(self):\n\t\tk = Kernel(name=\"misc.mkfile\")\n\t\tk.arguments = [\"--size=1000\", \"--filename=reference.dat\"]\n\t\tk.upload_input_data = ['levenshtein.py']\n\t\treturn k", "def flow_from_files(self, filenames=None, batch_size=32):\n\n if filenames:\n self.filenames = filenames\n\n for i in range(0, len(self.filenames), batch_size):\n yield np.concatenate([np.load(self.path / f) \\\n for f in self.filenames.iloc[i:i+batch_size]])", "def create_test_input_files(input1, input2):\n random.shuffle(input1)\n random.shuffle(input2)\n filename1 = application.join_abs_path(EMPTY_TEST_DIR, 'file-1.gz')\n filename2 = application.join_abs_path(EMPTY_TEST_DIR, 'file-2.gz')\n\n with gzip.open(filename1, 'wb') as file1:\n file1.write('\\n'.join(input1))\n with gzip.open(filename2, 'wb') as file2:\n file2.write('\\n'.join(input2))", "def create_fake_files(self, temp_dir):\n for fake_file in self.processed_fake_file + 
self.non_processed_fake_files:\n temp_fake_file = Path(temp_dir) / Path(fake_file)\n temp_fake_file.mkdir(parents=True, exist_ok=True)\n temp_fake_file.touch(exist_ok=True)", "def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)", "def convert_files_sequential(self) -> None:\n for file in os.listdir(self.audios_dir):\n if file.endswith(self.input_format):\n self.convert_file(os.path.join(\n self.audios_dir, file), self.output_format)", "def create_input_file(self, polymer_identifier, format, outpath):\n\n\t\tsmiles = self.get_smiles_from_identifier(polymer_identifier)\n\t\t\n\t\tresult = generate_input_files(smiles, format)\n\t\twith open(outpath, 'w+') as f:\n\t\t\tf.write(result)", "def processImages(self):\n for file in os.listdir(self.config[\"tempPath\"]):\n self.logger.debug(\"Calling generateImages for the file: {0}\".format(file))\n self.generateText(file)", "def make(input_filepath, output_filepath) -> None:\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def create_files(self):\n self._do_action_under_lock(self._create_files)", "def initiallize_buffer(self):\n assert os.path.isdir(self.directory)\n #sorting files topologically, files' format is -> data_num.h5 \n files_list = sorted(os.listdir(self.directory + '/' + self.name + '/'), key = lambda x: int(x.split(\"_\")[1].split(\".\")[0]))\n self.files_counter = 0\n if files_list != []: \n for file_name in files_list:\n self.memorize(name = file_name, error = 1)\n self.files_counter += 1\n self.files_tracker = file_name\n else:\n self.files_tracker = 'data_-1.h5'", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def create_from_files():\n logging.info('\"Create from files\" task started using config file %s', args.config)\n file_dir_path = config['input_dir']\n files = os.listdir(file_dir_path)\n\n for file_name in files:\n filename_without_extension = os.path.splitext(file_name)[0]\n if len(filename_without_extension) > 255:\n message = 'Truncating the filename \"' + filename_without_extension + '\" since it exceeds Drupal\\'s maximum node title length of 255 characters.'\n logging.error(message)\n filename_without_extension = filename_without_extension[:255]\n\n islandora_model = set_model_from_extension(file_name, config)\n\n node_json = {\n 'type': [\n {'target_id': config['content_type'],\n 'target_type': 'node_type'}\n ],\n 'title': [\n {'value': filename_without_extension}\n ],\n 'status': [\n {'value': config['published']}\n ],\n 'field_model': [\n {'target_id': islandora_model,\n 'target_type': 'taxonomy_term'}\n ]\n }\n\n node_headers = {\n 'Content-Type': 'application/json'\n }\n node_endpoint = '/node?_format=json'\n node_response = issue_request(config, 'POST', node_endpoint, node_headers, node_json, None)\n if node_response.status_code == 201:\n node_uri = node_response.headers['location']\n 
print('+ Node for \"' + filename_without_extension + '\" created at ' + node_uri + '.')\n logging.info('Node for \"%s\" created at %s.', filename_without_extension, node_uri)\n if 'output_csv' in config.keys():\n write_to_output_csv(config, '', node_response.text)\n\n file_path = os.path.join(config['input_dir'], file_name)\n media_type = set_media_type(file_path, config)\n media_response_status_code = create_media(config, file_name, node_uri)\n allowed_media_response_codes = [201, 204]\n if media_response_status_code in allowed_media_response_codes:\n print('+ ' + media_type.title() + \" media for \" + filename_without_extension + \" created.\")\n logging.info(\"Media for %s created.\", file_path)\n else:\n logging.error('Node for \"%s\" not created, HTTP response code was %s.', os.path.join(config['input_dir'], file_name), node_response.status_code)", "def split_input(self):\n namenode = self.runner.namenode\n splitter = Splitter(RECORDS_PER_BLOCK)\n results = []\n input_files = []\n for fname in self.inputs:\n input_files.append(RecordFile(fname, namenode))\n\n taskid = 0\n for block in splitter.split(input_files):\n fname = map_input(self.id, taskid)\n taskid += 1\n namenode.create_file(fname)\n\n bytes_written = 0\n for record in block:\n bytes_written += namenode.write_file(fname, bytes_written,\n record)\n\n namenode.close_file(fname)\n results.append(fname)\n self.open_files.append(fname)\n\n for file_ in input_files:\n file_.close()\n\n return results", "def create_input_sample_files(self, input_files: List[Path]) -> pd.DataFrame:\n assemblies = {}\n reads = {}\n sample_names = set()\n data = []\n\n # Initial pass of files to break up into assemblies/reads\n for file in input_files:\n sf = SequenceFile(file)\n sample_name = sf.get_genome_name(exclude_paired_end_indicators=True)\n if sf.is_assembly():\n if sample_name in sample_names:\n if sample_name in assemblies:\n previous_files = [assemblies[sample_name]]\n else:\n previous_files = reads[sample_name]\n raise Exception(f'Duplicate sample with name [{sample_name}]. current_file=[{file}], '\n f'previous_file(s)={previous_files}')\n else:\n sample_names.add(sample_name)\n assemblies[sample_name] = file\n elif sf.is_reads():\n if sample_name in assemblies:\n previous_files = assemblies[sample_name]\n raise Exception(f'Duplicate sample with name [{sample_name}]. current_file=[{file}], '\n f'previous_file(s)={previous_files}')\n elif sample_name in reads:\n if len(reads[sample_name]) != 1:\n raise Exception(f'Invalid number of files for sample with name [{sample_name}]. '\n f'current_file=[{file}], previous_files={reads[sample_name]}')\n else:\n reads[sample_name].append(file)\n else:\n reads[sample_name] = [file]\n\n sample_names.add(sample_name)\n else:\n logger.warning(f'Input file [{file}] with unknown file type (not assembly or reads). 
Ignoring.')\n\n # Now we iterate over samples to insert into an array to create the final dataframe\n for sample in assemblies:\n data.append([sample, assemblies[sample], pd.NA, pd.NA])\n\n # Iterate over reads to insert into array for final dataframe\n for sample in reads:\n if len(reads[sample]) == 1:\n data.append([sample, pd.NA, reads[sample][0], pd.NA])\n elif len(reads[sample]) == 2:\n file1 = SequenceFile(reads[sample][0])\n file2 = SequenceFile(reads[sample][1])\n\n file1_differences = file1.name_differences(file2)\n file2_differences = file2.name_differences(file1)\n\n if len(file1_differences) != 1 or len(file2_differences) != 1:\n raise Exception(\n f'Files [{reads[sample]}] do not have exactly one difference between names, cannot determine'\n f' paired structure.')\n else:\n f1d = file1_differences[0].lower()\n f2d = file2_differences[0].lower()\n\n if f1d == '1' and f2d == '2':\n forward = file1\n reverse = file2\n elif f1d == 'f' and f2d == 'r':\n forward = file1\n reverse = file2\n elif f2d == '1' and f1d == '2':\n reverse = file1\n forward = file2\n elif f1d == 'r' and f2d == 'f':\n reverse = file1\n forward = file2\n else:\n raise Exception(f'Cannot determine pair structure for files [{reads[sample]}]')\n\n data.append([sample, pd.NA, forward.file, reverse.file])\n else:\n raise Exception(f'Invalid number of files for sample [{sample}], files={reads[sample]}')\n\n return pd.DataFrame(data, columns=self.INPUT_SAMPLE_FILE_COLUMNS)", "def start():\r\n\r\n total_files = sum([len(files) for r, d, files in os.walk(abs_source_directory)])\r\n total_files_down = total_files\r\n for i in range(total_files, 0, -1):\r\n if i % 10 == 0:\r\n total_files_down = i\r\n break\r\n current_iteration = 0\r\n last_factor = 0\r\n position = 1\r\n print(\"[{0}] {1}/{2}\".format(\" \" * 10, 0, total_files))\r\n for path, dirs, files in os.walk(abs_source_directory):\r\n for file_name in list(filter(lambda x: x.endswith(\".pdf\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(normal_regex, file_source_path)\r\n # Handles normal past-papers\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, matched_groups=found_groups)\r\n except AttributeError:\r\n # Handles music past-papers\r\n if \"Music_\" in file_source_path:\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, music_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n elif \"Exam Pack list of omitted papers and markschemes\" in file_name:\r\n pass\r\n else:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n # Handles mp3 files\r\n for file_name in list(filter(lambda x: x.endswith(\".mp3\"), files)):\r\n file_source_path = os.path.join(path, file_name)\r\n out = re.search(audio_music_regex, file_source_path)\r\n try:\r\n found_groups = out.groups()\r\n write_copy(file_source_path, file_name, audio_groups=found_groups)\r\n except AttributeError:\r\n print(f\"CRITICAL ERROR: File not handled: {file_source_path}\")\r\n current_iteration += 1\r\n if current_iteration == last_factor + total_files_down / 10:\r\n last_factor = current_iteration\r\n 
print(\"[{0}{1}] {2}/{3}\".format(\"-\" * position, \" \" * (10 - position), current_iteration, total_files))\r\n position += 1\r\n print(\"[{0}] {1}/{2}\".format(\"-\" * 10, total_files, total_files))", "def open_read_files(answer_files, answers):\r\n \"\"\"And designates each file to a variable in answers\"\"\"\r\n count = 0\r\n s = 0\r\n answer_files2 = []\r\n for file in answer_files[:]: # Used [:] to get all file_names in answer_files\r\n ans = open(file, mode='r')\r\n print(f\"Opening {ans.name}\")\r\n time.sleep(s)\r\n answers[count] = ans\r\n count += 1\r\n if ans.closed == False: # Section for checking if files are closed\r\n print(f\"Closing {ans.name}\")\r\n ans.close()\r\n answer_files2.append(ans.name)\r\n answer_files.remove(ans.name)\r\n time.sleep(s)\r\n return answer_files2, answers", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')" ]
[ "0.6580419", "0.6510424", "0.6231249", "0.6147335", "0.61276037", "0.60895264", "0.60482085", "0.6024064", "0.59860575", "0.5985176", "0.59534013", "0.58648974", "0.58312774", "0.582473", "0.5797607", "0.57878214", "0.5783304", "0.5764239", "0.5764155", "0.57330567", "0.57255614", "0.5718252", "0.5709521", "0.57032084", "0.5697962", "0.56756985", "0.566487", "0.56636274", "0.5657628", "0.5656732" ]
0.6638289
0
Extract constant names from sybdb.h to use as python constants
def extract_constants(freetds_include="sybdb.h", constants_file="bcp_constants.py"):
    fileno, source_file = mkstemp(suffix=".c", text=True)
    write(fileno, "#include <{}>".format(freetds_include).encode())
    close(fileno)
    fileno, include_directives = mkstemp(suffix=".txt")
    close(fileno)
    if ON_WINDOWS:
        cmd_template = "cl /E {includes} {source} > {output}"
    else:
        cmd_template = "cpp {includes} '{source}' > '{output}'"
    cmd = cmd_template.format(
        output=normpath(include_directives),
        source=normpath(source_file),
        includes=" ".join(
            "-I{}".format(normpath(_include)) for _include in include_dirs
        )
    )
    fifo = Popen(cmd, shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)
    fifo.communicate()
    fifo.wait()
    remove(source_file)
    if fifo.returncode < 0:
        raise Exception("Cannot run preprocessor step")
    row_regex = re.compile('[\r\n]+')
    field_regex = re.compile('[\s]+')
    with open(include_directives, "r") as fd:
        include_paths = list(
            _filename
            for contents in [fd.read()]
            for _row in row_regex.split(contents)
            if _row.find(freetds_include) > -1
            for _index, _word in enumerate(field_regex.split(_row))
            if _index == 2
            for _filename in [_word.strip('"')]
            if exists(_filename)
        )
    remove(include_directives)
    for include_file in include_paths:
        with open(include_file, "r") as fd:
            definition_pairs = [
                (_values[1], int(_values[2]))
                for contents in [fd.read()]
                for _row in row_regex.split(contents)
                for _values in [field_regex.split(_row)]
                if len(_values) == 3 and _values[0] == "#define" and _values[2].isdigit()
            ]
        if len(definition_pairs):
            with open(constants_file, "w") as output_fd:
                output_fd.write("\n".join("%s=%d" % _row for _row in definition_pairs))
            break
    else:
        raise Exception("Couldn't find a freetds include file")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_declarations(self):\n return \"extern const unsigned int %s;\\n\" % self.name", "def get_calculable_constant_names_latex():\n return r\"t_0\", r\"S_{rr}\", r\"S_{r\\theta}\", r\"S_{rz}\", r\"S_{zz}\" \\\n r\"\\alpha\", r\"\\beta\", r\"\\gamma\", r\"C_{13}\", r\"C_{33}\", \\\n r\"\\hat{E}\", r\"g_1\"", "def _parseKeyNames(lib):\n _keyNames = {}\n for attr in dir(lib): # from the modules variables\n if attr[:6] == 'TCODK_': # get the K_* constants\n _keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs\n return _keyNames", "def get_definitions(self):\n return \"const unsigned int %s = 0x%xu;\\n\" % (self.name, self.address)", "def get_consts(self):\n consts = []\n for key in self.constants:\n consts.append({\n 'key': key,\n 'value': self.constants[key],\n })\n return consts", "def get_predefined_constant_names_latex():\n return \"t_0/t_g\", \"t_g\", r\"\\dot{\\varepsilon}\", \\\n \"E_1\", \"E_3\", r\"\\nu_{21}\", r\"\\nu_{31}\"", "def get_defined_constants():\n raise NotImplementedError()", "def consts(consts):\n\n namespace = { }\n\n for c in consts:\n constname = c[\"constname\"]\n consttype = c[\"consttype\"]\n constval = c[\"constval\"]\n\n # Correct various values that won't evaluate in python.\n if constval == \"( SteamItemInstanceID_t ) ~ 0\":\n constval = \"-1\"\n elif constval == \"( ( uint32 ) 'd' << 16U ) | ( ( uint32 ) 'e' << 8U ) | ( uint32 ) 'v'\":\n constval = \"6579574\"\n else:\n constval = re.sub(r\"(0x[0-9a-fA-F]*)ull\", r\"\\1\", constval)\n\n # Evaluate the result, and place it into the namespace.\n value = eval(constval, namespace, namespace)\n namespace[constname] = value\n\n # Generate.\n mapped = map_type(consttype)\n\n if value > 0:\n p(f\"{constname} = {mapped}(0x{value:x})\")\n else:\n p(f\"{constname} = {mapped}({value})\")", "def constants(self):\n return self._constants", "def get_constants(prefix):\n return {getattr(socket, name): name \n for name in dir(socket) if name.startswith(prefix)}", "def parse_defines(self):\n for line in self.header.splitlines():\n if line.lower().startswith(\"#define\"):\n _, line = line.strip().split(None, 1) # remove #define\n if \" \" in line:\n symbol, value = line.split(None, 1)\n if value.isdigit():\n value = int(value)\n elif value.startswith(\"0x\"):\n value = int(value, 16)\n elif value in self.types:\n self.types[symbol] = self.types[value]\n else:\n symbol = line\n value = \"\"\n self.constants[symbol] = value\n return self.constants", "def get_constants(prefix):\n return {\n getattr(socket, n): n\n for n in dir(socket)\n if n.startswith(prefix)\n }", "def get_constants(prefix):\n return dict( (getattr(socket, n), n)\n for n in dir(socket)\n if n.startswith(prefix)\n )", "def gen_cheader(protocol):\n\ts = \"\"\"/* Junior Design Sp2018 Final Project\n * Robot Firmware - RPi <-> Microcontroller Communication\n * Nick Ames 2018\n * WARNING: This file is automatically generated by gen-files.py\n * Any changes you make will be erased.\n */\n#include <stdfix.h>\n#include <stdint.h>\n#include \"config.h\"\n\n\"\"\"\n\ts += \"struct comm_data_t {\\n\"\n\tfor r in protocol:\n\t\ts += \"\\t\" + r.size + \" \" + r.name + \"; /* \" + r.desc + \" */\\n\"\n\ts += \"};\\n\\n\"\n\tfor r in protocol:\n\t\ts += \"%s get_%s(void); /* %s */\\n\"%(r.size, r.name, r.desc)\n\t\ts += \"void set_%s(%s); /* %s */\\n\\n\"%(r.name, r.size, r.desc)\n\ts += \"\"\"extern volatile struct comm_data_t Data;\"\"\"\n\treturn s", "def package_macros(self):\n from re import sub\n NAME = sub(r'[\\.\\-\\s]', '_', self.name.upper())\n 
return [('HAVE_' + NAME, '1')]", "def compose_defines():\n return \"\"\"\nLIBPBDATA_INC ?=../pbdata\nLIBPBIHDF_INC ?=../hdf\nLIBBLASR_INC ?=../alignment\nLIBPBDATA_LIB ?=%(thisdir)s/pbdata/\nLIBPBIHDF_LIB ?=%(thisdir)s/hdf/\nLIBBLASR_LIB ?=%(thisdir)s/alignment/\nnohdf ?=1\n\"\"\"%(dict(thisdir=thisdir))", "def include_constants_pi():\n return f\"\"\"\n#define PI_F 3.14159274101257f\n#define PI_2_F 1.57079637050629f\n#define PI_4_F 0.78539818525314f\n\"\"\"", "def get_constants_list(self):\n return [self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12]", "def get_constants(self):\n temp = self._properties.get('constants', [])\n return temp", "def constants(self):\n return self.bot.constants", "def getCDefinesAsString( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s' % fileName )\n content = FastScript.getFileContent( fileName, splitLines=True )\n raw_C = ''\n raw_CPP = ''\n raw_C_CFLAGS = ''\n raw_CPP_CFLAGS = ''\n regexp_C = re.compile( '^C_DEFINES\\s=\\s+(.*)$' )\n regexp_CPP = re.compile( '^CXX_DEFINES\\s=\\s+(.*)$' )\n regexp_C_CFLAGS = re.compile( '^C_FLAGS\\s=\\s+(.*)$' )\n regexp_CPP_CFLAGS = re.compile( '^CXX_FLAGS\\s=\\s+(.*)$' )\n result = ''\n\n for line in content:\n tmp = regexp_C.search( line )\n\n if tmp:\n raw_C = tmp.group( 1 )\n # logging.debug( 'raw C defines: %s' % raw_C )\n\n tmp = regexp_CPP.search( line )\n\n if tmp:\n raw_CPP = tmp.group( 1 )\n # logging.debug( 'raw CPP defines: %s' % raw_CPP )\n\n tmp = regexp_C_CFLAGS.search(line)\n\n if tmp:\n raw_C_CFLAGS = tmp.group(1)\n\n tmp = regexp_CPP_CFLAGS.search(line)\n\n if tmp:\n raw_CPP_CFLAGS = tmp.group(1)\n\n candidates = ( shlex.split( raw_C ) +\n shlex.split( raw_CPP ) +\n shlex.split( raw_C_CFLAGS ) +\n shlex.split( raw_CPP_CFLAGS ) )\n\n for candidate in candidates:\n if candidate.startswith( '-D' ):\n result += candidate + ' '\n\n return result", "def load_constants():\r\n marker_dictionary = dict()\r\n marker_dictionary[\"SP\"] = SP\r\n marker_dictionary[\"LCL\"] = LCL\r\n marker_dictionary[\"ARG\"] = ARG\r\n marker_dictionary[\"THIS\"] = THIS\r\n marker_dictionary[\"THAT\"] = THAT\r\n marker_dictionary[\"SCREEN\"] = SCREEN\r\n marker_dictionary[\"KBD\"] = KBD\r\n for i in range(0, RAM_RESERVE_END):\r\n marker_dictionary[\"R\"+str(i)] = i\r\n return marker_dictionary", "def constants(self):\n return self._constants", "def get_constants(self):\n return self.D1, self.D2, self.A1, self.A2, \\\n self.F1, self.F2, self.S12", "def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec", "def GetDefineGuardSymbol(file_name):\n return os.path.basename(file_name).upper().replace('.', '_')", "def gyp_defines():\n return dict(arg.split('=', 1)\n for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))", "def normalize_const(var_name):\n return var_name.lower().split('_')", "def cblas_header_text():\r\n\r\n return \"\"\"\r\n //#include <stddef.h>\r\n\r\n #undef __BEGIN_DECLS\r\n #undef __END_DECLS\r\n #ifdef __cplusplus\r\n #define __BEGIN_DECLS extern 
\"C\" {\r\n #define __END_DECLS }\r\n #else\r\n #define __BEGIN_DECLS /* empty */\r\n #define __END_DECLS /* empty */\r\n #endif\r\n\r\n __BEGIN_DECLS\r\n\r\n #define MOD %\r\n\r\n /*\r\n * Enumerated and derived types\r\n */\r\n #define CBLAS_INDEX size_t /* this may vary between platforms */\r\n\r\n enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};\r\n enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};\r\n enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};\r\n enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};\r\n enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};\r\n\r\n float cblas_sdsdot(const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY);\r\n double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,\r\n const int incY);\r\n float cblas_sdot(const int N, const float *X, const int incX,\r\n const float *Y, const int incY);\r\n double cblas_ddot(const int N, const double *X, const int incX,\r\n const double *Y, const int incY);\r\n\r\n /*\r\n * Functions having prefixes Z and C only\r\n */\r\n void cblas_cdotu_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotu);\r\n void cblas_cdotc_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotc);\r\n\r\n void cblas_zdotu_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotu);\r\n void cblas_zdotc_sub(const int N, const void *X, const int incX,\r\n const void *Y, const int incY, void *dotc);\r\n\r\n\r\n /*\r\n * Functions having prefixes S D SC DZ\r\n */\r\n float cblas_snrm2(const int N, const float *X, const int incX);\r\n float cblas_sasum(const int N, const float *X, const int incX);\r\n\r\n double cblas_dnrm2(const int N, const double *X, const int incX);\r\n double cblas_dasum(const int N, const double *X, const int incX);\r\n\r\n float cblas_scnrm2(const int N, const void *X, const int incX);\r\n float cblas_scasum(const int N, const void *X, const int incX);\r\n\r\n double cblas_dznrm2(const int N, const void *X, const int incX);\r\n double cblas_dzasum(const int N, const void *X, const int incX);\r\n\r\n\r\n /*\r\n * Functions having standard 4 prefixes (S D C Z)\r\n */\r\n CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);\r\n CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);\r\n CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);\r\n CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 1 BLAS routines\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (s, d, c, z)\r\n */\r\n void cblas_sswap(const int N, float *X, const int incX, \r\n float *Y, const int incY);\r\n void cblas_scopy(const int N, const float *X, const int incX, \r\n float *Y, const int incY);\r\n void cblas_saxpy(const int N, const float alpha, const float *X,\r\n const int incX, float *Y, const int incY);\r\n\r\n void cblas_dswap(const int N, double *X, const int incX, \r\n double *Y, const int incY);\r\n void cblas_dcopy(const int N, const double *X, const int incX, \r\n double *Y, const int incY);\r\n void cblas_daxpy(const int N, const double alpha, const double *X,\r\n const int incX, double *Y, const int incY);\r\n\r\n void cblas_cswap(const int N, void *X, const 
int incX, \r\n void *Y, const int incY);\r\n void cblas_ccopy(const int N, const void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_caxpy(const int N, const void *alpha, const void *X,\r\n const int incX, void *Y, const int incY);\r\n\r\n void cblas_zswap(const int N, void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_zcopy(const int N, const void *X, const int incX, \r\n void *Y, const int incY);\r\n void cblas_zaxpy(const int N, const void *alpha, const void *X,\r\n const int incX, void *Y, const int incY);\r\n\r\n\r\n /* \r\n * Routines with S and D prefix only\r\n */\r\n void cblas_srotg(float *a, float *b, float *c, float *s);\r\n void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);\r\n void cblas_srot(const int N, float *X, const int incX,\r\n float *Y, const int incY, const float c, const float s);\r\n void cblas_srotm(const int N, float *X, const int incX,\r\n float *Y, const int incY, const float *P);\r\n\r\n void cblas_drotg(double *a, double *b, double *c, double *s);\r\n void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);\r\n void cblas_drot(const int N, double *X, const int incX,\r\n double *Y, const int incY, const double c, const double s);\r\n void cblas_drotm(const int N, double *X, const int incX,\r\n double *Y, const int incY, const double *P);\r\n\r\n\r\n /* \r\n * Routines with S D C Z CS and ZD prefixes\r\n */\r\n void cblas_sscal(const int N, const float alpha, float *X, const int incX);\r\n void cblas_dscal(const int N, const double alpha, double *X, const int incX);\r\n void cblas_cscal(const int N, const void *alpha, void *X, const int incX);\r\n void cblas_zscal(const int N, const void *alpha, void *X, const int incX);\r\n void cblas_csscal(const int N, const float alpha, void *X, const int incX);\r\n void cblas_zdscal(const int N, const double alpha, void *X, const int incX);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 2 BLAS\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (S, D, C, Z)\r\n */\r\n void cblas_sgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n const float *X, const int incX, const float beta,\r\n float *Y, const int incY);\r\n void cblas_sgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const float alpha,\r\n const float *A, const int lda, const float *X,\r\n const int incX, const float beta, float *Y, const int incY);\r\n void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *A, const int lda, \r\n float *X, const int incX);\r\n void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const float *A, const int lda, \r\n float *X, const int incX);\r\n void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *Ap, float *X, const int incX);\r\n void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const 
enum CBLAS_DIAG Diag,\r\n const int N, const float *A, const int lda, float *X,\r\n const int incX);\r\n void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const float *A, const int lda,\r\n float *X, const int incX);\r\n void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const float *Ap, float *X, const int incX);\r\n\r\n void cblas_dgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n const double *X, const int incX, const double beta,\r\n double *Y, const int incY);\r\n void cblas_dgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const double alpha,\r\n const double *A, const int lda, const double *X,\r\n const int incX, const double beta, double *Y, const int incY);\r\n void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *A, const int lda, \r\n double *X, const int incX);\r\n void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const double *A, const int lda, \r\n double *X, const int incX);\r\n void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *Ap, double *X, const int incX);\r\n void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *A, const int lda, double *X,\r\n const int incX);\r\n void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const double *A, const int lda,\r\n double *X, const int incX);\r\n void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const double *Ap, double *X, const int incX);\r\n\r\n void cblas_cgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *X, const int incX, const void *beta,\r\n void *Y, const int incY);\r\n void cblas_cgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const void *alpha,\r\n const void *A, const int lda, const void *X,\r\n const int incX, const void *beta, void *Y, const int incY);\r\n void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum 
CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, void *X,\r\n const int incX);\r\n void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda,\r\n void *X, const int incX);\r\n void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n\r\n void cblas_zgemv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *X, const int incX, const void *beta,\r\n void *Y, const int incY);\r\n void cblas_zgbmv(const enum CBLAS_ORDER order,\r\n const enum CBLAS_TRANSPOSE TransA, const int M, const int N,\r\n const int KL, const int KU, const void *alpha,\r\n const void *A, const int lda, const void *X,\r\n const int incX, const void *beta, void *Y, const int incY);\r\n void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda, \r\n void *X, const int incX);\r\n void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *A, const int lda, void *X,\r\n const int incX);\r\n void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const int K, const void *A, const int lda,\r\n void *X, const int incX);\r\n void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,\r\n const int N, const void *Ap, void *X, const int incX);\r\n\r\n\r\n /* \r\n * Routines with S and D prefixes only\r\n */\r\n void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *A,\r\n const int lda, const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const float alpha, const float *A,\r\n const int lda, const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *Ap,\r\n const float *X, const int incX,\r\n const float beta, float *Y, const int incY);\r\n void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,\r\n const float alpha, const float *X, const int incX,\r\n const float *Y, const 
int incY, float *A, const int lda);\r\n void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, float *A, const int lda);\r\n void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, float *Ap);\r\n void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY, float *A,\r\n const int lda);\r\n void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const float *X,\r\n const int incX, const float *Y, const int incY, float *A);\r\n\r\n void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *A,\r\n const int lda, const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const double alpha, const double *A,\r\n const int lda, const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *Ap,\r\n const double *X, const int incX,\r\n const double beta, double *Y, const int incY);\r\n void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,\r\n const double alpha, const double *X, const int incX,\r\n const double *Y, const int incY, double *A, const int lda);\r\n void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, double *A, const int lda);\r\n void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, double *Ap);\r\n void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, const double *Y, const int incY, double *A,\r\n const int lda);\r\n void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const double *X,\r\n const int incX, const double *Y, const int incY, double *A);\r\n\r\n\r\n /* \r\n * Routines with C and Z prefixes only\r\n */\r\n void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *Ap,\r\n const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, 
void *A, const int lda);\r\n void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const void *X, const int incX,\r\n void *A, const int lda);\r\n void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const float alpha, const void *X,\r\n const int incX, void *A);\r\n void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *Ap);\r\n\r\n void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const int K, const void *alpha, const void *A,\r\n const int lda, const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const void *alpha, const void *Ap,\r\n const void *X, const int incX,\r\n const void *beta, void *Y, const int incY);\r\n void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const void *X, const int incX,\r\n void *A, const int lda);\r\n void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,\r\n const int N, const double alpha, const void *X,\r\n const int incX, void *A);\r\n void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *A, const int lda);\r\n void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,\r\n const void *alpha, const void *X, const int incX,\r\n const void *Y, const int incY, void *Ap);\r\n\r\n /*\r\n * ===========================================================================\r\n * Prototypes for level 3 BLAS\r\n * ===========================================================================\r\n */\r\n\r\n /* \r\n * Routines with standard 4 prefixes (S, D, C, Z)\r\n */\r\n void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const float alpha, const float *A,\r\n const int lda, const float *B, const int ldb,\r\n const float beta, float *C, const int ldc);\r\n void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n const float *B, const int ldb, const float beta,\r\n float *C, const int ldc);\r\n void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, 
const int K,\r\n const float alpha, const float *A, const int lda,\r\n const float beta, float *C, const int ldc);\r\n void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const float *A, const int lda,\r\n const float *B, const int ldb, const float beta,\r\n float *C, const int ldc);\r\n void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n float *B, const int ldb);\r\n void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const float alpha, const float *A, const int lda,\r\n float *B, const int ldb);\r\n\r\n void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const double alpha, const double *A,\r\n const int lda, const double *B, const int ldb,\r\n const double beta, double *C, const int ldc);\r\n void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n const double *B, const int ldb, const double beta,\r\n double *C, const int ldc);\r\n void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const double *A, const int lda,\r\n const double beta, double *C, const int ldc);\r\n void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const double *A, const int lda,\r\n const double *B, const int ldb, const double beta,\r\n double *C, const int ldc);\r\n void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n double *B, const int ldb);\r\n void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const double alpha, const double *A, const int lda,\r\n double *B, const int ldb);\r\n\r\n void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const void *alpha, const void *A,\r\n const int lda, const void *B, const int ldb,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_csyr2k(const enum CBLAS_ORDER Order, const 
enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n\r\n void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_TRANSPOSE TransB, const int M, const int N,\r\n const int K, const void *alpha, const void *A,\r\n const int lda, const void *B, const int ldb,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *beta, void *C, const int ldc);\r\n void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,\r\n const enum CBLAS_DIAG Diag, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n void *B, const int ldb);\r\n\r\n\r\n /* \r\n * Routines with prefixes C and Z only\r\n */\r\n void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void *beta,\r\n void *C, const int ldc);\r\n void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const float alpha, const void *A, const int lda,\r\n const float beta, void *C, const int ldc);\r\n void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const float beta,\r\n void *C, const int ldc);\r\n\r\n void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,\r\n const enum CBLAS_UPLO Uplo, const int M, const int N,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const void 
*beta,\r\n void *C, const int ldc);\r\n void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const double alpha, const void *A, const int lda,\r\n const double beta, void *C, const int ldc);\r\n void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,\r\n const enum CBLAS_TRANSPOSE Trans, const int N, const int K,\r\n const void *alpha, const void *A, const int lda,\r\n const void *B, const int ldb, const double beta,\r\n void *C, const int ldc);\r\n\r\n void cblas_xerbla(int p, const char *rout, const char *form, ...);\r\n\r\n __END_DECLS\r\n \"\"\"", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret" ]
[ "0.63279045", "0.6142163", "0.594471", "0.5942139", "0.59368765", "0.59215355", "0.590987", "0.58534914", "0.57482266", "0.57082814", "0.5656697", "0.56237125", "0.55894226", "0.55852455", "0.5570691", "0.5560409", "0.5546228", "0.5487835", "0.5454318", "0.5445472", "0.5438231", "0.5431478", "0.54168934", "0.539878", "0.5372401", "0.5366455", "0.5365839", "0.5358318", "0.5319855", "0.530538" ]
0.6312631
1
Get open accounts. Returns an array with active account numbers.
async def get_open_accounts(self): result = [] URL = API_HOST + "/api/resources/header" async with async_timeout.timeout(TIMEOUT): response = await self.session.get(URL) json_data = await response.json() accounts = json_data["data"]["accounts"]["data"]["data"] for account in accounts: if account["statusCategory"] == STATUS_CATEGORY_OPEN: result.append(account["accountNumber"]) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "def list_active_customers():\n active_customers = 0\n for customer in cm.Customers:\n if customer.status == \"Active \":\n active_customers += 1\n return active_customers", "def list_accounts(self):\n pass", "def getConnectedAccounts(**kwargs):\n strProdURL = kwargs[\"strProdURL\"]\n orgID = kwargs[\"ORG_ID\"]\n sessiontoken = kwargs[\"sessiontoken\"]\n\n accounts = get_connected_accounts_json(strProdURL, orgID, sessiontoken)\n orgtable = PrettyTable(['OrgID'])\n orgtable.add_row([orgID])\n print(str(orgtable))\n table = PrettyTable(['Account Number','id'])\n for i in accounts:\n table.add_row([i['account_number'],i['id']])\n \n print(\"Connected Accounts\")\n print(table)", "def get_accounts(self):\r\n return self._accounts", "def get_accounts(self):\n return self.accounts", "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def returnOpenOrders(self, account=None):\n if not account:\n if \"default_account\" in config:\n account = config[\"default_account\"]\n if not account:\n raise ValueError(\"You need to provide an account\")\n\n orders = self.dpay.rpc.get_open_orders(account, limit=1000)\n return orders", "def get_accounts(self):\n return self.accounts.all()", "def accounts(self):\r\n return acc.Accounts(self)", "def get_accounts(self):\n\n\t\treturn self.__accounts", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def list_active_customers():\n db_customers = Customers.select()\n LOGGER.debug(\"Calculating number of active customers\")\n # Technically used this in Lesson 03, but it is a comprehension. 
Another method added below.\n number_active = sum([int(x.status) for x in db_customers])\n LOGGER.info(\"There are %d active customers\", number_active)\n\n return number_active", "def list_active_customers():\n customer_active = Customer.select().where(Customer.status == 'Active')\n print('{} Active Customers'.format(len(customer_active)))\n return len(customer_active)", "def GetAccountList(self):\n\t\treturn self.accounts.keys()", "def list_active_customer():\n active_customer = Customer.select().where(Customer.is_active).count()\n LOGGER.info('Number of active customers retrieved.')\n return active_customer", "def get_accounts():\n graph = facebook.GraphAPI(mytoken)\n pages = graph.get_object('me/accounts')\n pages_info=[]\n for page in pages['data']:\n pages_info.append( ( page['name'], page['access_token'] ) )\n return pages_info", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def list_active_customers():\n init_database()\n return Customer.select().where(Customer.active_status).count()", "def display_accounts(cls):\n return cls.account_list", "def list_accounts(min_conf=1):\n min_conf = str(min_conf)\n try:\n stdout = subprocess.check_output([\"litecoin-cli\", \"listaccounts\", min_conf])\n accounts = json.loads(stdout.decode())\n except:\n sys.exit(1)\n\n return accounts", "def list_active_customers():\n with cm.DATABASE.transaction():\n # .select() has a .where() method to specify criteria for searching\n active_customers = cm.Customer.select().where(\n cm.Customer.status == \"Active\").count()\n LOGGER.info(\"Active customers: %s\", active_customers)\n return active_customers", "def getactiveusers(self):\n\n select_activeusers = (\n \"SELECT count(DISTINCT username) FROM public.jobs \"\n \"WHERE latestjobversion = True AND insertdate BETWEEN Date(%s) AND Date(%s) \"\n \"AND (username NOT IN (%s)) \"\n )\n\n\n self.pgcursor.execute(select_activeusers, (self.startdate, self.enddate, self.adminusers))\n\n activeusers = 0\n x = self.pgcursor.fetchone()\n if x is not None:\n activeusers = x[0]\n\n # print(\"No of active users: {0}\".format(activeusers))\n return activeusers", "def active_users(self, *args, **kwargs):\r\n return self._get('ActiveUsers', *args, **kwargs)", "def list(ctx):\n if ctx.obj.get('NAMESPACE') != 'accounts':\n click.echo(\n click.style('Only account data is available for listing.', fg='red')\n )\n return\n\n swag = create_swag_from_ctx(ctx)\n accounts = swag.get_all()\n _table = [[result['name'], result.get('id')] for result in accounts]\n click.echo(\n tabulate(_table, headers=[\"Account Name\", \"Account Number\"])\n )", "def get_open_orders(self):\n url = 'https://coincheck.com/api/exchange/orders/opens'\n headers = make_header(url, access_key=self.access_key, secret_key=self.secret_key)\n r = requests.get(url, headers=headers, timeout=self.timeout)\n return json.loads(r.text)", "def list_active_customers():\n return Customer.select().where(Customer.is_active).count()", "def accounts(self):\n return self._accounts.values()", "def return_active_users():\n return json.dumps(app.active_users)" ]
[ "0.63580984", "0.6303217", "0.62876016", "0.6213196", "0.61325455", "0.61324793", "0.6089535", "0.6086548", "0.6079627", "0.6079134", "0.6050323", "0.6013716", "0.59957004", "0.59836334", "0.59793425", "0.5972705", "0.5965284", "0.59572", "0.5945932", "0.59283507", "0.5911551", "0.58857083", "0.58805954", "0.5874453", "0.58570915", "0.5855002", "0.58417356", "0.58339196", "0.5831123", "0.5826693" ]
0.8321022
0
Get budget billing data
async def __getBBL_async(self, account, projectedBillData) -> dict: _LOGGER.info("Getting budget billing data") data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_BUDGET_BILLING_PREMISE_DETAILS.format(account=account) ) if response.status == 200: r = (await response.json())["data"] dataList = r["graphData"] # startIndex = len(dataList) - 1 billingCharge = 0 budgetBillDeferBalance = r["defAmt"] projectedBill = projectedBillData["projected_bill"] asOfDays = projectedBillData["as_of_days"] for det in dataList: billingCharge += det["actuallBillAmt"] calc1 = (projectedBill + billingCharge) / 12 calc2 = (1 / 12) * (budgetBillDeferBalance) projectedBudgetBill = round(calc1 + calc2, 2) bbDailyAvg = round(projectedBudgetBill / 30, 2) bbAsOfDateAmt = round(projectedBudgetBill / 30 * asOfDays, 2) data["budget_billing_daily_avg"] = bbDailyAvg data["budget_billing_bill_to_date"] = bbAsOfDateAmt data["budget_billing_projected_bill"] = float(projectedBudgetBill) async with async_timeout.timeout(TIMEOUT): response = await self.session.get( URL_BUDGET_BILLING_GRAPH.format(account=account) ) if response.status == 200: r = (await response.json())["data"] data["bill_to_date"] = float(r["eleAmt"]) data["defered_amount"] = float(r["defAmt"]) except Exception as e: _LOGGER.error(e) return data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetCampaignBudget(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def budget(self):\n return self._budget", "def billing_info(self):\r\n return BillingInfo(self)", "def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")", "def get_budgets(self) -> list:\n return self.budget_manager.get_budgets()", "def get_budgets(self) -> list:\n return list(self.budgets.values())", "def billing(self):\n return self._billing", "def billing_info(self):\n return self._billing_info", "def get_budget(self, category: BudgetCategory) -> Budget:\n return self.budgets.get(category, None)", "def get_spend_by_campaign_custom(self, budget_id, aw_account_id):\n try:\n budget = Budget.objects.get(id=budget_id)\n google_ads_account = DependentAccount.objects.get(id=aw_account_id)\n except (Budget.DoesNotExist, DependentAccount.DoesNotExist):\n return\n\n client = get_client()\n client.client_customer_id = google_ads_account.dependent_account_id\n\n aw_campaigns = budget.aw_campaigns.filter(account=google_ads_account)\n aw_campaign_ids = list(set([aw_campaign.campaign_id for aw_campaign in aw_campaigns]))\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n start_date = budget.start_date\n end_date = budget.end_date\n\n campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n\n campaign_report = Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n print(campaign_row)\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n yest_campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n yest_campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': yest_campaign_report_selector\n }\n\n start_date = budget.start_date\n yest_end_date = datetime.datetime.now() - datetime.timedelta(1)\n\n yest_campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': yest_end_date.strftime('%Y%m%d')\n }\n\n 
campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(yest_campaign_report_query))\n for campaign_row in campaign_report:\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend_until_yesterday = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n # try:\n # campaign_report = \\\n # Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(yest_campaign_report_query))[0]\n # except IndexError:\n # return\n #\n # campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n # start_date=budget.start_date,\n # end_date=budget.end_date)\n #\n # campaign_spend_object.spend_until_yesterday = int(campaign_report['cost']) / 1000000\n # campaign_spend_object.save()\n\n return 'get_spend_by_campaign_custom'", "def get_expenses(budget):\n return sum(expense['bgt'] for expense in budget['spend'])", "def get(self):\n\n bill = {\n 'product': {\n 'name': self.order.product.name,\n 'price': self.order.product.price\n },\n 'order_date_of_creation': self.order.date_of_creation,\n 'bill_date_of_creation': timezone.now(),\n 'discounts': [],\n 'total': self.order.product.price\n }\n\n return self.add_discount(bill)", "def budget_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"budget_name\")", "async def future_budget(budget: Budget):\n\n # Get the JSON object from the request body and cast it to a dictionary\n input_dict = budget.to_dict()\n bank_account_id = input_dict['bank_account_id']\n monthly_savings_goal = input_dict['monthly_savings_goal']\n\n transactions = load_user_data(bank_account_id)\n\n # instantiate the user\n user = User(transactions)\n\n # predict budget using time series model\n pred_bud = user.predict_budget()\n\n # if a fatal error was encountered while generating the budget,\n # return no budget along with the warning list\n if user.warning == 2:\n return json.dumps([None, user.warning_list])\n\n # modify budget based on savings goal\n modified_budget = user.budget_modifier(\n pred_bud, monthly_savings_goal=monthly_savings_goal)\n\n # if a fatal error was encountered while modifying the budget,\n # return no budget along with the warning list\n if user.warning == 2:\n return json.dumps([None, user.warning_list])\n\n # if a non-fatal warning was encountered in predict_budget() or\n # budget_modifier(), return the budget along with the warning list\n elif user.warning == 1:\n return json.dumps([modified_budget, user.warning_list])\n\n return modified_budget", "def get_debt_state(member, limit_year, limit_month):\n if member.first_payment_year is None:\n # never paid! 
using registration date to start with\n yearmonths_paid = set()\n year_to_check = member.registration_date.year\n month_to_check = member.registration_date.month\n else:\n # build a set for the year/month of paid quotas\n quotas = Quota.objects.filter(member=member).all()\n yearmonths_paid = {(q.year, q.month) for q in quotas}\n\n year_to_check = member.first_payment_year\n month_to_check = member.first_payment_month\n\n # verify the limit is after member started paying\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n return []\n elif year_to_check > limit_year:\n return []\n\n # build a set of all the year/month the member should have paid up to (including) the limit\n should_have_paid = set()\n while True:\n should_have_paid.add((year_to_check, month_to_check))\n year_to_check, month_to_check = increment_year_month(year_to_check, month_to_check)\n if year_to_check == limit_year:\n if month_to_check > limit_month:\n break\n elif year_to_check > limit_year:\n break\n\n return sorted(should_have_paid - yearmonths_paid)", "def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan", "def budget_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"budget_name\")", "def get_budgets(budg_path, exp_path, dates=None):\n exp_budg = data_help.read_jsonFile(budg_path)\n exp_data = data_help.read_jsonFile(exp_path)\n if dates == None:\n dates = [util.get_current_month()]\n for date in dates:\n exp_budg_keys = exp_budg.keys()\n if date not in exp_budg_keys: # check for current month to find exp categories\n print(\n f\"I have detected some data with for the month {date} that has no budget set.\")\n print(\n \"Please set the budget for this month.. or delete the data and run the program again.\")\n if len(exp_budg) != 0:\n user_in = util.get_user_input_for_chars(\n \"Would you like to the whole thing (w) or create new (n)? 
\", ['w', 'n'])\n\n if user_in == 'w':\n key = util.select_dict_key_using_integer(\n exp_budg, \"Please select a budget to copy: \", print_children=True, print_vals=False, \n print_child_vals=True)\n exp_budg[date] = exp_budg[key]\n elif user_in == 'n':\n exp_budg[date] = declare_new_budget(date, exp_data)\n else:\n exp_budg[date] = declare_new_budget(date, exp_data)\n\n print(f\"Your budget is now saved for {date}.\")\n\n else:\n print(f\"Your monthly budget for {date} is: \")\n\n util.print_simple_dict(exp_budg[date], print_vals=True)\n\n data_help.write_to_jsonFile(budg_path, exp_budg)\n return", "def get_budget(self, names=None, zones=None, net=False, pivot=False):\n recarray = _get_budget(\n self._budget, self._zonenamedict, names=names, zones=zones, net=net\n )\n\n if pivot:\n recarray = _pivot_recarray(recarray)\n\n return recarray", "def get_period_budgets(cls, now):\n limits_dict = {}\n strategies = cls.objects_visible.filter(is_distributed_evenly=True)\n strategies = cls.running(strategies)\n\n for strategy in strategies:\n limits_dict[strategy.public_id] = strategy.period_budget(now)\n\n log.info('[SPENDINGS] Period budgets calculated (currency): {0}'.format(limits_dict))\n\n # Cast to budget precision used in Redis\n return {strategy: cast_CPM_to_dbbid(cast_currency_to_CPM(budget)) for strategy, budget in limits_dict.items()}", "def get_budget_from_api(type_of_thing: int, qty: int):\n payload = {\"multiplier\": qty}\n if type_of_thing == 1:\n payload[\"commodity\"] = \"tomatoes\"\n elif type_of_thing == 2:\n payload[\"commodity\"] = \"broiler-chickens\"\n else:\n return None\n\n cache_key = f\"{type_of_thing}-{qty}\"\n\n val_from_cache = cache.get(cache_key)\n if val_from_cache:\n return val_from_cache\n\n json_payload = json.dumps(payload)\n\n r = requests.post(BUDGET_API_URL, json_payload)\n\n if r.status_code == 200:\n result = r.json()['data']\n cache.set(\n cache_key,\n result,\n BUDGET_CACHE_DURATION\n )\n return result\n\n return None", "def get_debt(self):\n sum_import = self.invoice_set.filter(\n expiration_date__lte=date.today(),\n paid=False,\n debited=False,\n canceled=False,\n uncollectible=False,\n ).aggregate(Sum(\"amount\"))\n return sum_import.get(\"amount__sum\", None)", "def get_order_limit_data():\n\n chargeDB = ChargeDBHelper()\n order_limit_list = []\n\n rxcui_bundles = chargeDB.get_all_charge_bundles()\n clinic_count = clinic_cnt_for_days(chargeDB.get_days_spanned())\n for bundle in rxcui_bundles:\n order_limit_list.append(to_order_limit_row(bundle, clinic_count))\n\n\n\n chargeDB.close()\n return order_limit_list", "def get_billing_data_by_priority(self):\n result = {}\n product = self.get_first_product_by_priority()\n if product:\n sp = self.subscriptionproduct_set.filter(product=product).first()\n if sp.address:\n result = {\n \"route\": sp.route_id,\n \"order\": sp.order,\n \"address\": sp.address.address_1 or sp.subscription.contact.email,\n \"state\": sp.address.state,\n \"city\": sp.address.city,\n \"name\": self.get_billing_name(),\n }\n if not result:\n if getattr(settings, \"FORCE_DUMMY_MISSING_BILLING_DATA\", False):\n result = {}\n return result", "async def __getFromProjectedBill(self, account, premise, currentBillDate) -> dict:\n data = {}\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(\n URL_RESOURCES_PROJECTED_BILL.format(\n account=account,\n premise=premise,\n lastBillDate=currentBillDate.strftime(\"%m%d%Y\"),\n )\n )\n\n if response.status == 200:\n projectedBillData = (await 
response.json())[\"data\"]\n\n billToDate = float(projectedBillData[\"billToDate\"])\n projectedBill = float(projectedBillData[\"projectedBill\"])\n dailyAvg = float(projectedBillData[\"dailyAvg\"])\n avgHighTemp = int(projectedBillData[\"avgHighTemp\"])\n\n data[\"bill_to_date\"] = billToDate\n data[\"projected_bill\"] = projectedBill\n data[\"daily_avg\"] = dailyAvg\n data[\"avg_high_temp\"] = avgHighTemp\n\n except Exception as e:\n _LOGGER.error(e)\n\n return data", "def GetAllCostByAmountBandFromDB(lowerLimit, upperLimit):\n\n logs.logger.debug(\"Start to get back all Cost object from database\\\n based on amount band.\")\n try:\n searchedCostByAmountBandFromDB = session.query(\n Cost.Cost).filter(Cost.Cost.amount >= lowerLimit, Cost.Cost.amount <= upperLimit).all()\n logs.logger.info(\n \"Get back all Cost object from database based on amount band.\")\n return [item for item in searchedCostByAmountBandFromDB]\n except Exception as e:\n logs.logger.error(e, exc_info=True)", "def budget(self):\n\n budget = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.vars['profit'] - _House.broker_fee\n return float(round(budget, 2))", "def getBudget(movieInfo):\n if \"budget\" in movieInfo:\n return int(movieInfo[\"budget\"])\n else:\n raise AttributeError(\"%s instance has no attribute budget\" % movieInfo)", "def budget_balance(self):\n budget_balance = round(self.budget() - self.total_spent(), 2)\n budget_balance_degree = round( (9000 * self.total_spent()) / (self.budget()), 4) #convert to degrees and round to four decimal places\n return (budget_balance, budget_balance_degree)", "def get(self):\n return {'bills': [bill.json() for bill in BillModel.find_all()]}" ]
[ "0.66888595", "0.6544355", "0.64032984", "0.6293646", "0.62567914", "0.625594", "0.62317514", "0.6158417", "0.6107173", "0.60221004", "0.59583", "0.586195", "0.58142555", "0.57493407", "0.56994355", "0.5688151", "0.5630534", "0.5616468", "0.56075585", "0.5590992", "0.5573143", "0.55689305", "0.5564759", "0.5552304", "0.5542231", "0.5503946", "0.5483847", "0.54810107", "0.5463703", "0.5454207" ]
0.7448803
0
Get data from appliance usage.
async def __getDataFromApplianceUsage(self, account, lastBilledDate) -> dict: _LOGGER.info("Getting appliance usage data") JSON = {"startDate": str(lastBilledDate.strftime("%m%d%Y"))} data = {} try: async with async_timeout.timeout(TIMEOUT): response = await self.session.post( URL_APPLIANCE_USAGE.format(account=account), json=JSON ) if response.status == 200: electric = (await response.json())["data"]["electric"] full = 100 for e in electric: rr = round(float(e["percentageDollar"])) if rr < full: full = full - rr else: rr = full data[e["category"].replace(" ", "_")] = rr except Exception as e: _LOGGER.error(e) return {"energy_percent_by_applicance": data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })", "def get_application_api_usage_get(self, applicationId, end, start):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/ApiUsage/{applicationId}/\"))", "def get_usage_data(username, password):\n usage_req = XfinityUsage(username, password, browser_name=\"firefox-headless\")\n return usage_req.run()", "def test_getusage(self):\n ret = {\"message\": \"No Random.org api key or api version found.\", \"res\": False}\n self.assertDictEqual(random_org.getUsage(), ret)\n\n self.assertDictEqual(\n random_org.getUsage(api_key=\"peW\", api_version=\"1\"),\n {\n \"bitsLeft\": None,\n \"requestsLeft\": None,\n \"res\": True,\n \"totalBits\": None,\n \"totalRequests\": None,\n },\n )", "async def __getDataFromBalance(self, account) -> dict:\n _LOGGER.info(\"Getting appliance usage data\")\n\n data = {}\n\n URL_BALANCE = API_HOST + \"/api/resources/account/{account}/balance?count=-1\"\n\n try:\n async with async_timeout.timeout(TIMEOUT):\n response = await self.session.get(URL_BALANCE.format(account=account))\n if response.status == 200:\n data = (await response.json())[\"data\"]\n\n indice = [i for i, x in enumerate(data) if x[\"details\"] == \"DEBT\"][\n 0\n ]\n\n deb = data[indice][\"amount\"]\n\n except Exception as e:\n _LOGGER.error(e)\n\n return {\"balance_data\": data}", "def retr_devices_by_app( app ) :\n\n\t\t\t_logger.info( '...retr_devices_by_app...' )\n\t\t\toutput = []\n\t\t\ttry :\n\t\t\t\tdb = mongo.db.auth_devices\n\t\t\t\tfor device in db.find( { 'app_tags' : app } ) :\n\t\t\t\t\toutput.append({'moniker' : device['moniker'] ,\n\t\t\t\t\t\t\t\t 'description' : device['description'] ,\n\t\t\t\t\t\t\t\t 'active' : device['active'] ,\n\t\t\t\t\t\t\t\t 'device_id' : device['device_id'] ,\n\t\t\t\t\t\t\t\t 'enlisted' : device['enlisted'] ,\n\t\t\t\t\t\t\t\t 'last_kown_remote_ip' : device['last_known_remote_ip'] ,\n\t\t\t\t\t\t\t\t 'engaged' : device['engaged'] ,\n\t\t\t\t\t\t\t\t 'canononical_user' : device['canonical_user'] ,\n\t\t\t\t\t\t\t\t 'scope' : device['scope'] ,\n\t\t\t\t\t\t\t\t 'segment' : device['segment']\n\t\t\t\t\t})\n\t\t\texcept Exception as e :\n\t\t\t\t _logger.error( '...retr_devices_by_app %s' % e.message )\n\t\t\treturn jsonify({'result' : output})", "def get_data():\n pass", "def get_info():\n global PERF_APP\n archs = None\n best_arch = None\n cipher_algos = None\n hash_algos = None\n aead_algos = None\n\n cmd = PERF_APP + ' --print-info'\n\n try:\n res = subprocess.run(cmd, stdout=subprocess.PIPE, \\\n stderr=subprocess.STDOUT, \\\n env=ENVS, shell=True, check=True)\n output = res.stdout.decode('utf-8')\n except subprocess.CalledProcessError as e:\n print(\"Error (\" + str(e.returncode) + \")\")\n print(e.output.decode('utf-8'))\n sys.exit(1)\n\n lines = output.rstrip().split('\\n')\n try:\n for line in lines:\n info = line.split(':')\n if info[0] == 'Supported architectures':\n archs = info[1].split()\n if info[0] == 'Best architecture':\n best_arch = info[1].split()\n if info[0] == 'Supported cipher algorithms':\n cipher_algos = info[1].split()\n if info[0] == 'Supported hash algorithms':\n hash_algos = info[1].split()\n if info[0] == 'Supported aead algorithms':\n aead_algos = info[1].split()\n except:\n print(\"Error parsing --print-info output:\\n\" \\\n \"{}\".format(output), file=sys.stderr)\n\n if archs is None or best_arch is None or 
cipher_algos is None \\\n or hash_algos is None or aead_algos is None:\n print(\"Error parsing system and app information\", file=sys.stderr)\n sys.exit(1)\n\n return archs, best_arch, cipher_algos, hash_algos, aead_algos", "def usage(self, host):", "def getInfo():", "def print_app_data(self):\n print(\"===================================\")\n print(\"== RESULTS: ==\")\n print(\"===================================\")\n\n # Analog application results\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n print(\"Number of analog application processed: {}\".format(len(self.analog_apps)))\n if (self.verbose):\n for app in self.analog_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Bay number : {}\".format(device[\"bay_number\"]))\n print(\" - Channel number : {}\".format(device[\"channel_number\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of faults : {}\".format(len(device[\"faults\"])))\n for fault_id,fault_data in device[\"faults\"].items():\n print(\" Fault data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}_T{}\".format(fault_data[\"name\"], fault_data[\"bit_positions\"][0]))\n print(\" - ID : {}\".format(fault_id))\n print(\" - Name : {}\".format(fault_data[\"name\"]))\n print(\" - Description : {}\".format(fault_data[\"description\"]))\n print(\" - Bit positions : {}\".format(fault_data[\"bit_positions\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"--------------------------\")\n\n # Digital application result\n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n print(\"Number of digital application processed: {}\".format(len(self.digital_apps)))\n if (self.verbose):\n for app in self.digital_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of inputs : {}\".format(len(device[\"inputs\"])))\n for input in device[\"inputs\"]:\n print(\" Input data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}\".format(input[\"name\"]))\n print(\" - Name : {}\".format(input[\"name\"]))\n print(\" - Bit position : {}\".format(input[\"bit_position\"]))\n print(\" - Zero name : {}\".format(input[\"zero_name\"]))\n print(\" - One name : {}\".format(input[\"one_name\"]))\n print(\" - Alarm state : {}\".format(input[\"alarm_state\"]))\n print(\" - Debounce : {}\".format(input[\"debounce\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"----------------------------\")\n\n\n print(\"===================================\")\n\n print('Found {} link nodes:'.format(len(self.link_nodes)))\n for k,v in self.link_nodes.items():\n print('{}: {}'.format(k, v['type']))", "def get_apk(self):", "def get_discovery_summary():\n pass", "def usage_information(self):\n return self._usage_information", "def gather_metric(self):\n result = self._shell.run(self.ADB_COMMAND)\n stdout = result.stdout.splitlines()\n adb_version = stdout[0].split()[-1]\n # Revision information will always be in next line\n adb_revision = stdout[1].split()[1]\n\n response = {\n self.ADB_VERSION: adb_version,\n self.ADB_REVISION: adb_revision\n }\n return response", "def device_overview(self):\r\n data = {}\r\n\r\n # GET DATA\r\n token = request.headers.get('token')\r\n userid = request.headers.get('userid')\r\n vessel_id = request.args.get('vessel_id')\r\n epoch_format = request.args.get('format')\r\n\r\n # CHECK TOKEN\r\n if not self.validate_token(token, userid):\r\n data['alert'] = \"Invalid Token\"\r\n data['status'] = 'Failed'\r\n return self.return_data(data)\r\n\r\n alarm_types = self.get_alarm_types()\r\n\r\n ats = self.get_alarm_trigger()\r\n\r\n devices = self.couch_query.get_all_devices(vessel_id)\r\n\r\n standard_time = self.epoch_day(time.time())\r\n\r\n epoch_time = time.time()\r\n\r\n temp_data = []\r\n\r\n start_date = self.get_start_date(epoch_format)\r\n\r\n if not start_date and epoch_format not in [\"day\", \"hours\"]:\r\n\r\n data['alert'] = \"Invalid format!\"\r\n data['status'] = 'Failed'\r\n\r\n return self.return_data(data)\r\n\r\n for device in devices:\r\n\r\n if device['doc']['device'] in ['PARAMETERS', 'NTWCONF', 'NTWPERF1']:\r\n\r\n continue\r\n\r\n row = {}\r\n row['device'] = device['doc']['device']\r\n row['name'] = device['doc']['device']\r\n row['Alert'] = 0\r\n row['Critical'] = 0\r\n row['Warning'] = 0\r\n row['Info'] = 0\r\n row['Debug'] = 0\r\n for atrigger in ats:\r\n\r\n trigger_type = self.get_alarm_type_name(alarm_types, atrigger['alarm_type_id'])\r\n\r\n at_id = atrigger['alarm_trigger_id']\r\n device_id = device['id']\r\n\r\n datas = self.calc.calculate_trigger([at_id], standard_time,\r\n epoch_time, vessel_id=vessel_id,\r\n device_id=device_id)\r\n\r\n if not datas == \"No Alarm Trigger found.\":\r\n\r\n datas_index_0 = datas[0]\r\n len_datas = datas_index_0['results']\r\n if len_datas:\r\n\r\n row[trigger_type] = 1\r\n\r\n if epoch_format in ['week', 'month', \"quarter\", 'annual']:\r\n\r\n sql_str = \"SELECT COUNT(alarm_trigger_id) FROM alarm_data \"\r\n sql_str += \"WHERE device_id='{0}' \".format(device_id)\r\n sql_str += \"AND epoch_date > {0} \".format(start_date)\r\n sql_str += \"AND epoch_date < {0}\".format(epoch_time)\r\n\r\n res = self.postgres.query_fetch_one(sql_str)\r\n\r\n row[trigger_type] = row[trigger_type] + res['count']\r\n\r\n temp_data.append(row)\r\n\r\n final_data = {}\r\n final_data['data'] = temp_data\r\n final_data['status'] = 'ok'\r\n\r\n return self.return_data(final_data)", "def get_devices_summary():\n\n # This function was created to replace get_devices_information\n # because it wasn't detecting virtual systems in Palo Alto Virtual Systems\n global nipper_xml\n devices = {}\n headings = []\n\n # Add the table headings to a list\n for h in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/headings/heading\"):\n if h not in headings:\n headings.append(h.text)\n\n 
for device in nipper_xml.findall(\"./summary/table/[@ref='SCOPE.AUDITDEVICELIST.TABLE']/tablebody/tablerow\"):\n values = []\n for i in device.findall('./tablecell/item'):\n if i not in values:\n values.append(i.text)\n if DEBUG:\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Name')], values[headings.index('Name')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('Device')], values[headings.index('Device')])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[0])\n print \"\\t\" + note + \"%s: %s\" % (headings[headings.index('OS')], values[headings.index('OS')].split(\" \")[1])\n devices[values[headings.index('Name')]] = {'name': values[headings.index('Name')],\n 'type': values[headings.index('Device')],\n 'os': values[headings.index('OS')].split(' ')[0],\n 'osversion': values[headings.index('OS')].split(' ')[1]\n }\n\n if DEBUG:\n print info + \"Device Object:\"\n print devices\n raw_input(warn + \"Press enter to continue\")\n return devices", "def getEnergyUsage():\n energy_data = asyncio.run(plug.get_emeter_realtime())\n\n return energy_data", "def get_data():\n return", "def data_setup_appliances():\n appliance_list = []\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance1\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance2\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance3\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance4\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_BASIC, \"appliance5\", gpio_pin_id=None))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance1\", gpio_pin_id=13))\n appliance_list.append(helper_setup_appliances(APPLIANCE_TYPE_NAME_RELAY, \"gpio_appliance2\", gpio_pin_id=15))\n return appliance_list", "def get_data(self):", "def _get_data(self):\n c = Connector(self.host, self.username, self.password)\n return c.getLanDevices()", "def get_data():\n pass", "def get_data():\n pass", "def get_data():\n pass", "def user_sends_get_call_to_the_devices():\n web_app.list_devices()", "def info_equipment_get():\n equipment = _equipment_by_group()\n return equipment, 200", "def retrieve_dial_data(app_name):\n # NOTE: the reference code store the file in the application folder, we read the file in our data folder\n # perhaps other changes will be needed to allow USE_ADDITIONAL_DATA feature to work\n file_path = 'dial_data/' + app_name + '.json'\n if not fileops.file_exists(file_path):\n return {}\n data = fileops.load_file_def(file_path)\n return json.loads(data)", "def get(self):\n try:\n log.debug(\"Device info : \")\n #get the payload to influx DB\n url = \"http://localhost:8086/query\"\n querystring = {\"pretty\": \"true\", \"db\": \"IOT\",\n \"q\":\"SELECT DISTINCT(deviceId) FROM(SELECT deviceId,q1 FROM \\\"ttd_devices\\\" ) \" }\n response = requests.request(\"GET\", url, params=querystring)\n r_d=json.loads(response.text)\n result_d=[]\n for rec in r_d['results'][0]['series']:\n for element in rec['values']:\n result_d.append(element[1])\n result={}\n result['status'] = 1\n result['message']=result_d\n return_status = 200\n except ValueError as e:\n result = {}\n log.exception('Value Exception while fetching device list')\n result['status'] = 0\n return_status = 400\n result['message'] = 
e.args[0]\n except :\n result = {}\n log.exception('Exception while fetching the device data')\n return_status = 500\n result['status'] = 0\n result['message'] = 'Internal Error has occurred while fetching devie data'\n finally:\n resp = Response(json.dumps(result), status=return_status, mimetype=\"application/json\")\n return resp", "def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))" ]
[ "0.6542614", "0.6461803", "0.6203412", "0.6135864", "0.5930974", "0.5910394", "0.5888209", "0.5844207", "0.58017164", "0.5787487", "0.57435703", "0.5714978", "0.5709852", "0.564539", "0.5628239", "0.5616134", "0.5614402", "0.56101", "0.5602017", "0.55986047", "0.5594599", "0.5585295", "0.555319", "0.555319", "0.555319", "0.55047464", "0.54400617", "0.5439377", "0.54390097", "0.5437" ]
0.6843256
0
Return the default form class used for user registration.
def get_form_class(self, request): return RegistrationForm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_form_class(self, request):\n\t\treturn RegistrationForm", "def get_form_class(self):\n return self.form_class", "def get_form_class(self):\n if self.form_class:\n return self.form_class\n else:\n raise ImproperlyConfigured(\n \"在定义类视图%s的时候,你必须明确指定一个form_class.\"%self.__class__.__name__)", "def get_form_class(self):\r\n return modelform_factory(self.model)", "def get_form(self, form_class=None):\n\t\tif form_class is None:\n\t\t\tform_class = self.get_form_class()\n\t\treturn form_class(self.request.user, **self.get_form_kwargs())", "def get_token_form_class(self):\n from two_factor.forms import AuthenticationTokenForm\n\n return AuthenticationTokenForm", "def get_form_class():\n return RazorPaymentForm", "def get_form_class(self):\n form_options = self.get_form_options()\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the base_form_class from the model.\n # If that is not defined, use WagtailAdminModelForm.\n model_form_class = getattr(self.model, \"base_form_class\", WagtailAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n return get_form_for_model(\n self.model,\n form_class=base_form_class,\n **form_options,\n )", "def get_form():\n global form_class\n from fluent_comments import appsettings\n\n if form_class is None:\n if appsettings.FLUENT_COMMENTS_FORM_CLASS:\n from django.utils.module_loading import import_string\n\n form_class = import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)\n else:\n from fluent_comments.forms import FluentCommentForm\n\n form_class = FluentCommentForm\n\n return form_class", "def get_form_class(self, form_key):\n return self.get_form_classes()[form_key]", "def get_form_class(self):\n\t\treturn formset_factory(super(FormsetMixin, self).get_form_class(), **self.get_formset_kwargs())", "def get_default_form(self, display=False):\n form_selector = display_form_selector if display else county_form_selector\n return form_selector.get_combined_form_class(counties=[self.county.slug])", "def get_form_class(self):\n return get_review_form(review=self.get_object(), user=self.request.user)", "def get_form_classes(self):\n return {\n **self.form_classes\n }", "def get_form(self, form_class):\n return form_class(**self.get_form_kwargs())", "def get_form(self, form_class=None):\n if form_class is None:\n form_class = self.get_form_class()\n return form_class(\n token=self.request.session.get('token', False),\n aiid=self.kwargs['aiid'],\n **self.get_form_kwargs()\n )", "def get_form(self):\n kwargs = {\n \"instance\": self.profile if self.form_object == \"profile\" else self.user,\n \"prefix\": self.name,\n }\n\n if self.request.method == \"POST\":\n return self.form_class(self.request.POST, self.request.FILES, **kwargs)\n else:\n return self.form_class(**kwargs)", "def signup_form(request):\n return {'signup_form': UserForm()}", "def _get_bulk_change_form_class(self):\n return BulkChangeFormWizardHandlerPluginsForm", "def _get_bulk_change_form_class(self):\n return BulkChangeFormElementPluginsForm", "def get_form_class(self):\n login_try_count = self.request.session.get('login_try_count', 0)\n\n # If the form has been submitted...\n if self.request.method == \"POST\":\n self.request.session['login_try_count'] = login_try_count + 1\n\n if login_try_count >= 20:\n return CaptchaAuthenticationForm\n\n return super(LoginView, self).get_form_class()", "def register_form(self):\n f = Form()\n self.forms = f\n return f", "def _get_bulk_change_form_class(self):\n return 
BulkChangeFormHandlerPluginsForm", "def get_form_class(self):\n \n \"\"\"\n Construct a form class that has all the fields and formsets named in\n the children of this edit handler. \n \"\"\"\n if not hasattr(self, 'model'):\n raise AttributeError(\n '%s is not bound to a model yet. Use `.bind_to(model=model)` '\n 'before using this method.' % self.__class__.__name__)\n # If a custom form class was passed to the EditHandler, use it.\n # Otherwise, use the rai_base_form_class from the model.\n # If that is not defined, use RAIAdminModelForm.\n model_form_class = getattr(self.model, 'rai_base_form_class',\n RAIAdminModelForm)\n base_form_class = self.base_form_class or model_form_class\n\n formsets = self.required_formsets()\n\n form_class = rai_modelform_factory(\n self.decorator.get_rai_model(),\n form_class=base_form_class,\n fields=self.required_internal_fields(),\n formsets=formsets,\n widgets=self.widget_overrides())\n form_class.readonly_fields = self.readonly_fields()\n return form_class", "def name(self) -> Text:\n\n return \"user_form\"", "def get_form_class(self):\n if self.survey.get_requires_payment():\n return AuthorizenetSurveyPurchaseForm\n return super(AuthorizenetSurveyPurchaseCreate, self).get_form_class()", "def get_form(self, form_class):\n if self.get_locked_form(form_class):\n return None\n return form_class(**self.get_form_kwargs())", "def get_basic_form(self):\n return self.basic_form", "def _form_for_type(request, C, defn, add_id_and_rev=False):\n form = build(defn, C, add_id_and_rev=add_id_and_rev,\n widget_registry=_widget_registry(request))\n form.renderer = request.environ['restish.templating'].renderer\n return form", "def get_form(self, form_class=None):\n # 设置初始值\n if self.request.method == \"GET\":\n return SecondMenuModelForm(initial={'menu': self.menu_obj})\n else:\n # post提交的时候,不要忘记设置data\n return SecondMenuModelForm(data=self.request.POST)" ]
[ "0.8111784", "0.770102", "0.74282926", "0.72233987", "0.71631217", "0.7097246", "0.7082445", "0.696311", "0.692484", "0.6767896", "0.67418265", "0.66456175", "0.66020036", "0.64767134", "0.64665145", "0.64210325", "0.63494134", "0.6314577", "0.62687373", "0.6240669", "0.6233663", "0.6218351", "0.6175523", "0.6134979", "0.6127915", "0.611254", "0.5986515", "0.5926572", "0.58976483", "0.5886312" ]
0.7956627
1
Creates the sum tree data structure for the given replay capacity.
def __init__(self, capacity): assert isinstance(capacity, int) if capacity <= 0: raise ValueError( 'Sum tree capacity should be positive. Got: {}'.format(capacity)) self.nodes = [] self.depth = int(np.ceil(np.log2(capacity))) self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx self.high_idx = capacity + self.low_idx self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision. self.capacity = capacity self.highest_set = 0 self.max_recorded_priority = 1.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, capacity, tuple, alpha=0.6, beta=0.4):\n self.tree = SumTree(capacity)\n self.capacity = capacity\n self.alpha = alpha\n self.beta = beta\n self.tuple = tuple", "def __init__(self, memory_size, batch_size, alpha):\n self.tree = sum_tree.SumTree(memory_size)\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.alpha = alpha", "def __init__(self, capacity=100):\n \n self.capacity = capacity\n self.size = 0\n self._keys = []\n self._entry = [[] for _ in range(capacity)]", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.table = [None] * capacity", "def __init__(self, capacity: int):\n self._pax_with_carry_on = PaxStack()\n self._pax_without_carry_on = PaxStack()\n self._capacity = capacity\n self._current_pax = 0", "def __init__(self, capacity):\n self.capacity = capacity\n self.map = {}\n self.head = self.Node(0, 0)\n self.tail = self.Node(0, 0)\n self.head.next = self.tail\n self.tail.pre = self.head\n self.cnt = 0", "def __init__(self, memory_size, alpha):\n self.tree = SumTree(memory_size)\n self.memory_size = memory_size\n self.alpha = alpha\n self.bonus_priority = 999 # add bonus priority for transitions that were never sampled\n self.epsilon_priority = 0.000001\n if self.alpha == 0: # revert to full uniform\n self.bonus_priority = 0", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def __init__(self, size):\n self.size = size\n self.queue = []\n self.sum = 0", "def __init__(self, size):\n\n self._root = Node()\n size_left = int(size/2)\n # Initialization of the tree\n self._root.left = self._createSubtree(self._root, 0, size_left) # [a,b[\n self._root.right = self._createSubtree(self._root, size_left, size)\n self._max_priority = 1", "def __init__(self, capacity=4):\n self.capacity = capacity\n self.size = 0\n self.data = [None] * capacity\n self.head = 0\n self.tail = 0", "def __init__(self, size: int):\n self.size = size\n self.queue = deque()\n self.widowSum = 0", "def __init__(self, size):\n self.sum = 0\n self.nums = 0\n self.size = size\n self.deq = collections.deque()", "def __init__(self, memory_size, batch_size, alpha, mu, seed):\n self.tree = SumTree(memory_size)\n self.memory_size = memory_size\n self.batch_size = batch_size\n self.alpha = alpha\n self.__e = 0.01\n self.__mu = mu\n np.random.seed(seed)", "def __init__(self, size: int):\n self.size = size \n self.tracker = deque()\n self.sum = 0", "def __init__(self, size):\n self.size = size\n self.queue = deque([])\n self.cur_sum = 0", "def __init__ (self, size: int):\n self.size = size\n self.queue = []\n self.sum = 0", "def __init__(self, capacity=2):\r\n self._capacity = capacity\r\n self._data = [0] * self._capacity\r\n self._size = 0", "def nocache_create_equal_size_subtrees():\n N = len(self)\n subTrees = [set(range(i, N, numSubTrees)) for i in range(numSubTrees)]\n totalCost = N\n return subTrees, totalCost", "def __init__(self, buffer_size, batch_size, random_seed=1234):\n self.tree = PER.sum_tree.SumTree(buffer_size)\n self.batch_size = batch_size\n self.s_prev, self.s_ori_prev, self.a_prev, self.r_prev = None, None, None, None\n\n # p_i = (p + e)^a\n self.e = 0.00000001\n self.a = 0.6 # values suggested by authors\n self.beta = 0.4 # to 1 - values suggested by authors\n\n self.previous_index = None # TODO\n self.prevQ_s_t_a_t_ = None\n random.seed(random_seed)", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def create_subtrees(maxCost, maxCostRate=0, 
costMetric=\"size\"):\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n #print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost", "def create_subtrees(maxCost, maxCostRate=0, costMetric=\"size\"):\n\n if costMetric == \"applys\":\n def cost_fn(rem): return len(rem) # length of remainder = #-apply ops needed\n elif costMetric == \"size\":\n def cost_fn(rem): return 1 # everything costs 1 in size of tree\n else: raise ValueError(\"Uknown cost metric: %s\" % costMetric)\n\n subTrees = []\n curSubTree = set([evalOrder[0]])\n curTreeCost = cost_fn(self[evalOrder[0]][1]) # remainder length of 0th evaluant\n totalCost = 0\n cacheIndices = [None] * self.cache_size()\n\n for k in evalOrder:\n iStart, remainder, iCache = self[k]\n\n if iCache is not None:\n cacheIndices[iCache] = k\n\n #compute the cost (additional #applies) which results from\n # adding this element to the current tree.\n cost = cost_fn(remainder)\n inds = set([k])\n\n if iStart is not None and cacheIndices[iStart] not in curSubTree:\n #we need to add the tree elements traversed by\n #following iStart\n j = iStart # index into cache\n while j is not None:\n iStr = cacheIndices[j] # cacheIndices[ iStart ]\n inds.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n\n if curTreeCost + cost < maxCost:\n #Just add current string to current tree\n curTreeCost += cost\n curSubTree.update(inds)\n else:\n #End the current tree and begin a new one\n #print(\"cost %d+%d exceeds %d\" % (curTreeCost,cost,maxCost))\n subTrees.append(curSubTree)\n curSubTree = set([k])\n\n cost = cost_fn(remainder); j = iStart\n while j is not None: # always traverse back iStart\n iStr = cacheIndices[j]\n curSubTree.add(iStr)\n cost += cost_fn(self[iStr][1]) # remainder\n j = self[iStr][0] # iStart\n totalCost += curTreeCost\n curTreeCost = cost\n 
#print(\"Added new tree w/initial cost %d\" % (cost))\n\n maxCost += maxCostRate\n\n subTrees.append(curSubTree)\n totalCost += curTreeCost\n return subTrees, totalCost", "def grow_tree(self):\n\n decision_node = self.root\n internal_env = copy.copy(self.env)\n\n while (not decision_node.is_final) and decision_node.visits > 1:\n\n a = self.select(decision_node)\n\n new_random_node = decision_node.next_random_node(a, self._hash_action)\n\n (new_decision_node, r) = self.select_outcome(internal_env, new_random_node)\n\n new_decision_node = self.update_decision_node(new_decision_node, new_random_node, self._hash_space)\n\n new_decision_node.reward = r\n new_random_node.reward = r\n\n decision_node = new_decision_node\n\n decision_node.visits += 1\n cumulative_reward = self.evaluate(internal_env)\n\n while not decision_node.is_root:\n random_node = decision_node.father\n cumulative_reward += random_node.reward\n random_node.cumulative_reward += cumulative_reward\n random_node.visits += 1\n decision_node = random_node.father\n decision_node.visits += 1", "def __init__(self):\n self.root = RadixTreeNode()\n self.root.key = \"\"\n self.size = 0", "def __init__(self):\n self.capacity = 10000\n self.table = [[] for _ in range(self.capacity)]", "def __init__(self):\n self._size = 0\n self._array = [None] * BinaryTree.DEFAULT_CAPACITY", "def __init__(self, capacity):\n self.memory = deque([], maxlen=capacity)", "def __init__(self, size: int):\n self.__data = []\n for i in range(0, size):\n self.__data.append(self.Node(pre_index=i))", "def __init__(self, size, alpha):\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0" ]
[ "0.6020408", "0.5820842", "0.5724238", "0.55728537", "0.55458015", "0.5542693", "0.5513752", "0.5476557", "0.53817534", "0.53465384", "0.53336674", "0.53084284", "0.5296613", "0.5293262", "0.5262831", "0.5256954", "0.5208593", "0.519735", "0.5179887", "0.51658785", "0.51624656", "0.5124032", "0.5124032", "0.51225233", "0.51113254", "0.51065004", "0.5095909", "0.50948924", "0.5094026", "0.50869215" ]
0.70326006
0
Performs stratified sampling using the sum tree.
def stratified_sample(self, batch_size, rng): if self._total_priority() == 0.0: raise Exception('Cannot sample from an empty sum tree.') indices = parallel_stratified_sample(rng, self.nodes, np.arange(batch_size), batch_size, self.depth) return np.minimum(indices - self.low_idx, self.highest_set)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def uniform_sample(X, y, S, b, d):\n\n\tX['label'] = y\n\n\tW = pd.DataFrame({'group': [1, 1, 0, 0], 'label': [1, 0, 1, 0]})\n\n\t# Calculate weight for each combination of sensitive attribute and class,\n\t# given by the number of examples in each group divided by the number\n\t# that should be in each group if the data were non-discriminatory\n\t# NOTE: Algorithm 4 in the paper actually usees a denominator that appears to be wrong...\n\tweights = [[len(X[X[S] == s]) * len(X[X['label'] == c]) / float(len(X)*0.25) \n\t\t\t\t# / float(len(X) * len(X[(X[S] == s) & (X['label'] == c)])) \\\n\t\t\t\tfor c in [1, 0]] for s in [1, 0]]\n\n\tsizes = [[len(X[(X[S] == s) & (X['label'] == c)]) for c in [1, 0]] for s in [1, 0]]\n\n\tW['weight'] = [i for j in weights for i in j]\n\tW['size'] = [i for j in sizes for i in j]\n\tW = W.assign(num = lambda x: x.size * x.weight)\n\n\t# Divide the data into the four groups based on class/group\n\tdp = X[(X[S] == b) & (X['label'] == d)]\n\tdn = X[(X[S] == b) & (X['label'] != d)]\n\tfp = X[(X[S] != b) & (X['label'] == d)]\n\tfn = X[(X[S] != b) & (X['label'] != d)]\n\n\t# Uniformly sample from each group\n\tdp = dp.sample(n = W.loc[(W['group'] == b) & (W['label'] == d), 'num'].iloc[0].astype(int), replace = True)\n\tdn = 
dn.sample(n = W.loc[(W['group'] == b) & (W['label'] != d), 'num'].iloc[0].astype(int), replace = True)\n\tfp = fp.sample(n = W.loc[(W['group'] != b) & (W['label'] == d), 'num'].iloc[0].astype(int), replace = True)\n\tfn = fn.sample(n = W.loc[(W['group'] != b) & (W['label'] != d), 'num'].iloc[0].astype(int), replace = True)\n\n\tX_prime = pd.concat([dp, dn, fp, fn])\n\tX.drop('label', axis = 1, inplace = True)\n\ty_prime = X_prime['label'].tolist()\n\tX_prime = X_prime.drop('label', axis = 1)\n\n\treturn(X_prime, y_prime)", "def sampling_algorithm(self, X, y):\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed\")\r\n\r\n X_min = X[y == self.min_label]\r\n X_maj = X[y == self.maj_label]\r\n\r\n # fitting nearest neighbors model to find closest majority points to\r\n # minority samples\r\n nn_params = {**self.nn_params}\r\n nn_params['metric_tensor'] = \\\r\n self.metric_tensor_from_nn_params(nn_params, X, y)\r\n\r\n density = self.calculate_density(X_min, X_maj, nn_params)\r\n\r\n # fitting nearest neighbors model to minority samples to run\r\n # SMOTE-like sampling\r\n n_neighbors = min([len(X_min), self.n_neighbors+1])\r\n nnmt = NearestNeighborsWithMetricTensor(n_neighbors=n_neighbors,\r\n n_jobs=self.n_jobs,\r\n **nn_params)\r\n nnmt.fit(X_min)\r\n ind = nnmt.kneighbors(X_min, return_distance=False)\r\n\r\n samples = self.sample_simplex(X=X_min,\r\n indices=ind,\r\n n_to_sample=n_to_sample,\r\n base_weights=density)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = self.random_state.choice(np.arange(len(density)), p=density)\r\n # random_neighbor_idx = self.random_state.choice(ind[idx][1:])\r\n # X_a = X_min[idx]\r\n # X_b = X_min[random_neighbor_idx]\r\n # samples.append(self.sample_between_points(X_a, X_b))\r\n\r\n return (np.vstack([X, samples]),\r\n np.hstack([y, np.repeat(self.min_label, len(samples))]))", "def _graph_fn_sample_stochastic(distribution):\n return distribution.sample()", "def sampling_algorithm(self, X, y):\r\n\r\n n_to_sample = self.det_n_to_sample(self.proportion)\r\n\r\n if n_to_sample == 0:\r\n return self.return_copies(X, y, \"Sampling is not needed.\")\r\n\r\n # standardization is needed to make the range of the propensity scores\r\n # similar to that of the features\r\n mms = MinMaxScaler()\r\n X_trans = mms.fit_transform(X) # pylint: disable=invalid-name\r\n\r\n X_min = X_trans[y == self.min_label]\r\n\r\n # adding propensity scores as a new feature\r\n X_new = np.column_stack([X_trans, self.propensity_scores(X_trans, y)])\r\n X_min_new = X_new[y == self.min_label] # pylint: disable=invalid-name\r\n\r\n # finding nearest neighbors of minority samples\r\n n_neighbors = min([len(X_new), self.n_neighbors+1])\r\n\r\n ind = self.neighborhood_structure(X_new, y, n_neighbors, X_min_new)\r\n\r\n # noise removal\r\n t_hat = np.sum(y[ind[:, 1:]] == self.min_label, axis=1)\r\n to_remove = np.where(t_hat < self.t * n_neighbors)[0]\r\n\r\n if len(to_remove) >= len(X_min) - 1:\r\n return self.return_copies(X, y,\r\n \"most minority samples indentified as noise\")\r\n\r\n n_to_sample = n_to_sample + to_remove.shape[0]\r\n\r\n samples = self.generate_samples(X_min=X_min,\r\n to_remove=to_remove,\r\n X_trans=X_trans,\r\n y=y,\r\n ind=ind,\r\n n_to_sample=n_to_sample)\r\n\r\n X_min = np.delete(X_min, to_remove, axis=0)\r\n\r\n # do the sampling\r\n #samples = []\r\n #while len(samples) < n_to_sample:\r\n # idx = 
self.random_state.randint(len(X_min))\r\n # # finding the number of minority neighbors\r\n # t_hat = np.sum(y[ind[idx][1:]] == self.min_label)\r\n # if t_hat < self.t*n_neighbors:\r\n # # removing the minority point if the number of minority\r\n # # neighbors is less then the threshold\r\n # # to_remove indexes X_min\r\n # if idx not in to_remove:\r\n # to_remove.append(idx)\r\n # # compensating the removal of the minority point\r\n # n_to_sample = n_to_sample + 1\r\n #\r\n # if len(to_remove) == len(X_min):\r\n # _logger.warning(self.__class__.__name__ + \": \" +\r\n # \"all minority samples identified as noise\")\r\n # return X.copy(), y.copy()\r\n # else:\r\n # # otherwise do the sampling\r\n # X_b = X_trans[self.random_state.choice(ind[idx][1:])]\r\n # samples.append(self.sample_between_points(X_min[idx], X_b))\r\n\r\n return (mms.inverse_transform(np.vstack([X_trans[y == self.maj_label],\r\n X_min,\r\n samples])),\r\n np.hstack([np.repeat(self.maj_label,\r\n np.sum(y == self.maj_label)),\r\n np.repeat(self.min_label, len(X_min)),\r\n np.repeat(self.min_label, len(samples))]))", "def sample(self, seg_logit, seg_label):", "def stratify(self):\n groups = self.group()\n folds = []\n for group in groups:\n folds.append(self.sample(group))\n return [sum([folds[j][i] for j in range(len(folds))], []) \\\n for i in range(self.nfolds)]", "def diverse_sampler(self):\n\n # Sample number of nonzero idxs\n num_idxs = np.random.randint(low=1, high=self.K-1)\n\n # Sample actual idxs in state that are nonzero\n idxs = []\n # can have nonzero terms up to state[K-2]\n all_states = [i for i in range(self.K - 1)]\n for i in range(num_idxs):\n rand_id = np.random.randint(low=0, high=len(all_states))\n idxs.append(all_states.pop(rand_id))\n\n # sort idxs from largest to smallest to allocate\n # potential correctly\n idxs.sort()\n idxs.reverse()\n\n # allocate potential\n xs = self.simplex_sampler(num_idxs)\n\n # fill with appropriate number of pieces adding on any remaindr\n remainder = 0\n state = np.zeros(self.K+1, dtype=int)\n for i in range(num_idxs):\n idx = idxs[i]\n pot_idx = xs[i] + remainder\n num_pieces = int(pot_idx/self.weights[idx])\n state[idx] += num_pieces\n # update remainder\n remainder = pot_idx - num_pieces*self.weights[idx]\n\n return state", "def get_selected_subsamples(sample_func, clusters, trajs_dict, visit_profile, Nsample, false_rate=80):\n print('The desired false rate is %f'%(false_rate/Nsample))\n crter = 0\n done_first_round = False\n nclusters = len(clusters)\n \n print('Start the first selection until the number of potential profiles is more than Nsample')\n while crter < Nsample:\n i = np.random.choice(range(nclusters))\n if len(clusters[i]) > Nsample*5 or len(clusters[i]) < Nsample: continue\n # try sampling\n selected_spl, plist_spl = sample_func(trajs_dict, plist=None, usrs=clusters[i])\n # do the deterministic attack\n a2 = get_trick_mat(clusters[i] , selected_spl, visit_profile)\n nonzero_list = [np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(a2)] \n crter = np.sum(nonzero_list)\n \n print('Finish the first round selection, %d candidates are selected from cluster %d'%(crter, i))\n round_one_usrs = np.array(clusters[i])[nonzero_list]\n \n crter2 = 0; len_rone = len(round_one_usrs)\n print('Start the second selection until false rate %f'%(false_rate/Nsample))\n while crter2 < false_rate:\n final_selected_usrs = round_one_usrs[np.random.choice(len_rone, Nsample, replace=False)]\n tmp = get_trick_mat(final_selected_usrs, selected_spl, visit_profile)\n crter2 = 
np.sum([np.sum(np.count_nonzero(ai))>=1 for ai in make_sym_mat(tmp)])\n print('Final false rate for deterministic attack%f'%(crter2/Nsample))\n return selected_spl, final_selected_usrs, plist_spl", "def eval_sampling_point(self, sampling_point):\n return Solution(self, sampling_point)", "def straight_prune_subsample(neuron, number_of_nodes):\n if(neuron.n_node > 200):\n neuron, distance = straight_subsample_with_fixed_number(neuron, 200)\n sp_neuron, state = prune(neuron=neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n while(~state):\n distance += 1\n sp_neuron = straigh_subsample(neuron, distance)\n sp_neuron, state = prune(neuron=sp_neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n return sp_neuron", "def sample(self):\n return self._root.sample()", "def sow_samples(self, n, combos=None, constants=None, verbosity=1):\n fn_args, cases = self.farmer.gen_cases_fnargs(n, combos)\n self.sow_cases(\n fn_args, cases, constants=constants, verbosity=verbosity\n )", "def Sens_t_sample(poly, dist, samples, rule=\"random\"):\n generator = Saltelli(dist, samples, poly, rule=rule)\n\n dim = len(dist)\n zeros = [0] * dim\n variance = numpy.var(generator[zeros], -1)\n return numpy.array(\n [\n 1\n - numpy.mean(\n (generator[~index] - generator[zeros]) ** 2,\n -1,\n )\n / (2 * numpy.where(variance, variance, 1))\n for index in numpy.eye(dim, dtype=bool)\n ]\n )", "def straight_subsample_with_fixed_number(neuorn, num):\n l = sum(neuorn.distance_from_parent)\n branch_number = len(np.where(neuorn.branch_order[neuorn.n_soma:] == 2))\n distance = l/(num - branch_number)\n neuron = straigh_subsample(distance)\n return distance, neuron", "def GetPhyloSebsequentScore(tree, phenotree, phen_ind, skip=0, with_rand=False, dist_only=False, dist=None):\n population = (len(tree) * 2) - 1\n subscore = np.zeros(tree.genotype.shape[1] - skip)\n node_to_arr = lambda n: np.array(n.genotype.todense().astype(np.int))[0]\n for i, (cur_node, phen_node) in tqdm.tqdm(enumerate(zip(tree.traverse(), phenotree.traverse())),\n total=population, desc='Iterating tree'):\n if not cur_node.is_root():\n if not cur_node.is_leaf() and with_rand and cur_node.random[phen_ind]: continue\n node = node_to_arr(cur_node)\n prev_node = node_to_arr(cur_node.up)\n\n gene_state = node[skip:]\n prev_gene_state = prev_node[skip:]\n\n phen_state = phen_node.genotype[0, phen_ind]\n prev_phen_state = phen_node.up.genotype[0, phen_ind]\n\n subscore += np.abs((1.333 * prev_phen_state * prev_gene_state) +\n (.666 * prev_phen_state * gene_state) +\n (.666 * phen_state * prev_gene_state) +\n (1.333 * phen_state * gene_state) -\n phen_state -\n prev_phen_state -\n gene_state -\n prev_gene_state +\n 1)\n\n if dist_only:\n hist_ = np.histogram(subscore, bins=int(1e7))\n fit_dist = rv_histogram(hist_)\n fit_dist.bin = np.diff(hist_[1]).max()\n return fit_dist\n if dist is not None:\n return dist.sf(subscore)\n else:\n return subscore", "def reset(self):\n self.st = segment_tree.SegmentTreeSampler(self.n, np.ones(self.n) * self.reg, self.random_state)", "def test_generated_sample_distribution(\n jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)\n):\n\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n \"{} sampling method taken from upstream, no need to\"\n \"test generated samples.\".format(jax_dist.__name__)\n )\n\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, 
sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05", "def sample(self, k):\n result = \"\"\n current = self.gen_beginning()\n for i in range(0, k):\n result += current[0] + \" \"\n t = tuple(current)\n if t in self.dict:\n c_sum = self.dict[t][self.sum_index]\n rand = random.randint(0, c_sum)\n new_term = \"\"\n for term, count in self.dict.iteritems():\n if rand > count:\n rand -= count\n else:\n new_term = term\n break\n current.remove(current[0])\n current.append(new_term)\n else:\n current = self.gen_beginning()\n return result", "def straigh_subsample(neuorn, distance):\n\n # Selecting the main points: branching nodes and end nodes\n selected_index = get_main_points()\n\n # for each segment between two consecuative main points, a few nodes from the segment will be added to the selected node.\n # These new nodes will be selected base on the fact that neural distance of two consecuative nodes is around 'distance'.\n # Specifically, it starts from the far main point, and goes on the segment toward the near main point. Then the first node which is\n # going to add has the property that it is the farest node from begining on the segment such that its distance from begining is\n # less than 'distance'. The next nodes will be selected similarly.\n\n for i in selected_index:\n upList = np.array([i], dtype = int)\n index = neuorn.parent_index[i]\n dist = neuorn.distance_from_parent[i]\n while(~np.any(selected_index == index)):\n upList = np.append(upList,index)\n index = neuorn.parent_index[index]\n dist = np.append(dist, sum(neuorn.distance_from_parent[upList]))\n dist = np.append(0, dist)\n (I,) = np.where(np.diff(np.floor(dist/distance))>0)\n I = upList[I]\n selected_index = np.append(selected_index, I)\n selected_index = np.unique(selected_index)\n neuron = neuron_with_selected_nodes(selected_index)\n return neuron", "def test_sampling1 ():\n cpus = list(range(C.N_PARALLEL))\n affinity = dict(cuda_idx=C.CUDA_IDX, workers_cpus=cpus)\n agent_ = findOptimalAgent(reward=None)\n agent = CategoricalPgAgent(AcrobotNet, \n initial_model_state_dict=agent_.state_dict())\n s0 = np.array([1, 0, 1/np.sqrt(2), 1/np.sqrt(2), 4, 2], dtype=np.float)\n sampler = SerialSampler(\n EnvCls=rlpyt_make,\n env_kwargs=dict(id=C.ENV, reward=None, internalStateFn=C.INTERNAL_STATE_FN, s0=s0),\n batch_T=500,\n batch_B=16,\n max_decorrelation_steps=0,\n )\n sampler.initialize(\n agent=agent,\n affinity=affinity,\n seed=0\n )\n _, traj_info = sampler.obtain_samples(0)\n print(np.mean([t['DiscountedReturn'] for t in traj_info]))", "def balanced_sampling(dat: pd.DataFrame, logger=None):\n if logger == None:\n logging.basicConfig(\n level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n logger = logging.getLogger(__name__)\n \n \n # upsampling\n logger.info('Start balanced sampling')\n subsample = []\n num_of_each_class = dat.iloc[:, -1].value_counts().to_numpy()\n if num_of_each_class.std()*1.0 / num_of_each_class.mean() < 0.1:\n logger.info('The given data is balance.')\n # the dataset is balanced\n return dat\n logger.info('Given dataset is unbalance')\n logger.info('Sampling data from each class to generate a new dataset')\n n_smp = num_of_each_class.max()\n for label in dat.iloc[:, -1].value_counts().index:\n samples = dat[dat.iloc[:, -1] == label]\n num_samples = len(samples)\n index_range = range(num_samples)\n # take all from the set\n indexes = list(np.random.choice(index_range, size=num_samples, replace=False))\n indexes2 = list(np.random.choice(\n index_range, 
size=n_smp-num_samples, replace=True)) # add random items\n indexes.extend(indexes2)\n subsample.append(samples.iloc[indexes, :])\n logger.info('End with sampling')\n out = pd.concat(subsample)\n out = out.sample(frac=1).reset_index(drop=True) # shuffle and re index\n return out", "def sample(self, rng, query_value=None):\n nodes = jnp.array(self.nodes)\n query_value = (\n jax.random.uniform(rng) if query_value is None else query_value)\n query_value *= self._total_priority()\n\n _, index, _ = jax.lax.fori_loop(0, self.depth, step,\n (query_value, 0, nodes))\n\n return np.minimum(index - self.low_idx, self.highest_set)", "def sample(self, root, tree, sample_num, for_d):\n\n # all_score = self.sess.run(self.generator.all_score)\n # all_score is a matrix with shape [n_node, n_node]\n all_score = self.generator.all_score\n samples = []\n paths = []\n n = 0\n\n while len(samples) < sample_num:\n current_node = root\n previous_node = -1\n paths.append([])\n is_root = True\n paths[n].append(current_node)\n while True:\n node_neighbor = tree[current_node][1:] if is_root else tree[current_node]\n # print(\"////\", tree[current_node])\n is_root = False\n if len(node_neighbor) == 0: # the tree only has a root\n return None, None\n if for_d: # skip 1-hop nodes (positive samples)\n if node_neighbor == [root]:\n # in current version, None is returned for simplicity\n return None, None\n if root in node_neighbor:\n node_neighbor.remove(root)\n\n # we retrieve embeddings corresponding to current node's neighbors\n # the multiply of g_v with shape (1, 50) and g_vi with shape(1, 50) is a scala\n # to calculate the multiply of g_v and g_vi: we calculate the \"multiplication\" (inner product) between embedding_matrix with shape(n_node, 50) and its transpose\n # then saved the result in self.score with shape (n_node, n_node) in dis_torch.py\n # all_score has the shape = (5254, 5254), each row is a list of scala, each scala is the \"multiplication\" (inner product) between a particular node to an other node in the graph\n # due to for each current_node, we have a list of its neighbors, saved in [node_neighbor]\n # we can retrieve a list of scalas that equal to the \"multiplications\" (inner product) between g_v(current node) to its neighbor g_vi\n # to do that, we have:\n relevance_probability = all_score[current_node][node_neighbor]\n\n # convert tensor to numpy array\n relevance_probability = relevance_probability.cpu().detach().numpy()\n\n # finally, applying softmax function, we get the relevance probability of current_node and its neighbors, as formed in the paper\n relevance_probability = utils.softmax(relevance_probability)\n \n # pick a random node from its neighbors based on relevance_probability\n next_node = np.random.choice(node_neighbor, size=1, p=relevance_probability)[0] # select next node\n # print(\"???\", next_node)\n paths[n].append(next_node)\n if next_node == previous_node: # terminating condition\n samples.append(current_node)\n break\n previous_node = current_node\n current_node = next_node\n n = n + 1 # n equal to sample_num\n return samples, paths # for each sample, we get one path from root to that sample", "def test_simpSample(self):\n\n #uniform dist\n ulim = [0,1]\n ufun = lambda x: 1.0/np.diff(ulim)\n\n n = int(1e5)\n usample = statsFun.simpSample(ufun,n,ulim[0],ulim[1])\n self.assertGreaterEqual(usample.min(), ulim[0])\n self.assertLessEqual(usample.max(), ulim[1])\n\n nlim = [-10,10]\n nfun = lambda x: np.exp(-x**2./2.0)/np.sqrt(2.0*np.pi)\n nsample = 
statsFun.simpSample(nfun,n,nlim[0],nlim[1])\n self.assertGreaterEqual(nsample.min(), nlim[0])\n self.assertLessEqual(nsample.min(), nlim[1])\n\n self.assertGreaterEqual(scipy.stats.kstest(usample,'uniform')[1],0.01,'Uniform sample does not look uniform.')\n self.assertGreaterEqual(scipy.stats.kstest(nsample,'norm')[1],0.01,'Normal sample does not look normal.')\n self.assertLessEqual(scipy.stats.kstest(nsample,'uniform')[1],0.01,'Normal sample looks too uniform.')\n self.assertLessEqual(scipy.stats.kstest(usample,'norm')[1],0.01,'Uniform sample looks too normal.')", "def get_subsample_of_nodes(g, sampl=1):\n return sample(g.nodes(), int(len(g.nodes())*sampl))", "def adapted_rand(seg, gt, all_stats=False):\n # just to prevent division by 0\n epsilon = 1e-6\n\n # segA is truth, segB is query\n segA = np.ravel(gt)\n segB = np.ravel(seg)\n n = segA.size\n\n n_labels_A = np.amax(segA) + 1\n n_labels_B = np.amax(segB) + 1\n\n ones_data = np.ones(n)\n\n p_ij = sparse.csr_matrix((ones_data, (segA[:], segB[:])), shape=(n_labels_A, n_labels_B))\n\n a = p_ij[1:n_labels_A, :]\n b = p_ij[1:n_labels_A, 1:n_labels_B]\n c = p_ij[1:n_labels_A, 0].todense()\n d = b.multiply(b)\n\n a_i = np.array(a.sum(1))\n b_i = np.array(b.sum(0))\n\n sumA = np.sum(a_i * a_i)\n sumB = np.sum(b_i * b_i) + (np.sum(c) / n)\n sumAB = np.sum(d) + (np.sum(c) / n)\n\n precision = sumAB / max(sumB, epsilon)\n recall = sumAB / max(sumA, epsilon)\n\n fScore = 2.0 * precision * recall / max(precision + recall, epsilon)\n are = 1.0 - fScore\n\n if all_stats:\n return are, precision, recall\n else:\n return are", "def post(self, s):\n return np.random.choice(self.sample_list)", "def sample(self, s):\n rng = np.random.default_rng()\n return rng.choice(np.arange(self.n_actions), p=self.eval(s))", "def sampling(data,classes,others=None,portion=0.9,max_size_given=None,rng=np.random.RandomState(100)): \n u, indices = np.unique(classes,return_inverse=True)\n indices=np.asarray(indices)\n num_u=len(u)\n sample_sizes=[]\n \n # get sample size of each class\n for i in range(num_u):\n sample_size_this=np.sum(indices==i)\n sample_sizes.append(sample_size_this)\n sample_sizes=np.array(sample_sizes,dtype=int)\n sample_sizes=sample_sizes*portion\n sample_sizes=np.array(sample_sizes,dtype=int)\n # set a ceiling/limit\n if max_size_given is not None:\n sample_sizes[sample_sizes>max_size_given]=max_size_given \n\n indices_all=np.array([],dtype=indices.dtype)\n indices_range=np.array(range(len(indices)))\n\n # sampling\n for i in range(num_u):\n ind_this_num=indices_range[indices==i]\n ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=sample_sizes[i],replace=False)]\n indices_all=np.append(indices_all,ind_this_reduced)\n \n # reduce the data \n data=data[indices_all,:]\n classes=classes[indices_all]\n if np.any(others):\n others=others[indices_all]\n return data,classes,indices_all,others" ]
[ "0.5792323", "0.57242346", "0.5687577", "0.5676991", "0.56017643", "0.55336124", "0.54666936", "0.5430783", "0.53604496", "0.5358043", "0.530985", "0.5304589", "0.5277224", "0.5271483", "0.52695686", "0.5267166", "0.5261225", "0.52445275", "0.52442276", "0.5240669", "0.5205574", "0.5201676", "0.5198351", "0.51800835", "0.5164713", "0.51509655", "0.5133954", "0.512064", "0.510473", "0.5104595" ]
0.76193213
0
Prints a message only when the app is in debug mode
def print_debug(message): if current_app.debug: print(message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def debug():\n assert current_app.debug == False, \"Don't panic! You're here by request of debug()\"", "def checkDebug(message):\n if debug == True:\n print(message)", "def debug(msg):\n if not DEBUG_ON:\n return\n print(\"DEBUG:\" + str(msg))", "def print_debug(msg):\n if IS_DEBUG:\n print(msg)", "def debug() -> bool:", "def DebugMessage(message=\"\"):\n if global_debug:\n print(\"\\033[93m DEBUG: \" + message + \"\\033[0m\")", "def debug(msg):\n if(CONFIG['debug']):\n logIt(msg)", "def check_and_print_debug_message(self, msg):\n if self._params.debug:\n print(\"Info: {}\".format(msg))", "def printdebug(self, msg):\n if self.debug > 0:\n print(msg)", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debug(s):\n if app.config['DEBUG']:\n print(s)", "def debugLog(message):\n if debugFlag != None:\n print \"#debug: \" + str(message)", "def output_debug(text):\n if conf.debug:\n output_message('[DEBUG] ' + text)", "def main(debug):\n click.echo('Debug mode is {{}}'.format(debug))", "def cli(debug):\n print(f\"Debug mode is {'on' if debug else 'off'}\")", "def debug(msg):\n if settings.DEBUG:\n print \"DEBUG: cli.%(msg)s\" % locals()", "def debug(cls, msg, debug=True):\n if debug:\n Console.msg(msg)", "def debug(self, message):\r\n if self._debug:\r\n print('[Debug] %s' % message)", "def debugPrint(text: str):\r\n if DEBUG:\r\n print(text)", "def debug():", "def __debugInfo(self, msg):\n\t\tif self.verbosity:\n\t\t\tprint(stylize(\"[*] DEBUG: {}\".format(msg), colored.fg(\"wheat_1\")))", "def debug(self, msg=\"\"):\n if self.verbose:\n print(\"Debug: \" + msg)", "def _debug_print(message):\n\n if _debug == True:\n print(message)", "def debug(self, msg):\n if self._debug:\n print \"%s\" % (msg)", "def debug_print(text):\r\n if settings.debug:\r\n print (text)", "def debug(mode=True):\r\n global DEBUG\r\n DEBUG = bool(mode)", "def is_debug ():\n\n return __debug__ and DEBUG", "def d_print(msg):\n if (DEBUG == 1):\n print(msg)", "def main(config, debug):\n config.debug = debug\n if config.debug:\n click.echo('Debug info...')", "def debug(msg):" ]
[ "0.8137028", "0.80153483", "0.77376115", "0.7687844", "0.76748055", "0.767379", "0.7656623", "0.7569436", "0.7528649", "0.7510831", "0.7510831", "0.74950415", "0.7483869", "0.74653614", "0.74565285", "0.74526405", "0.7379881", "0.736538", "0.734727", "0.73451245", "0.734026", "0.7325392", "0.7305496", "0.7291941", "0.72741944", "0.72255635", "0.71924275", "0.7152718", "0.7141016", "0.7116224" ]
0.8150175
0
This function will optionally print a header guard for `cl_khr_fp64` if a 64-bit type is used as the source or destination, and return a bool that indicates whether this guard will need to be closed after the calling function has finished printing functions that use the 64-bit source/destination type.
def conditional_guard(src, dst): int64_count = 0 float64_count = 0 float16_count = 0 if src in int64_types or dst in int64_types: int64_count = 1 if src in float64_types or dst in float64_types: float64_count = 1 if src in float16_types or dst in float16_types: float16_count = 1 if float16_count > 0: print("#ifdef cl_khr_fp16") if float64_count > 0: #In embedded profile, if cl_khr_fp64 is supported cles_khr_int64 has to be print("#ifdef cl_khr_fp64") return 1 + float16_count elif int64_count > 0: print("#if defined cles_khr_int64 || !defined(__EMBEDDED_PROFILE__)") return 1 + float16_count return float16_count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_64_windows():\n return struct.calcsize('P') * 8 == 64", "def have_binary128():\n try:\n ti = type_info(np.longdouble)\n except FloatingError:\n return False\n return (ti['nmant'], ti['maxexp']) == (112, 16384)", "def is_H(self):\n return True", "def is_H(self):\n return True", "def is_64bit(self):\n pass", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 
'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 
'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 
'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 
'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 
'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 
'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return", "def is64bit(self):\n return platform.machine().endswith('64')", "def osarch_is_64_bit():\n return osarch_match(\"64-bit\")", "def longdouble_lte_float64():\n return np.longdouble(2**53) == np.longdouble(2**53) + 1", "def is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...", "def is_64bit(self):\n return self.machine == 'x86_64'", "def isFloor(self, x, y):\n\t\treturn self.getValue(x, y) == self.floor_char", "def valid(self, nt_header):\n try:\n return (self.OriginalFirstThunk != 0 and\n self.OriginalFirstThunk < nt_header.OptionalHeader.SizeOfImage and\n self.FirstThunk != 0 and\n self.FirstThunk < nt_header.OptionalHeader.SizeOfImage and\n self.Name < nt_header.OptionalHeader.SizeOfImage)\n except obj.InvalidOffsetError:\n return False", "def use_long_headers(header_row, long_to_short_dict):\n col_matches = 0\n for value in header_row:\n if FieldCleaner.clean_string(value) in long_to_short_dict:\n col_matches += 1\n # if most of column headers are in the long format,\n # we'll treat the file as having long headers\n return col_matches > .5 * len(header_row)", "def AssertSomeThumbprint(self, *fp):\n if not fp:\n raise ValueError(\"must specify some thumbprints\")\n cmd = (' ||\\n '.join([('getprop(\"ro.build.thumbprint\") == \"%s\"') % i\n for i in fp]) +\n ' ||\\n abort(\"E%d: Package expects build thumbprint of %s; this '\n 'device has \" + getprop(\"ro.build.thumbprint\") + \".\");') % (\n common.ErrorCode.THUMBPRINT_MISMATCH, \" or \".join(fp))\n self.script.append(cmd)", "def has_image_data (ff_hdus_list, which_hdu=0):\n if (which_hdu == 0): # heuristic for Primary HDU\n if (ff_hdus_list[which_hdu].header.get('NAXIS') == 2):\n return True\n else:\n return False\n else: # it's an extension and so marked\n return ( (len(ff_hdus_list) > which_hdu) and\n (ff_hdus_list[which_hdu].header.get('XTENSION') == 'IMAGE') )", "def AssertFingerprintOrThumbprint(self, fp, tp):\n cmd = ('getprop(\"ro.build.fingerprint\") == \"{fp}\" ||\\n'\n ' getprop(\"ro.build.thumbprint\") == \"{tp}\" ||\\n'\n ' abort(\"Package expects build fingerprint of {fp} or '\n 'thumbprint of {tp}; this device has a fingerprint of \" '\n '+ getprop(\"ro.build.fingerprint\") + \" and a thumbprint of \" '\n '+ getprop(\"ro.build.thumbprint\") + \".\");').format(fp=fp, tp=tp)\n self.script.append(cmd)", "def disk_is_valid(dhandle):\n if is_64bits:\n return dhandle.value != c_uint64(0).value\n else:\n return dhandle.value != c_uint32(0).value", "def is_image_size_64(image):\n return image['height'] == 64 and image['width'] == 64", "def has_supported_header_hormat(cls, csv_reader):\n return csv_reader.fieldnames == cls.INGFormatHeader", "def _check_header_data(self, scan_data, min_rt=None, max_rt=None, ms_level=None, polarity=None):\n \n if min_rt is not None and scan_data['retention_time'] < min_rt:\n return False\n \n if max_rt is not None and scan_data['retention_time'] > max_rt:\n return False\n \n if ms_level is not None and 
scan_data['ms_level'] != ms_level:\n return False\n \n if polarity is not None and scan_data['polarity'] != polarity:\n return False\n \n return True", "def is_64bit():\n is64bit = sys.maxsize > 2 ** 32\n if sys.platform == \"cli\":\n is64bit = sys.executable.endswith(\"ipy64.exe\")\n return is64bit", "def is_rfft(obj):\n if not (hasattr(obj, 'nx') and hasattr(obj, 'dx') and hasattr(obj, 'ny')\n and hasattr(obj, 'dy') and hasattr(obj, 'fft')):\n return False\n\n return obj.fft.shape == (obj.nx, obj.ny / 2 + 1)", "def uniform_shift_check(optree):\n if isinstance(optree, (BitLogicLeftShift, BitLogicRightShift, BitArithmeticRightShift)):\n return uniform_vector_constant_check(optree.get_input(1)) \\\n or not optree.get_input(1).get_precision().is_vector_format()\n return False", "def is_header(fields):\n if len(fields) < 11:\n return None\n # Test a column which should usually be a number in data lines and never a number in header lines.\n try:\n float(fields[8])\n return False\n except ValueError:\n pass\n first_field = fields[0]\n # An explicitly commented line is a header.\n if first_field.startswith('#'):\n return True\n # The first field in a header is usually these two (and never these in data lines).\n if first_field.lower() == 'sample' or first_field.lower() == 'family':\n return True\n # Fallback 1: There should never be a number in a header line. If we find one, it's a data line.\n for field in fields:\n try:\n float(field)\n return False\n except ValueError:\n pass\n # Fallback 2: Just test whether any of the known labels is in the line.\n for label in LABELS:\n if label in fields:\n return True\n for label in LABELS:\n if label.lower() in fields:\n return True", "def valid(self, nt_header):\n try:\n return (self.AddressOfFunctions < nt_header.OptionalHeader.SizeOfImage and\n self.AddressOfNameOrdinals < nt_header.OptionalHeader.SizeOfImage and\n self.AddressOfNames < nt_header.OptionalHeader.SizeOfImage and\n self.NumberOfFunctions < 0x7FFF and\n self.NumberOfNames < 0x7FFF)\n except obj.InvalidOffsetError:\n return False", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def from_win_64_hex(self):\n try:\n base10_microseconds = int(wh, 16) / 10\n dt_obj = self.epoch_1601 + timedelta(microseconds=base10_microseconds)\n self.in_windows_hex_64 = dt_obj.strftime('%Y-%m-%d %H:%M:%S.%f')\n except Exception as e:\n if not args.log:\n pass\n else:\n logging.error(str(type(e)) + \",\" + str(e))\n self.in_windows_hex_64 = False\n return self.in_windows_hex_64", "def fp_gt(x: float, y: float) -> bool:\n return not fp_eq(x, y) and x > y", "def isThumb(self):\r\n output = False\r\n ea = self.func_ea\r\n while ea < self.func_ea + self.getSize():\r\n size = idc.get_item_size(ea)\r\n if size == 2 and idc.isCode(idc.GetFlags(ea)):\r\n output = True\r\n break\r\n ea = ea + size\r\n return output" ]
[ "0.4971328", "0.46334147", "0.46194386", "0.46194386", "0.4604871", "0.4570718", "0.45214003", "0.45135522", "0.44693208", "0.44124466", "0.44066575", "0.44048572", "0.43934348", "0.43365443", "0.42555895", "0.4250501", "0.42504188", "0.42486706", "0.42251563", "0.4212047", "0.42047936", "0.41862556", "0.4112214", "0.41018462", "0.41007584", "0.40941316", "0.40904295", "0.40823355", "0.40790966", "0.40730295" ]
0.59550357
0
This helper function returns the correct clc core conversion function name for a given source and destination type, with optional size, mode and saturation arguments.
def clc_core_fn_name(dst, size='', mode='', sat=''): return "__clc_convert_{DST}{N}{SAT}{MODE}".format(DST=dst, N=size, SAT=sat, MODE=mode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def src_get_name(converter_type):\n return ffi.string(_lib.src_get_name(converter_type)).decode()", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def function_name_to_string(func):\n if func == statistical_parity_difference:\n return \"Statistical Parity Difference\"\n if func == theil_index:\n return \"Theil Index\"\n if func == equal_opportunity_difference:\n return \"Equal Opportunity Difference\"\n if func == disparate_impact:\n return \"Disparate Impact\"\n if func == average_odds_difference:\n return \"Average Odds Difference\"\n if func == auc:\n return \"AUC\"\n if func == binary_accuracy:\n return \"Binary Accuracy\"", "def cython_functionname(self, t, cycyt=None):\n if cycyt is None:\n t = self.canon(t)\n if isinstance(t, basestring):\n return t, self.cython_functionnames[t]\n elif t[0] in self.base_types:\n return t, self.cython_functionnames[t[0]]\n return self.cython_functionname(t, self.cython_functionnames[t[0]])\n d = {}\n for key, x in zip(self.template_types[t[0]], t[1:-1]):\n if isinstance(x, basestring):\n val = self.cython_functionnames[x] if x in self.cython_functionnames \\\n else x\n elif isinstance(x, Number):\n val = str(x).replace('-', 'Neg').replace('+', 'Pos')\\\n .replace('.', 'point')\n elif x[0] in self.base_types:\n val = self.cython_functionnames[x[0]]\n else:\n _, val = self.cython_functionname(x, self.cython_functionnames[x[0]])\n d[key] = val\n return t, cycyt.format(**d)", "def get_conv(name):\n trans_funs = {\n 'mbconv_transform': MBConv,\n 'mbtalkconv_transform': MBTalkConv,\n }\n assert name in trans_funs.keys(), \\\n 'Transformation function \\'{}\\' not supported'.format(name)\n return trans_funs[name]", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def cfunc_type(self):\n tif = ida_typeinf.tinfo_t()\n result = self.get_func_type(tif)\n if not result:\n return\n return tif", "def get_ctype_name(*args):\n return _ida_hexrays.get_ctype_name(*args)", "def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname", "def cp_name(cp):\n return '%s%04X' % ('u' if cp > 0xffff else 'uni', cp)", "def get_class_decoder_function_name(name):\n name = 
get_class_functional_name(name)\n return 'decode_{0}'.format(name)", "def convert_C_instruction(self, instruction):\n comp, dest, jump = self.parse(instruction)\n\n return f\"111{convert_comp(comp)}{convert_dest(dest)}\" \\\n f\"{convert_jump(jump)}\"", "def as_function_name(self, string):\n return idaapi.COLSTR(string, idaapi.SCOLOR_CNAME)", "def getconversiontype(self, *args, **kwargs):\n return _coordsys.coordsys_getconversiontype(self, *args, **kwargs)", "def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"", "def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)", "def get_cie1931_color_matching_function():\n\n filename = os.path.dirname(os.path.abspath(__file__))\\\n + os.path.normpath(\"/data/cie_1931_color_matching_function.csv\")\n data = np.loadtxt(filename, delimiter=',', skiprows=1).T\n\n return np.uint16(data[0]), data[1:]", "def convertion_name(idn):\n inputn = 'f522_dh.trainingdata_in.lcv.'+idn+'.hdf5'\n outputn = 'jacobian_'+idn+'.npy'\n return(inputn, outputn)", "def _configure_image_name(self, ccd_operation_mode,\n include_star_mag=False):\n dic = ccd_operation_mode\n em_gain = '_G' + str(dic['em_gain'])\n em_mode = 'CONV'\n if dic['em_mode'] == 1:\n em_mode = 'EM'\n hss = '_HSS' + str(dic['hss'])\n preamp = '_PA' + str(dic['preamp'])\n binn = '_B' + str(dic['binn'])\n t_exp = '_TEXP' + str(dic['t_exp'])\n self.image_name = em_mode + hss + preamp + binn + t_exp + em_gain\n\n if include_star_mag:\n star_flux = '_S' + str(self.star_magnitude)\n self.image_name += star_flux", "def name_from_dist(dist_func):\n return str(dist_func).split()[0].split('.')[-1][:-4]", "def conversion(temp, mode):\n if mode == 1:\n c_to_f = (temp * 9/5) + 32\n return c_to_f\n else:\n f_to_c = (temp - 32) * 5 / 9\n return f_to_c", "def cast(*args):\n return _ITKCostFunctionsPython.itkCostFunction_cast(*args)", "def _get_converter(orig, target):\n try:\n func = getattr(utils, f'convert_{orig}_to_{target}')\n except AttributeError:\n func = partial(convert_unit, orig=orig, to=target)\n return func", "def get_func_type(header):\n func_type = header.functionType\n if func_type == SSE.SCALAR:\n return FunctionType.Scalar\n elif func_type == SSE.AGGREGATION:\n return FunctionType.Aggregation\n elif func_type == SSE.TENSOR:\n return FunctionType.Tensor", "def mode(v_o, Vcc):\n if v_o == Vcc:\n return \"positive saturation\"\n if v_o >= -Vcc and v_o <= Vcc:\n return \"linear region\"\n if v_o == -Vcc:\n return \"negative saturation\"", "def getColorTransferFunction(self):\n\t\treturn self.ctf", "def band_to_cname(input_band: str):\n bands_ref = ((\"red\", \"R\"), (\"green\", \"G\"), (\"blue\", \"B\"), ('nir', \"N\"))\n if isinstance(input_band, int) and 1 <= input_band <= 4:\n return bands_ref[input_band-1][0]\n elif isinstance(input_band, str) and len(input_band) == 1:\n for cname, short_name in bands_ref:\n if input_band == short_name:\n return cname\n elif isinstance(input_band, str) and len(input_band) > 1:\n for cname, short_name in bands_ref:\n if input_band == cname:\n return input_band\n else:\n raise ValueError(f\"Cannot convert given band to valid stac common name. 
Got: {input_band}\")", "def createSourceName(self, protocol, pfn):\n return pfn", "def rename(op_name):\n return type(op_name, (OpConverter,), {})", "def _type_name(cls, manual_name):\r\n cf_name = ''\r\n if manual_name:\r\n cf_name = manual_name.lower()\r\n else:\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n \r\n cf_name += ccase(cls.__name__)\r\n cf_name = cf_name.lower()\r\n if cls.__use_module_name__:\r\n cf_name = cls.__module__ + '_{}'.format(cf_name)\r\n return cf_name" ]
[ "0.58428967", "0.5695806", "0.56170404", "0.5537308", "0.54060704", "0.5312154", "0.52636075", "0.5209674", "0.5179819", "0.5158412", "0.5142017", "0.51086265", "0.50943804", "0.5006753", "0.5005477", "0.5003055", "0.49839967", "0.4969238", "0.49600613", "0.49500346", "0.48891428", "0.4872849", "0.48529443", "0.48464608", "0.48418045", "0.48335046", "0.48040202", "0.4775935", "0.47739902", "0.47731254" ]
0.80150145
0
Apply weight normalization module from all of the layers.
def apply_weight_norm(self): def _apply_weight_norm(m): if isinstance(m, torch.nn.Conv1d) or isinstance( m, torch.nn.ConvTranspose1d ): torch.nn.utils.weight_norm(m) logging.debug(f"Weight norm is applied to {m}.") self.apply(_apply_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not 
None:\n zeros_(m.bias)", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n 
p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)" ]
[ "0.7143955", "0.7143955", "0.7143955", "0.71335757", "0.7069364", "0.7069364", "0.70534694", "0.7049442", "0.70303166", "0.6807604", "0.6779128", "0.6764302", "0.67596924", "0.6736109", "0.6710482", "0.66960436", "0.6584008", "0.6554928", "0.6526808", "0.65179425", "0.65166473", "0.6464522", "0.64566517", "0.6440153", "0.6430107", "0.64231014", "0.6402119", "0.6390117", "0.6374696", "0.6365841" ]
0.73184675
1
Register stats for denormalization as buffer.
def register_stats(self, stats): assert stats.endswith(".h5") or stats.endswith(".npy") if stats.endswith(".h5"): mean = read_hdf5(stats, "mean").reshape(-1) scale = read_hdf5(stats, "scale").reshape(-1) else: mean = np.load(stats)[0].reshape(-1) scale = np.load(stats)[1].reshape(-1) self.register_buffer("mean", torch.from_numpy(mean).float()) self.register_buffer("scale", torch.from_numpy(scale).float()) logging.info("Successfully registered stats as buffer.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cast_buffers(self,\n dtype: Optional[torch.dtype] = None,\n memo: Optional[Set] = None) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, XlaFullyShardedDataParallel):\n # Allow any child FSDP instances to handle their own buffers.\n module._cast_buffers(dtype=dtype, memo=memo)\n elif module not in memo:\n memo.add(module)\n for name, buf in module.named_buffers(recurse=False):\n if buf is None:\n continue\n if torch.is_floating_point(buf):\n orig_dtype = buf.dtype\n cast_dtype = dtype or self.buffer_dtype\n if orig_dtype != cast_dtype:\n buf = buf.to(cast_dtype)\n buf._orig_dtype = orig_dtype\n if buf.device != self.xla_device:\n buf = buf.to(self.xla_device)\n setattr(module, name, buf)", "def add_memory(self, **kwarg):\n for name, obs in kwarg.items():\n self.buffers[name] = np.concatenate((self.buffers[name], obs), axis=0)\n # get recent memory\n return self", "def generate_statistics_in_memory(\n record_batch: pa.RecordBatch,\n options: stats_options.StatsOptions = stats_options.StatsOptions()\n) -> statistics_pb2.DatasetFeatureStatisticsList:\n stats_generators = cast(List[stats_generator.CombinerStatsGenerator],\n get_generators(options, in_memory=True))\n partial_stats = generate_partial_statistics_in_memory(record_batch, options,\n stats_generators)\n return extract_statistics_output(partial_stats, stats_generators)", "def create_buffers(self):", "def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][3] for b in buffer.memory] # images\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[1][1] for b in buffer.memory] # gears\n d8 = [b[1][2] for b in buffer.memory] # rpms\n d9 = [b[3] for b in buffer.memory] # terminated\n d10 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n self.data[9] += d9\n self.data[10] += d10\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n self.data.append(d9)\n self.data.append(d10)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n self.data[9] = self.data[9][to_trim:]\n self.data[10] = self.data[10][to_trim:]\n\n return self", "def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][2] for b in buffer.memory] # lidar\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes\n d5 
= [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[1][1] for b in buffer.memory] # progress\n d8 = [b[3] for b in buffer.memory] # terminated\n d9 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n self.data[9] += d9\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n self.data.append(d9)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n self.data[9] = self.data[9][to_trim:]\n\n return self", "def register_filters(self):\n n = 0\n # prepare for pytorch\n for k in self.phi_f.keys():\n if type(k) != str:\n # view(-1, 1).repeat(1, 2) because real numbers!\n self.phi_f[k] = torch.from_numpy(\n self.phi_f[k]).float().view(-1, 1)\n self.register_buffer('tensor' + str(n), self.phi_f[k])\n n += 1\n for psi_f in self.psi1_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n # view(-1, 1).repeat(1, 2) because real numbers!\n psi_f[sub_k] = torch.from_numpy(\n psi_f[sub_k]).float().view(-1, 1)\n self.register_buffer('tensor' + str(n), psi_f[sub_k])\n n += 1\n for psi_f in self.psi2_f:\n for sub_k in psi_f.keys():\n if type(sub_k) != str:\n # view(-1, 1).repeat(1, 2) because real numbers!\n psi_f[sub_k] = torch.from_numpy(\n psi_f[sub_k]).float().view(-1, 1)\n self.register_buffer('tensor' + str(n), psi_f[sub_k])\n n += 1", "def __init__(self, T, B, N):\n\n super().__init__()\n self.register_buffer('weight', torch.ones(T, B))\n self.register_buffer('target_output_prob', torch.zeros(T, B))\n self.register_buffer('target_output_entropy', torch.zeros(T, B))\n self.register_buffer('target_output_grad_logits', torch.zeros(T, B, N))\n self.register_buffer('target_output_grad_prob', torch.zeros(T, B, N))\n self.register_buffer('target_output_grad_entropy', torch.zeros(T, B, N))\n self.register_buffer('behaviour_output_prob', torch.zeros(T, B))\n self.register_buffer('importance_weights', torch.zeros(T, B))\n self.register_buffer('returns', torch.zeros(T, B))\n self.register_buffer('advantages', torch.zeros(T, B))\n self.register_buffer('pg_loss', torch.zeros(1))\n self.register_buffer('value_loss', torch.zeros(1))\n self.register_buffer('entropy_loss', torch.zeros(1))\n self.register_buffer('grad_value', torch.zeros(T + 1, B))\n self.register_buffer('grad_target_output', torch.zeros(T, B, N))", "def register_extra_weights(self):\n device = self.weight.device\n\n # Initialize and register the learned parameters 'a' (SCALE) and 'b' (OFFSET)\n # for calculating alpha as a function of context size.\n a = torch.Tensor([0.0]).to(device)\n b = torch.Tensor([0.0]).to(device)\n self.register_parameter(name='a', param=torch.nn.Parameter(a, requires_grad=True))\n self.register_parameter(name='b', param=torch.nn.Parameter(b, requires_grad=True))\n\n # Variables to store the context moments to use for normalizing the target.\n 
self.register_buffer(name='batch_mean',\n tensor=torch.zeros((1, self.num_features, 1, 1), requires_grad=True, device=device))\n self.register_buffer(name='batch_var',\n tensor=torch.ones((1, self.num_features, 1, 1), requires_grad=True, device=device))\n\n # Variable to save the context size.\n self.register_buffer(name='context_size',\n tensor=torch.zeros((1), requires_grad=False, device=device))", "def __init__(self, aggregation_depth, include_bytes=True):\n\n self._prev_stats = {}\n self._aggregation_depth = aggregation_depth\n self._include_bytes = include_bytes\n\n self.init_cur_stats()", "def set_batch_stats(self, x):\n\n if self.set_stats_f is None:\n self.set_stats_f = theano.function(\n inputs=[self.input],\n updates=[(self.bm, self.m), (self.bv, self.v)]\n )\n\n self.set_stats_f(x.astype(dtype))", "def _setup_stats(self) -> None:\n\n # Save statistics\n self.mass = np.array([0])\n self.mass_balance = np.array([0])\n self.mass_balance_trend = np.array([0])", "def _cast_buffers(\n self,\n device: Optional[torch.device] = None,\n dtype: Optional[Dict[str, torch.dtype]] = None,\n memo: Optional[Set] = None,\n recurse: bool = True,\n ) -> None:\n if memo is None:\n memo = set()\n for module in self.modules():\n if module is not self and isinstance(module, FullyShardedDataParallel) and recurse:\n # Allow any child FSDP instances to handle their own buffers.\n module._cast_buffers(device=device, dtype=dtype, memo=memo, recurse=recurse)\n elif module not in memo:\n memo.add(module)\n for name, buf in module.named_buffers(recurse=False):\n if buf is None:\n continue\n buf = buf.to(device=device or self.compute_device)\n if name not in self._buffer_name_to_orig_dtype:\n self._buffer_name_to_orig_dtype[name] = buf.dtype\n # If given, cast buffer to the given dtype. 
This is used to\n # suppport mixed precision for buffers\n # (given by self.mixed_precision.buffer_dtype) and also used\n # to restore the buffer dtype to the original precision for\n # state_dict() calls.\n # Note that non-floating point buffers are not casted.\n if torch.is_floating_point(buf):\n # We are restoring the original buffer type in\n # preparation for checkpoint.\n if dtype:\n buf = buf.to(dtype=dtype[name])\n # Note that we don't pass in self.mixed_precision.buffer_dtype\n # recursively into _cast_buffers, as we want to respect\n # mp config for child FSDP instances.\n elif self._mixed_precision_enabled_for_buffers():\n buf = buf.to(self.mixed_precision.buffer_dtype)\n\n setattr(module, name, buf)", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def Buffer(self) -> _n_0_t_7[_n_0_t_6]:", "def fillBuffer():\n buff[bufferCounter].next = dataIn", "def stats(self, stats):\n self._stats = stats", "def _writeBuffers(self):\r\n\r\n logger.info('Writing buffers to disk...')\r\n\r\n for ds in self.datasets.keys():\r\n\r\n if len(self.datasetBuffer[ds]) > 0:\r\n\r\n # write the buffers to disk\r\n self._writeBuffer(self.datasets[ds], ds, self.datasetBuffer[ds])\r\n\r\n # increment the indexes\r\n self.idxs[ds] += len(self.datasetBuffer[ds])\r\n\r\n # Reset the buffers and feature counts\r\n self.datasetBuffer[ds] = []\r\n\r\n self.totalFeatures = 0", "def _add_buffer(self, p_buffer_element:PyTorchIOElement):\r\n\r\n self._buffer.add_element(p_buffer_element)", "def _flush_stats(self, train=True):\n\t\tif train:\n\t\t\tself.train_accuracy.flush_buffer()\n\t\t\tself.train_epochs.flush_buffer()\n\t\t\tself.train_loss.flush_buffer()\n\t\t\tself.train_confusion_matrix.flush_buffer()\n\t\t\tself.learning_rate.flush_buffer()\n\t\telse:\n\t\t\tself.val_accuracy.flush_buffer()\n\t\t\tself.val_epochs.flush_buffer()\n\t\t\tself.val_loss.flush_buffer()\n\t\t\tself.val_confusion_matrix.flush_buffer()\n\n\t\tif self.plot:\n\t\t\tself._plot(train=train)", "def append_buffer(self, buffer):\n\n first_data_idx = self.data[0][-1] + 1 if self.__len__() > 0 else 0\n\n d0 = [first_data_idx + i for i, _ in enumerate(buffer.memory)] # indexes\n d1 = [b[0] for b in buffer.memory] # actions\n d2 = [b[1][0] for b in buffer.memory] # speeds\n d3 = [b[1][1] for b in buffer.memory] # lidar\n d4 = [b[3] or b[4] for b in buffer.memory] # eoes (terminated or truncated)\n d5 = [b[2] for b in buffer.memory] # rewards\n d6 = [b[5] for b in buffer.memory] # infos\n d7 = [b[3] for b in buffer.memory] # terminated\n d8 = [b[4] for b in buffer.memory] # truncated\n\n if self.__len__() > 0:\n self.data[0] += d0\n self.data[1] += d1\n self.data[2] += d2\n self.data[3] += d3\n self.data[4] += d4\n self.data[5] += d5\n self.data[6] += d6\n self.data[7] += d7\n self.data[8] += d8\n else:\n self.data.append(d0)\n self.data.append(d1)\n self.data.append(d2)\n self.data.append(d3)\n self.data.append(d4)\n self.data.append(d5)\n self.data.append(d6)\n self.data.append(d7)\n self.data.append(d8)\n\n to_trim = self.__len__() - self.memory_size\n if to_trim > 0:\n self.data[0] = self.data[0][to_trim:]\n self.data[1] = self.data[1][to_trim:]\n self.data[2] = self.data[2][to_trim:]\n self.data[3] = self.data[3][to_trim:]\n self.data[4] = self.data[4][to_trim:]\n self.data[5] = self.data[5][to_trim:]\n self.data[6] = self.data[6][to_trim:]\n self.data[7] = self.data[7][to_trim:]\n self.data[8] = self.data[8][to_trim:]\n\n return self", "def __init__ (self, pipe, histogram_buffer) 
:\n\t\tBasicDevice.__init__(self, pipe)\n\t\t# saving the buffer where the spectrum will be saved\n\t\tself.buffer = histogram_buffer", "def stats(self, stats):\n\n self._stats = stats", "def _buffer_all(self):\n self._buffer()", "def set_scribe_buffer(buffer_enabled):\r\n LogOptions._SCRIBE_BUFFER = buffer_enabled", "def flush(self) -> None:\n super().put(self.buffer)\n self.buffer = np.ndarray((0, 1), dtype=np.int16)", "def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])", "def register_statistic(self, func=None, shape=(-1,)):\n if func is not None:\n return self.register_statistic()(func)\n\n def decorator(func):\n\n name = func.__name__\n\n def _wrapper(cluster):\n out = func(cluster)\n self.store.memory_store.store(cluster, **{name: out})\n\n # Add the statistics.\n stats = self.store.items['statistics']\n stats.add(name, _wrapper, shape)\n # Register it in the global cluster store.\n self.store.register_field(name, 'statistics')\n # Compute it on all existing clusters.\n stats.store_all(name=name, mode='force')\n info(\"Registered statistic `{}`.\".format(name))\n\n return decorator", "def fill_buffer(self, num_domains: int):\n if self._randomizer is None:\n raise pyrado.TypeErr(msg=\"The randomizer must not be None to call fill_buffer()!\")\n if not isinstance(num_domains, int) or num_domains < 0:\n raise pyrado.ValueErr(given=num_domains, g_constraint=\"0 (int)\")\n\n self._randomizer.randomize(num_domains)\n self._buffer = self._randomizer.get_params(-1, fmt=\"list\", dtype=\"numpy\")\n self._ring_idx = 0" ]
[ "0.51819456", "0.5043784", "0.49926385", "0.4946909", "0.49447924", "0.49344134", "0.4884747", "0.4879836", "0.48627967", "0.48563662", "0.48147842", "0.47965068", "0.478542", "0.47853506", "0.47853506", "0.47853506", "0.47518125", "0.4747054", "0.47394067", "0.47262183", "0.47090402", "0.46921518", "0.4690415", "0.46888068", "0.46444273", "0.46443155", "0.46159935", "0.45845237", "0.45827752", "0.45766237" ]
0.67643166
0
Remove weight normalization module from all of the layers.
def remove_weight_norm(self): def _remove_weight_norm(m): try: logging.debug(f"Weight norm is removed from {m}.") torch.nn.utils.remove_weight_norm(m) except ValueError: # this module didn't have weight norm return self.apply(_remove_weight_norm)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def remove_norms(self):\n dev = next(self.parameters()).device\n for name, module in self.named_modules():\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_spectral_norm(module, name='weight_hh_l0_reverse')\n print(\"Removed spectral norm from {}\".format(name))\n except:\n pass\n try:\n nn.utils.remove_weight_norm(module)\n print(\"Removed wnorm from {}\".format(name))\n except:\n pass\n self.to(device=dev)", "def remove_norms(model: \"SqueezeWave\") -> \"SqueezeWave\":\n squeeze_wave = model\n for i, wn_layer in enumerate(squeeze_wave.wn_layers):\n squeeze_wave.wn_layers[i] = WN.remove_norms(wn_layer)\n return squeeze_wave", "def remove_activation_hooks(self):\n for h in self.hooks:\n h.remove()\n h = None\n for l in self.list_mods:\n if ('norm' in self.list_mods):\n (b, l) = l\n # Skip non-prunable layers\n if (hasattr(l, 'prune_values')):\n l.prune_values = None\n self.hooks = None", "def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()", "def remove_batchnorm(m: nn.Sequential) -> None:\n ms = list(m._modules.items())\n\n # transfer biases from BN to previous conv / Linear / Whatever\n for (name1, mod1), (name2, mod2) in zip(ms[:-1], ms[1:]):\n if isinstance(mod2, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n if mod1.bias is not None:\n continue\n\n if mod2.bias is not None:\n with torch.no_grad():\n mod1.bias = mod2.bias\n else:\n out_ch = len(mod2.running_mean)\n with torch.no_grad():\n mod1.bias = nn.Parameter(torch.zeros(out_ch))\n # remove bn\n for name, mod in ms:\n if isinstance(mod, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):\n delattr(m, name)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def RemoveBatchNormLayers(network, batch_norm_names):\n i = 0\n j = 0\n while i < len(network.layer) and j < len(batch_norm_names): \n if network.layer[i].name == batch_norm_names[j]:\n del network.layer[i]\n j += 1\n else:\n i += 1\n \n if j != len(batch_norm_names):\n print j, len(batch_norm_names)\n raise AssertionError('All batch norm layers were not removed')", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def remove_weight_norm_and_equal_lr(module: Module,\n name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'norm_equal_lr', name)", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, 
mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def _setWeights(self):\r\n for layer in self.layer_names:\r\n raw_w = getattr(self, f'{layer}_raw')\r\n self.module._parameters[layer] = F.dropout(raw_w, p=self.weight_pro, training=self.training)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def reset_weights(self):\n self.head.reset_weights()", "def reset_all_weights(model: nn.Module) -> None:\n\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n\n # Applies fn recursively to every submodule see: https://pytorch.org/docs/stable/generated/torch.nn.Module.html\n model.apply(fn=weight_reset)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def reset(self):\n for layer in self.network:\n layer.clean()", "def remove_weight_scale(module: Module, name: str = 'weight') -> Module:\n return remove_weight_lambda(module, 'scale', name)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not None:\n zeros_(m.bias)", "def unfreeeze_all_layers(self):\n # Unfreeeze\n logger.info('MODEL: Unfreeze all layers.')\n for i in range(len(self.model.layers)):\n self.model.layers[i].trainable = True\n \n # Compile model\n logger.info('MODEL: Compiling...')\n self.model.compile(optimizer = Adam(lr=1e-4),\n loss={'yolo_loss': lambda y_true, y_pred: y_pred})", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support 
the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def reset_model(model):\n\n\tfor layer in model.layers:\n\t\t# Note: these are custom depending on the layer type\n\t\tif '.MoleculeConv' in str(layer):\n\t\t\tW_inner = layer.init_inner((layer.inner_dim, layer.inner_dim))\n\t\t\tb_inner = np.zeros((1, layer.inner_dim))\n\t\t\t# Inner weights\n\t\t\tlayer.W_inner.set_value((T.tile(W_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, layer.inner_dim, layer.inner_dim)).eval()).astype(np.float32))\n\t\t\tlayer.b_inner.set_value((T.tile(b_inner, (layer.depth + 1, 1, 1)).eval() + \\\n\t\t\t\tinitializations.uniform((layer.depth + 1, 1, layer.inner_dim)).eval()).astype(np.float32))\n\n\t\t\t# Outer weights\n\t\t\tW_output = layer.init_output((layer.inner_dim, layer.units), scale = layer.scale_output)\n\t\t\tb_output = np.zeros((1, layer.units))\n\t\t\t# Initialize weights tensor\n\t\t\tlayer.W_output.set_value((T.tile(W_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlayer.b_output.set_value((T.tile(b_output, (layer.depth + 1, 1, 1)).eval()).astype(np.float32))\n\t\t\tlogging.info('graphFP layer reset')\n\n\t\telif '.Dense' in str(layer):\n\t\t\tlayer.W.set_value((layer.init(layer.W.shape.eval()).eval()).astype(np.float32))\n\t\t\tlayer.b.set_value(np.zeros(layer.b.shape.eval(), dtype=np.float32))\n\t\t\tlogging.info('dense layer reset')\n\n\t\telif '.Dropout' in str(layer):\n\t\t\tlogging.info('dropout unchanged')\n\t\telse:\n\t\t\traise ValueError('Unknown layer {}, cannot reset weights'.format(str(layer)))\n\tlogging.info('Reset model weights')\n\treturn model", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n 
if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_tracking(model, norm_type, norm_power=0.2):\n normlayer = select_norm(norm_type, norm_power=norm_power)\n # find total number of childern\n model_len = 0\n for n, child in enumerate(model.children()):\n model_len = n\n\n # for layer 0 which is outside\n conv_shape = model.conv1.out_channels\n w = model.bn1.weight\n b = model.bn1.bias\n model.bn1 = normlayer(conv_shape)\n model.bn1.weight = w\n model.bn1.bias = b\n\n # replace in all other layers\n for n, child in enumerate(model.children()):\n if 4 <= n <= model_len - 2:\n for i in range(len(child)):\n conv_shape = child[i].conv1.out_channels\n w = child[i].bn1.weight\n b = child[i].bn1.bias\n child[i].bn1 = normlayer(conv_shape)\n child[i].bn1.weight = w\n child[i].bn1.bias = b\n\n conv_shape = child[i].conv2.out_channels\n w = child[i].bn2.weight\n b = child[i].bn2.bias\n child[i].bn2 = normlayer(conv_shape)\n child[i].bn2.weight = w\n child[i].bn2.bias = b\n # if model have bn3 as well\n try:\n conv_shape = child[i].conv3.out_channels\n w = child[i].bn3.weight\n b = child[i].bn3.bias\n child[i].bn3 = normlayer(conv_shape)\n child[i].bn3.weight = w\n child[i].bn3.bias = b\n except:\n pass\n try:\n conv_shape = child[i].downsample[0].out_channels\n w = child[i].downsample[1].weight\n b = child[i].downsample[1].bias\n child[i].downsample[1] = normlayer(conv_shape)\n child[i].downsample[1].weight = w\n child[i].downsample[1].bias = b\n print(\"downsample\")\n except:\n print(\"no downsample\")\n\n return model", "def remove_spectral_norm(module, name='weight'):\n for k, hook in module._forward_pre_hooks.items():\n if isinstance(hook, SpectralNorm) and hook.name == name:\n hook.remove(module)\n del module._forward_pre_hooks[k]\n return module\n\n raise ValueError(\"spectral_norm of '{}' not found in {}\".format(\n name, module))", "def reset_weights(self):\r\n self._weights = deepcopy(self._tmp_weights)\r\n self._tmp_weights = None" ]
[ "0.79998016", "0.76628864", "0.7629923", "0.72848314", "0.7045166", "0.67848754", "0.6780856", "0.6746634", "0.6591088", "0.65823525", "0.6566755", "0.65617", "0.65572643", "0.65561306", "0.6538293", "0.6538293", "0.6538293", "0.6529808", "0.6526071", "0.651576", "0.64801204", "0.6455137", "0.64305204", "0.642898", "0.6405197", "0.6397573", "0.63855386", "0.6293032", "0.6251394", "0.6249921" ]
0.7893102
1
Apply the weight normalization module to all of the layers.
def apply_weight_norm(self):
    def _apply_weight_norm(m):
        if isinstance(m, torch.nn.Conv1d) or isinstance(
            m, torch.nn.ConvTranspose1d
        ):
            torch.nn.utils.weight_norm(m)
            logging.debug(f"Weight norm is applied to {m}.")

    self.apply(_apply_weight_norm)
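For illustration, a minimal self-contained sketch of the same pattern. The ToyGenerator class, its layer sizes, and the final hasattr check are assumptions made for this example; they are not taken from the snippet above.

import logging

import torch


class ToyGenerator(torch.nn.Module):
    """Hypothetical stand-in model used only to demonstrate the apply() pattern."""

    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv1d(1, 8, kernel_size=3, padding=1)
        self.upsample = torch.nn.ConvTranspose1d(8, 1, kernel_size=4, stride=2, padding=1)

    def apply_weight_norm(self):
        # Same idea as above: visit every submodule and wrap the 1-D convolutions.
        def _apply_weight_norm(m):
            if isinstance(m, (torch.nn.Conv1d, torch.nn.ConvTranspose1d)):
                torch.nn.utils.weight_norm(m)
                logging.debug(f"Weight norm is applied to {m}.")

        self.apply(_apply_weight_norm)


model = ToyGenerator()
model.apply_weight_norm()
# weight_norm() reparameterizes each wrapped conv into weight_g (magnitude) and weight_v (direction).
print(hasattr(model.conv, "weight_g"))  # True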
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def _init_weights(self):\n for layer in self.modules():\n if isinstance(layer, (nn.Conv1d, nn.Linear)):\n nn.init.xavier_uniform_(layer.weight)\n if layer.bias is not None:\n nn.init.constant_(layer.bias, 0)\n elif isinstance(layer, nn.BatchNorm1d):\n nn.init.constant_(layer.weight, 1)\n nn.init.constant_(layer.bias, 0)", "def associate_normalization_layers(self, model):\n if (len(self.leaf_modules) == 0):\n self.retrieve_leaf_modules(model) \n # Association list\n self.norm_modules = []\n self.prune_modules = []\n # Current weighted layer\n cur_weighted = None\n # Associate norm layers to their immediate previous weighted layers\n for name, m in self.leaf_modules:\n if (m.__class__ in [nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d]):\n cur_weighted = m\n if (m.__class__ in [nn.RNN, nn.GRU, nn.LSTM]):\n cur_weighted = m\n if ('Norm' in str(m.__class__)):\n if (cur_weighted is not None):\n self.norm_modules.append((m, cur_weighted))", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g", "def _init_weights(self):\n for m in self.modules():\n if type(m) in {\n nn.Linear,\n nn.Conv3d,\n nn.Conv2d,\n nn.ConvTranspose2d,\n nn.ConvTranspose3d\n }:\n nn.init.kaiming_normal_(\n m.weight.data, a=0, mode='fan_out', nonlinearity='relu',\n )\n if m.bias is not None:\n fan_in, fan_out = \\\n nn.init._calculate_fan_in_and_fan_out(m.weight.data)\n bound = 1 / math.sqrt(fan_out)\n nn.init.normal_(m.bias, -bound, bound)", "def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)", "def _initialize_weights(self):\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n m.weight.data.normal_(0, 0.05)\r\n if m.bias is not None:\r\n m.bias.data.zero_()", "def initialize_weights(self):\n for layer in self._cnn_layers:\n weights_initializer.WeightsInitializer.initialize_layer_or_model(layer)", "def init_weights(self):\n # We don't use the `init_weights()` function in BaseModule, since it\n # doesn't support the initialization method from `reset_parameters()`\n # in Pytorch.\n if self.with_backbone:\n self.backbone.init_weights()\n\n if self.with_neck:\n for m in self.neck.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()\n\n if self.with_head:\n for m in self.head.modules():\n if isinstance(m, _ConvNd) or isinstance(m, _BatchNorm):\n m.reset_parameters()", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):\n xavier_uniform_(m.weight)\n if m.bias is not 
None:\n zeros_(m.bias)", "def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)", "def normalizeWeights(self):\n for wt in self.weights:\n wt[wt>1] = 1\n wt[wt<-1] = -1\n for bs in self.bias:\n bs[bs>1] = 1\n bs[bs<-1] = -1", "def _reset_weights(m):\n\n nn = import_optional_dependency(\"torch.nn\")\n init = import_optional_dependency(\"torch.nn.init\")\n if isinstance(m, nn.Conv1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.Conv3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose1d):\n init.normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose2d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.ConvTranspose3d):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n init.normal_(m.bias.data)\n elif isinstance(m, nn.BatchNorm1d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.BatchNorm3d):\n init.normal_(m.weight.data, mean=1, std=0.02)\n init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n init.normal_(m.bias.data)\n elif isinstance(m, nn.LSTM):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.LSTMCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRU):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)\n elif isinstance(m, nn.GRUCell):\n for param in m.parameters():\n if len(param.shape) >= 2:\n init.orthogonal_(param.data)\n else:\n init.normal_(param.data)", "def remove_norms(module_: \"WN\") -> \"WN\":\n module_.start = torch.nn.utils.remove_weight_norm(module_.start_conv)\n module_.cond_layer = torch.nn.utils.remove_weight_norm(module_.cond_layer)\n for i, layer_ in enumerate(module_.in_layers):\n layer_ = DepthwiseSeparableConv1d.remove_batch_norm(layer_)\n module_.in_layers[i] = layer_\n for i, layer_ in enumerate(module_.res_skip_layers):\n layer_ = torch.nn.utils.remove_weight_norm(layer_)\n module_.res_skip_layers[i] = layer_\n return module_", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n 
p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)", "def _weight_initializer(self):\n for m in self.modules():\n if isinstance(m, nn.ConvTranspose2d):\n nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1.0)\n nn.init.constant_(m.bias, 0.0)", "def init_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n xavier_init(m)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)", "def remove_weight_norm_(self):\n\n def _remove_weight_norm(m):\n try:\n torch.nn.utils.remove_weight_norm(m)\n except ValueError:\n return\n\n self.apply(_remove_weight_norm)", "def init_weight(self):\n init_layer(self.conv1)\n init_layer(self.conv2)\n init_bn(self.norm1)\n init_bn(self.norm2)", "def normalize(self, weights):\n tot = sum(weights)\n newW = [-1] * self.numParticles\n for i in range(len(weights)):\n newW[i] = weights[i] / tot\n return newW", "def layer_normalize_(self, ref_point: 'ModelParameters', order=2):\n # in-place normalize each parameter\n for layer_idx, parameter in enumerate(self.parameters, 0):\n parameter *= (ref_point.layer_norm(layer_idx, order) / self.layer_norm(layer_idx, order))", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def init_weights(layer):\r\n layer_name = layer.__class__.__name__\r\n if layer_name.find(\"Conv\") != -1:\r\n layer.weight.data.normal_(0.0, 0.02)\r\n elif layer_name.find(\"BatchNorm\") != -1:\r\n layer.weight.data.normal_(1.0, 0.02)\r\n layer.bias.data.fill_(0)" ]
[ "0.71430475", "0.71430475", "0.71430475", "0.713435", "0.70688635", "0.70688635", "0.7052796", "0.7049518", "0.70298177", "0.68070155", "0.6778134", "0.676388", "0.6760103", "0.67364126", "0.6710347", "0.6696243", "0.65825963", "0.65547544", "0.6525561", "0.6517765", "0.6516343", "0.6463633", "0.6456279", "0.6439273", "0.6430638", "0.64223105", "0.6402939", "0.6390908", "0.63742155", "0.6366158" ]
0.73182416
0
Returns a humanized string representing the time difference between now() and the input timestamp. The output rounds up to days, hours, minutes, or seconds: 4 days 5 hours returns '4 days'; 0 days 4 hours 3 minutes returns '4 hours', etc.
def time_since(timestamp=None):
    rstr = ""
    if not timestamp or not isinstance(timestamp, datetime.datetime):
        return rstr
    now = timezone.now()
    timediff = now - timestamp
    # Break the delta into the units the docstring promises.
    days = timediff.days
    weeks = days // 7
    months = days // 30
    hours = timediff.seconds // 3600
    minutes = timediff.seconds % 3600 // 60
    seconds = timediff.seconds % 3600 % 60
    if days > 365:
        return "> a year"
    if months > 0:
        if months == 1:
            tstr = "month"
        else:
            tstr = "months"
        rstr = rstr + "%s %s" % (months, tstr)
        return rstr
    if weeks > 0:
        if weeks == 1:
            tstr = "week"
        else:
            tstr = "weeks"
        rstr = rstr + "%s %s" % (weeks, tstr)
        return rstr
    if days > 0:
        if days == 1:
            tstr = "day"
        else:
            tstr = "days"
        rstr = rstr + "%s %s" % (days, tstr)
        return rstr
    elif hours > 0:
        if hours == 1:
            tstr = "hour"
        else:
            tstr = "hours"
        rstr = rstr + "%s %s" % (hours, tstr)
        return rstr
    elif minutes > 0:
        if minutes == 1:
            tstr = "min"
        else:
            tstr = "mins"
        rstr = rstr + "%s %s" % (minutes, tstr)
        return rstr
    elif seconds > 0:
        if seconds == 1:
            tstr = "sec"
        else:
            tstr = "secs"
        rstr = rstr + "%s %s" % (seconds, tstr)
        return rstr
    else:
        return "Now"
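A small usage sketch; it assumes Django's timezone utilities are importable, since the function above relies on timezone.now(). The example timestamps are made up for illustration.

import datetime

from django.utils import timezone

four_hours_ago = timezone.now() - datetime.timedelta(hours=4, minutes=3)
five_weeks_ago = timezone.now() - datetime.timedelta(weeks=5)

print(time_since(four_hours_ago))  # "4 hours"
print(time_since(five_weeks_ago))  # "1 month" (35 days rounds to one month before the weeks check)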
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(second_diff)) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(second_diff / 60)) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(second_diff / 3600)) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(int(day_diff / 7)) + \" weeks ago\"\n if day_diff < 365:\n return str(int(day_diff / 30)) + \" months ago\"\n return str(int(day_diff / 365)) + \" years ago\"", "def get_formatted_duration(self, prev_time):\n duration = time() - prev_time\n if duration < 60:\n unit = 's'\n elif duration < 3600:\n duration /= 60\n unit = 'm'\n else:\n duration /= 3600\n unit = 'h'\n return self.format_num(duration) + unit", "def get_elapsed_timestamp(self) -> str:\n t = self.elapsed_time\n minutes = int(t / 60)\n seconds = int(t - (60 * minutes))\n millis = int(100 * (t - int(t)))\n return '{:>02d}:{:>02d}.{:<02d}'.format(minutes, seconds, millis)", "def get_answer_time(self):\n sec = (self.updated_at - self.created_at).total_seconds()\n return f'{int((sec / 60) % 60):02d}:{int(sec):02d}'", "def time_since_as_text(time=False):\n now = datetime.now(timezone.utc)\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"nyss\"\n if second_diff < 60:\n return str(second_diff) + \" sekunder sedan\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(floor(second_diff / 60)) + \" minuter sedan\"\n if second_diff < 7200:\n return \"en timme sedan\"\n if second_diff < 86400:\n return str(floor(second_diff / 3600)) + \" timmar sedan\"\n if day_diff == 1:\n return \"Igår\"\n if day_diff < 7:\n return str(day_diff) + \" dagar sedan\"\n if day_diff < 31:\n return str(floor(day_diff / 7)) + \" veckor sedan\"\n if day_diff < 365:\n return str(floor(day_diff / 30)) + \" månder sedan\"\n return str(day_diff / 365) + \" år sedan\"", "def get_formatted_time() -> datetime.strftime:\n\t\n\tnow = datetime.now() # time now\n\thalf_hour = (now - timedelta(minutes = 30)) # time 30 min ago\n\t# returns half hour ago to accommodate for failed checks\n\t# (bc twint behaves as if none found if check failed)\n\tcurrent_time = half_hour.strftime(\"%Y-%m-%d %H:%M:%S\")\n\treturn current_time", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))", "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(round(second_diff, 0))) + \" seconds ago\"\n if second_diff < 120:\n 
return \"a minute ago\"\n if second_diff < 3600:\n return str(int(round(second_diff / 60, 0))) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(round(second_diff / 3600, 0))) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(int(round(day_diff, 0))) + \" days ago\"\n if day_diff < 31:\n return str(int(round(day_diff / 7, 0))) + \" weeks ago\"\n if day_diff < 365:\n return str(int(round(day_diff / 30, 0))) + \" months ago\"\n return str(int(round(day_diff / 365, 0))) + \" years ago\"", "def get_time():\n ct = time.time()\n lt = time.gmtime(ct)\n msec = int((ct - int(ct)) * 1000)\n return f'{time.strftime(DATE_FMT, lt)}.{msec:0>3}'", "def _getUpTime(self):\n diff = (datetime.datetime.now() - self._startTime).__str__()\n return diff[:diff.find('.')]", "def pretty_date(time=False):\n from datetime import datetime\n\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return \"\"\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff / 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff / 30) + \" months ago\"\n return str(day_diff / 365) + \" years ago\"", "def pretty_date(time=False):\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time \n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def pretty_deltat(seconds: float) -> str:\n\n # Reject weird stuff\n try:\n seconds = float(seconds)\n except (TypeError, ValueError):\n raise TypeError(\"non-numeric time delta\")\n\n if seconds < 0:\n # If the delta is negative, just print it\n return f\"{seconds:.1f}s\"\n\n hours, seconds = divmod(seconds, 3600)\n minutes, seconds = divmod(seconds, 60)\n\n if hours > 0:\n return f\"{int(hours)}h{int(minutes):02}m{int(seconds):02}s\"\n if minutes > 0:\n return f\"{int(minutes)}m{int(seconds):02}s\"\n\n # For short durations, include tenths of a second\n return f\"{seconds:.1f}s\"", "def realtime_to_ingame_delta_formatted(sec: float) -> str:\n return 
ingame_delta_formatted(realtime_to_ingame_delta(sec))", "def howLongAgo(time=False):\n now = timezone.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"genau jetzt\"\n if second_diff < 60:\n return \"vor \" + str(second_diff) + \" Sek.\"\n if second_diff < 120:\n return \"vor einer Min.\"\n if second_diff < 3600:\n return \"vor \" + str( second_diff / 60 ) + \" Min.\"\n if second_diff < 7200:\n return \"vor einer St.\"\n if second_diff < 86400:\n return \"vor \" + str( second_diff / 3600 ) + \" St.\"\n if day_diff == 1:\n return \"Gestern\"\n if day_diff < 7:\n return \"vor \" + str(day_diff) + \" Tagen\"\n if day_diff < 31:\n return \"vor \" + str(day_diff/7) + \" Wochen\"\n if day_diff < 365:\n return \"vor \" + str(day_diff/30) + \" Monaten\"\n return \"vor \" + str(day_diff/365) + \" Jahren\"", "def ago(self):\n return human(self.timestamp/1000.0, precision=1, abbreviate=True)", "def time_elapsed(sec):\n if sec < 60:\n return str(sec) + \" sec\"\n elif sec < (60 * 60):\n return str(sec / 60) + \" min\"\n else:\n return str(sec / (60 * 60)) + \" hr\"", "def _get_time_since_tell_send(tell):\n tell_time_sent = int(tell[3])\n\n current_time = int(time.time())\n\n dt1 = datetime.fromtimestamp(tell_time_sent)\n dt2 = datetime.fromtimestamp(current_time)\n rd = dateutil.relativedelta.relativedelta(dt2, dt1)\n\n out = ''\n\n if rd.days == 1:\n out += f'{rd.days} day, '\n elif rd.days != 0:\n out += f'{rd.days} days, '\n\n if rd.hours == 1:\n out += f'{rd.hours} hour, '\n elif rd.hours != 0:\n out += f'{rd.hours} hours, '\n\n if rd.minutes == 1:\n out += f'{rd.minutes} minute and '\n elif rd.minutes != 0:\n out += f'{rd.minutes} minutes and '\n\n if rd.seconds == 1:\n out += f'{rd.seconds} second ago'\n elif rd.seconds != 0:\n out += f'{rd.seconds} seconds ago'\n elif current_time - tell_time_sent == 0:\n out = 'just now'\n\n return out", "def make_it_rw(time_stamp):\r\n seconds, milliseconds = divmod(int(time_stamp), 1000)\r\n minutes, seconds = divmod(seconds, 60)\r\n hours, minutes = divmod(minutes, 60)\r\n days, hours = divmod(hours, 24)\r\n tmp = (\r\n ((str(days) + \" Days, \") if days else \"\")\r\n + ((str(hours) + \" Hours, \") if hours else \"\")\r\n + ((str(minutes) + \" Minutes, \") if minutes else \"\")\r\n + ((str(seconds) + \" Seconds, \") if seconds else \"\")\r\n + ((str(milliseconds) + \" ms, \") if milliseconds else \"\")\r\n )\r\n return tmp[:-2]", "def unixTimeToString_NEW(ut):\n intTime = int(ut)\n frac = ut - intTime\n #print \"\\nfrac is %f, conv is %f\" % (frac, round(frac*1000))\n y, m, d, ho, mi, se, junk1, junk2, junk3 = gmtime(intTime)\n #print \"ut is %f, s is %4d_%02d_%02d_%02d_%02d_%02d.%03d\\n\" % (ut, y, m, d, ho, mi, se, int(frac*1000))\n #return '%4d_%02d_%02d_%02d_%02d_%02d.%03d' % (y, m, d, ho, mi, se, int(frac*1000))\n return '%4d_%02d_%02d_%02d_%02d_%02d.%03d' % (y, m, d, ho, mi, se, round(frac*1000))", "def elapsed_time_formatted(begin_time):\n return time.strftime(\n \"%H:%M:%S\", (time.gmtime(time.perf_counter() - begin_time))\n )", "def start_delta_string(self):\r\n delta = int(self.start_time) - int(self.root().start_time)\r\n return '%02d:%02d' % (delta / 60, delta % 60)", "def humantime(seconds: float) -> str:\n return redivmod(seconds, [(60, \"seconds\"),\n (60, \"minutes\"),\n (24, \"hours\"),\n 
(7, \"days\"),\n (52, \"weeks\"),\n (0, \"years\")])", "def pretty_date(time=False):\n now = datetime.datetime.utcnow()\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif isinstance(time, datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n day_diff *= -1\n second_diff *= -1\n if day_diff < 1:\n if second_diff < 10:\n return ugettext('imminently')\n if second_diff < 60:\n return ungettext('{n} second from now', '{n} seconds from now', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('in a minute')\n if second_diff < 3600:\n return ungettext('{n} minute from now', '{n} minutes from now', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('in an hour')\n if second_diff < 86400:\n return ungettext('{n} hour from now', '{n} hours from now', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('tomorrow')\n if day_diff < 7:\n return ungettext('{n} day from now', '{n} days from now', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week from now', '{n} weeks from now', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month from now', '{n} months from now', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year from now', '{n} years from now', day_diff / 365).format(n=day_diff / 365)\n\n if day_diff == 0:\n if second_diff < 10:\n return ugettext('just now')\n if second_diff < 60:\n return ungettext('{n} second ago', '{n} seconds ago', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('a minute ago')\n if second_diff < 3600:\n return ungettext('{n} minute ago', '{n} minutes ago', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('an hour ago')\n if second_diff < 86400:\n return ungettext('{n} hour ago', '{n} hours ago', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('yesterday')\n if day_diff < 7:\n return ungettext('{n} day ago', '{n} days ago', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week ago', '{n} weeks ago', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month ago', '{n} months ago', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year ago', '{n} years ago', day_diff / 365).format(n=day_diff / 365)", "def get_duration_string(duration):\n\n minutes = duration // 60\n seconds = duration % 60\n return \"the game took {} minutes and {} seconds\".format(minutes, seconds)", "def pretty_date(time=False):\r\n from datetime import datetime\r\n import dateutil.parser\r\n now = datetime.now()\r\n if type(time) is str or type(time) is unicode:\r\n time = dateutil.parser.parse(time)\r\n if type(time) is int:\r\n diff = now - datetime.fromtimestamp(time)\r\n elif isinstance(time, datetime):\r\n diff = now - time\r\n elif not time:\r\n diff = now - now\r\n second_diff = diff.seconds\r\n day_diff = diff.days\r\n\r\n if day_diff < 0:\r\n return ''\r\n\r\n if day_diff == 0:\r\n if second_diff < 10:\r\n return \"just now\"\r\n if second_diff < 60:\r\n return str(second_diff) + \" seconds ago\"\r\n if second_diff < 120:\r\n return \"a minute ago\"\r\n if second_diff < 3600:\r\n return ' '.join([str(second_diff / 60), \"minutes ago\"])\r\n if second_diff < 7200:\r\n return \"an hour ago\"\r\n if second_diff < 86400:\r\n return ' 
'.join([str(second_diff / 3600), \"hours ago\"])\r\n if day_diff == 1:\r\n return \"Yesterday\"\r\n if day_diff < 7:\r\n return ' '.join([str(day_diff), \"days ago\"])\r\n if day_diff < 31:\r\n return ' '.join([str(day_diff / 7), \"weeks ago\"])\r\n if day_diff < 60:\r\n return ' '.join([str(day_diff / 30), \"month ago\"])\r\n if day_diff < 365:\r\n return ' '.join([str(day_diff / 30), \"months ago\"])\r\n if day_diff < (365 * 2):\r\n return ' '.join([str(day_diff / 365), \"year ago\"])\r\n return ' '.join([str(day_diff / 365), \"years ago\"])", "def time_str(num):\n if num > 3600:\n return \"%0.2f hrs\" % (num / 3600)\n elif num > 60:\n return \"%0.2f mins\" % (num / 60)\n else:\n return \"%d seconds\" % num", "def srt(self):\n return '{:02d}:{:02d}:{:02d},{:03d}'.format(self.hours, self.minutes,\n int(self.seconds // 1),\n int(self.seconds % 1 * 100))", "def age(self) -> str:\n tdelta = dt.now() - self.created_timestamp\n if tdelta.days >= 548: # enough to round it up to 2 years\n return f'about {tdelta.days/365:.0f} years'\n elif tdelta.days >= 345: # enough to round it up to 1 year (so it doesn't report '12 months')\n return f'about a year'\n elif tdelta.days > 45: # beyond 1 month (after rounding)\n return f'about {tdelta.days/30:.0f} months'\n elif tdelta.days > 24: # enough to round it up to 1 month (so it doesn't report '4 weeks')\n return f'about a month'\n elif tdelta.days > 7:\n # round to nearest half, dropping '.0' when whole\n return f'{round((tdelta.days/7)*2)/2:g} weeks'\n elif tdelta.days == 7:\n return 'a week'\n elif tdelta.days > 1:\n return f'{tdelta.days} days'\n elif tdelta.days == 1:\n return f'a day'\n # break it down into parts of a day\n hours = tdelta.seconds // 3600\n if hours > 1:\n return f'{hours:.0f} hours'\n elif hours == 1:\n return f'an hour'\n minutes = tdelta.seconds % 3600 / 60\n if minutes > 1:\n return f'{minutes:.0f} minutes'\n elif minutes == 1:\n return f'a minute'\n return 'moments'", "def seconds2human(self, my_time):\n my_days, my_seconds = divmod(my_time, 86400)\n time_delta = timedelta(seconds=my_seconds)\n reminder = strftime(\"%H:%M:%S\", gmtime(time_delta.seconds))\n if my_days > 1:\n return \"%s days, %s\" % (my_days, reminder)\n elif my_days == 1:\n return \"%s day, %s\" % (my_days, reminder)\n else:\n return strftime(\"%H:%M:%S\", gmtime(time_delta.seconds))" ]
[ "0.7199235", "0.6999816", "0.6862485", "0.6823745", "0.6743255", "0.6654757", "0.66247755", "0.65492713", "0.64874226", "0.64806837", "0.6469421", "0.64674145", "0.64304537", "0.6402844", "0.6387063", "0.6360706", "0.6358006", "0.6357516", "0.63572836", "0.63446885", "0.63081884", "0.6295959", "0.62707317", "0.6260188", "0.6257234", "0.6256976", "0.6242369", "0.6215239", "0.6212859", "0.6208622" ]
0.7168039
1
Return the elements in the message that match the given parameters. match is the type of elements you want to get (check the parse_type variable to see the possibilities); using ! at the start of match will reverse the value of positive. occurences selects which of the matched elements (the nth indexes) to capture; None will find everything.
def finder(self, match="w", occurences=None, start=None, stop=None,
           trigger=True, positive=True, reverse=False, keep_prefix=False):
    res = []
    length = len(self.parse_type)
    if occurences != None:
        occurences = str(occurences)
    index_array = self.indexes(occurences, 1)
    is_capturing = (start == None)
    target = 0
    if match == None:
        match = "xwoifmrcs"
    if len(match) > 0 and match[0] == "!":
        positive = (positive == False)
    for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1):  # xd lol
        if is_capturing == False:
            if type(start) == type(0):
                is_capturing = (idx == start)
            else:
                is_capturing = (self.parse_type[idx] in start)
        if stop != None:
            if trigger == True or is_capturing == True:
                if type(stop) == type(0) and (idx == stop):
                    break
                if type(stop) == type(" ") and (self.parse_type[idx] in stop):
                    break
        if is_capturing == True:
            if (self.parse_type[idx] in match) == positive:
                if target in index_array:
                    res.append(self.parse_msg[idx][(keep_prefix == False and self.parse_type[idx] in "ox"):])
                target += 1
    if len(res) == 0:
        return None
    return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _any_depth_parse(match):\n markers = [match.p1, match.p2, match.p3, match.p4, match.p5, match.p6]\n for idx in (4, 5):\n if markers[idx]:\n markers[idx] = mtypes.emphasize(markers[idx])\n return [m for m in markers if m]", "def onNameType(self, match):\n\t\treturn [self.process(match[0]), self.process(match[1])]", "def getMatch(data):\n if len(data) > 15:\n return 'date: {0} {1}, match => {2}, {3}, {4}| 1x2 => {5}, {6}, {7}| handicap => {8}, {9}, {10}, {11}| OU => {12}, {13}, {14}, {15}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13], data[14], data[15], data[16])\n return 'date: {0} {1}, match => {2}, {3}, {4}| handicap => {5}, {6}, {7}, {8}| OU => {9}, {10}, {11}, {12}'.format(data[1], data[2], data[3], data[4], data[5], data[6], data[7], data[8], data[9], data[10], data[11], data[12], data[13])", "def split_match(self, match):\n\n match, line, col, error, warning, message, near = super().split_match(match)\n\n if line is not None and line == -1 and message:\n line = 0\n\n return match, line, col, error, warning, message, near", "def tok_match_record(matchlist, remainder_str, xtoken, matched_substr):\n\tpstr_infostr = matched_substr\n\txtok_infostr = re.sub(r'<([^<>\"]{1,3})\\w*(?: \\w+=\"([^<>\"]{1,3})\\w*\")?>',\n\t r'\\1',\n\t xtoken.tagout)\n\t# print(\"SAVE p-substr:'%s' =~ m/%s/ix\" % (pstr_infostr,xtok_infostr),file=sys.stderr)\n\t\n\t# -a- préparation du substitut balisé en xml\n\t# £ pseudo_out == 'rendu'\n\t# debg\n\tpseudo_out = xtoken.tagout+str_escape(matched_substr)+xtoken.endout\n\t\n\t# -b- enregistrement\n\tmatchlist.append(pseudo_out)\n\ti = len(matchlist)\n\t\n\t# -c- effacement dans le remainder\n\t# (substitution par un renvoi à la \\4 ex: #(#4#)#)\n\t# £todo!!! 
: interdire matches dans les renvois précédents (exemple n° volume == n° de renvoi) !\n\tremainder_str = re.sub(xtoken.re, \"#(#%i-%s#)#\" % (i, xtok_infostr), remainder_str)\n\t\n\treturn(matchlist, remainder_str)", "def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif key == 'args':\n for (exp, act) in zip(value, record.get(key)):\n if not re.search(str(exp), str(act)):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches", "def onExpressionList(self, match):\n\t\thead=self.process(match[0])\n\t\ttail=self.process(match[1])\n\t\tres=[head]\n\t\tfor _ in tail:\n\t\t\tres.append(_[1])\n\t\treturn res", "def processMatch(match, justLabel=False):\n response = []\n staticAttrs = [ 'gameId' , 'gameDuration' ]\n if not 'participants' in match or len(match['participants']) < 1:\n raise Exception('[-] wrong match!')\n if justLabel:\n response += staticAttrs\n firstPart = match['participants'][0]\n response += extractAttrs(firstPart, justLabel)\n return response\n for participant in match['participants']:\n partAttrs = []\n # firstly adding the static attributes\n for staticKey in staticAttrs:\n partAttrs += [match[staticKey]]\n # then the others\n partAttrs += extractAttrs(participant)\n response.append(partAttrs)\n return response", "def split_match(self, match):\n\n match, line, col, error, warning, message, near = super().split_match(match)\n\n if match:\n message = '[xvlog] ' + message\n\n return match, line, col, error, warning, message, near", "def extractTagsAndParams(self, elements, text, matches):\n stripped = u''\n \n taglist = u'|'.join(elements)\n if taglist not in _startRegexHash:\n _startRegexHash[taglist] = re.compile(ur\"<(\" + taglist + ur\")(\\s+[^>]*?|\\s*?)(/?>)|<(!--)\", re.UNICODE | re.IGNORECASE)\n start = _startRegexHash[taglist]\n \n while text != u'':\n p = start.split(text, 1)\n stripped += p[0]\n if len(p) == 1:\n break\n elif p[4]:\n # comment\n element = p[4]\n attributes = u''\n close = u''\n else:\n element = p[1]\n attributes = p[2]\n close = p[3]\n inside = p[5]\n \n global _extractTagsAndParams_n\n marker = self.uniq_prefix + u'-' + element + u'-' + (u\"%08X\" % _extractTagsAndParams_n) + u'-QINU'\n _extractTagsAndParams_n += 1\n stripped += marker\n \n if close == u'/>':\n # empty element tag, <tag />\n content = ''\n text = inside\n tail = ''\n else:\n if element == u'!--':\n end = _endCommentPat\n else:\n if element not in _endRegexHash:\n _endRegexHash[element] = re.compile(ur'(</' + element + ur'\\s*>)', re.UNICODE | re.IGNORECASE)\n end = _endRegexHash[element]\n q = end.split(inside, 1)\n content = q[0]\n if len(q) < 3:\n # no end tag\n tail = ''\n text = ''\n else:\n tail = q[1]\n text = q[2]\n \n matches[marker] = (\n element,\n content,\n self.decodeTagAttributes(attributes),\n u\"<\" + element + attributes + close + content + tail\n )\n return stripped", "def split_match(self, match):\n match, line, col, error, warning, message, near = super().split_match(match)\n if match:\n message = '[vcom] ' + message\n return match, line, col, error, warning, message, near", "def _matchPart(self, part):\r\n return [{**{key.name:p[key.name] for key in self.groups},\r\n **({#Call recursively on nested subpattern\r\n self.name:self.nestedPattern._matchPart(\r\n #and 
match\r\n p[0])}\r\n #only if subpattern exists\r\n if self.nestedPattern is not None else {})}\r\n for p in re.finditer(self.regex, part)\r\n #discard any record in ignored\r\n if not any([p[key.name] in self.ignored[key]\r\n for key in self.ignored])]", "def parse_last_exception(message):\n for pattern, response in patterns:\n items_found = re.findall(pattern, repr(message))\n if items_found:\n #print(\"FOUND\", items_found)\n print_exception_message(response, items_found[0])\n break\n else:\n unrecognised_exception(message)", "def logs(self, **kwargs):\n matches = []\n for record in self.buffer:\n found_match = True\n for (key, value) in kwargs.items():\n if key == 'msg':\n # Regexp match\n if not re.search(value, str(record.get(key))):\n found_match = False\n break\n elif not value == record.get(key):\n found_match = False\n break\n if found_match:\n matches.append(record)\n return matches", "def _parse_msg(msg):\n split_args_regex = \"(.*?)\\:(.*)\"\n args_split_regex = \"\\,\"\n match = re.match(split_args_regex, msg)\n if match is not None:\n message = match.group(1)\n arg_str = match.group(2)\n arg_iter = re.finditer(args_split_regex, args)\n args = []\n for arg in arg_iter:\n args.append(arg) \n return None", "def message_matches(cls, msg, regex):\n m = regex.match(msg.text)\n if m:\n return m.groups()\n return None", "def getContentList(self, content, index=-1):\n try:\n if index == -1: # this is a return for a single instance site\n repattern = re.compile(self.RegEx, re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n else: # this is the return for a multisite\n repattern = re.compile(self.RegEx[index], re.IGNORECASE)\n foundlist = re.findall(repattern, content)\n return foundlist\n except:\n self.postErrorMessage(self.ErrorMessage + \" \" + self.FullURL)\n return None", "def split_match(self, match):\n match, line, col, error, warning, message, near = super().split_match(match)\n\n no_doc_index = message.find(\"has no :Doc\")\n if no_doc_index > 0:\n error = False\n warning = \"Warning\"\n near = message[:no_doc_index].strip()\n elif message.startswith(\"@HV\"):\n near = \"@HV\"\n\n if error:\n error = \" \"\n elif warning:\n warning = \" \"\n else:\n error = \" \"\n\n if (match is None) or match.group('filename').startswith('atcc-'):\n return match, line, col, error, warning, message, near\n\n temp_name = match.group('filename')\n if ((self.LastIncludeMatch is not None) and\n (self.LastIncludeMatch[0:2] == (self.filename, temp_name))):\n region = self.LastIncludeMatch[2]\n else:\n region = self.view.find(r\"\\s*File\\s+\" + temp_name, 0)\n self.LastIncludeMatch = (self.filename, temp_name, region)\n\n if region is not None:\n line = self.view.rowcol(region.begin())[0] + 1\n near = temp_name\n return match, line, col, error, warning, message, near\n else:\n return match, None, None, None, None, None, None", "def parse(cls, buf: memoryview, params: Params) \\\n -> tuple[AnyParseable, memoryview]:\n for data_type in params.expected:\n try:\n return data_type.parse(buf, params)\n except NotParseable:\n pass\n raise UnexpectedType(buf)", "def parseResult(self):\n\n # parse all WHYPO tags\n result = []\n for msg in [m for m in self.msg if \"WHYPO\" in m]:\n\n list = self.pattern.findall(msg)\n for prop in list:\n if \"WORD\" in prop:\n value = prop.split('\"')[1]\n result.append(value)\n return result", "def match(self):\n\n # We initate this variable which gonna contain the returned data\n result = []\n\n # We compile the regex string\n to_match = 
comp(self.regex)\n\n # In case we have to use the implementation of ${BASH_REMATCH} we use\n # re.findall otherwise, we use re.search\n if self.rematch: # pylint: disable=no-member\n pre_result = to_match.findall(self.data)\n else:\n pre_result = to_match.search(self.data)\n\n if self.return_data and pre_result is not None: # pylint: disable=no-member\n if self.rematch: # pylint: disable=no-member\n for data in pre_result:\n if isinstance(data, tuple):\n result.extend(list(data))\n else:\n result.append(data)\n\n if self.group != 0: # pylint: disable=no-member\n return result[self.group] # pylint: disable=no-member\n else:\n result = pre_result.group(\n self.group # pylint: disable=no-member\n ).strip()\n\n return result\n elif (\n not self.return_data # pylint: disable=no-member\n and pre_result is not None\n ):\n return True\n return False", "def test_searchOrMessageSet(self):\n return self._messageSetSearchTest('OR 2:* 2:*', [2, 3, 4, 5])", "def filter_args_num(self, matches: str, args: int) -> List[str]:\n filtered: List[str] = []\n if args == 1:\n for i, match in enumerate(matches):\n if match.endswith(\"/arg\"):\n filtered.append(matches[i][:-4])\n else:\n for i, match in enumerate(matches):\n if match.endswith(\"/arg[%d]\" % args):\n # Make sure we don't cause an IndexError (end of list)\n # Check to make sure arg + 1 doesn't exist\n if (i == (len(matches) - 1) or\n not matches[i + 1].endswith(\"/arg[%d]\" %\n (args + 1))):\n filtered.append(matches[i][:-len(\"/arg[%d]\" % args)])\n\n return filtered", "def test_multi_no_match_return_expr(self):\n eq_(None,line_matches_greps(self.line,[\"foo\",\"idontmatch\"]))", "def test_searchAndMessageSet(self):\n return self._messageSetSearchTest('2:* 3', [3])", "def missed_matches(self, match_type) -> list:\n missed = []\n for result in self.get_results(match_type, TestSearchResult.Source.LEGACY.value):\n if result['pairedIndex'] == -1:\n missed.append(result)\n return missed", "def findall_simple(pattern, string):\n return [x[0] if isinstance(x, tuple) else x for x in re.findall(pattern=pattern, string=string)]", "def process_match(text, pos):\n m, _ = parse_ent('<' + text + '>', pos - len(text))\n return len(text) - len(m) + 2", "def test_multi_match_return_expr(self):\n eq_(None,line_no_matches_ngreps(self.line,[\"foo\",\"bar\"]))", "def handleMatch(self, m):\r\n pass" ]
[ "0.5471574", "0.52397555", "0.5145322", "0.5005529", "0.49990287", "0.49739963", "0.49577978", "0.49357885", "0.49001318", "0.48870462", "0.48827666", "0.4872868", "0.48422822", "0.48398778", "0.48214757", "0.4820572", "0.48020837", "0.47951323", "0.4777706", "0.4765019", "0.4747468", "0.47409493", "0.4740511", "0.47404638", "0.47373354", "0.47322333", "0.47295514", "0.47075036", "0.47044677", "0.4700549" ]
0.5920306
0
Return True if the parameters match the parse_type. match is the amount of each parse_type element you want to search for; you can write www to check for 3 words in a row. ranges follows the same syntax as occurences, except that it targets indexes.
def checker(self, match="xw", ranges="0,1", in_a_row=True, reverse=False):
    res = []
    length = len(self.parse_type)
    if ranges != None:
        ranges = str(ranges)
    index_array = self.indexes(ranges)
    substring = ""
    for idx in range(length*reverse-reverse, length*(-reverse+1)-reverse, (-reverse)*2+1):  # xd lol
        if idx in index_array:
            substring += self.parse_type[idx]
    if in_a_row == True:
        return (match in substring)
    if in_a_row == False:
        target = 0
        for i in substring:
            target += (match[target] == i)
        return (target == maxi)
    if in_a_row == None:
        for i in self.parse_type:
            if i in match:
                match = match.replace(i, '', 1)
        return (match == "")
    return None
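To make the type-string convention concrete, here is a standalone sketch. The MiniParser class, its simplified checker, and the type codes chosen below ('x' for the command token, 'w' for plain words, 'i' for integers) are all assumptions for illustration; the real class also provides an indexes() helper that is not shown in this record.

class MiniParser:
    """Hypothetical stand-in: parse_msg holds the tokens, parse_type one type code per token."""

    def __init__(self, parse_msg, parse_type):
        self.parse_msg = parse_msg
        self.parse_type = parse_type

    def checker(self, match="xw", ranges="0,1"):
        # Simplified variant of the in_a_row=True path: keep only the selected
        # indexes and test whether the requested code sequence appears in a row.
        if ranges is None:
            picked = self.parse_type
        else:
            picked = "".join(self.parse_type[int(i)] for i in str(ranges).split(","))
        return match in picked


msg = MiniParser(["!play", "some", "song", "3"], "xwwi")
print(msg.checker("xw", "0,1"))  # True: command token followed by a word
print(msg.checker("ww", "2,3"))  # False: index 3 is an integer token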
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _match(self, *token_types):\n for token in token_types:\n if self._check(token):\n self._advance()\n return True\n\n return False", "def _multiindex_row_in(cls, row, parse_list, start=None, stop=None):\n\n row_sub = row[start:stop]\n for tokens in parse_list:\n\n # A single row will never match an empty token list:\n if not tokens:\n continue\n\n # Check whether all of the entries in `row_sub` match some list of\n # tokens. If this loop terminates prematurely because of a mismatch\n # between `row_sub` and some list of tokens in `parse_list`, it will\n # not return True; this forces checking of the subsequent token\n # lists:\n for i, token in enumerate(tokens):\n\n # '*' matches everything:\n if token == '*':\n continue\n\n # Integers and strings must match exactly:\n elif isinstance(token, (int, long, basestring)):\n if row_sub[i] != token:\n break\n\n # Tokens must be in a set of values:\n elif type(token) == list:\n if row_sub[i] not in token:\n break\n\n # Token must be within range of an interval:\n elif type(token) == slice:\n i_start = token.start\n i_stop = token.stop\n\n # Handle intervals with ambiguous start or stop values:\n if (i_start is not None and row_sub[i] < i_start) or \\\n (i_stop is not None and row_sub[i] >= i_stop):\n break\n else:\n continue\n else:\n return True\n\n # If the function still hasn't returned, no match was found:\n return False", "def _index_row_in(cls, row, parse_list):\n\n # Since `row` is a scalar, it need only match the sole entry of one of\n # the lists in `parse_list`:\n for tokens in parse_list:\n if not tokens:\n continue\n if len(tokens) > 1:\n raise ValueError('index row only is scalar')\n if tokens[0] == '*':\n return True\n elif isinstance(tokens[0], (int, long, basestring)):\n if row == tokens[0]:\n return True\n elif type(tokens[0]) == list:\n if row in tokens[0]:\n return True\n elif type(tokens[0]) == slice:\n i_start = tokens[0].start\n i_stop = tokens[0].stop\n if (i_start is None or row >= i_start) and \\\n (i_stop is None or row < i_stop):\n return True\n else:\n continue\n return False", "def match(self, *ial):\n for b, c in ial:\n assert len(b) == len(c), \"parameter length mismatch\"\n if self._.d != len(b):\n continue\n if len(self._match(b, c)) > 0:\n return True\n return False", "def IsValidInputType(self, list_of_matches):\n for entry in list_of_matches:\n if not entry:\n return False\n\n return True", "def match(self) -> bool:", "def matches(inline,groupby,groupvals):\n for i,m in enumerate(groupby):\n if inline[m] == groupvals[i]:\n continue\n else:\n return False\n return True", "def match(self, name, tags):\n or_exprs, tags = self.get_compiled(name, tags)\n \n # or_exprs = [{'a'}, {'c'}, {'d', 'a'}, {'d', 'e'}]\n return any(and_expr <= tags for and_expr in or_exprs)", "def matches(self, python):\n return False", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def match(self, data):\n # self.logger.debug('Running yara, nlp against data')\n # malicious = self._rules.match(data=data)\n # md5 = hashlib.md5(data).hexdigest()\n # if malicious:\n # for match in malicious:\n # self.logger.info('Match found; Rule: \\'%s\\';'\n # 'Namespace: \\'%s\\'; MD5: %s' %\n # (match.rule, match.namespace, md5))\n\n # return True\n \n cnt_name = 0\n cnt_dob = 0\n cnt_acc = 0\n cnt_email = 0\n cnt_line = 0\n\n for line in data: \n cnt_name += self.humanName(line)\n cnt_dob += self.dob(line)\n cnt_acc += self.account_phone(line)\n cnt_email += self.email(line)\n cnt_line += 1\n\n sum = cnt_name + cnt_dob + 
cnt_acc + cnt_email\n if sum > 100 or sum > cnt_line:\n return True\n else:\n return False\n return False", "def matches(self, test_string, parse_all=True):\n try:\n self.parse_string(text(test_string), parse_all=parse_all)\n return True\n except ParseException:\n return False", "def filt(item):\n result = (((item.done and opt.list_complete) or\n (not item.done and not opt.hide_incomplete)) and\n ((item.time is None) or\n ((opt.start_date is None or opt.start_date < item.time) and\n item.time < opt.end_date)))\n for arg in args:\n result = result and (re.search(arg, item.text) != None)\n return result", "def is_valid(teorema, args):\n if args.ignore_case:\n for value in teorema.values():\n if args.pattern.lower() in value.lower():\n return True\n else:\n for value in teorema.values():\n if args.pattern in value:\n return True\n\n return False", "def _block_matches_all(block_data):\n # do the checks which don't require loading any additional data\n if (\n self._block_matches(block_data, qualifiers) and\n self._block_matches(block_data.fields, settings)\n ):\n if content:\n definition_block = self.get_definition(course_locator, block_data.definition)\n return self._block_matches(definition_block['fields'], content)\n else:\n return True", "def _match_entry_type(self, code_entry, type_tuple):\n matched = False\n if self.loading_from_file:\n type_list = []\n for elem in type_tuple:\n type_list.append(str(elem))\n matched = self._match_entry_type_string(code_entry, type_list)\n else:\n matched = self._match_entry_type_tuple(code_entry, type_tuple)\n return matched", "def test_multi_match_return_expr(self):\n eq_(self.line,line_matches_greps(self.line,[\"foo\",\"bar\"]))", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def route_match(self):\n if self.whole_word_var.get():\n self.whole_word_matches()\n else:\n self.partial_word_matches()", "def _match_array(tipo, array):\n\n return bool(re.match(array, tipo))", "def valid_retag_params(self) -> bool:\n if not (self.action[0] == Actions.RETAG.value):\n return False\n pairs = self.action[1].split(\",\")\n for pair in pairs:\n if not self.correct_retag_pair(pair):\n return False\n return True", "def _match_entry_type_tuple(code_entry, type_tuple):\n entry_type = code_entry['type']\n return entry_type in type_tuple", "def matches(self):\n pass", "def _validate_speech_acts_section(\n protocol_specification: ProtocolSpecification,\n) -> Tuple[bool, str, Optional[Set[str]], Optional[Set[str]]]:\n custom_types_set = set()\n performatives_set = set()\n\n content_names_types: Dict[str, Tuple[str, str]] = {}\n\n # check that speech-acts definition is not empty\n if len(protocol_specification.speech_acts.read_all()) == 0:\n return (\n False,\n \"Speech-acts cannot be empty!\",\n None,\n None,\n )\n\n for (\n performative,\n speech_act_content_config,\n ) in protocol_specification.speech_acts.read_all():\n\n # Validate performative name\n (\n result_performative_validation,\n msg_performative_validation,\n ) = _validate_performatives(performative)\n if not result_performative_validation:\n return (\n result_performative_validation,\n msg_performative_validation,\n None,\n None,\n )\n\n performatives_set.add(performative)\n\n for content_name, content_type in speech_act_content_config.args.items():\n\n # Validate content name\n (\n result_content_name_validation,\n msg_content_name_validation,\n ) = _validate_content_name(content_name, performative)\n if not 
result_content_name_validation:\n return (\n result_content_name_validation,\n msg_content_name_validation,\n None,\n None,\n )\n\n # check type of content_type\n if not isinstance(content_type, str):\n return (\n False,\n \"Invalid type for '{}'. Expected str. Found {}.\".format(\n content_name, type(content_type)\n ),\n None,\n None,\n )\n\n # Validate content type\n (\n result_content_type_validation,\n msg_content_type_validation,\n ) = _validate_content_type(content_type, content_name, performative)\n if not result_content_type_validation:\n return (\n result_content_type_validation,\n msg_content_type_validation,\n None,\n None,\n )\n\n # check content name isn't repeated with a different type\n if content_name in content_names_types:\n last_performative = content_names_types[content_name][0]\n last_content_type = content_names_types[content_name][1]\n if last_content_type != content_type:\n return (\n False,\n \"Content '{}' with type '{}' under performative '{}' is already defined under performative '{}' with a different type ('{}').\".format(\n content_name,\n content_type,\n performative,\n last_performative,\n last_content_type,\n ),\n None,\n None,\n )\n\n content_names_types[content_name] = (performative, content_type)\n\n for sub_type in (\n list(_get_sub_types_of_compositional_types(content_type))\n if _is_compositional_type(content_type)\n else []\n ) + [content_type]:\n if _is_valid_ct(sub_type):\n custom_types_set.add(sub_type.strip())\n\n return True, \"Speech-acts are valid.\", performatives_set, custom_types_set", "def validate(self, s):\n if len(s) == 0:\n return False\n if s in self.whitelist:\n return True\n if s in self.blacklist:\n return False\n\n # SQL Types are rarely used\n if 't' in s and 'f(t' not in s and 'At' not in s:\n return False\n\n if '1nf' in s:\n return False\n if 's1o' in s:\n return False\n if 'oo' in s:\n return False\n if 'v,s' in s:\n return False\n if 's,v' in s:\n return False\n if 'v,v' in s:\n return False\n if 'v,1' in s:\n return False\n if 'v,n' in s:\n return False\n if 'n,v' in s:\n return False\n if '1,v' in s:\n return False\n if 'Eo(' in s:\n return False\n if '(o(' in s:\n return False\n if '(o1' in s:\n return False\n if '(on' in s:\n return False\n if '(os' in s:\n return False\n if '(of' in s:\n return False\n if '(ov' in s:\n return False\n if 'B(n)' in s:\n return False\n if 'oso' in s:\n return False\n if 'o1o' in s:\n return False\n if 'ono' in s:\n return False\n\n # only 1 special case for this\n # 1;foo:goto foo\n # 1;n:k\n # the 'foo' can only be a 'n' type\n if ':' in s and not 'n:' in s:\n return False\n\n if '11' in s:\n return False\n\n if '))' in s:\n return False\n if '((' in s:\n return False\n if 'v1' in s:\n return False\n\n if 'nv' in s and ';T' not in s:\n return False\n if 'nn' in s and ';T' not in s:\n return False\n\n # select @version foo is legit\n # but unlikely anywhere else\n if 'vn' in s and 'Evn' not in s:\n return False\n\n if 'oE' in s:\n return False\n\n if 'A1' in s:\n return False\n if 'An' in s:\n return False\n if 'A(1' in s:\n return False\n\n if 'vov' in s:\n return False\n if 'vo1' in s:\n return False\n if 'von' in s:\n return False\n\n if 'ns' in s:\n if 'U' in s:\n return True\n if 'T' in s:\n return True\n return False\n\n if 'sn' in s:\n # that is... 
Tsn is ok\n if s.find('T') != -1 and s.find('T') < s.find('sn'):\n return True\n return False\n\n # select foo (as) bar is only nn type i know\n if 'nn' in s and 'Enn' not in s and ';T' not in s:\n return False\n\n if ',o' in s:\n return False\n\n if 'kk' in s and 'Tkk' not in s:\n return False\n\n if 'ss' in s:\n return False\n\n if 'ff' in s:\n return False\n\n if '1no' in s:\n return False\n\n if 'kno' in s:\n return False\n\n if 'nEk' in s:\n return False\n\n if 'n(n' in s:\n return False\n if '1so' in s:\n return False\n if '1s1' in s:\n return False\n if 'noo' in s:\n return False\n if 'ooo' in s:\n return False\n\n if 'vvv' in s:\n return False\n\n if '1vn' in s:\n return False\n if '1n1' in s:\n return False\n if '&1n' in s:\n return False\n if '&1v' in s:\n return False\n if '&1s' in s:\n return False\n if 'nnk' in s:\n return False\n if 'n1f' in s:\n return False\n # folded away\n if s.startswith('('):\n return False\n\n if '&o' in s:\n return False\n\n if '1,1' in s:\n return False\n if '1,s' in s:\n return False\n if '1,n' in s:\n return False\n if 's,1' in s:\n return False\n if 's,s' in s:\n return False\n if 's,n' in s:\n return False\n if 'n,1' in s:\n return False\n if 'n,s' in s:\n return False\n if 'n,n' in s:\n return False\n if '1o1' in s:\n return False\n if '1on' in s:\n return False\n if 'no1' in s:\n return False\n if 'non' in s:\n return False\n if '1(v' in s:\n return False\n if '1(n' in s:\n return False\n if '1(s' in s:\n return False\n if '1(1' in s:\n return False\n if 's(s' in s:\n return False\n if 's(n' in s:\n return False\n if 's(1' in s:\n return False\n if 's(v' in s:\n return False\n if 'v(s' in s:\n return False\n if 'v(n' in s:\n return False\n if 'v(1' in s:\n return False\n if 'v(v' in s:\n return False\n\n if s.startswith('n('):\n return False\n\n if s.startswith('vs'):\n return False\n\n if s.startswith('o'):\n return False\n\n if ')(' in s:\n return False\n\n # need to investigate T(vv) to see\n # if it's correct\n if 'vv' in s and s != 'T(vv)':\n return False\n\n # unlikely to be sqli but case FP\n if s in ('so1n)', 'sonoE'):\n return False\n\n return True", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def _run_parse_checks(cls, line, filepath, logger=None):\n\n check_funcs = [\n cls.check_column_count,\n cls.check_date_column,\n cls.check_amount_column,\n ]\n checks = [partial(check, line) for check in check_funcs]\n is_parsable = all((check() for check in checks)) # NB short circuit\n logger = logger or logging.getLogger(cls.__name__)\n logger.debug(\"can %s parse this file? 
%s, %s\" %\n (cls.__name__, \"true\" if is_parsable else \"false\", filepath))\n return is_parsable", "def is_valid_para(self, para_type, type_table):\n # The values of the table contain all known destination types\n if para_type in type_table.values():\n return True\n return True", "def _check_alet_dict(text: str, text_type: str, alet_dict: dict, last_nouns: list) -> (list, str):\n agent_match = [] # Match of text and type\n agent_text_match = [] # Match of text only, not type\n loc_text_match = []\n event_text_match = []\n if not text_type or 'PERSON' in text_type or text_type.endswith('ORG') or \\\n text_type.endswith('GPE') or text_type.endswith('NORP') or text_type.endswith('NOUN'):\n agent_arrays = alet_dict['agents'] if 'agents' in alet_dict else []\n for agent_array in agent_arrays:\n alt_names = agent_array[0]\n agent_type = agent_array[1]\n if text not in personal_pronouns and text in alt_names:\n if text_type and (text_type in agent_type or agent_type in text_type):\n agent_match.append((agent_type, agent_array[2])) # index 2 holds the IRI\n break\n else:\n agent_text_match.append((agent_type, agent_array[2]))\n if not text_type or 'LOC' in text_type or 'GPE' in text_type or 'FAC' in text_type or 'NOUN' in text_type:\n loc_arrays = alet_dict['locs'] if 'locs' in alet_dict else []\n for loc_array in loc_arrays:\n alt_names = loc_array[0]\n loc_map = loc_array[1]\n if text in alt_names:\n loc_text_match.append((loc_map, loc_array[2])) # index 2 holds the IRI\n if not text_type or 'EVENT' in text_type or 'NOUN' in text_type:\n event_arrays = alet_dict['events'] if 'events' in alet_dict else []\n for event_array in event_arrays:\n alt_names = event_array[0]\n if text in alt_names:\n # event_array[1] holds the class mappings and [2] holds the IRI\n event_text_match.append((event_array[1], event_array[2]))\n return (_update_last_nouns(text, agent_match[-1][0], agent_match[-1][1], [get_agent_or_loc_class(text_type)],\n last_nouns) if agent_match\n else (_update_last_nouns(text, agent_text_match[-1][0], agent_text_match[-1][1],\n [get_agent_or_loc_class(text_type)], last_nouns) if agent_text_match\n else (_update_last_nouns(text, text_type, loc_text_match[-1][1], loc_text_match[-1][0], last_nouns)\n if loc_text_match\n else (_update_last_nouns(text, text_type, event_text_match[-1][1], event_text_match[-1][0],\n last_nouns) if event_text_match else [], empty_string))))" ]
[ "0.65751725", "0.56603086", "0.557421", "0.5525039", "0.54756975", "0.5400486", "0.53058875", "0.52834386", "0.5266304", "0.5260166", "0.5238574", "0.5214264", "0.5185073", "0.51797396", "0.51623565", "0.5152065", "0.5134428", "0.5108154", "0.5108154", "0.51043475", "0.5099916", "0.5099904", "0.5077996", "0.50713813", "0.50662524", "0.5049845", "0.5047419", "0.50461596", "0.5033521", "0.5026133" ]
0.62283635
1
Retrieve which events to capture from the config
def set_capture_events_from_config(self): event_config = [ { "config_key": "events_watchlist", "events": [ "watchlist.hit.process", "watchlist.hit.binary", "watchlist.storage.hit.process", "watchlist.storage.hit.binary" ], "options": self.forwarder_options.get("wlhitnotifenabled", "0") }, { "config_key": "events_feed", "events": [ "feed.ingress.hit.process", "feed.ingress.hit.binary", "feed.ingress.hit.host", "feed.storage.hit.process", "feed.storage.hit.binary", "feed.query.hit.process", "feed.query.hit.binary" ], "options": self.forwarder_options.get("feedhitnotif", "0") }, { "config_key": "events_alert", "events": [ "alert.watchlist.hit.ingress.process", "alert.watchlist.hit.ingress.binary", "alert.watchlist.hit.ingress.host", "alert.watchlist.hit.query.process", "alert.watchlist.hit.query.binary" ], "options": self.forwarder_options.get("alertnotifenabled", "0") }, { "config_key": "events_raw_sensor", "events": [ "ingress.event.process", "ingress.event.procstart", "ingress.event.netconn", "ingress.event.procend", "ingress.event.childproc", "ingress.event.moduleload", "ingress.event.module", "ingress.event.filemod", "ingress.event.regmod", "ingress.event.tamper", "ingress.event.crossprocopen", "ingress.event.remotethread", "ingress.event.processblock", "ingress.event.emetmitigation", ], "options": self.forwarder_options.get("rawsensnotifenabled", "0") }, { "config_key": "events_binary_observed", "events": ["binaryinfo.host.observed", "binaryinfo.observed", "binaryinfo.group.observed"], "options": self.forwarder_options.get("binobsnotifenabled", "0") }, { "config_key": "events_binary_upload", "events": ["binarystore.file.added"], "options": self.forwarder_options.get("binuplnotifenabled", "0") } ] self.capture_events = [] for event_type in event_config: events = self.forwarder_options.get(event_type["config_key"], "0").lower() if events == "all": self.capture_events.extend(event_type["events"]) elif events != "0": events_from_config = events.split(",") events_to_capture = list(set(events_from_config) & set(event_type["events"])) self.capture_events.extend(events_to_capture) self.logger.info("Configured to capture events: %s" % self.capture_events)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n return self.events", "def events(self):\n return self.current_events", "def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()", "def events(self):\n return self._events", "def events(self) -> Dict[EventCall, Set[Node]]:\n return self._events", "def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()", "def events(self) -> object:\n return self._events", "def get_event_list(self):\n pass", "def events(self):\r\n return ev.Events(self)", "def events(self):\r\n return ev.Events(self)", "def GetEventSources(self):\n return self._GetAttributeContainers('event_source')", "def get_event(self):\n return self.keys.events.get()", "def get_sample_events(self): \n return self.sample_events[:]", "def events(self):\n return self.properties.get('events', EventCollection(self.context, ResourcePath(\"events\", self.resource_path)))", "def events(self):\r\n return e.Events(self)", "def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:\n return self._env.events()", "def get_config_parameter(config):\n\n selected_event = config['selected_event']\n datasource_raw_data = config['datasource_raw_data']['database']\n measurement_raw = config['datasource_raw_data']['measurement']\n measurement_enriched = config['datasource_enriched_data']['measurement']\n datasource_enriched_data = config['datasource_enriched_data']['database']\n datasource_marked_data = config['datasource_marked_data']['database']\n datasource_predicted_data = config['datasource_predicted_data']['database']\n start_time = config['timeframe'][0]\n end_time = config['timeframe'][1]\n register_dict = config['register_dict']\n required_registers = config[f\"{selected_event}_register\"]\n events = config[selected_event]\n measurement_predicted = config['datasource_predicted_data']['measurement']\n return selected_event, datasource_raw_data, measurement_raw, start_time, end_time, register_dict, \\\n required_registers, datasource_enriched_data, datasource_marked_data, \\\n measurement_enriched, events, datasource_predicted_data, measurement_predicted", "def event_handlers(self):\n if self.is_flow:\n return self._event_handlers\n\n try:\n return self._event_handlers\n except AttributeError:\n return self.flow._event_handlers", "def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events", "def event_list(self):\n return self._event_list", "def events(self):", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())", "def eventList(self):\n return self._eventList" ]
[ "0.7154731", "0.7154731", "0.6736886", "0.6736886", "0.6634429", "0.6563874", "0.6368687", "0.6339225", "0.6153467", "0.6147387", "0.6090058", "0.60785455", "0.6055919", "0.6055919", "0.6025618", "0.60202944", "0.60145056", "0.60046005", "0.59880394", "0.5945153", "0.5929929", "0.5897296", "0.58912104", "0.5885696", "0.5836988", "0.58198863", "0.5816348", "0.5788318", "0.5783439", "0.5779" ]
0.7547521
0
Compares an image to its reference
def compare(self, reference, image): if not os.path.isfile(reference): raise PictureComparatorError("Reference file %s does not exist" % reference) if not os.path.isfile(image): raise PictureComparatorError("Image file %s does not exist" % image) reference_img = cv2.imread(reference, 0) image_img = cv2.imread(image, 0) reference_width, reference_height = reference_img.shape[::-1] image_width, image_height = image_img.shape[::-1] if reference_width < image_width or reference_height < image_height: raise PictureComparatorError("Reference picture must be greater than image to find") method = cv2.TM_CCOEFF_NORMED # Apply template Matching res = cv2.matchTemplate(reference_img, image_img, method) min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) if max_val > 0.95: return Rectangle(max_loc[0], max_loc[1], image_width, image_height) else: return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compareTo(self,imagefullpath):\n exc = ExtractColor2(self.k)\n bgrcolor = exc.getColorBGR(imagefullpath)\n\n score = 0\n for i in range(self.k):\n score += np.linalg.norm(bgrcolor[i] - self._ref_BGRcolor[i])/(np.sqrt(255*255*3))\n score /= self.k\n return 1 - score", "def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None", "def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False", "def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None", "def __lt__(self, img):\r\n ordering = self.config['algorithm_ordering']\r\n ordering = ordering[1:] if ordering.startswith('-') else ordering\r\n\r\n if ordering == \"filename\":\r\n return sorted([self.filename, img.filename])[0] == img.filename\r\n if ordering == 'width':\r\n return self.absolute_width <= img.absolute_width\r\n elif ordering == 'height':\r\n return self.absolute_height <= img.absolute_height\r\n elif ordering == 'area':\r\n return self.absolute_width * self.absolute_height <= img.absolute_width * img.absolute_height\r\n else:\r\n return max(self.absolute_width, self.absolute_height) <= max(img.absolute_width, img.absolute_height)", "def assert_image_equal(path1, path2):\n test_im = np.asarray(Image.open(path1))\n ref_im = np.asarray(Image.open(path2))\n npt.assert_array_equal(test_im, ref_im)", "def compare(image_a, image_b, is_camera_image):\n\n # Generate a unique filename\n filename = uuid.uuid4().hex[:3]\n\n if is_camera_image:\n image_a = imutils.rotate_bound(image_a, 90)\n image_b = imutils.rotate_bound(image_b, 90)\n\n # Store original to show in future\n original = image_a\n\n # Convert to greyscale\n image_a = cv2.cvtColor(image_a, cv2.COLOR_BGR2GRAY)\n image_b = cv2.cvtColor(image_b, cv2.COLOR_BGR2GRAY)\n\n # Reduce size and blur to account for shaky handheld camera based images\n if is_camera_image:\n scale_multiplier = 0.03125\n image_a = cv2.resize(image_a, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_b = cv2.resize(image_b, (0, 0), fx=scale_multiplier, fy=scale_multiplier)\n image_a = cv2.GaussianBlur(image_a, (1001, 1001), cv2.BORDER_DEFAULT)\n image_b = cv2.GaussianBlur(image_b, (1001, 1001), cv2.BORDER_DEFAULT)\n\n # Obtain SSIM and determine differences\n try:\n _, differences = structural_similarity(image_a, image_b, full=True, gaussian_weights=True)\n except ValueError:\n print('Images are not the same size')\n return None\n\n # Convert to cv2 array\n differences = (differences * 255).astype('uint8')\n\n # Threshold and find contours (differences)\n thresh = cv2.threshold(differences, 0, 255, cv2.THRESH_BINARY_INV | 
cv2.THRESH_OTSU)[1]\n contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n\n # Draw contours (differences)\n for cont in contours:\n (x, y, w, h) = cv2.boundingRect(cont)\n if is_camera_image:\n multiplier = int(1 / scale_multiplier)\n y *= multiplier\n x *= multiplier\n h *= multiplier\n w *= multiplier\n cv2.rectangle(original, (x, y), (x + w, y + h), (255, 0, 0), 4)\n\n # TODO: Create GIF highlighting differences (instead of statuic image)\n cv2.imwrite('static/images/differences/' + filename + '.jpg', original)\n\n return filename", "def compare_images(self, img1, img2):\n if self.debug:\n cv2.imshow('img1', img1)\n cv2.imshow('img2', img2)\n cv2.waitKey(5)\n time.sleep(2)\n\n # find the mean squared difference between the images\n # http://www.pyimagesearch.com/2014/09/15/python-compare-two-images/\n err = np.sum((img1.astype('float') - img2.astype('float')) ** 2)\n err /= float(img1.shape[0] * img2.shape[1])\n\n # lower is more similar (better)\n return err", "def look_for_reference_image(image):\n match_list = []\n thresh = 8\n final_value = -1\n references = import_reference_images()\n # Initialize the ORB detector algorithm\n orb = cv2.ORB_create()\n\n # Now detect the keypoints and compute\n # the descriptors for the query image\n imgKeypoints, imgDescriptors = orb.detectAndCompute(image, None)\n try:\n for ref in references:\n # Now detect the keypoints and compute\n # the descriptors for the train image\n ref.refKeypoints, ref.refDescriptors = orb.detectAndCompute(ref.img, None)\n\n # Initialize the Matcher for matching\n # the keypoints and then match the\n # keypoints\n matcher = cv2.BFMatcher()\n matches = matcher.knnMatch(imgDescriptors, ref.refDescriptors, k=2)\n\n for m, n in matches:\n if m.distance < 0.75 * n.distance:\n ref.refMatches.append([m])\n\n match_list.append(len(ref.refMatches))\n except:\n pass\n if len(match_list) != 0:\n if max(match_list) > thresh:\n final_value = match_list.index(max(match_list))\n\n return references[final_value].name", "def compare_images(first_img_path, second_img_path):\n img1 = Image.open(first_img_path)\n img2 = Image.open(second_img_path)\n\n diff = ImageChops.difference(img1, img2)\n print(diff.getbbox())", "def img_compare(file1, file2):\n # read image\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n\n # resize \n size = 128, 128\n img1_res = img_resize(img1, size)\n img2_res = img_resize(img2, size)\n\n img1_res.save(\"img_1.thumbnail\", \"JPEG\")\n img2_res.save(\"img_2.thumbnail\", \"JPEG\")\n\n # convert to gray scale\n img1_grayscale = img1_res.convert('LA')\n img1_grayscale.save(\"img_1_grayscale.png\")\n\n img2_grayscale = img2_res.convert('LA')\n img2_grayscale.save(\"img_2_grayscale.png\")\n\n # normalise\n img1_norm = normalize(np.array(img1_grayscale.getdata()).astype(float))\n img2_norm = normalize(np.array(img2_grayscale.getdata()).astype(float))\n\n try:\n # compare two images\n diff = img1_norm - img2_norm\n m_norm = sum(abs(diff)) # Manhattan norm\n z_norm = norm(diff.ravel(), 0) # Zero norm\n\n # print(\"Manhattan norm:\", m_norm, \"/ per pixel:\", m_norm/img1_norm.size)\n # print(\"Zero norm:\", z_norm, \"/ per pixel:\", z_norm*1.0/img1_norm.size)\n\n return m_norm/img1_norm.size, float(z_norm) / img1_norm.size\n except:\n return 100, 100", "def compare_images(img1, img2):\n #normalize scene pixel values\n img1_mean = img1.mean() \n img1_std = img1.std()\n for i in np.nditer(img1, op_flags=['readwrite']):\n i[...] 
= (i-img1_mean)/img1_std\n\n #normalize template pixel values\n img2_mean = img2.mean() \n img2_std = img2.std()\n for i in np.nditer(img2, op_flags=['readwrite']):\n i[...] = (i-img2_mean)/img2_std\n\n #sums error\n error_array = img1 - img2\n error_array = error_array.astype(np.int8)\n ss_error = 0\n for i in np.nditer(error_array):\n ss_error += abs(i/255.0)**0.5\n #print ss_error\n return ss_error", "def __diff_image(self):\n img = cv2.imread(self.imagefile()).copy()\n Reference.__draw_bugs(img, self.__true_positives, False, 1)\n Reference.__draw_bugs(img, self.__false_negatives, (0, 255, 0))\n Reference.__draw_bugs(img, self.__false_positives, (0, 0, 255))\n return img", "def diff_image_feature(image0, image1):\n return 0", "def compare_images(im1, im2):\n errors = (im1 - im2) / 255\n return np.mean(np.square(errors))", "def img_compare(A, B):\r\n A = cv2.GaussianBlur(A, (5, 5), 5)\r\n B = cv2.GaussianBlur(B, (5, 5), 5)\r\n diff = cv2.absdiff(A, B) # absolute difference\r\n _, diff = cv2.threshold(diff, 200, 255, cv2.THRESH_BINARY)\r\n return np.sum(diff)", "def getImageDiff(referenceFrame, frame):\n return cv2.absdiff(referenceFrame, frame)", "def compare_img(img1, img2, err_function=\"ALL\"):\n\n # make sure images are the same shape #\n height1, width1, height2, width2 = img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1]\n if img1.shape != img2.shape:\n if width1 * height1 > width2 * height2:\n img1 = resize_image(img1, width2, height2)\n else:\n img2 = resize_image(img2, width1, height1)\n # TODO: create better resize to avoid interpolation when possible\n # compare images#\n func_arr = [mse, ssim, L1_norm]\n err_arr = []\n for func in func_arr:\n if err_function == \"ALL\" or func.__name__.upper() == err_function:\n err_arr.append(func(img1, img2))\n return np.array(err_arr)", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def compare_images(image1, image2, method='diff', *, n_tiles=(8, 8)):\n if image1.shape != image2.shape:\n raise ValueError('Images must have the same shape.')\n\n img1 = img_as_float(image1)\n img2 = img_as_float(image2)\n\n if method == 'diff':\n comparison = np.abs(img2 - img1)\n elif method == 'blend':\n comparison = 0.5 * (img2 + img1)\n elif method == 'checkerboard':\n shapex, shapey = img1.shape\n mask = np.full((shapex, shapey), False)\n stepx = int(shapex / n_tiles[0])\n stepy = int(shapey / n_tiles[1])\n for i, j in product(range(n_tiles[0]), range(n_tiles[1])):\n if (i + j) % 2 == 0:\n mask[i * stepx:(i + 1)*stepx, j * stepy:(j + 1) * stepy] = True\n comparison = np.zeros_like(img1)\n comparison[mask] = img1[mask]\n comparison[~mask] = img2[~mask]\n else:\n raise ValueError('Wrong value for `method`. 
'\n 'Must be either \"diff\", \"blend\" or \"checkerboard\".')\n return comparison", "def cs4243_histmatch(ori_image, refer_image):\n \n ##your code here ###\n\n # get cdf of ori and ref image\n grey_level = 256\n ori_hist, ori_cum_hist, ori_res_image, ori_uni_hist = cs4243_histequ(ori_image, grey_level)\n ref_hist, ref_cum_hist, ref_res_image, ref_uni_hist = cs4243_histequ(refer_image, grey_level)\n \n # map each ori cdf to ref cdf and get the mapped index as matched grey level\n map_value = []\n for i in range(grey_level):\n ori_cdf = ori_cum_hist[i]\n matched_intensity = np.uint8(np.abs(ref_cum_hist - ori_cdf).argmin())\n map_value.append(matched_intensity)\n ##\n\n # Set the intensity of the pixel in the raw image to its corresponding new intensity \n height, width = ori_image.shape\n res_image = np.zeros(ori_image.shape, dtype='uint8') # Note the type of elements\n for i in range(height):\n for j in range(width):\n res_image[i,j] = map_value[ori_image[i,j]]\n \n res_hist = np.bincount(res_image.flatten(), minlength=256)\n \n return ori_hist, ref_hist, res_image, res_hist", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def equals(self, image: 'BaseImage') -> bool:\n assert isinstance(image, BaseImage)\n im1 = pygame.image.tostring(self._surface, 'RGBA')\n im2 = pygame.image.tostring(image._surface, 'RGBA')\n return im1 == im2", "def compare_images(self):\r\n m = round(self.mse(self.image_a, self.image_b), 4)\r\n s = round(ssim(self.image_a, self.image_b) * 100, 5)\r\n return (\r\n m, s)", "def is_equal(image_a, image_b, tolerance=0.0):\n return image_diff_percent(image_a, image_b) <= tolerance", "def image_reference(self, image_id):\n pass", "def apply_and_compare(self, image1_data, image2_data):\n\n return self.transformations_map[self.name](image1_data, image2_data)", "def is_different(image1, image2):\n gray1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)\n gray2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)\n\n (score, diff) = compare_ssim(gray1, gray2, full=True)\n diff = (diff * 255).astype(\"uint8\")\n\n thresh = cv2.threshold(diff, 0, 255,\n cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\n cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n return bool(cnts)", "def compare_faces(\n id_image: bytes,\n cam_image: np.ndarray,\n face_location: List[Tuple[int, ...]],\n save_dest: Union[Path, None] = None,\n) -> bool:\n im1 = bytes_to_np(id_image)\n im1 = im1[:, :, ::-1]\n id_face_loc = get_bounding_boxes(im1)\n im1 = im1[:, :, ::-1]\n face_encodings = face_recognition.face_encodings(im1, id_face_loc, 10, \"large\")[0]\n\n im2 = cam_image[:, :, ::-1]\n face_encodings2 = face_recognition.face_encodings(im2, face_location, 10, \"large\")[0]\n\n if save_dest:\n Image.fromarray(im1).save(os.path.join(save_dest, \"face_one.jpeg\"))\n Image.fromarray(im2).save(os.path.join(save_dest, \"face_two.jpeg\"))\n\n dist = face_recognition.face_distance([face_encodings], face_encodings2)[0]\n print(\"[i] Decision threshold is 0.5.\")\n if dist <= 0.5:\n print(\n f\"[+] Distance between the images is {dist}\"\n \"\\n[+] These images are of the same people!\"\n )\n return True\n else:\n print(\n f\"[-] Distance between the images is {dist}\\n\"\n \"[-] These images are of two different people!\"\n )\n return False", "def compare_image_buffers(imgbuf1, imgbuf2):\n with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:\n img1 = Image.open(imgio1)\n img2 = 
Image.open(imgio2)\n diff = ImageChops.difference(img1, img2)\n return not diff.getbbox()" ]
[ "0.7269888", "0.69743997", "0.6905206", "0.6846669", "0.67375135", "0.6730658", "0.67301905", "0.67132235", "0.6670823", "0.66276044", "0.65917426", "0.6564198", "0.65512747", "0.6517447", "0.65027654", "0.6423081", "0.64036614", "0.63828444", "0.6322787", "0.6311486", "0.6288014", "0.62783784", "0.6207361", "0.61736023", "0.6167509", "0.6166918", "0.61359656", "0.61344427", "0.61184293", "0.61070365" ]
0.7576849
0
From a matrix of difference pixels (for each pixel, we have 0 if the pixel is the same, or nonzero if it is different), creates a list of the pixels which are different and a PNG image of the same size as the 'step' image, where each different pixel is coloured RED
def _build_list_of_changed_pixels(self, diff, image_width, image_height, min_width, min_height, exclude_zones): # complete diff "image" to the size of step image diff = numpy.pad(diff, ((0, max(0, image_height - min_height)), (0, max(0, image_width - min_width))), constant_values=1) # ignore excluded pixels diff *= self._build_list_of_excluded_pixels2(exclude_zones, image_width, image_height) # draw mask of differences mask = numpy.ones((image_height, image_width, 1), dtype=uint8) diff_image = numpy.zeros((image_height, image_width, 4), dtype=uint8) cnd = diff[:,:] > 0 # says which pixels are non-zeros diff_image[cnd] = mask[cnd] diff_image *= numpy.array([0, 0, 255, 255], dtype=uint8) # print red pixels diff_pixels = numpy.transpose(diff.nonzero()); return diff_pixels, diff_image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def separate_colors(self):\n colors = self.get_sorted_pixels()\n colors_dict = dict((val[1], Image.new('RGB', self.size, (255,255,255))) \n for val in colors)\n pixel_dict = dict((img, []) for img in colors_dict.keys())\n\n pix = self.image.load()\n for i in range(self.width):\n for j in range(self.height):\n if pix[i,j] in colors_dict:\n colors_dict[pix[i,j]].putpixel((i,j),(0,0,0))\n pixel_dict[pix[i,j]].append((i, j))\n\n return [(color, colors_dict[color], pixels) for color, pixels in pixel_dict.items()]", "def falso_color(img):\n rows,cols = img.shape\n img_red = np.copy(img)\n img_green = np.copy(img)\n img_blue = np.copy(img)\n img_false = np.zeros((rows, cols, 3), dtype=np.uint8)\n\n for i in range(0,rows):\n for j in range(0,cols):\n\n if (0 <= img[i, j] <= 43):\n img_red[i, j] = 255\n img_green[i, j] = img[i, j] * (255 / 43)\n img_blue[i, j] = 0\n\n elif(43 < img[i, j] <= 86):\n img_red[i, j] = (255 - (img[i, j] - 43) * (255 / 43))\n img_green[i, j] = 255\n img_blue[i,j] = 0\n\n elif(86 < img[i, j] <= 128):\n img_red[i, j] = 0\n img_green[i, j] = 255\n img_blue[i, j] = ((img[i, j] - 86) * (255 / 42))\n\n elif(128<img[i, j]<=171):\n img_red[i, j] = 0\n img_green[i, j] = ((171 - img[i, j]) * (255 / 43))\n img_blue[i, j] = 255\n\n elif(171 < img[i, j] <= 214):\n img_red[i, j] = (img[i, j] - 171) * (255 / 43)\n img_green[i, j] = 0\n img_blue[i, j] = 255\n\n elif(214 < img[i, j]):\n img_red[i, j] = 255\n img_green[i, j] = 0\n img_blue[i, j] = ((255 - img[i, j]) * (255 / 41))\n\n img_false[:, :, 0] = img_red\n img_false[:, :, 1] = img_green\n img_false[:, :, 2] = img_blue\n\n return img_false", "def remove_colors(images):\n images = images[:, :, :, :, 0]\n return images", "def create_color_gradient():\n colors = []\n step = 10\n for red, green in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': green, 'blue': 0})\n for green, blue in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': 0, 'green': green, 'blue': blue})\n for blue, red in zip(range(255,-step, -step), range(0, 255, step)):\n colors.append({'red': red, 'green': 0, 'blue': blue})\n return colors", "def diff_image(images):\n prev_image = cv2.absdiff(images[0], images[1])\n cur_image = cv2.absdiff(images[1], images[2])\n return cv2.bitwise_and(prev_image, cur_image)", "def testImageProcessing():\n Im_pix = getRGB( 'in.png' ) # read in the in.png image\n print \"The first two pixels of the first row are\",\n print Im_pix[0][0:2]\n # remember that Im_pix is a list (the image)\n # of lists (each row) of lists (each pixel is [R,G,B])\n New_pix = [ [ [255 - num for num in p] for p in row ] for row in Im_pix ]\n # now, save to the file 'out.png'\n saveRGB( New_pix, 'out.png' )", "def negative(img): \n for pixel in img:\n x, y, col = pixel \n r, g, b = col\n \n new_color = create_color(255 - r, 255 - g, 255 - b)\n set_color(img, x, y, new_color)", "def groupByColor_unlifted(pixmap):\n # Count the number of colors\n nb_colors = int(pixmap.max()) + 1\n # Create a pixmap for each color\n splited = [(pixmap == i) * i for i in range(1, nb_colors)]\n # Filter out empty images\n return [x for x in splited if np.any(x)]", "def diff(self,images):\n diffArray = [0,1,2,3]\n\n # compute the difference bewteen two adjacent images in the same ovtave\n for i in range(1,5):\n diffArray[i-1] = images[i]-images[i-1]\n\n return numpy.array(diffArray)", "def one_color(image,color=[0,0,255]):\r\n output = image.copy()\r\n for line in range(len(image)):\r\n for column in 
range(len(image[0])):\r\n distance = calc_distance(color,image[line][column])\r\n if distance <=150:\r\n output[line][column]=[255,255,255]\r\n else:\r\n output[line][column]=[0,0,0]\r\n return output", "def diff_image_color(image_path0, image_path1):\n image0 = Image.open(image_path0)\n #color_image0 = get_histogram(image0)\n color_image0 = image0.histogram()\n cut_color_image0 = cut_histogram_min(color_image0)\n image1 = Image.open(image_path1)\n color_image1 = image1.histogram()\n #color_image1 = get_histogram(image1)\n cut_color_image1 = cut_histogram_min(color_image1)\n color_difference = bhattacharyya(color_image0, color_image1)\n return color_difference", "def find_image(grouped):\n for _i in grouped:\n _i[0] = _i[0] * 10 #increases value of red components\n if _i[0] > 225:\n _i[0] = 225\n _i[1] = _i[0] #sets green components equal to red\n _i[2] = _i[0] #sets blue components equal to red\n return grouped", "def _dilate(mat, structure):\n offset_w = int(structure.shape[0]/2)\n offset_h = int(structure.shape[1]/2)\n\n dilated = np.zeros_like(mat)\n for i in range(offset_w, mat.shape[0]-offset_w-1):\n for j in range(offset_h, mat.shape[1]-offset_h-1):\n if mat[i,j] == 255:\n dilated[i-offset_w:i+offset_w+1,j-offset_h:j+offset_h+1] = np.maximum(\n mat[i-offset_w:i+offset_w+1,j-offset_h:j+offset_h+1],\n structure\n )\n return dilated", "def edges(self, step: Vector = 1) -> np.ndarray:\n if isinstance(step, (int, float)):\n step = (step, step)\n nu = self.imgsz[0] / step[0] + 1\n nv = self.imgsz[1] / step[1] + 1\n u = np.linspace(0, self.imgsz[0], int(nu))\n v = np.linspace(0, self.imgsz[1], int(nv))\n return np.vstack(\n (\n np.column_stack((u, np.repeat(0, len(u)))),\n np.column_stack((np.repeat(u[-1], len(v) - 2), v[1:-1])),\n np.column_stack((u[::-1], np.repeat(v[-1], len(u)))),\n np.column_stack((np.repeat(0, len(v) - 2), v[::-1][1:-1])),\n )\n )", "def red_filter(img):\r\n #with Image.open(filename) as img:\r\n w = img.width\r\n h = img.height\r\n\r\n newimg = Image.new('RGB', (w,h))\r\n for y in range(h):\r\n for x in range(w):\r\n r, g, b = img.getpixel((x,y))\r\n \r\n newimg.putpixel((x, y), (r, 0, 0))\r\n \r\n return newimg", "def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))", "def _generate_images(self, trace):\n images = []\n colors = []\n colors_by_shape = {}\n for board in trace:\n width = int(round((float(board.shape[1]) / board.shape[0]) * self._height))\n cellsize = width / board.shape[1] # cell size\n img = np.zeros((self._height, width, 3), dtype=np.uint8)\n\n tiles = {} # map from integer rep. 
of the tile to a shape\n for y in range(board.shape[0]):\n for x in range(board.shape[1]):\n cell = board[y,x]\n if cell not in tiles:\n tiles[cell] = (x, y, 1, 1) # x, y, w, h\n else:\n cur_x, cur_y, cur_w, cur_h = tiles[cell]\n if x >= cur_x + cur_w:\n cur_w = (x-cur_x) + 1\n if y >= cur_y + cur_h:\n cur_h = (y-cur_y) + 1\n tiles[cell] = (cur_x, cur_y, cur_w, cur_h)\n\n # Colors\n if len(colors_by_shape) == 0:\n for tid in tiles:\n shape = (tiles[tid][2], tiles[tid][3])\n if shape not in colors_by_shape:\n colors_by_shape[shape] = hex_to_rgb(random_unique_color(colors))\n colors.append(colors_by_shape[shape])\n\n for tid in tiles:\n x, y, w, h = tiles[tid]\n shape = (w,h)\n empty = board[y,x] == 0\n x, y, w, h = x*cellsize, y*cellsize, w*cellsize, h*cellsize\n # Draw a filled rectangle without color\n if not empty:\n cv2.rectangle(img, (x, y), (x+w, y+h), colors_by_shape[shape],-1)\n else:\n cv2.rectangle(img, (x, y), (x+w, y+h), [0,0,0], -1) #, 8)-\n # Draw a boundary\n cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 0), 2, 8)\n \n images.append(img)\n return images", "def fold_diag(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n for r in range(len(pixels)):\n for c in range(r):\n copy[r][c] = [255, 255, 255]\n return copy", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def split_image(img):\n xs = [] # positions\n ys = [] # colors\n for row_i in range(img.shape[0]):\n for col_i in range(img.shape[1]):\n xs.append([row_i, col_i])\n ys.append(img[row_i, col_i])\n \n xs = np.array(xs)\n ys = np.array(ys)\n return xs, ys", "def component_filter_by_color(components, img):\n new_component = []\n for component in components:\n component_left_neighbor = img[component[0].start:component[0].stop,\n max(component[1].start - 10, 0):component[1].start]\n component_right_neighbor = img[component[0].start:component[0].stop,\n component[1].stop:min(component[1].stop + 10, img.shape[1])]\n component_up_neighbor = img[max(component[0].start - 10, 0):component[0].start,\n component[1].start:component[1].stop]\n component_low_neighbor = img[component[0].stop:min(component[0].stop + 10, img.shape[0]),\n component[1].start:component[1].stop]\n left_white_ratio = np.sum(component_right_neighbor > 240) / (\n component_right_neighbor.shape[0] * component_right_neighbor.shape[1])\n right_white_ratio = np.sum(component_left_neighbor > 240) / (\n component_left_neighbor.shape[0] * component_left_neighbor.shape[1])\n up_white_ratio = np.sum(component_up_neighbor > 240) / (\n component_up_neighbor.shape[0] * component_up_neighbor.shape[1])\n low_white_ratio = np.sum(component_low_neighbor > 240) / (\n component_low_neighbor.shape[0] * component_low_neighbor.shape[1])\n if np.sum([left_white_ratio > 0.9, right_white_ratio > 0.9, up_white_ratio > 0.9, low_white_ratio > 0.9]) > 2:\n new_component.append(component)\n return new_component", "def gd(a, step_size=0.1, steps=42):\n out = []\n ### YOUR CODE HERE\n out.append(np.array([256,1]))\n for i in range(steps):\n point = out[i]\n gradient = np.array([0.5*2*a[i],0.5*2*a[i+1]])\n npoint = point - step_size*gradient\n out.append(npoint)\n ### END CODE\n return out", "def __diff_image(self):\n img = cv2.imread(self.imagefile()).copy()\n 
Reference.__draw_bugs(img, self.__true_positives, False, 1)\n Reference.__draw_bugs(img, self.__false_negatives, (0, 255, 0))\n Reference.__draw_bugs(img, self.__false_positives, (0, 0, 255))\n return img", "def CleanBadPixels(spectraUp,spectraDown):\n \n Clean_Up= []\n Clean_Do = []\n Clean_Av = []\n eps=25. # this is the minumum background Please check\n NBSPEC=len(spectraUp)\n for index in np.arange(0,NBSPEC):\n s_up=spectraUp[index]\n s_do=spectraDown[index]\n \n index_up=np.where(s_up<eps)\n index_do=np.where(s_do<eps)\n \n s_up[index_up]=s_do[index_up]\n s_do[index_do]=s_up[index_do]\n s_av=(s_up+s_do)/2.\n \n Clean_Up.append(s_up)\n Clean_Do.append(s_do)\n Clean_Av.append(s_av)\n \n return Clean_Up, Clean_Do,Clean_Av", "def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target", "def visualize_seam_end_on_image(pixels, end_x):\n\n h = len(pixels)\n w = len(pixels[0])\n\n new_pixels = [[p for p in row] for row in pixels]\n\n min_x = max(end_x - 5, 0)\n max_x = min(end_x + 5, w - 1)\n\n min_y = max(h - 11, 0)\n max_y = h - 1\n\n for y in range(min_y, max_y + 1):\n for x in range(min_x, max_x + 1):\n new_pixels[y][x] = Color(255, 0, 0)\n\n return new_pixels", "def getDiffPercent(path, path2 ):\n global ans\n ans = []\n img = Image.open( path ) \n img2 = Image.open( path2 )\n\n width, height = img.size\n width2, height2 = img2.size\n \n diff = 0\n k = 0\n\n for i in range(width): \n for j in range(height):\n rgb = img.load()[i,j]\n rgb2 = img2.load()[i,j]\n \n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 0 and rgb[1] == 0 and rgb[2] == 0 ):\n k = k+1\n if( rgb[0] == rgb2[0] and rgb[1] == rgb2[1] and rgb[2] == rgb2[2] and rgb[0] == 255 and rgb[1] == 255 and rgb[2] == 255 ):\n k = k+1 \n \n diff = diff + pixelDiff(rgb, rgb2)\n\n img.close()\n img2.close()\n \n mx = 3 * 255 * ( width * height - k)\n return 100*diff/mx", "def get_dark_images(new_path, dataframe):\n\n image_list = [i for i in dataframe['image']]\n return [1 if np.mean(np.array(Image.open(new_path + image))) == 0 else 0 for image in image_list]", "def create_masks(rows, columns):\n mask_red = numpy.zeros((rows, columns), 'uint8')\n mask_green = numpy.zeros((rows, columns), 'uint8')\n mask_blue = numpy.zeros((rows, columns), 'uint8')\n final_red = numpy.zeros((rows, columns), 'uint8')\n final_green = numpy.zeros((rows, columns), 'uint8')\n final_blue = numpy.zeros((rows, columns), 'uint8')\n green = numpy.array([[0, 1], [1, 0]])\n blue = numpy.array([[0, 0], [0, 1]])\n red = numpy.array([[1, 0], [0, 0]])\n p = 0\n u = 0\n for i in range(0, rows - 1, 2):\n for j in range(0, columns - 1, 2):\n mask_green[i, j + 1] = green[p, u + 1]\n mask_green[i + 1, j] = green[p + 1, u]\n mask_red[i, j] = red[p, u]\n mask_blue[i + 1, j + 1] = blue[p + 1, u + 1]\n return mask_blue, mask_green, mask_red", "def split_colors(self, color_count, color_from, color_to):\n colors = []\n for c in range(3):#RGB\n step = np.abs(color_from[c] - color_to[c])/color_count\n if step:\n if color_from[c]>color_to[c]:\n color = np.arange(color_from[c],color_to[c],-step)\n else:\n color = np.arange(color_from[c],color_to[c],step)\n else:\n color = 
[color_from[c] for i in np.arange(color_count)]\n\n\n colors.append(color)\n colors = [(a,b,c) for a,b,c in zip(colors[0],colors[1],colors[2])]\n return colors" ]
[ "0.594339", "0.5888286", "0.5828619", "0.5812646", "0.5808996", "0.5784285", "0.57349354", "0.57313955", "0.5709643", "0.56814396", "0.566322", "0.56176597", "0.55183667", "0.5507987", "0.54847234", "0.54687953", "0.5410043", "0.53843975", "0.53818494", "0.53818494", "0.5374305", "0.5341885", "0.5327227", "0.5293554", "0.52656937", "0.52650297", "0.524048", "0.5235731", "0.52053887", "0.52048236" ]
0.7071252
0
Check if two things have the same type.
def same_type(one, two): return isinstance(one, type(two))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_equal_same_type(self, other):\n return True", "def is_same(type1, type2):\n nake_type1 = remove_declarated(type1)\n nake_type2 = remove_declarated(type2)\n return nake_type1 == nake_type2", "def sametype(variable1, variable2):\n\n # Return the result\n return isinstance(variable1, type(variable2))", "def _is_equal_same_type(self, other):\n # id\n self_id = self.id\n other_id = other.id\n if (self_id and other_id) and (self_id != other_id):\n return False\n \n # bot\n if self.bot != other.bot:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def _values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n return type(val1) is type(val2)", "def _assert_input_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, t in type1.fields.iteritems():\n self.assertEqual(t.type_str(), type2.fields[name].type_str())", "def of_type(self, a):\n return type(a) == type(self.one)", "def _is_equal_same_type(self, other):\n # approximate_online_count\n if self.approximate_online_count != other.approximate_online_count:\n return False\n \n # approximate_user_count\n if self.approximate_user_count != other.approximate_user_count:\n return False\n \n # description\n if self.description != other.description:\n return False\n \n # discovery_splash_hash\n if self.discovery_splash_hash != other.discovery_splash_hash:\n return False\n \n # discovery_splash_type\n if self.discovery_splash_type != other.discovery_splash_type:\n return False\n \n # emojis\n if self.emojis != other.emojis:\n return False\n \n # features\n if self.features != other.features:\n return False\n \n # icon_hash\n if self.icon_hash != other.icon_hash:\n return False\n \n # icon_type\n if self.icon_type != other.icon_type:\n return False\n \n # id\n if self.id != other.id:\n return False\n \n # invite_splash_hash\n if self.invite_splash_hash != other.invite_splash_hash:\n return False\n \n # invite_splash_type\n if self.invite_splash_type != other.invite_splash_type:\n return False\n \n # stickers\n if self.stickers != other.stickers:\n return False\n \n # name\n if self.name != other.name:\n return False\n \n return True", "def check_type_compat(input_a, input_b):\n return return_family_type(input_a) is return_family_type(input_b)", "def is_type_equivalent(self, other):\n mine = self._replace_defaults()\n theirs = other._replace_defaults()\n\n def remove_base(dct):\n # removes base attributes in the phyiscal layer.\n basekeys = Column._replace_defaults(self).keys()\n for k in basekeys:\n del dct[k]\n\n remove_base(mine)\n remove_base(theirs)\n\n return type(self) == type(other) and mine == theirs", "def is_same_type_as_other(cls, other):\r\n return isinstance(other, cls)", "def is_consistent(self, other):\n return self.name != other.name or self.type is other.type", "def 
_values_of_same_type(self, val1, val2):\n if self.f_supports(val1) != self.f_supports(val2):\n return False\n\n if not self.f_supports(val1) and not self.f_supports(val2):\n raise TypeError(\n \"I do not support the types of both inputs (`%s` and `%s`),\"\n \" therefore I cannot judge whether the two are of same type.\"\n % str(type(val1)),\n str(type(val2)),\n )\n\n if not type(val1) is type(val2):\n return False\n\n # Numpy arrays must agree in data type and shape\n if type(val1) is np.array:\n if not val1.dtype is val2.dtype:\n return False\n\n if not np.shape(val1) == np.shape(val2):\n return False\n\n # For tuples we now from earlier checks that the data is homogeneous.\n # Thus, only the type of the first item and the length must agree.\n if type(val1) is tuple:\n return (type(val1[0]) is type(val2[0])) and (len(val1) == len(val2))\n\n return True", "def pod_equals(x, y):\n return type(x) == type(y) and x.__dict__ == y.__dict__", "def _assert_object_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)\n self.assertEqual(type1.class_descriptor, type2.class_descriptor)\n self.assertEqual(\n set(type1.fields.iterkeys()), set(type2.fields.iterkeys()))\n for name, field1 in type1.fields.iteritems():\n field2 = type2.fields[name]\n self._assert_fields_equal(field1, field2)", "def test_types_are_equal(self):\n self.assertEqual(True, comparator.types_are_equal(None, None))\n self.assertEqual(True, comparator.types_are_equal(True, True))\n self.assertEqual(True, comparator.types_are_equal(True, False))\n self.assertEqual(True, comparator.types_are_equal(int(), int()))\n self.assertEqual(False, comparator.types_are_equal(int(), str()))\n self.assertEqual(True, comparator.types_are_equal(str(), str()))\n self.assertEqual(True, comparator.types_are_equal(list(), list()))\n self.assertEqual(True, comparator.types_are_equal(dict(), dict()))", "def _assert_union_types_equal(self, type1, type2):\n self.assertEqual(type1.name, type2.name)\n self.assertEqual(type1.description, type2.description)\n self._assert_parent_types_equal(type1, type2)", "def __eq__(self, other):\r\n\r\n return type(self) == type(other) and self.ttype == other.ttype", "def is_identical(self, other):\n if self.is_input != other.is_input:\n return False\n\n if self.is_raw() and other.is_raw():\n return True\n if self.is_raw() or other.is_raw():\n return False\n return self.structure.is_identical(other.structure)", "def __eq__(self, other):\n return self.type_id == other.type_id", "def __eq__(self, other):\n if not isinstance(other, Type):\n return False\n\n return self.__dict__ == other.__dict__", "def __eq__(self: 'Cheese', other: 'Cheese') -> bool:\n return isinstance(other, Cheese) and self.size == other.size", "def test_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self.type == other.type and\n self.data == other.data)", "def test_equal_on_type_mismatch(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = \"invalid\"\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __eq__(self, other):\n return isinstance(other, self.__class__)", "def __eq__(self, other: Any) -> bool:\n return 
isinstance(other, Nothing)", "def __eq__(self, other: Any) -> bool:\n if isinstance(other, OutputSpec):\n return type_utils.get_canonical_name_for_outer_generic(\n self.type) == type_utils.get_canonical_name_for_outer_generic(\n other.type)\n else:\n return False", "def type_is_arg_of(type1, type2):\n if (not isinstance(type2, ComplexType)):\n return False\n return (type1 == type2.first)", "def __eq__(self, other) -> bool:\n if not isinstance(other, type(self)):\n return False\n for attribute in self.classes:\n if getattr(self, attribute) != getattr(other, attribute):\n return False\n return True" ]
[ "0.826196", "0.7960103", "0.7685877", "0.75767535", "0.73899287", "0.7386229", "0.73627245", "0.72946095", "0.7151994", "0.7137583", "0.7062871", "0.7059909", "0.70217156", "0.69869566", "0.6975931", "0.69511235", "0.6951103", "0.682518", "0.6799043", "0.6766386", "0.67605907", "0.6745303", "0.6722047", "0.670175", "0.6641782", "0.6639441", "0.6604472", "0.65744555", "0.6563849", "0.65568525" ]
0.8348503
0
AirInstance constructor. name: The name of the instance. input: An object with the YAML description of the IR instance. transmit_handler: A function to be called to transmit pkts. Add support to allow the specification of the MetaIR instance.
def __init__(self, name, input, transmit_handler): local_dir = os.path.dirname(os.path.abspath(__file__)) MetaIRInstance.__init__(self, os.path.join(local_dir, 'air_meta.yml')) self.transmit_handler = transmit_handler self.name = name self.tm_started = False self.disabled = True # Add the content to the MetaIR instance self.add_content(input) self.port_count = self.meta_ir_object_map["layout"]["port_count"] # Create the AIR objects: parsers, actinos, tables, pipelines and TMs self.air_value_set = {} self.air_value_map = {} self.air_parser = {} self.air_action = {} self.air_table = {} self.air_pipeline = {} self.air_traffic_manager = {} self.processors = {} self.transmit_processor = TransmitProcessor(transmit_handler) for name, val in self.value_set.items(): self.air_value_set[name] = [] # Just use a list for name, val in self.value_map.items(): self.air_value_map[name] = {} # Just use a dict for name, val in self.parser.items(): self.air_parser[name] = Parser(name, val, self.parse_state, self.header, self.value_set) self.processors[name] = self.air_parser[name] for name, val in self.action.items(): self.air_action[name] = Action(name, val) for name, val in self.table.items(): self.air_table[name] = Table(name, val, self.air_action) for name, val in self.control_flow.items(): self.air_pipeline[name] = Pipeline(name, val, self.air_table, self.air_action) self.processors[name] = self.air_pipeline[name] for name, val in self.traffic_manager.items(): self.air_traffic_manager[name] = SimpleQueueManager(name, val, self.port_count) self.processors[name] = self.air_traffic_manager[name] # Plumb the layout layout = self.meta_ir_object_map["layout"] meta_ir_assert(layout["format"] == "list", "Unsupported layout: not a list") layout_name_list = layout["implementation"] meta_ir_assert(isinstance(layout_name_list, list), "Layout implementation is not a list") proc_count = len(layout_name_list) for idx, processor_name in enumerate(layout_name_list): cur_proc = self.processors[processor_name] if idx == 0: logging.debug("Layout: First processor %s" % cur_proc.name) self.first_processor = cur_proc if idx < proc_count - 1: next_proc = self.processors[layout_name_list[idx + 1]] cur_proc.next_processor = next_proc else: # Last one connects to transmit processor cur_proc.next_processor = self.transmit_processor logging.debug("Layout %s to %s" % (cur_proc.name, cur_proc.next_processor.name)) # Grab table initialization object if present self.table_initialization = {} ext_objs = self.external_object_map if "table_initialization" in ext_objs.keys(): self.table_initialization = ext_objs["table_initialization"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n add_on: Optional[pulumi.Input[pulumi.InputType['InstanceAddOnArgs']]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n additional_info: Optional[pulumi.Input[str]] = None,\n affinity: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n block_device_mappings: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceBlockDeviceMappingArgs']]]] = None,\n cpu_options: Optional[pulumi.Input['InstanceCpuOptionsArgs']] = None,\n credit_specification: Optional[pulumi.Input['InstanceCreditSpecificationArgs']] = None,\n disable_api_termination: Optional[pulumi.Input[bool]] = None,\n ebs_optimized: Optional[pulumi.Input[bool]] = None,\n elastic_gpu_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceElasticGpuSpecificationArgs']]]] = None,\n elastic_inference_accelerators: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceElasticInferenceAcceleratorArgs']]]] = None,\n enclave_options: Optional[pulumi.Input['InstanceEnclaveOptionsArgs']] = None,\n hibernation_options: Optional[pulumi.Input['InstanceHibernationOptionsArgs']] = None,\n host_id: Optional[pulumi.Input[str]] = None,\n host_resource_group_arn: Optional[pulumi.Input[str]] = None,\n iam_instance_profile: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n instance_initiated_shutdown_behavior: Optional[pulumi.Input[str]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ipv6_address_count: Optional[pulumi.Input[int]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpv6AddressArgs']]]] = None,\n kernel_id: Optional[pulumi.Input[str]] = None,\n key_name: Optional[pulumi.Input[str]] = None,\n launch_template: Optional[pulumi.Input['InstanceLaunchTemplateSpecificationArgs']] = None,\n license_specifications: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceLicenseSpecificationArgs']]]] = None,\n monitoring: Optional[pulumi.Input[bool]] = None,\n network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceNetworkInterfaceArgs']]]] = None,\n placement_group_name: Optional[pulumi.Input[str]] = None,\n private_dns_name_options: Optional[pulumi.Input['InstancePrivateDnsNameOptionsArgs']] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n propagate_tags_to_volume_on_creation: Optional[pulumi.Input[bool]] = None,\n ramdisk_id: Optional[pulumi.Input[str]] = None,\n security_group_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n source_dest_check: Optional[pulumi.Input[bool]] = None,\n ssm_associations: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceSsmAssociationArgs']]]] = None,\n subnet_id: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceTagArgs']]]] = None,\n tenancy: Optional[pulumi.Input[str]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n volumes: 
Optional[pulumi.Input[Sequence[pulumi.Input['InstanceVolumeArgs']]]] = None):\n if additional_info is not None:\n pulumi.set(__self__, \"additional_info\", additional_info)\n if affinity is not None:\n pulumi.set(__self__, \"affinity\", affinity)\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if block_device_mappings is not None:\n pulumi.set(__self__, \"block_device_mappings\", block_device_mappings)\n if cpu_options is not None:\n pulumi.set(__self__, \"cpu_options\", cpu_options)\n if credit_specification is not None:\n pulumi.set(__self__, \"credit_specification\", credit_specification)\n if disable_api_termination is not None:\n pulumi.set(__self__, \"disable_api_termination\", disable_api_termination)\n if ebs_optimized is not None:\n pulumi.set(__self__, \"ebs_optimized\", ebs_optimized)\n if elastic_gpu_specifications is not None:\n pulumi.set(__self__, \"elastic_gpu_specifications\", elastic_gpu_specifications)\n if elastic_inference_accelerators is not None:\n pulumi.set(__self__, \"elastic_inference_accelerators\", elastic_inference_accelerators)\n if enclave_options is not None:\n pulumi.set(__self__, \"enclave_options\", enclave_options)\n if hibernation_options is not None:\n pulumi.set(__self__, \"hibernation_options\", hibernation_options)\n if host_id is not None:\n pulumi.set(__self__, \"host_id\", host_id)\n if host_resource_group_arn is not None:\n pulumi.set(__self__, \"host_resource_group_arn\", host_resource_group_arn)\n if iam_instance_profile is not None:\n pulumi.set(__self__, \"iam_instance_profile\", iam_instance_profile)\n if image_id is not None:\n pulumi.set(__self__, \"image_id\", image_id)\n if instance_initiated_shutdown_behavior is not None:\n pulumi.set(__self__, \"instance_initiated_shutdown_behavior\", instance_initiated_shutdown_behavior)\n if instance_type is not None:\n pulumi.set(__self__, \"instance_type\", instance_type)\n if ipv6_address_count is not None:\n pulumi.set(__self__, \"ipv6_address_count\", ipv6_address_count)\n if ipv6_addresses is not None:\n pulumi.set(__self__, \"ipv6_addresses\", ipv6_addresses)\n if kernel_id is not None:\n pulumi.set(__self__, \"kernel_id\", kernel_id)\n if key_name is not None:\n pulumi.set(__self__, \"key_name\", key_name)\n if launch_template is not None:\n pulumi.set(__self__, \"launch_template\", launch_template)\n if license_specifications is not None:\n pulumi.set(__self__, \"license_specifications\", license_specifications)\n if monitoring is not None:\n pulumi.set(__self__, \"monitoring\", monitoring)\n if network_interfaces is not None:\n pulumi.set(__self__, \"network_interfaces\", network_interfaces)\n if placement_group_name is not None:\n pulumi.set(__self__, \"placement_group_name\", placement_group_name)\n if private_dns_name_options is not None:\n pulumi.set(__self__, \"private_dns_name_options\", private_dns_name_options)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if propagate_tags_to_volume_on_creation is not None:\n pulumi.set(__self__, \"propagate_tags_to_volume_on_creation\", propagate_tags_to_volume_on_creation)\n if ramdisk_id is not None:\n pulumi.set(__self__, \"ramdisk_id\", ramdisk_id)\n if security_group_ids is not None:\n pulumi.set(__self__, \"security_group_ids\", security_group_ids)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if source_dest_check is not None:\n pulumi.set(__self__, \"source_dest_check\", 
source_dest_check)\n if ssm_associations is not None:\n pulumi.set(__self__, \"ssm_associations\", ssm_associations)\n if subnet_id is not None:\n pulumi.set(__self__, \"subnet_id\", subnet_id)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tenancy is not None:\n pulumi.set(__self__, \"tenancy\", tenancy)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)\n if volumes is not None:\n pulumi.set(__self__, \"volumes\", volumes)", "def __init__(__self__, *,\n availability_zone: pulumi.Input[str],\n blueprint_id: pulumi.Input[str],\n bundle_id: pulumi.Input[str],\n add_on: Optional[pulumi.Input['InstanceAddOnArgs']] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n pulumi.set(__self__, \"blueprint_id\", blueprint_id)\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if add_on is not None:\n pulumi.set(__self__, \"add_on\", add_on)\n if ip_address_type is not None:\n pulumi.set(__self__, \"ip_address_type\", ip_address_type)\n if key_pair_name is not None:\n pulumi.set(__self__, \"key_pair_name\", key_pair_name)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)", "def __init__(__self__, *,\n activation_key: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None):\n if activation_key is not None:\n pulumi.set(__self__, \"activation_key\", activation_key)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_link_endpoint is not None:\n pulumi.set(__self__, \"private_link_endpoint\", private_link_endpoint)\n if security_group_arns is not None:\n pulumi.set(__self__, \"security_group_arns\", security_group_arns)\n if subnet_arns is not None:\n pulumi.set(__self__, \"subnet_arns\", subnet_arns)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if vpc_endpoint_id is not None:\n pulumi.set(__self__, \"vpc_endpoint_id\", vpc_endpoint_id)", "def __init__(__self__, *,\n instance_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n etag: Optional[pulumi.Input[str]] = None,\n file_shares: Optional[pulumi.Input[Sequence[pulumi.Input['FileShareConfigArgs']]]] = None,\n kms_key_name: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n location: Optional[pulumi.Input[str]] = None,\n networks: Optional[pulumi.Input[Sequence[pulumi.Input['NetworkConfigArgs']]]] = None,\n project: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input['InstanceTier']] = None):\n pulumi.set(__self__, \"instance_id\", instance_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if etag is 
not None:\n pulumi.set(__self__, \"etag\", etag)\n if file_shares is not None:\n pulumi.set(__self__, \"file_shares\", file_shares)\n if kms_key_name is not None:\n pulumi.set(__self__, \"kms_key_name\", kms_key_name)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if location is not None:\n pulumi.set(__self__, \"location\", location)\n if networks is not None:\n pulumi.set(__self__, \"networks\", networks)\n if project is not None:\n pulumi.set(__self__, \"project\", project)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def __init__(__self__, *,\n activation_key: Optional[pulumi.Input[str]] = None,\n arn: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None):\n if activation_key is not None:\n pulumi.set(__self__, \"activation_key\", activation_key)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if ip_address is not None:\n pulumi.set(__self__, \"ip_address\", ip_address)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_link_endpoint is not None:\n pulumi.set(__self__, \"private_link_endpoint\", private_link_endpoint)\n if security_group_arns is not None:\n pulumi.set(__self__, \"security_group_arns\", security_group_arns)\n if subnet_arns is not None:\n pulumi.set(__self__, \"subnet_arns\", subnet_arns)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if vpc_endpoint_id is not None:\n pulumi.set(__self__, \"vpc_endpoint_id\", vpc_endpoint_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n activation_key: Optional[pulumi.Input[str]] = None,\n ip_address: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_link_endpoint: Optional[pulumi.Input[str]] = None,\n security_group_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n subnet_arns: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vpc_endpoint_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n add_on: Optional[pulumi.Input['InstanceAddOnArgs']] = None,\n arn: Optional[pulumi.Input[str]] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n blueprint_id: Optional[pulumi.Input[str]] = None,\n bundle_id: Optional[pulumi.Input[str]] = None,\n cpu_count: Optional[pulumi.Input[int]] = None,\n created_at: Optional[pulumi.Input[str]] = None,\n ip_address_type: Optional[pulumi.Input[str]] = None,\n ipv6_address: Optional[pulumi.Input[str]] = None,\n ipv6_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n is_static_ip: Optional[pulumi.Input[bool]] = None,\n key_pair_name: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n public_ip_address: Optional[pulumi.Input[str]] = None,\n ram_size: Optional[pulumi.Input[float]] = None,\n tags: 
Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_data: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if add_on is not None:\n pulumi.set(__self__, \"add_on\", add_on)\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if blueprint_id is not None:\n pulumi.set(__self__, \"blueprint_id\", blueprint_id)\n if bundle_id is not None:\n pulumi.set(__self__, \"bundle_id\", bundle_id)\n if cpu_count is not None:\n pulumi.set(__self__, \"cpu_count\", cpu_count)\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if ip_address_type is not None:\n pulumi.set(__self__, \"ip_address_type\", ip_address_type)\n if ipv6_address is not None:\n warnings.warn(\"\"\"use `ipv6_addresses` attribute instead\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"ipv6_address is deprecated: use `ipv6_addresses` attribute instead\"\"\")\n if ipv6_address is not None:\n pulumi.set(__self__, \"ipv6_address\", ipv6_address)\n if ipv6_addresses is not None:\n pulumi.set(__self__, \"ipv6_addresses\", ipv6_addresses)\n if is_static_ip is not None:\n pulumi.set(__self__, \"is_static_ip\", is_static_ip)\n if key_pair_name is not None:\n pulumi.set(__self__, \"key_pair_name\", key_pair_name)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if public_ip_address is not None:\n pulumi.set(__self__, \"public_ip_address\", public_ip_address)\n if ram_size is not None:\n pulumi.set(__self__, \"ram_size\", ram_size)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if user_data is not None:\n pulumi.set(__self__, \"user_data\", user_data)\n if username is not None:\n pulumi.set(__self__, \"username\", username)", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n kms_key: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n schedule_config: Optional[pulumi.Input['DataIntegrationScheduleConfigArgs']] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if kms_key is not None:\n pulumi.set(__self__, \"kms_key\", kms_key)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if schedule_config is not None:\n pulumi.set(__self__, \"schedule_config\", schedule_config)\n if source_uri is not None:\n pulumi.set(__self__, \"source_uri\", source_uri)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def 
__init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n db_instance_mode: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n engine: Optional[pulumi.Input[str]] = None,\n engine_version: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['InstanceIpWhitelistArgs']]]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n endpoint_type: pulumi.Input[str],\n entry: pulumi.Input[str],\n instance_id: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)\n pulumi.set(__self__, \"entry\", entry)\n pulumi.set(__self__, \"instance_id\", instance_id)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if module_name is not None:\n pulumi.set(__self__, \"module_name\", module_name)", "def __init__(__self__, *,\n description: pulumi.Input[str],\n instance_series: pulumi.Input[str],\n specification: pulumi.Input[str],\n vswitch_id: pulumi.Input[str],\n zone_id: pulumi.Input[str],\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"description\", description)\n pulumi.set(__self__, \"instance_series\", instance_series)\n pulumi.set(__self__, \"specification\", specification)\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n pulumi.set(__self__, \"zone_id\", 
zone_id)\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if mysql_version is not None:\n pulumi.set(__self__, \"mysql_version\", mysql_version)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n entry: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if endpoint_type is not None:\n pulumi.set(__self__, \"endpoint_type\", endpoint_type)\n if entry is not None:\n pulumi.set(__self__, \"entry\", entry)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if module_name is not None:\n pulumi.set(__self__, \"module_name\", module_name)", "def __init__(__self__, *,\n name: pulumi.Input[str],\n type: pulumi.Input[str],\n value: pulumi.Input[str],\n zone_name: pulumi.Input[str],\n priority: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"name\", name)\n pulumi.set(__self__, \"type\", type)\n pulumi.set(__self__, \"value\", value)\n pulumi.set(__self__, \"zone_name\", zone_name)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)", "def __init__(__self__, *,\n instance_type: pulumi.Input[str],\n major_version: pulumi.Input[str],\n node_count: pulumi.Input[int],\n pay_type: pulumi.Input[str],\n vswitch_id: pulumi.Input[str],\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n password: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"instance_type\", instance_type)\n pulumi.set(__self__, \"major_version\", major_version)\n pulumi.set(__self__, \"node_count\", node_count)\n pulumi.set(__self__, \"pay_type\", pay_type)\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if auto_renew is not None:\n pulumi.set(__self__, \"auto_renew\", auto_renew)\n if auto_renew_period is not None:\n pulumi.set(__self__, \"auto_renew_period\", auto_renew_period)\n if cluster_name is not None:\n pulumi.set(__self__, \"cluster_name\", cluster_name)\n if data_center_name is not None:\n pulumi.set(__self__, \"data_center_name\", data_center_name)\n if disk_size is not None:\n pulumi.set(__self__, \"disk_size\", disk_size)\n if disk_type is not None:\n pulumi.set(__self__, \"disk_type\", disk_type)\n if enable_public is not None:\n pulumi.set(__self__, \"enable_public\", enable_public)\n if ip_white is not None:\n pulumi.set(__self__, \"ip_white\", ip_white)\n if maintain_end_time is not None:\n 
pulumi.set(__self__, \"maintain_end_time\", maintain_end_time)\n if maintain_start_time is not None:\n pulumi.set(__self__, \"maintain_start_time\", maintain_start_time)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if period is not None:\n pulumi.set(__self__, \"period\", period)\n if period_unit is not None:\n pulumi.set(__self__, \"period_unit\", period_unit)\n if security_groups is not None:\n pulumi.set(__self__, \"security_groups\", security_groups)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__, *,\n db_instance_mode: pulumi.Input[str],\n engine: pulumi.Input[str],\n engine_version: pulumi.Input[str],\n vswitch_id: pulumi.Input[str],\n availability_zone: Optional[pulumi.Input[str]] = None,\n create_sample_data: Optional[pulumi.Input[bool]] = None,\n db_instance_category: Optional[pulumi.Input[str]] = None,\n db_instance_class: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n encryption_key: Optional[pulumi.Input[str]] = None,\n encryption_type: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_group_count: Optional[pulumi.Input[int]] = None,\n instance_network_type: Optional[pulumi.Input[str]] = None,\n instance_spec: Optional[pulumi.Input[str]] = None,\n ip_whitelists: Optional[pulumi.Input[Sequence[pulumi.Input['InstanceIpWhitelistArgs']]]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n master_node_num: Optional[pulumi.Input[int]] = None,\n payment_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[str]] = None,\n private_ip_address: Optional[pulumi.Input[str]] = None,\n resource_group_id: Optional[pulumi.Input[str]] = None,\n security_ip_lists: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n seg_node_num: Optional[pulumi.Input[int]] = None,\n seg_storage_type: Optional[pulumi.Input[str]] = None,\n ssl_enabled: Optional[pulumi.Input[int]] = None,\n storage_size: Optional[pulumi.Input[int]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n used_time: Optional[pulumi.Input[str]] = None,\n vector_configuration_status: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"db_instance_mode\", db_instance_mode)\n pulumi.set(__self__, \"engine\", engine)\n pulumi.set(__self__, \"engine_version\", engine_version)\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if availability_zone is not None:\n warnings.warn(\"\"\"Field 'availability_zone' has been deprecated from version 1.187.0. Use 'zone_id' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"availability_zone is deprecated: Field 'availability_zone' has been deprecated from version 1.187.0. 
Use 'zone_id' instead.\"\"\")\n if availability_zone is not None:\n pulumi.set(__self__, \"availability_zone\", availability_zone)\n if create_sample_data is not None:\n pulumi.set(__self__, \"create_sample_data\", create_sample_data)\n if db_instance_category is not None:\n pulumi.set(__self__, \"db_instance_category\", db_instance_category)\n if db_instance_class is not None:\n pulumi.set(__self__, \"db_instance_class\", db_instance_class)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if encryption_key is not None:\n pulumi.set(__self__, \"encryption_key\", encryption_key)\n if encryption_type is not None:\n pulumi.set(__self__, \"encryption_type\", encryption_type)\n if instance_charge_type is not None:\n warnings.warn(\"\"\"Field `instance_charge_type` has been deprecated from version 1.187.0. Use `payment_type` instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"instance_charge_type is deprecated: Field `instance_charge_type` has been deprecated from version 1.187.0. Use `payment_type` instead.\"\"\")\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if instance_group_count is not None:\n pulumi.set(__self__, \"instance_group_count\", instance_group_count)\n if instance_network_type is not None:\n pulumi.set(__self__, \"instance_network_type\", instance_network_type)\n if instance_spec is not None:\n pulumi.set(__self__, \"instance_spec\", instance_spec)\n if ip_whitelists is not None:\n pulumi.set(__self__, \"ip_whitelists\", ip_whitelists)\n if maintain_end_time is not None:\n pulumi.set(__self__, \"maintain_end_time\", maintain_end_time)\n if maintain_start_time is not None:\n pulumi.set(__self__, \"maintain_start_time\", maintain_start_time)\n if master_node_num is not None:\n pulumi.set(__self__, \"master_node_num\", master_node_num)\n if payment_type is not None:\n pulumi.set(__self__, \"payment_type\", payment_type)\n if period is not None:\n pulumi.set(__self__, \"period\", period)\n if private_ip_address is not None:\n pulumi.set(__self__, \"private_ip_address\", private_ip_address)\n if resource_group_id is not None:\n pulumi.set(__self__, \"resource_group_id\", resource_group_id)\n if security_ip_lists is not None:\n warnings.warn(\"\"\"Field 'security_ip_list' has been deprecated from version 1.187.0. Use 'ip_whitelist' instead.\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"security_ip_lists is deprecated: Field 'security_ip_list' has been deprecated from version 1.187.0. 
Use 'ip_whitelist' instead.\"\"\")\n if security_ip_lists is not None:\n pulumi.set(__self__, \"security_ip_lists\", security_ip_lists)\n if seg_node_num is not None:\n pulumi.set(__self__, \"seg_node_num\", seg_node_num)\n if seg_storage_type is not None:\n pulumi.set(__self__, \"seg_storage_type\", seg_storage_type)\n if ssl_enabled is not None:\n pulumi.set(__self__, \"ssl_enabled\", ssl_enabled)\n if storage_size is not None:\n pulumi.set(__self__, \"storage_size\", storage_size)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if used_time is not None:\n pulumi.set(__self__, \"used_time\", used_time)\n if vector_configuration_status is not None:\n pulumi.set(__self__, \"vector_configuration_status\", vector_configuration_status)\n if vpc_id is not None:\n pulumi.set(__self__, \"vpc_id\", vpc_id)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__, *,\n active: Optional[pulumi.Input[bool]] = None,\n annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n builtin: Optional[pulumi.Input[bool]] = None,\n checksum: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n external_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n ui_url: Optional[pulumi.Input[str]] = None,\n url: Optional[pulumi.Input[str]] = None,\n whitelist_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if active is not None:\n pulumi.set(__self__, \"active\", active)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if builtin is not None:\n pulumi.set(__self__, \"builtin\", builtin)\n if checksum is not None:\n pulumi.set(__self__, \"checksum\", checksum)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if external_id is not None:\n pulumi.set(__self__, \"external_id\", external_id)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if ui_url is not None:\n pulumi.set(__self__, \"ui_url\", ui_url)\n if url is not None:\n pulumi.set(__self__, \"url\", url)\n if whitelist_domains is not None:\n pulumi.set(__self__, \"whitelist_domains\", whitelist_domains)", "def __init__(__self__, *,\n connection_string: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n port: Optional[pulumi.Input[str]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None):\n if connection_string is not None:\n pulumi.set(__self__, \"connection_string\", connection_string)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if instance_charge_type is not None:\n pulumi.set(__self__, \"instance_charge_type\", instance_charge_type)\n if instance_series is not None:\n pulumi.set(__self__, \"instance_series\", instance_series)\n if mysql_version is not None:\n pulumi.set(__self__, \"mysql_version\", mysql_version)\n if port is not None:\n pulumi.set(__self__, \"port\", port)\n if specification is not None:\n pulumi.set(__self__, \"specification\", specification)\n if vpc_id is not None:\n pulumi.set(__self__, 
\"vpc_id\", vpc_id)\n if vswitch_id is not None:\n pulumi.set(__self__, \"vswitch_id\", vswitch_id)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n instance_charge_type: Optional[pulumi.Input[str]] = None,\n instance_series: Optional[pulumi.Input[str]] = None,\n mysql_version: Optional[pulumi.Input[int]] = None,\n specification: Optional[pulumi.Input[str]] = None,\n vpc_id: Optional[pulumi.Input[str]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None,\n priority: Optional[pulumi.Input[str]] = None,\n qualified_name: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n zone_name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if priority is not None:\n pulumi.set(__self__, \"priority\", priority)\n if qualified_name is not None:\n pulumi.set(__self__, \"qualified_name\", qualified_name)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value is not None:\n pulumi.set(__self__, \"value\", value)\n if zone_id is not None:\n pulumi.set(__self__, \"zone_id\", zone_id)\n if zone_name is not None:\n pulumi.set(__self__, \"zone_name\", zone_name)", "def __init__(__self__, *,\n active: pulumi.Input[bool],\n builtin: pulumi.Input[bool],\n url: pulumi.Input[str],\n annotations: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n checksum: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n external_id: Optional[pulumi.Input[str]] = None,\n labels: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n ui_url: Optional[pulumi.Input[str]] = None,\n whitelist_domains: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n pulumi.set(__self__, \"active\", active)\n pulumi.set(__self__, \"builtin\", builtin)\n pulumi.set(__self__, \"url\", url)\n if annotations is not None:\n pulumi.set(__self__, \"annotations\", annotations)\n if checksum is not None:\n pulumi.set(__self__, \"checksum\", checksum)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if external_id is not None:\n pulumi.set(__self__, \"external_id\", external_id)\n if labels is not None:\n pulumi.set(__self__, \"labels\", labels)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if ui_url is not None:\n pulumi.set(__self__, \"ui_url\", ui_url)\n if whitelist_domains is not None:\n pulumi.set(__self__, \"whitelist_domains\", whitelist_domains)", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n endpoint_type: Optional[pulumi.Input[str]] = None,\n entry: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n module_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...", "def __init__(__self__, *,\n arn: Optional[pulumi.Input[str]] = None,\n minimum_engine_version: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n name_prefix: 
Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n user_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if arn is not None:\n pulumi.set(__self__, \"arn\", arn)\n if minimum_engine_version is not None:\n pulumi.set(__self__, \"minimum_engine_version\", minimum_engine_version)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if name_prefix is not None:\n pulumi.set(__self__, \"name_prefix\", name_prefix)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if tags_all is not None:\n pulumi.set(__self__, \"tags_all\", tags_all)\n if user_names is not None:\n pulumi.set(__self__, \"user_names\", user_names)", "def __init__(__self__,\n resource_name: str,\n args: Optional[InstanceArgs] = None,\n opts: Optional[pulumi.ResourceOptions] = None):\n ..." ]
[ "0.66051346", "0.6576527", "0.6427389", "0.63597023", "0.62879163", "0.6271562", "0.6235065", "0.6202321", "0.6141576", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.6116739", "0.61087185", "0.60978323", "0.6088997", "0.60772216", "0.606114", "0.6051864", "0.60461396", "0.60259384", "0.6024794", "0.60247785", "0.6013458", "0.6007375", "0.5995422", "0.5993116", "0.59827495" ]
0.7563762
0
Process any table initialization spec from the IR desc. The IR specification may provide a set of table initialization operations in a "table_initialization" object. This takes the form of a sequence of table entry specifications.
def process_table_init(self):
    logging.debug("Processing table initialization, %d entries",
                  len(self.table_initialization))
    for init_entry in self.table_initialization:
        for table_name, entry_desc in init_entry.items():
            self.air_table[table_name].add_entry(
                table_entry.description_to_entry(entry_desc))
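For illustration, the "table_initialization" object consumed above is a sequence of {table_name: entry_description} items. A minimal, self-contained sketch of that flow follows; the table name "forward", the match/action fields, and the _StubTable / description_to_entry stand-ins are illustrative assumptions rather than part of the record itself.

import logging

logging.basicConfig(level=logging.DEBUG)


class _StubTable:
    # Minimal stand-in for an AIR table object that just records entries.
    def __init__(self):
        self.entries = []

    def add_entry(self, entry):
        self.entries.append(entry)


class _StubTableEntryModule:
    # Stand-in for the table_entry helper module referenced by process_table_init.
    @staticmethod
    def description_to_entry(desc):
        # A real pipeline would build a typed table-entry object here.
        return dict(desc)


table_entry = _StubTableEntryModule()


class IrProcessor:
    def __init__(self, table_initialization, air_table):
        self.table_initialization = table_initialization
        self.air_table = air_table

    # Same shape as the document's process_table_init.
    def process_table_init(self):
        logging.debug("Processing table initialization, %d entries",
                      len(self.table_initialization))
        for init_entry in self.table_initialization:
            for table_name, entry_desc in init_entry.items():
                self.air_table[table_name].add_entry(
                    table_entry.description_to_entry(entry_desc))


if __name__ == "__main__":
    # Hypothetical spec: a sequence of {table_name: entry_description} items.
    spec = [
        {"forward": {"match": {"dst_mac": "00:11:22:33:44:55"},
                     "action": "set_egress", "params": {"port": 1}}},
        {"forward": {"match": {"dst_mac": "ff:ff:ff:ff:ff:ff"},
                     "action": "broadcast", "params": {}}},
    ]
    proc = IrProcessor(spec, {"forward": _StubTable()})
    proc.process_table_init()
    print(len(proc.air_table["forward"].entries))  # -> 2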
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_up_tables(self):\n tables = []\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_info',\n 'description': desc.SimInfoRow,\n 'tabletitle': 'Simulation Information'})\n tables.append({'groupname': 'metadata',\n 'tablename': 'sim_timeseries',\n 'description': desc.SimTimeseriesRow,\n 'tabletitle': 'Simulation Power Data'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_params',\n 'description': desc.ThMetadataRow,\n 'tabletitle': 'TH Component Parameters'})\n tables.append({'groupname': 'th',\n 'tablename': 'th_timeseries',\n 'description': desc.ThTimeseriesRow,\n 'tabletitle': 'TH Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_timeseries',\n 'description': desc.NeutronicsTimeseriesRow,\n 'tabletitle': 'Neutronics Timeseries'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'neutronics_params',\n 'description': desc.NeutronicsParamsRow,\n 'tabletitle': 'Neutronics Metadata'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'zetas',\n 'description': desc.ZetasTimestepRow,\n 'tabletitle': 'Neutron Precursor Concentrations'})\n tables.append({'groupname': 'neutronics',\n 'tablename': 'omegas',\n 'description': desc.OmegasTimestepRow,\n 'tabletitle': 'Decay Heat Fractions'})\n return tables", "def full_initialization_process():\n\n db1 = Database('TOBACCO_RAW;')\n con1, cur1 = db1.connect()\n cur1.execute('create index idl_doc_field_id_idx on idl_doc_field(id);')\n cur1.execute('create index idl_doc_id_idx on idl_doc(id);')\n add_timestamp_to_idl_doc()\n\n create_utf_text_files()\n\n initialize_tables()\n fill_tables()", "def __init__(self, *args):\n _table.Table_swiginit(self, _table.new_Table(*args))", "def init_line_list():\n # Get str lengths from defs\n len_line = defs.str_len()['ion']\n len_src = defs.str_len()['Source']\n # Load sources to check\n sources = arcl_io.load_source_table()\n src_files = sources['File'].data\n if len(src_files[0]) > len_src:\n raise ValueError(\"Source filename now exceeds table. 
Should fix source name\")\n dummy_src = str('#')*len_src\n # Arc Line name\n dummy_line = str('#')*len_line\n #\n\n # Dict for Table\n idict = OrderedDict()\n idict['ion'] = dummy_line\n idict['wave'] = 0.\n idict['NIST'] = 0\n idict['Instr'] = 0 # Flag for instrument\n idict['amplitude'] = 0\n idict['Source'] = dummy_src\n\n # Table\n tkeys = idict.keys()\n lst = [[idict[tkey]] for tkey in tkeys]\n init_tbl = Table(lst, names=tkeys)\n\n # Return\n return init_tbl", "def tables(args):\n\n config_file = args.setupfn\n conf_base = os.path.basename(config_file).split('.')[0]\n statfile = os.path.join(args.outputdir,\n \"{}_radvel.stat\".format(conf_base))\n status = load_status(statfile)\n\n assert status.getboolean('mcmc', 'run'), \\\n \"Must run MCMC before making tables\"\n\n P, post = radvel.utils.initialize_posterior(config_file)\n post = radvel.posterior.load(status.get('fit', 'postfile'))\n chains = pd.read_csv(status.get('mcmc', 'chainfile'))\n minafactor = status.get('mcmc', 'minafactor')\n maxarchange = status.get('mcmc', 'maxarchange')\n maxgr = status.get('mcmc', 'maxgr')\n mintz = status.get('mcmc', 'mintz')\n if 'derive' in status.sections() and status.getboolean('derive', 'run'):\n dchains = pd.read_csv(status.get('derive', 'chainfile'))\n chains = chains.join(dchains, rsuffix='_derived')\n derived = True\n else:\n derived = False\n report = radvel.report.RadvelReport(P, post, chains, minafactor, maxarchange, maxgr, mintz, derived=derived)\n tabletex = radvel.report.TexTable(report)\n attrdict = {'priors': 'tab_prior_summary', 'rv': 'tab_rv',\n 'params': 'tab_params', 'derived': 'tab_derived',\n 'crit': 'tab_crit'}\n for tabtype in args.type:\n print(\"Generating LaTeX code for {} table\".format(tabtype))\n\n if tabtype == 'ic_compare':\n assert status.has_option('ic_compare', 'ic'), \\\n \"Must run Information Criteria comparison before making comparison tables\"\n\n compstats = eval(status.get('ic_compare', 'ic'))\n report = radvel.report.RadvelReport(\n P, post, chains, minafactor, maxarchange, maxgr, mintz, compstats=compstats\n )\n tabletex = radvel.report.TexTable(report)\n tex = tabletex.tab_comparison()\n elif tabtype == 'rv':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title, max_lines=None)\n elif tabtype == 'crit':\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n else:\n if tabtype == 'derived':\n assert status.has_option('derive', 'run'), \\\n \"Must run `radvel derive` before making derived parameter table\"\n assert tabtype in attrdict, 'Invalid Table Type %s ' % tabtype\n tex = getattr(tabletex, attrdict[tabtype])(name_in_title=args.name_in_title)\n\n saveto = os.path.join(\n args.outputdir, '{}_{}.tex'.format(conf_base, tabtype)\n )\n with open(saveto, 'w+') as f:\n f.write(tex)\n\n savestate = {'{}_tex'.format(tabtype): os.path.relpath(saveto)}\n save_status(statfile, 'table', savestate)", "def _initialize(self, chain, length):\n # If the table already exists, exit now.\n if chain != 0:\n return\n\n # Determine size\n try:\n size = len(self._getfunc())\n except TypeError:\n size = 1\n\n query = \"create table %s (recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, trace int(5), %s FLOAT)\" % (self.name, ' FLOAT, '.join(['v%s' % (x+1) for x in range(size)]))\n self.db.cur.execute(query)", "def fill_table(self, executer, tree, cursor, table):\n counter = 0\n table_content = executer.lots_of_eggs(cursor, table)\n for line in table_content:\n tree.insert('', 'end', text=counter, values=line)\n counter += 1", 
"def finalize_tables(self):\n self.attrbuilder.finalize(self.ext_type)\n self.vtabbuilder.finalize(self.ext_type)", "def make_table_declarations(ibs):\n # available tables\n TABLENAME_LIST = [\n IMAGE_TABLE,\n ANNOTATION_TABLE,\n # NAME_TABLE,\n IMAGESET_TABLE,\n IMAGE_GRID,\n THUMB_TABLE,\n NAMES_TREE,\n ]\n\n # table nice names\n TABLE_NICE = {\n IMAGE_TABLE: 'Image Table',\n ANNOTATION_TABLE: 'Annotations Table',\n NAME_TABLE: 'Name Table',\n QRES_TABLE: 'Query Results Table',\n IMAGESET_TABLE: 'ImageSet Table',\n IMAGE_GRID: 'Thumbnail Grid',\n THUMB_TABLE: 'Thumbnail Table',\n NAMES_TREE: 'Tree of Names',\n }\n\n # COLUMN DEFINITIONS\n # the columns each wbia table has,\n TABLE_COLNAMES = {\n IMAGE_TABLE: [\n 'gid',\n 'thumb',\n # 'nAids',\n 'img_gname',\n # 'ext',\n 'reviewed', # detection reviewed flag is not fullyused\n 'datetime',\n 'gps',\n 'orientation',\n 'party_tag',\n 'contributor_tag',\n # 'gdconf',\n 'imgnotes',\n 'image_uuid',\n ],\n # debug with\n # --noannottbl\n # --nonametree\n # even just aid seems to be very slow\n ANNOTATION_TABLE: [\n # 'annotation_uuid',\n 'aid',\n 'thumb',\n 'annot_gname',\n 'name',\n 'exemplar',\n 'species', # <put back in\n 'viewpoint',\n 'quality_text',\n 'age_min',\n 'age_max',\n 'sex_text',\n # 'rdconf',\n # 'nGt', # ## <put back in\n 'imagesettext_names',\n 'annotnotes', # ## <put back in\n 'tag_text', # < Hack should have actual tag structure\n # 'annot_visual_uuid',\n # 'nFeats',\n # 'bbox',\n # 'theta',\n # 'verts',\n # 'num_verts',\n ],\n NAME_TABLE: ['nid', 'name', 'nAids', 'namenotes'],\n QRES_TABLE: ['rank', 'score', 'name', 'aid'],\n IMAGESET_TABLE: [\n 'imagesettext',\n 'nImgs',\n # 'num_imgs_reviewed',\n # 'num_annotmatch_reviewed',\n # 'imageset_end_datetime',\n # 'imageset_processed_flag',\n # 'imageset_shipped_flag',\n 'imgsetid',\n ],\n NAMES_TREE: [\n 'name',\n 'nAids',\n 'thumb',\n 'nid',\n # 'exemplar',\n # 'nExAids',\n 'aid',\n # 'annot_gname',\n # 'quality_text',\n # 'age_min',\n # 'age_max',\n # 'sex_text',\n # 'imagesettext_names',\n # 'datetime',\n # 'max_hourdiff',\n # 'max_speed',\n # 'has_split',\n # 'namenotes',\n ],\n IMAGE_GRID: ['thumb'],\n # TEST TABLE\n THUMB_TABLE: ['img_gname', 'thumb'],\n }\n\n # dynamicly defined headers\n if not const.SIMPLIFY_INTERFACE:\n from wbia.control import accessor_decors\n\n if accessor_decors.API_CACHE:\n # Too slow without api cache\n TABLE_COLNAMES[IMAGESET_TABLE].extend(\n ['percent_annotmatch_reviewed_str', 'percent_names_with_exemplar_str']\n )\n TABLE_COLNAMES[IMAGESET_TABLE].extend(\n [\n # 'percent_imgs_reviewed_str',\n 'imageset_start_datetime',\n # 'imageset_end_datetime',\n 'imageset_duration',\n 'imageset_notes',\n ]\n )\n\n if ibs.cfg.other_cfg.show_shipped_imagesets:\n TABLE_COLNAMES[IMAGESET_TABLE].extend(\n ['imageset_processed_flag', 'imageset_shipped_flag']\n )\n\n # THUMB_TABLE : ['thumb' 'thumb' 'thumb' 'thumb'],\n # NAMES_TREE : {('name' 'nid' 'nAids') : ['aid' 'bbox' 'thumb']}\n\n TABLE_TREE_LEVELS = {\n NAMES_TREE: {\n 'name': 0,\n 'namenotes': 0,\n 'nid': 0,\n 'nAids': 0,\n 'nExAids': 0,\n 'sex_text': 0,\n 'exemplar': 1,\n 'thumb': 1,\n 'viewpoint': 1,\n 'quality_text': 1,\n 'age_min': 1,\n 'age_max': 1,\n 'imagesettext_names': 1,\n 'aid': 1,\n 'annot_gname': 1,\n 'datetime': 1,\n 'max_hourdiff': 0,\n 'max_speed': 0,\n 'has_split': 0,\n },\n }\n\n # the columns which are editable\n TABLE_EDITSET = {\n IMAGE_TABLE: {'reviewed', 'imgnotes', 'gps'},\n ANNOTATION_TABLE: {\n 'name',\n 'species',\n 'annotnotes',\n 'exemplar',\n 'viewpoint',\n 'quality_text',\n 
'age_min',\n 'age_max',\n 'sex_text',\n 'tag_text',\n },\n NAME_TABLE: {'name', 'namenotes'},\n QRES_TABLE: {'name'},\n IMAGESET_TABLE: {\n 'imagesettext',\n 'imageset_shipped_flag',\n 'imageset_processed_flag',\n },\n IMAGE_GRID: set(),\n THUMB_TABLE: set(),\n NAMES_TREE: {\n 'exemplar',\n 'name',\n 'namenotes',\n 'viewpoint',\n 'quality_text',\n 'age_min',\n 'age_max',\n 'sex_text',\n },\n }\n\n if const.SIMPLIFY_INTERFACE:\n TABLE_EDITSET[NAMES_TREE].remove('name')\n\n TABLE_HIDDEN_LIST = {\n # IMAGE_TABLE : [False, True, False, False, False, True, False, False, False, False, False],\n # ANNOTATION_TABLE : [False, False, False, False, False, False, False, True, True, True, True, True, True],\n # NAMES_TREE : [False, False, False, False, False, False],\n # NAME_TABLE : [False, False, False, False],\n }\n\n TABLE_STRIPE_LIST = {\n IMAGE_GRID: 9,\n }\n\n # Define the valid columns a table could have\n COL_DEF = dict(\n [\n ('annot_visual_uuid', (str, 'Annot Visual UUID')),\n ('image_uuid', (str, 'Image UUID')),\n ('gid', (int, 'Image ID')),\n ('aid', (int, 'Annotation ID')),\n ('nid', (int, 'Name ID')),\n ('imgsetid', (int, 'ImageSet ID')),\n ('nAids', (int, '#Annots')),\n ('nExAids', (int, '#Exemplars')),\n ('nGt', (int, '#GT')),\n ('nImgs', (int, '#Imgs')),\n ('nFeats', (int, '#Features')),\n ('quality_text', (str, 'Quality')),\n ('imagesettext_names', (str, 'ImageSet Names')),\n ('age_min', (int, 'Age (min)')),\n ('age_max', (int, 'Age (max)')),\n ('sex_text', (str, 'Sex')),\n ('rank', (str, 'Rank')), # needs to be a string for !Query\n ('unixtime', (float, 'unixtime')),\n ('species', (str, 'Species')),\n ('viewpoint', (str, 'Viewpoint')),\n ('img_gname', (str, 'Image Name')),\n ('annot_gname', (str, 'Source Image')),\n ('gdconf', (str, 'Detection Confidence')),\n ('rdconf', (float, 'Detection Confidence')),\n ('name', (str, 'Name')),\n ('annotnotes', (str, 'Annot Notes')),\n ('namenotes', (str, 'Name Notes')),\n ('imgnotes', (str, 'Image Notes')),\n ('match_name', (str, 'Matching Name')),\n ('bbox', (str, 'BBOX (x, y, w, h))')), # Non editables are safe as strs\n ('num_verts', (int, 'NumVerts')),\n ('verts', (str, 'Verts')),\n ('score', (str, 'Confidence')),\n ('theta', (str, 'Theta')),\n ('reviewed', (bool, 'Detection Reviewed')),\n ('exemplar', (bool, 'Is Exemplar')),\n ('imagesettext', (str, 'ImageSet')),\n ('datetime', (str, 'Date / Time')),\n ('ext', (str, 'EXT')),\n ('thumb', ('PIXMAP', 'Thumb')),\n ('gps', (str, 'GPS')),\n ('orientation', (str, 'Orientation')),\n ('imageset_processed_flag', (bool, 'Processed')),\n ('imageset_shipped_flag', (bool, 'Commited')),\n ('imageset_start_datetime', (str, 'Start Time')),\n ('imageset_end_datetime', (str, 'End Time')),\n ('imageset_duration', (str, 'Duration')),\n ('imageset_notes', (str, 'Notes')),\n ('party_tag', (str, 'Party')),\n ('contributor_tag', (str, 'Contributor')),\n ('percent_imgs_reviewed_str', (str, '%Imgs Reviewed')),\n ('percent_annotmatch_reviewed_str', (str, '%Queried')),\n ('num_imgs_reviewed', (str, '#Imgs Reviewed')),\n ('num_annotmatch_reviewed', (str, '#Matches Reviewed')),\n ('percent_names_with_exemplar_str', (str, '%Names with Exemplar')),\n ('max_speed', (float, 'Max Speed km/h')),\n ('has_split', (float, 'Needs Split')),\n ('max_hourdiff', (float, 'Max Hour Diff')),\n ('tag_text', (str, 'Tags')),\n ]\n )\n\n declare_tup = (\n TABLENAME_LIST,\n TABLE_NICE,\n TABLE_COLNAMES,\n TABLE_TREE_LEVELS,\n TABLE_EDITSET,\n TABLE_HIDDEN_LIST,\n TABLE_STRIPE_LIST,\n COL_DEF,\n )\n return declare_tup", "def 
initialize(self):\n\n cursor = self.conn.cursor()\n\n # This table can be used as a parent for a collection of runs\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS RunCollections (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE\n );\"\"\"\n )\n\n # This table holds in which run each appears.\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Runs (\n id INT AUTO_INCREMENT PRIMARY KEY,\n name VARCHAR(14) UNIQUE,\n collection_id INT,\n FOREIGN KEY (collection_id) REFERENCES RunCollections (id) ON DELETE CASCADE);\"\"\"\n )\n\n # This table holds resources, which can be in multiple runs and have multiple varieties\n cursor.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS Resources (\n id INT AUTO_INCREMENT PRIMARY KEY, \n extension VARCHAR(20), \n webpage VARCHAR(30),\n run_id INT NOT NULL,\n FOREIGN KEY (run_id) REFERENCES Runs (id) ON DELETE CASCADE);\"\"\"\n )\n\n cursor.execute(\n 'SELECT Table_name FROM information_schema.tables WHERE table_schema = \"vpntfg0\" AND Table_name LIKE \"%Varieties_%\" ORDER BY Table_name'\n )\n for row in cursor.fetchall():\n self.variety_tables.append(row[0])\n\n cursor.close()\n _logger.info(\"Variety tables are: %s\" % self.variety_tables)\n\n _logger.info(\"Database initialized\")", "def buildConverters(tableSpec, tableNamespace):\n converters = []\n convertersByName = {}\n for tp, name, repeat, aux, descr in tableSpec:\n tableName = name\n if name.startswith(\"ValueFormat\"):\n assert tp == \"uint16\"\n converterClass = ValueFormat\n elif name.endswith(\"Count\") or name in (\"StructLength\", \"MorphType\"):\n converterClass = {\n \"uint8\": ComputedUInt8,\n \"uint16\": ComputedUShort,\n \"uint32\": ComputedULong,\n }[tp]\n elif name == \"SubTable\":\n converterClass = SubTable\n elif name == \"ExtSubTable\":\n converterClass = ExtSubTable\n elif name == \"SubStruct\":\n converterClass = SubStruct\n elif name == \"FeatureParams\":\n converterClass = FeatureParams\n elif name in (\"CIDGlyphMapping\", \"GlyphCIDMapping\"):\n converterClass = StructWithLength\n else:\n if not tp in converterMapping and \"(\" not in tp:\n tableName = tp\n converterClass = Struct\n else:\n converterClass = eval(tp, tableNamespace, converterMapping)\n\n conv = converterClass(name, repeat, aux, description=descr)\n\n if conv.tableClass:\n # A \"template\" such as OffsetTo(AType) knowss the table class already\n tableClass = conv.tableClass\n elif tp in (\"MortChain\", \"MortSubtable\", \"MorxChain\"):\n tableClass = tableNamespace.get(tp)\n else:\n tableClass = tableNamespace.get(tableName)\n\n if not conv.tableClass:\n conv.tableClass = tableClass\n\n if name in [\"SubTable\", \"ExtSubTable\", \"SubStruct\"]:\n conv.lookupTypes = tableNamespace[\"lookupTypes\"]\n # also create reverse mapping\n for t in conv.lookupTypes.values():\n for cls in t.values():\n convertersByName[cls.__name__] = Table(name, repeat, aux, cls)\n if name == \"FeatureParams\":\n conv.featureParamTypes = tableNamespace[\"featureParamTypes\"]\n conv.defaultFeatureParams = tableNamespace[\"FeatureParams\"]\n for cls in conv.featureParamTypes.values():\n convertersByName[cls.__name__] = Table(name, repeat, aux, cls)\n converters.append(conv)\n assert name not in convertersByName, name\n convertersByName[name] = conv\n return converters, convertersByName", "def setup_table(self):\n\n self.setup.create_basic_table_in_dev()\n self.setup.insert_random_records_into_dev()", "def _process(self, tables=None):\n\n if self._tables:\n return self._tables\n\n tables = tables or {}\n\n for row in 
self.url.generator.iter_rp:\n\n table_id_key = row['Table ID'].strip().lower()\n\n if not row['Line Number'].strip():\n if 'Universe' not in row['Table Title']:\n if table_id_key not in tables:\n tables[table_id_key] = Table(row['Table ID'], row['Table Title'].strip().title(),\n seq=row['Sequence Number'],\n startpos=int(row['Start Position']))\n else:\n tables[table_id_key].seq = row['Sequence Number']\n tables[table_id_key].startpos = row['Start Position']\n tables[table_id_key].subject = row['Subject Area']\n\n else:\n tables[table_id_key].universe = row['Table Title'].replace('Universe: ', '').strip()\n\n else: # column row\n try:\n\n line_no = int(row['Line Number'])\n\n if not line_no in tables[table_id_key].columns:\n tables[table_id_key].columns[line_no] = Column(row['Table ID'],\n f\"{row['Table ID']}_{line_no:03}\",\n line_no,\n description=row['Table Title'])\n else:\n tables[table_id_key].columns[line_no].description = row['Table Title']\n\n\n except ValueError as e:\n # Headings, which have fractional line numebrs\n # print(row)\n pass\n\n self._tables = tables\n\n return self._tables", "def testLR0ParseTable(self):\r\n from pydsl.Parser.LR0 import _slr_build_parser_table, build_states_sets\r\n state_sets = build_states_sets(productionset0)\r\n self.assertEqual(len(state_sets), 5)\r\n #0 . EI: : . exp $ , \r\n # exp : .SR\r\n # transitions: S -> 2,\r\n # goto: exp -> 1\r\n #1 EI: exp . $ ,\r\n # transitions: $ -> 3\r\n #2 exp: S . R,\r\n # transitions: R -> 4\r\n #3 EI: exp $ .\r\n #4 exp: S R .\r\n # reduce\r\n\r\n parsetable = _slr_build_parser_table(productionset0)\r\n self.assertEqual(len(parsetable), 4)", "def init_tables(self) -> None:\n # TODO(#93) maybe raise flag when the schema of existing tables isn't what we expect\n # it to be?\n # \"How to know that schema changes?\"\n # logger.warning(\"some message\")\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n with conn:\n c = conn.cursor()\n c.execute(CREATE_PROJECTS_TABLE)\n c.execute(CREATE_TASKS_TABLE)\n c.execute(CREATE_REQUESTERS_TABLE)\n c.execute(CREATE_TASK_RUNS_TABLE)\n c.execute(CREATE_ASSIGNMENTS_TABLE)\n c.execute(CREATE_UNITS_TABLE)\n c.execute(CREATE_WORKERS_TABLE)\n c.execute(CREATE_AGENTS_TABLE)\n c.execute(CREATE_QUALIFICATIONS_TABLE)\n c.execute(CREATE_GRANTED_QUALIFICATIONS_TABLE)\n c.execute(CREATE_ONBOARDING_AGENTS_TABLE)", "def pre_interface_route_table_create(self, resource_dict):\n pass", "def _init_table(self, table: \"Table\"):\n if not self.columns:\n self.columns = table.columns\n self._data = table.data", "def __init__(self, *args):\n _snap.TTable_swiginit(self, _snap.new_TTable(*args))", "def generate_inicialization_file(id_test,lines,columns):\n \n def _generate_cell_initialization(outputFile,inputLine,fieldNames):\n print(\"_generate_cell_initialization\")\n outputFile.write(\"\\n\")\n outputFile.write(\"rule : { \\n\")\n port_idx =0\n for fieldName in fieldNames[1:]:\n port_idx=port_idx+1\n print(\"Writing \"+str(fieldName+\" for agent \"+str(inputLine[0])))\n outputFile.write(\"\\t\\t~\"+str(fieldName)+\"\\t\\t:= \"+str(inputLine[port_idx].strip())+\";\\n\")\n \n outputFile.write(\" } \\n\")\n outputFile.write(\" 0 \\n\")\n outputFile.write(\" { \\n\")\n outputFile.write(\"\\t\\t(0,0)~\"+fieldNames[1]+\"\\t = -\"+ \\\n str(inputLine[0])+\"\\n\")\n #str(DEFAULT_INITIAL_CELL_VALUE))\n outputFile.write(\" } \\n\")\n #outputFile.write()\n \n \n print(\"generate_inicialization_file\")\n 
initialization_output_file_name=\"inicializacion.inc\"\n initialization_input_file_name=id_test+\"_initialization.csv\"\n f_output = io.open(INPUT_PARSER_RESULTS_DIR+initialization_output_file_name, \"w\",newline='\\n')\n f_input = io.open(AGRODEVS_INPUT_DIR+initialization_input_file_name, \"r\")\n \n input_reader = csv.reader(f_input, delimiter=',')\n field_names_list = next(input_reader)\n if (field_names_list[0]!=\"agent\"):\n print(\"First field of inicialization input file should be 'agent' but is:\"+field_names_list[0])\n print(\"Cannot generate inicialization file for AgroDevs\")\n return\n else:\n print(field_names_list)\n #Write macro header line\n f_output.write(\"#BeginMacro(inicializar) \\n\")\n \n for line in input_reader:\n if (line[0]==\"default\"):\n #generate default cell initialization\n print(\"generating default cell initialization\")\n else:\n #generate agent cell initialization\n #print(\"generate agent cell initialization\")\n _generate_cell_initialization(f_output,line,field_names_list)\n \n f_output.write(\"#EndMacro \\n\") \n f_input.close()\n f_output.close()", "def init_tables(self) -> None:\n with self.table_access_condition:\n conn = self._get_connection()\n conn.execute(\"PRAGMA foreign_keys = 1\")\n c = conn.cursor()\n c.execute(tables.CREATE_STUDIES_TABLE)\n c.execute(tables.CREATE_SUBMISSIONS_TABLE)\n c.execute(tables.CREATE_REQUESTERS_TABLE)\n c.execute(tables.CREATE_UNITS_TABLE)\n c.execute(tables.CREATE_WORKERS_TABLE)\n c.execute(tables.CREATE_RUNS_TABLE)\n c.execute(tables.CREATE_RUN_MAP_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUPS_TABLE)\n c.execute(tables.CREATE_PARTICIPANT_GROUP_QUALIFICATIONS_MAPPING_TABLE)\n conn.commit()", "def __init__(self, spec, decl=None):\n self._spec = []\n self.initialize()\n self._processDecl(decl)\n self._processSpec(spec)", "def init_rib_tables(self):\n cur = self.sql.cursor()\n cur.execute(\"PRAGMA foreign_keys = on\")\n cur.execute('''\n CREATE TABLE rtr_cache (\n rtr_id INTEGER PRIMARY KEY NOT NULL,\n device TEXT NOT NULL,\n rtrupdt INTEGER,\n UNIQUE (device))''')\n cur.execute('''\n CREATE TABLE rtr_rib (\n rtr_id INTEGER NOT NULL\n REFERENCES rtr_cache(rtr_id)\n ON DELETE CASCADE\n ON UPDATE CASCADE,\n idx INTEGER NOT NULL,\n status TEXT,\n pfx TEXT NOT NULL,\n pfxlen INTEGER NOT NULL,\n pfxstr_min TEXT NOT NULL,\n pfxstr_max TEXT NOT NULL,\n nexthop TEXT NOT NULL,\n metric INTEGER,\n locpref INTEGER,\n weight INTEGER,\n pathbutone TEXT,\n orig_asn INTEGER NOT NULL,\n route_orig TEXT)''')\n self.sql.commit()", "def _create_intermediate_new_tables_structure(self, conn):\n table_names = []\n with conn.cursor() as cursor, CodeProfiler() as cp:\n tblname = self._blacklist_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\n \"\"\").format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY LIST 
(operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._notifications_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._exceptions_lists_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n operator_id TEXT NOT NULL,\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY LIST (operator_id)\n \"\"\").format(sql.Identifier(tblname)))\n table_names.append(tblname)\n self._create_operator_partitions(conn,\n parent_tbl_name=tblname,\n child_name_fn=self._exceptions_lists_new_part_tblname,\n is_unlogged=True)\n\n tblname = self._blocking_conditions_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n cond_name TEXT NOT NULL,\n reason TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._mnc_mcc_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n mcc_mnc_pattern TEXT NOT NULL,\n operator_id TEXT NOT NULL\n )\"\"\")\n .format(sql.Identifier(tblname)))\n table_names.append(tblname)\n\n tblname = self._notifications_imei_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._notifications_triplets_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n msisdn TEXT NOT NULL,\n block_date DATE NOT NULL,\n reasons TEXT[] NOT NULL,\n is_valid BOOLEAN,\n amnesty_granted BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n fallback_operators TEXT[]\n ) PARTITION BY RANGE (virt_imei_shard)\"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True)\n table_names.append(tblname)\n\n tblname = self._pairings_imei_imsi_new_tblname\n cursor.execute(sql.SQL(\"\"\"CREATE UNLOGGED TABLE {0} (\n imei_norm TEXT NOT NULL,\n virt_imei_shard SMALLINT NOT NULL,\n imsi TEXT NOT NULL,\n is_valid BOOLEAN,\n imei_norm_with_check_digit TEXT,\n home_operator TEXT,\n is_blacklisted BOOLEAN\n ) PARTITION BY RANGE (virt_imei_shard) \"\"\")\n .format(sql.Identifier(tblname)))\n partition_utils.create_imei_shard_partitions(conn, tbl_name=tblname, unlogged=True, fillfactor=45)\n table_names.append(tblname)\n\n self._intermediate_table_names.extend(table_names)\n return -1, cp.duration", "def __init__(\n self,\n tableCollection: str,\n activation: str,\n eigenTimeConst: str = \"\",\n expansionTimeConstant: str = \"\",\n ):\n pass", "def test_create_table(self):\n self.assertEqual(\n ['CREATE', 'TABLE', 'T1', '(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n)'],\n grammar._CREATE_TABLE.parseString(\n 'CREATE TABLE IF NOT EXISTS `T1`(\\nc1 ENUM(\"a\", \"b\", \"c\"), c2 SET(\"0\", \"1\", \"2\")\\n);'\n ).asList()\n )", "def _create_TableDescriptor(self):\n\n self.conn.cursor.execute(\"PRAGMA table_info(\" + self.table_name + \")\")\n 
descriptions = self.conn.cursor.fetchall()\n column_map = {}\n for description in descriptions:\n column_map[description[1]] = description[2]\n td = TD(self.table_name, column_map) \n\n# self.conn.cursor.execute(\"SELECT sql FROM sqlite_master WHERE name='{tb}'\"\\\n# .format(tb=self.table_name))\n# aa = str(self.conn.cursor.fetchone()[0])\n# sindx = aa.find(\"(\")\n# eindx = aa.find(\")\")\n# aa = aa[sindx+1:eindx]\n# aa = aa.split(\",\")\n# column_map = {kyval.split()[0]:kyval.split()[1] for kyval in aa}\n# td = TD(self.table_name, column_map) \n\n return td", "def init_table(row_num):\n # Initialize the number of rows in table\n table = []\n for i in range(row_num):\n row = []\n table.append(row)\n\n # Append the default first cell to the table\n table[0].append(\"Curreny Type\")\n\n return table", "def _db_init_data_tables(self):\n\n #\n # TESTTYPE table\n #\n return self._db_execute(\n \"\"\"\n create table TESTTYPE (\n KEY text unique,\n VALUE text\n )\n \"\"\"\n )", "def defineTABLESECTION(f,layernamelist):\r\n \r\n layercolordict={}\r\n for layername in layernamelist:\r\n t=random.randint(10,17)\r\n layercolordict[layername]=random.randrange(10+t,240+t,10)\r\n \r\n layercolordict[\"Outline\"]=1\r\n layercolordict[\"Mark\"]=5\r\n layercolordict[\"Cutline\"]=2\r\n \r\n f.write(\"0\\nSECTION\\n2\\nTABLES\\n0\\nTABLE\\n2\\nLAYER\\n70\\n2\\n\") \r\n for layername in layernamelist:\r\n f.write(\"0\\nLAYER\\n2\\n\"+layername+\"\\n70\\n0\\n62\\n\"+str(layercolordict[layername])+\"\\n6\\nCONTINUOUS\\n\")\r\n f.write(\"0\\nENDTAB\\n0\\nENDSEC\\n\")", "def make_tables(\n table_cfgs: \"list[tuple[BitPos, BitPos, OffsetType]]\", entries\n) -> \"list[Table]\":\n tables = []\n entry_groups = [entries]\n for (low_bit, cap_bit, offset_type) in table_cfgs:\n table = Table(entry_groups, low_bit, cap_bit, offset_type)\n entry_groups = map(lambda bucket: bucket.entries(), table.buckets())\n tables.append(table)\n return tables" ]
[ "0.52702624", "0.51661", "0.51060444", "0.5094332", "0.50876445", "0.50695586", "0.50232565", "0.49792466", "0.49553454", "0.49386698", "0.49274278", "0.49077904", "0.4889465", "0.48876804", "0.48618603", "0.48576298", "0.48565733", "0.48539078", "0.4839813", "0.48349887", "0.48339733", "0.48286325", "0.48004246", "0.47759512", "0.47651866", "0.47467768", "0.4742607", "0.47310084", "0.46929148", "0.468556" ]
0.7456325
0
Enable the switch instance. Start the traffic manager threads and allow packets to enter the processor chain.
def enable(self): if not self.tm_started: for name, tm in self.air_traffic_manager.items(): logging.debug("Starting tm %s" % name) tm.start() tm_started = True logging.debug("Enabling switch %s" % self.name) self.disabled = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enable(self):\n self.switch.enable()\n self._enabled = True", "def start_sending_to_switch(self):\n self.switch_active = True\n for message in self.internal_switch_buffer:\n self.switch.buffer.append(message)\n self.internal_switch_buffer = []", "def launch ():\n def start_switch (event):\n log.info(\"switch %s has come up\" % event.dpid)\n log.info(event.connection.ports)\n sw = switches_by_dpid.get(event.dpid)\n\n if sw is None:\n # New switch\n sw = TopoSwitch(event.connection)\n switches_by_dpid[event.dpid] = sw\n sw.connect(event.connection)\n else:\n sw.connect(event.connection)\n core.openflow.addListenerByName(\"ConnectionUp\", start_switch)", "def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)", "def _enable(self):\n sub = multiprocessing.Process(target=subproc)\n sub.start()", "def enable(self):\n self.fisica.open()\n self.rx.threadStart()\n self.tx.threadStart()", "def start():\n Networker.stop()\n Networker.Instance = Networker()", "def _start(self):\n\n super(PySwitchLibApiDaemonRunner, self)._start()", "def enable_relays(self):\n #ensure clock and data are low\n self.e.clear_bit(7)\n self.e.clear_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)", "def _init_threads(self):\n\n startTh = Thread(name='InitialStart', target = self._singleUpdate, args=(self.outPs, ))\n self.threads.append(startTh)\n\n sendTh = Thread(name='SteeringListen',target = self._listen_for_steering, args = (self.inPs[0], self.outPs, ))\n self.threads.append(sendTh)", "def on(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self._hub.switch_power.power_on(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_on()\n time.sleep(5) # Small delay to give time for 'dev/tty' to populate\n switchboard = self._get_switchboard_if_initialized()\n if switchboard:\n switchboard.open_all_transports()", "def on_pre_enter(self):\n self.setup()\n self.start()", "def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)", "def start_controller(self, controller):\n srv = SwitchControllerRequest()\n srv.start_controllers = [controller]\n srv.strictness = SwitchControllerRequest.BEST_EFFORT\n self.switch_controller(srv)", "def force_switch_on(self):\n self.turn_on_modem()", "def startManager(self):\n\t\tlogging.info(\"----->>>The DeviceDataManager will be started\")\n\t\tself.sysPerfManager.startManager()\n\t\tself.sensorAdapterManager.startManager()\n\t\tif self.enableRedis:\n\t\t\tself.redisClient.connectClient()\n\t\t\n\t\tif self.enableMqtt:\n\t\t\tself.mqttClient.connectClient()", "def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)", "def server_activate(self):\n\t\tpass", "def activate(self):\n self.start()", "def start_traffic(self):\n raise NotImplementedError", "def turn_on(self, **kwargs):\n if not self.is_on:\n _LOGGER.debug(\"Sending START command to: %s\", self._name)\n 
self._api.control('START')\n self._mower_status = STATUS_EXECUTING_START\n self.schedule_update_ha_state()", "def start(self):\n def f():\n if (self.started): return\n self.started = True\n with client.ServerProxy(self.host) as proxy:\n while (not self.req_shutdown):\n self.update_speed(proxy)\n time.sleep(self.com_freq)\n self.started = False\n self.req_shutdwon = False\n\n Thread(target=f).start()", "def start():\n global logger\n global client\n global config\n global device\n global circles_config\n global circles\n global mac2circle\n logger = LoggerClient.open(\"PlugwiseMonitor\")\n if not verbose: logger.config(logger.levels.WARNING, logger.schedules.DAILY)\n config = Utils.getconfig(\"plugwise\", logger)\n assert config is not None\n device = plugwise_api.Stick(logger, DEFAULT_SERIAL_PORT)\n\n # circles_config is a list of dictionaries: name, mac, desc. state field is\n # added in next loop to track its value so it can be used to only send\n # messages in state transitions. power1s and power8s field is used to check\n # the relative difference in power in order to reduce the network overhead.\n circles_config = config[\"circles\"]\n circles = []\n mac2circle = {}\n for circle_data in circles_config:\n mac = circle_data[\"mac\"]\n circles.append( plugwise_api.Circle(logger, mac, device, {\n \"name\" : circle_data[\"name\"],\n \"location\" : circle_data[\"desc\"],\n \"always_on\" : \"False\",\n \"production\" : \"True\"\n }) )\n mac2circle[mac] = circles[-1]\n circle_data[\"state\"] = \"NA\"\n for v in OUTPUT_LIST:\n circle_data[\"power\" + v[\"suffix\"]] = -10000.0\n circle_data[\"when\" + v[\"suffix\"]] = 0.0\n \n client = Utils.getpahoclient(logger, __configure)\n client.loop_start()", "def start(self):\n self.active = True", "def init(self):\n logger.info(\"Turn on antenna power\")\n logger.info(\"Register on the network\")\n self.emit('provider-modified', \"Charlie Telecom\")\n self.network_strength = 100\n yield tichy.Service.get('ConfigService').wait_initialized()\n self.config_service = tichy.Service.get(\"ConfigService\")\n logger.info(\"got config service\")\n self.values = self.config_service.get_items(\"call_forwarding\")\n if self.values != None: self.values = dict(self.values)\n logger.info(\"realized values is none\")\n self.SettingReason = tichy.settings.ListSetting('Call Forwarding', 'Reason', tichy.Text, value='unconditional', setter=self.ForwardingSetReason, options=[\"unconditional\",\"mobile busy\",\"no reply\",\"not reachable\",\"all\",\"all conditional\"], model=tichy.List([ListSettingObject(\"unconditional\", self.action),ListSettingObject(\"mobile busy\", self.action),ListSettingObject(\"no reply\", self.action),ListSettingObject(\"not reachable\", self.action),ListSettingObject(\"all\", self.action),ListSettingObject(\"all conditional\", self.action)]), ListLabel = [('title','name')])\n self.SettingChannels = tichy.settings.Setting('Call Forwarding', 'channels', tichy.Text, value=self.ForwardingGet('class'), setter=self.ForwardingSetClass, options=[\"voice\",\"data\",\"voice+data\",\"fax\",\"voice+data+fax\"])\n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Target Number', tichy.Text, value=self.ForwardingGet('number'), setter=self.ForwardingSetNumber)\n self.SettingTargetNumber = tichy.settings.NumberSetting('Call Forwarding', 'Timeout', tichy.Text, value=self.ForwardingGet('timeout'), setter=self.ForwardingSetTimeout)\n \n if len(self.logs) == 0: \n for i in range(3):\n call = Call('0049110', direction='out')\n 
self.logs.insert(0, call)\n yield None", "def EnableCPU():\n global option\n option['device'] = 'CPU'", "def start(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].start()\n self.globalTimer = Timer(1, self.step)\n self.globalTimer.start()", "def connect(self):\n self.start()", "def __enable_connections(self):\r\n pass", "def enable(self, *args, **kwargs):\n pass" ]
[ "0.633651", "0.63337624", "0.627072", "0.62615603", "0.6093382", "0.6046594", "0.595381", "0.5936305", "0.58609205", "0.58298725", "0.5779756", "0.57681865", "0.5766365", "0.5766356", "0.5763502", "0.57629", "0.57612556", "0.5747415", "0.5741657", "0.57210463", "0.5721009", "0.5691176", "0.56485105", "0.5615015", "0.5595959", "0.5588712", "0.55703497", "0.5539881", "0.55389404", "0.5538847" ]
0.7502978
0
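The positive document in the record above lazily starts traffic-manager threads and then clears a disabled flag. The following is a minimal, self-contained sketch of that pattern; MiniSwitch, DummyTrafficManager and the attribute names are illustrative assumptions, not code from the dataset's source repository. The sketch assigns self.tm_started on the instance so the threads are started only once.

import logging
import threading
import time

class DummyTrafficManager(threading.Thread):
    # Hypothetical stand-in for a per-port traffic manager worker.
    def run(self):
        time.sleep(0.01)  # a real manager would dequeue and schedule packets here

class MiniSwitch:
    def __init__(self, name, tm_names):
        self.name = name
        self.disabled = True
        self.tm_started = False
        self.air_traffic_manager = {n: DummyTrafficManager(name=n, daemon=True) for n in tm_names}

    def enable(self):
        # Start every traffic manager exactly once, then open the switch for traffic.
        if not self.tm_started:
            for name, tm in self.air_traffic_manager.items():
                logging.debug("Starting tm %s", name)
                tm.start()
            self.tm_started = True
        logging.debug("Enabling switch %s", self.name)
        self.disabled = False

sw = MiniSwitch("s1", ["port0", "port1"])
sw.enable()
assert sw.tm_started and not sw.disabled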
Disable the switch instance. Packets on ingress are discarded while the switch is disabled. Traffic manager threads are not stopped.
def disable(self): logging.debug("Disabling switch %s" % self.name) self.disabled = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()", "def disable(self):\n self._disable_monitor()\n self._pinger.stop()", "def disable_switch_port(self, mgr, interface):\n confstr = snipp.CMD_NO_SWITCHPORT % (interface)\n confstr = self.create_xml_snippet(confstr)\n LOG.debug(\"NexusDriver: %s\" % confstr)\n mgr.edit_config(target='running', config=confstr)", "def on_disable(self) -> None:\n self._on_stop_cycle({})", "def disable(self):\n\n super().disable()\n self._slo_image_size.disable()\n self._slo_neural_network.disable()\n self._slo_number_of_epochs.disable()\n self._slo_examples_per_batch.disable()", "def _disable(self):\n self.enabled = False", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def switch_off_traffic_lights(self):\n for actor in self.world.get_actors():\n if actor.type_id == 'traffic.traffic_light':\n actor.freeze(True)\n # We set the traffic light to 'green' because 'off' state sets the traffic light to\n # 'red'.\n actor.set_state(carla.TrafficLightState.Green)", "def off_switch(self):\n self._switch_callback = None", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "def stop_traffic(self):\n self._logger.debug(\"stop_traffic()\")", "def set_disabled_switch(self, disabled):\n self.disabled = disabled", "def disable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 0)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def disable(self):\n self.error_code = 'DISABLED'\n self.running = False", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "def disable(self):\n self.enabled = False", "def disable(self):\n self._enabled = False", "async def async_set_wifi_led_off(self):\n return", "def firewallOff():\n pass", "def Bg_ping_stop():\r\n BgPing.stop_traffic()", "def stop_traffic(self):\n raise NotImplementedError(\n \"The TrafficController does not implement\",\n \"the \\\"stop_traffic\\\" function.\")", "def ethernet_off(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_off(self.ethernet_port_number)", "def turn_off(self, **kwargs: 
Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def disable_relays(self):\n #ensure clock low and data high\n self.e.clear_bit(7)\n self.e.set_bit(5)\n time.sleep(0.01)\n\n #pulse the clock line\n self.e.set_bit(7)\n time.sleep(0.01)\n self.e.clear_bit(7)\n\n #clear the data line\n self.e.clear_bit(5)", "def disable(self):", "def disable():\n request = dict(id='gbn')\n _gbn_disable(request)", "def disable(self) -> None:", "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()" ]
[ "0.66462976", "0.6192181", "0.6160286", "0.6113852", "0.6101358", "0.6030463", "0.6019299", "0.60160685", "0.6007818", "0.59888124", "0.5958932", "0.59445566", "0.5917474", "0.591579", "0.5867535", "0.5856679", "0.5840998", "0.583786", "0.58215153", "0.57709235", "0.5764594", "0.573368", "0.57228184", "0.57125336", "0.5711457", "0.5710529", "0.570228", "0.5700118", "0.56883967", "0.5661413" ]
0.70145595
0
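As a companion to the disable record above, here is a hedged sketch of the same semantics in isolation: a disabled flag that makes the ingress path drop packets while leaving any worker threads untouched. The class and method names are assumptions chosen for illustration only.

import logging

class MiniSwitch:
    def __init__(self, name):
        self.name = name
        self.disabled = False
        self.dropped = 0

    def disable(self):
        # Only flips the flag; traffic manager threads keep running.
        logging.debug("Disabling switch %s", self.name)
        self.disabled = True

    def ingress(self, packet):
        # Packets arriving while the switch is disabled are discarded.
        if self.disabled:
            self.dropped += 1
            return None
        return packet

sw = MiniSwitch("s1")
sw.disable()
assert sw.ingress(bytearray(b"\x00\x01")) is None and sw.dropped == 1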
Transmit handler template for documentation. out_port: The port number to which the packet is to be sent. packet: A bytearray object holding the packet to transmit.
def dummy_transmit_handler(out_port, packet): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, out):", "def _post(self, which_port, msg):\n return _spacegrant_swig.binary_sink_sptr__post(self, which_port, msg)", "def send_traffic_data(serialport, pack):\n pack[0] = 0x01\n pack[1] = 0x00\n serialport.write(pack)\n logging.debug(\"Traffic Data - Sent.\")\n logging.debug(str(pack))", "def process(self, parsed_packet):\n byte_buf = parsed_packet.serialize()\n out_port= parsed_packet.get_field(\"intrinsic_metadata.egress_port\")\n logging.debug(\"Transmit pkt id %d to %d\" % (parsed_packet.id, out_port))\n buf = bytearray(byte_buf)\n for idx in range((len(buf) + 19)/20):\n logging.debug(hexify(buf[20*idx : 20*(idx+1)], 20))\n\n self.transmit_handler(out_port, byte_buf)", "def add_out_port(self, m: int, content: str, **opts) -> None:", "def _send_packet_out(self, packet: Packet, port) -> None:\n try:\n p = self.shell.PacketOut(bytes(packet), egress_port=str(port))\n p.send()\n logging.debug(\"Sending packet out: egress_port {}\".format(port))\n except UserError as e:\n logging.debug(e)\n return", "def __init__(self, port):\n self.port = port\n self.action_type = 'output'", "def send_packet_out(dp, pkt, out_port, in_port=ofp.OFPP_CONTROLLER):\n actions = [parser.OFPActionOutput(out_port)]\n msg = parser.OFPPacketOut(datapath=dp,\n buffer_id=ofp.OFP_NO_BUFFER,\n in_port=in_port,\n actions=actions,\n data=pkt)\n return msg", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def record(self, port_name, t_start=None):", "def port_out(self) -> int:\n return self.proto.port_out", "def OutputPort(*args, **kw):\n return Port.make_shared(OutputPortInterface(*args, **kw))", "def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr__post(self, which_port, msg)", "def _post(self, which_port, msg):\n return _spacegrant_swig.ax25_pdu_packer_sptr__post(self, which_port, msg)", "def transmit(self, value):\n if (self.__sink == None):\n return\n target = self.__sink.target\n target(value)", "def net_send(out_data: bytes, conn: socket.socket) -> None:\n print(\"Sending {} bytes\".format(len(out_data)))\n conn.send(out_data)", "def output_generator(pkt):\r\n ethe_header = pkt[0]\r\n ip_header = pkt[1]\r\n protocol = pkt[1][7]\r\n data_header = pkt[2]\r\n ethe_prefix = \"ETHER: \"\r\n ip_prefix = \"IP: \"\r\n tcp_prefix = \"TCP: \"\r\n udp_prefix = \"UDP: \"\r\n icmp_prefix = \"ICMP: \"\r\n # print ether header information\r\n print(\"\\n\" + ethe_prefix + \"----- Ether Header -----\")\r\n print(ethe_prefix)\r\n print(ethe_prefix + \"Packet size = \" + str(ethe_header[0]) + \" bytes\")\r\n print(ethe_prefix + \"Destination = \" + str(ethe_header[1]))\r\n print(ethe_prefix + \"Source = \" + str(ethe_header[2]))\r\n print(ethe_prefix + \"Ethertype = \" + str(ethe_header[3]) + \" (IP)\")\r\n print(ethe_prefix)\r\n\r\n print(ip_prefix + \"----- IP Header -----\")\r\n print(ip_prefix)\r\n print(ip_prefix + \"Version = \" + str(ip_header[0]))\r\n print(ip_prefix + \"Header length = \" + str(4 * int(ip_header[1])) + \" bytes\")\r\n print(ip_prefix + \"Type of service = 0x\" + str(ip_header[2]))\r\n if str(ip_header[2]) == \"00\":\r\n print(ip_prefix + \"\\txxx. .... = 0 (precedence)\")\r\n print(ip_prefix + \"\\t...0 .... = normal delay\")\r\n print(ip_prefix + \"\\t.... 0... = normal throughput\")\r\n print(ip_prefix + \"\\t.... .0.. 
= normal reliability\")\r\n print(ip_prefix + \"Total length = \" + str(ip_header[3]) + \" bytes\")\r\n print(ip_prefix + \"Identification = \" + str(ip_header[4]))\r\n print(ip_prefix + \"Flags = 0x\" + str(ip_header[5]))\r\n flag = str(format(int(ip_header[5][0]), '04b'))\r\n if flag[0] == \"0\":\r\n print(ip_prefix + \"\\t0... ... = Reserved bit: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t1... ... = Reserved bit: set\")\r\n if flag[1] == \"0\":\r\n print(ip_prefix + \"\\t.0.. ... = Don't fragment: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t.1.. ... = Don't fragment: set\")\r\n if flag[2] == \"0\":\r\n print(ip_prefix + \"\\t..0. ... = More fragments: Not set\")\r\n else:\r\n print(ip_prefix + \"\\t..1. ... = More fragments: set\")\r\n flag_offset = str((int(ip_header[5][2:3])))\r\n print(ip_prefix + \"Fragment offset = \" + flag_offset + \" bytes\")\r\n print(ip_prefix + \"Time to live = \" + str(ip_header[6]) + \" seconds/hops\")\r\n if protocol == 1:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (ICMP)\")\r\n if protocol == 17:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (UDP)\")\r\n if protocol == 6:\r\n print(ip_prefix + \"Protocol = \" + str(protocol) + \" (TCP)\")\r\n print(ip_prefix + \"Header checksum = \" + str(ip_header[8]))\r\n print(ip_prefix + \"Source address = \" + str(ip_header[9]))\r\n print(ip_prefix + \"Destination address = \" + str(ip_header[10]))\r\n if ip_header[11] == \"\":\r\n print(ip_prefix + \"No options\")\r\n else:\r\n print(ip_prefix + \"Options: \" + ip_header[11])\r\n print(ip_prefix)\r\n\r\n if protocol == 1:\r\n print(icmp_prefix + \"----- ICMP Header -----\")\r\n print(icmp_prefix)\r\n if str(data_header[0]) == \"8\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo request)\")\r\n elif str(data_header[0]) == \"0\":\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]) + \" (Echo reply)\")\r\n else:\r\n print(icmp_prefix + \"Type = \" + str(data_header[0]))\r\n print(icmp_prefix + \"Code = \" + str(data_header[1]))\r\n print(icmp_prefix + \"Checksum = \" + str(data_header[2]))\r\n print(icmp_prefix)\r\n\r\n elif protocol == 6:\r\n print(tcp_prefix + \"----- TCP Header -----\")\r\n print(tcp_prefix)\r\n print(tcp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(tcp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(tcp_prefix + \"Sequence number = \" + str(data_header[2]))\r\n print(tcp_prefix + \"Acknowledgement number = \" + str(data_header[3]))\r\n print(tcp_prefix + \"Data offset = \" + str(data_header[4]) + \" bytes\")\r\n flag = str(data_header[5])\r\n print(tcp_prefix + \"\\tReserved: Not set\")\r\n print(tcp_prefix + \"\\tNonce: Not set\")\r\n if flag[0] == \"0\":\r\n print(tcp_prefix + \"\\tCWR: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tCWR: Set\")\r\n if flag[1] == \"0\":\r\n print(tcp_prefix + \"\\tECN-Echo : No set\")\r\n else:\r\n print(tcp_prefix + \"\\tECN-Echo: Set\")\r\n if flag[2] == \"0\":\r\n print(tcp_prefix + \"\\tUrgent: Not set\")\r\n else:\r\n print(tcp_prefix + \"\\tUrgent: Set\")\r\n if flag[3] == \"0\":\r\n print(tcp_prefix + \"\\tAcknowledgment: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tAcknowledgment: Set\")\r\n if flag[4] == \"0\":\r\n print(tcp_prefix + \"\\tPush: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tPush: Set\")\r\n if flag[5] == \"0\":\r\n print(tcp_prefix + \"\\tReset: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tReset: Set\")\r\n if flag[6] == \"0\":\r\n print(tcp_prefix + \"\\tSyn: No set\")\r\n 
else:\r\n print(tcp_prefix + \"\\tSyn: Set\")\r\n if flag[7] == \"0\":\r\n print(tcp_prefix + \"\\tFin: No set\")\r\n else:\r\n print(tcp_prefix + \"\\tFin: Set\")\r\n print(tcp_prefix + \"Window = \" + str(data_header[6]))\r\n print(tcp_prefix + \"Checksum 0x= \" + str(data_header[7]))\r\n print(tcp_prefix + \"Urgent pointers = \" + str(data_header[8]))\r\n if data_header[9] != 0:\r\n print(tcp_prefix + \"Options\")\r\n else:\r\n print(tcp_prefix + \"No options\")\r\n print(tcp_prefix)\r\n\r\n elif protocol == 17:\r\n print(udp_prefix + \"----- UDP Header -----\")\r\n print(udp_prefix)\r\n print(udp_prefix + \"Source port = \" + str(data_header[0]))\r\n print(udp_prefix + \"Destination port = \" + str(data_header[1]))\r\n print(udp_prefix + \"Length = \" + str(data_header[2]))\r\n print(udp_prefix + \"Checksum = \" + str(data_header[3]))\r\n print(udp_prefix)", "def out(self, out):\n\n self._out = out", "def packet_out(self, data, in_port, out_port, out_queue, nq=0):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n dpid = self.datapath.id\n #*** First build OF version specific list of actions:\n if nq:\n #*** Packet out with no queue (nq):\n actions = [self.datapath.ofproto_parser.OFPActionOutput \\\n (out_port, 0)]\n\n else:\n #*** Note: out_port must come last!\n actions = [\n parser.OFPActionSetQueue(out_queue),\n parser.OFPActionOutput(out_port, 0)]\n\n #*** Now have we have actions, build the packet out message:\n out = parser.OFPPacketOut(\n datapath=self.datapath, buffer_id=ofproto.OFP_NO_BUFFER,\n in_port=in_port, actions=actions, data=data)\n\n self.logger.debug(\"Sending Packet-Out message dpid=%s port=%s\",\n dpid, out_port)\n #*** Tell the switch to send the packet:\n self.datapath.send_msg(out)", "def writer(self):\n #while self.alive:\n try:\n icmpreq = ethernet.Ethernet(src_s=\"dc:a6:32:00:a7:8b\", dst_s=\"ec:84:b4:3e:c8:20\", type=ethernet.ETH_TYPE_IP) +\\\n ip.IP(p=ip.IP_PROTO_ICMP, src_s=\"192.168.1.35\", dst_s=\"172.217.166.110\") +\\\n icmp.ICMP(type=8) +\\\n icmp.ICMP.Echo(id=1, ts=123456789, body_bytes=b\"12345678901234567890\")\n self.serial.write(icmpreq.bin()+b'~')\n except socket.error as msg:\n print(msg)\n self.stop()", "def write(self, proto):\n pass", "def _vendor_request_out(self, request, value=0, index=0, data=None, timeout=1000):\n return self._vendor_request(usb.ENDPOINT_OUT, request, value=value,\n index=index, length_or_data=data, timeout=timeout)", "def send(self, data):", "def transmit(self, message):\n pass", "def send_byte(byte_out):\n GPIO.output(clock_pin, 0)\n # set the chip select to write\n GPIO.output(chip_select, 1)\n # send the byte \n values = [(ord(byte_out) >> i) % 2 for i in range(0, 8)]\n GPIO.setup(data_pins, GPIO.OUT)\n GPIO.output(data_pins, values)\n # flash the clock pin\n GPIO.output(clock_pin, 1)\n GPIO.output(clock_pin, 0)", "def output(self, p_addr = 0):\n\t\tout_pos = self.get_address(p_addr, 1)\n\t\tself.out_param += [self.get_data(out_pos)]\n\t\tif self.debug:\n\t\t\tprint(\"DIAGNOSTIC:\", self.out_param[-1])\n\t\tself.pos += 2", "def _send_data_to_nn(self,wbtData):\n\t\tself._neuralNetwork.stdin.write(\"COMM IN\\n\") # this shitty COMM IN is not really needed..to modify in closedloop.py\n\t\tself._neuralNetwork.stdin.write(wbtData)", "def act_like_hub (self, packet, packet_in):\n # We want to output to all ports -- we do that using the special\n # OFPP_ALL port as the output port. 
(We could have also used\n # OFPP_FLOOD.)\n self.resend_packet(packet_in, of.OFPP_ALL)\n\n # Note that if we didn't get arp_req valid buffer_id, arp_req slightly better\n # implementation would check that we got the full data before\n # sending it (len(packet_in.data) should be == packet_in.total_len)).", "def callback_serial_write(data):\n serial_write(data.data)", "def message_ports_out(self):\n return _spacegrant_swig.binary_sink_sptr_message_ports_out(self)" ]
[ "0.5656294", "0.5640042", "0.5389474", "0.53658545", "0.52940327", "0.52939874", "0.5285707", "0.51725745", "0.51605856", "0.5146283", "0.51232743", "0.5088026", "0.5069205", "0.50586265", "0.50550586", "0.50533634", "0.50321823", "0.5028129", "0.49887457", "0.4957033", "0.4946684", "0.49410564", "0.4940173", "0.4939851", "0.4922899", "0.49202242", "0.49056667", "0.48948038", "0.4893408", "0.48837185" ]
0.655866
0
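The query/document pair above only documents the transmit-handler signature (out_port, packet). Below is a hedged example of one concrete handler with that signature and of wiring it up; the logging behaviour and the transmit_handler variable name are assumptions, not part of the original API.

import logging

def logging_transmit_handler(out_port, packet):
    # Example handler: log the egress port, the length and a short hex preview.
    preview = bytes(packet[:16]).hex()
    logging.debug("tx port=%d len=%d bytes=%s...", out_port, len(packet), preview)

# A processor chain would hold a reference to the handler and call it once a
# packet has been serialized for egress.
transmit_handler = logging_transmit_handler
transmit_handler(3, bytearray(b"\x08\x00" + b"\x00" * 62))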
Take a field from the CSV and expand/split it on a delimiter, returning a list of individual values. If the return_list flag is set to true, this method returns the data as a list of new fields instead of a cleaned-up string normalized with a semicolon delimiter.
def expand_and_normalize_field(field, return_list=False): if isinstance(field, basestring): field = field.rstrip(';:,') data = [_normalize_expanded_field(r) for r in re.split(",|;|:", field)] if return_list: return data else: return ";".join(data) else: if return_list: return [field] else: return field
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for slist in lst:\n for x in slist:\n lst_flat.append(x)\n return lst_flat", "def from_csv_line(line):\r\n return line.strip().split(',')", "def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []", "def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []", "def _parsecsv(x):\n for line in x:\n # decode as utf-8, whitespace-strip and split on delimiter\n yield line.decode('utf-8').strip().split(config.DELIMITER)", "def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''", "def parse_csv2list_upload(file_name):\n with open(file_name) as f:\n records = csv.reader(f)\n csv_list = [[j.strip() for j in record] for record in records]\n return csv_list", "def __obtain_data_from_csv__(self, csvfile):\n data = csvfile.readlines()\n data = self.__parse_string_for_delimiter__(data)\n return data", "def __parse_string_for_delimiter__(self, data):\n parsed = []\n for row in data:\n row = self.__remove_break_line__(row)\n row = self.__split_for_delimiter__(row)\n parsed.append(row)\n return parsed", "def csv_reader(self, file_obj):\n reader = csv.reader(file_obj)\n for row in reader:\n row_1 = (' '.join(row))\n self.data.append(row_1.split(';'))\n return self.data", "def _read_delimited_field(d):\n val = []\n val.append(next(d))\n while val[-1] != FIELD_DELIMITER:\n try:\n val.append(next(d))\n except StopIteration: break\n\n modlogger.debug( \"read:%s\"%val[:-1])\n return field_ctor(val[:-1])", "def getcsv(self, section, option):\n elements = self.get(section, option)\n splitter = ',' if ',' in elements else None\n return [element.strip() for element in elements.split(splitter)]", "def listparse(csvfilename):\r\n output = []\r\n with open(csvfilename, 'r', newline = '') as csvfile:\r\n csvreader = csv.reader(csvfile, skipinitialspace = True)\r\n for row in csvreader:\r\n output.append(row)\r\n return output", "def process_data(line):\n wire_path_data = []\n for i in line:\n wire_path_data.append(i.strip('\\n').split(','))\n return wire_path_data", "def aslist(value, flatten=True):\n values = aslist_cronly(value)\n if not flatten:\n return values\n result = []\n for value in values:\n subvalues = value.split()\n result.extend(subvalues)\n return result", "def _splitFieldValue(self, line):\n found = 
self.FIELDVALUE.findall(line)\n if found:\n fieldName, value = found[0]\n if fieldName in self.C.ADAPTER_COMMAFIELDS:\n value = self.COMMASPLIT.findall(value)[:-1] # Split and remove last empty part\n return fieldName, value\n return None, None # No field name match on this line.", "def list_process(field, item_list:List[str]):\n # if isinstance(item_list, list):\n if len(item_list) == 0:\n return {\n\n }\n saved_list = []\n\n for i in item_list:\n saved_list.append(f\"{i}\")\n return {\n field: \",\".join(saved_list)\n }", "def line_to_list(self, _line):\n\n\t\tresult = list()\t\t\n\t\t_line_splited = _line.split('\\t')\n\t\t\n\t\tfor value in _line_splited:\n\t\t\tvalue_stripped = value.strip().rstrip()\t\t\t\n\t\t\tresult.append(value_stripped)\t\t\t\t\n\t\t\n\t\treturn result", "def __parseCsvRow(row):\r\n \r\n resultRow = []\r\n for item in row:\r\n if type(item) is str:\r\n if \".\" in item:\r\n try:\r\n f = float(item)\r\n resultRow.append(f)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n try:\r\n i = int(item)\r\n resultRow.append(i)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n resultRow.append(item)\r\n return resultRow", "def _parse_list(string, dtype=int, delimiter=','):\n\n items = string.lower().strip().replace(' ', '').split(delimiter)\n\n if 'none' in items:\n items.pop(items.index('none'))\n contains_none = True\n else:\n contains_none = False\n\n\n if dtype == bool:\n items = [item == 'true' for item in items]\n else:\n items = [dtype(item) for item in items]\n\n if contains_none:\n items.append(None)\n\n return items", "def lineToList(self, line):\n l = [item for item in next(csv.reader(StringIO.StringIO(line), self.CSVDialect))]\n if self.firstLine is None:\n self.firstLine = l\n return None\n return l", "def csv_to_field_Urls(entity, value):\n if value is None or value == '':\n return\n splitter = re.compile(url_splitter)\n entity.string = splitter.split(value)", "def getlist(self, option, sep=',', chars=None):\n return [chunk.strip(chars) for chunk in option.split(sep)]", "def split_field_content(cls, string):\n if \",\" in string and not is_rfc1123_datetime(string):\n return [s.strip() for s in string.split(\",\")]\n else:\n return string", "def transform(self):\n with open(self.csv_path, \"r\") as f:\n csv_entries = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]\n\n nested_fields = get_nested_fieldnames(csv_entries[0])\n # values of these fields should be transformed to a list\n # list_fields = set()\n # for entry in csv_entries:\n # for k, v in entry.items():\n # if '||' in v:\n # list_fields.add(k)\n list_fields = {\n \"BITSTREAM Download URL\",\n \"BITSTREAM License\",\n \"BITSTREAM Webshop URL\",\n \"dc.contributor\",\n \"dc.contributor.author\",\n \"dc.contributor.editor\",\n \"dc.date.available\",\n \"dc.date.accessioned\",\n \"dc.date.issued\",\n \"dc.date.submitted\",\n \"dc.dateSubmitted\",\n \"dc.description.abstract\",\n \"dc.description.provenance\",\n \"dc.grantproject\",\n \"dc.identifier\",\n \"dc.identifier.pr\",\n \"dc.language\",\n \"dc.notes\",\n \"dc.number\",\n \"dc.redirect\",\n \"dc.relation.ispartofseries\",\n \"dc.relationisFundedBy\",\n \"dc.subject\",\n \"dc.subject.classification\",\n \"dc.subject.other\",\n \"dc.title\",\n \"dc.title.alternative\",\n \"dc.type\",\n \"oapen.collection\",\n \"oapen.grant.number\",\n \"oapen.grant.program\",\n \"oapen.imprint\",\n \"oapen.relation.hasChapter\",\n \"oapen.relation.hasChapter_dc.title\",\n 
\"oapen.relation.isFundedBy\",\n \"oapen.relation.isFundedBy_grantor.name\",\n \"oapen.relation.isPartOfBook\",\n \"oapen.relation.isPartOfBook_dc.title\",\n \"oapen.relation.isPublishedBy_publisher.name\",\n \"oapen.relation.isPublisherOf\",\n \"oapen.relation.isbn\",\n \"oapen.remark.public\",\n \"peerreview.anonymity\",\n \"peerreview.id\",\n \"peerreview.open.review\",\n \"peerreview.publish.responsibility\",\n \"peerreview.review.decision\",\n \"peerreview.review.stage\",\n \"peerreview.review.type\",\n \"peerreview.reviewer.type\",\n }\n # add custom 'dc.subject.classification_code'\n list_fields.add(\"dc.subject.classification_code\")\n entries = transform_dict(csv_entries, convert, nested_fields, list_fields)\n\n # Transform release into JSON Lines format saving in memory buffer\n # Save in memory buffer to gzipped file\n list_to_jsonl_gz(self.transform_path, entries)", "def extractFields(deerfootRDDRecord):\n fieldsList = deerfootRDDRecord.split(\",\")\n return (fieldsList[0], [fieldsList[1], fieldsList[15], fieldsList[46]])", "def get_list(section, option, default):\n\tres = get(section, option, default)\n\n\tif res == default:\n\t\treturn default\n\n\tl = unescape_split(\",\", res)\n\n\tif not l:\n\t\treturn default\n\treturn list(l)", "def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()", "def listify(item, delimiter=\",\"):\n if not item:\n return []\n if type(item) is str:\n item = item.split(delimiter)\n if type(item) is not list:\n raise TypeError(\"'listify' must take None, str, or list!\")\n return item" ]
[ "0.63519025", "0.63037026", "0.6226924", "0.621855", "0.621855", "0.605141", "0.60084903", "0.5960452", "0.5954665", "0.58187693", "0.58148223", "0.57171553", "0.56535655", "0.5637533", "0.56276727", "0.55754817", "0.55633485", "0.55611026", "0.54911935", "0.54818034", "0.5430281", "0.5391499", "0.53894114", "0.53786707", "0.5367213", "0.53666645", "0.53438616", "0.5327231", "0.5324738", "0.53171015" ]
0.7508108
0
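For clarity, here is a simplified, self-contained Python 3 variant of the splitting logic described by the query above; it replaces the undisclosed _normalize_expanded_field helper with plain whitespace stripping, so the exact outputs of the real function may differ.

import re

def expand_field(field, return_list=False):
    # Strip trailing delimiters, split on ',', ';' or ':', and trim each piece.
    if isinstance(field, str):
        parts = [p.strip() for p in re.split(",|;|:", field.rstrip(";:,")) if p.strip()]
        return parts if return_list else ";".join(parts)
    # Non-string values pass through (optionally wrapped in a list).
    return [field] if return_list else field

assert expand_field("a; b,c:") == "a;b;c"
assert expand_field("a; b,c:", return_list=True) == ["a", "b", "c"]
assert expand_field(42, return_list=True) == [42]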
Take a row and a field which may have delimited values and convert it into a list of new rows with the same data except for the replaced delimited value.
def expand_rows(row, delimited_fields, expand_row): # _log.debug('expand_row is {}'.format(expand_row)) # go through the delimited fields and clean up the rows copy_row = copy.deepcopy(row) for d in delimited_fields: if d in copy_row: copy_row[d] = expand_and_normalize_field(copy_row[d], False) if expand_row: new_values = [] for d in delimited_fields: fields = [] if d in copy_row: for value in expand_and_normalize_field(copy_row[d], True): fields.append({d: value}) new_values.append(fields) # return all combinations of the lists combinations = list(itertools.product(*new_values)) new_rows = [] for c in combinations: new_row = copy.deepcopy(copy_row) # c is a tuple because of the .product command for item in c: for k, v in item.items(): new_row[k] = v new_rows.append(new_row) return new_rows else: return [copy_row]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def processRow(self, row):\n\t\tif self.delim is not None:\n\t\t\trowArr = row.split(self.delim)\n\t\t\tmsg = \"row does not have expected number of columns found \" + str(len(rowArr)) + \" expected \" + str(self.rowSize)\n\t\t\tassert len(rowArr) == self.rowSize, msg\n\t\telse:\n\t\t\trowArr = row\n\t\t\t\n\t\tnewRowArr = []\n\t\tfor i in range(len(rowArr)):\n\t\t\tcurVal = rowArr[i]\n\t\t\tif (i in self.catValues):\n\t\t\t\tvalues = self.catValues[i]\n\t\t\t\tfor val in values:\n\t\t\t\t\tif val == curVal:\n\t\t\t\t\t\tnewVal = self.trueVal\n\t\t\t\t\telse:\n\t\t\t\t\t\tnewVal = self.falseVal\n\t\t\t\t\tnewRowArr.append(newVal)\n\t\t\telse:\n\t\t\t\tnewRowArr.append(curVal)\n\t\tassert len(newRowArr) == self.newRowSize, \"invalid new row size \" + str(len(newRowArr)) + \" expected \" + str(self.newRowSize)\n\t\tencRow = self.delim.join(newRowArr) if self.delim is not None else newRowArr\n\t\treturn encRow", "def __parseCsvRow(row):\r\n \r\n resultRow = []\r\n for item in row:\r\n if type(item) is str:\r\n if \".\" in item:\r\n try:\r\n f = float(item)\r\n resultRow.append(f)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n try:\r\n i = int(item)\r\n resultRow.append(i)\r\n except ValueError:\r\n resultRow.append(item)\r\n else:\r\n resultRow.append(item)\r\n return resultRow", "def clean_row(row,i):\n # convert string\n char_array = np.array(list(row))\n\n #insert entry dividers, then split by them\n div_ix = (\n np.array([6, 34, 48, 51, 54, 60, 64, 67, 72, 80, 86, 94, 100,\n 107, 112, 119, 125, 137, 141, 145, 156]),\n )\n char_array[div_ix] = ','\n new_csv_row = (''.join(char_array)).split(',')\n\n # remove excess whitespace surrounding data\n new_csv_row = np.array([entry.strip() for entry in new_csv_row])\n\n return new_csv_row", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for slist in lst:\n for x in slist:\n lst_flat.append(x)\n return lst_flat", "def expand_and_normalize_field(field, return_list=False):\n\n if isinstance(field, basestring):\n field = field.rstrip(';:,')\n data = [_normalize_expanded_field(r) for r in re.split(\",|;|:\", field)]\n if return_list:\n return data\n else:\n return \";\".join(data)\n else:\n if return_list:\n return [field]\n else:\n return field", "def map_field_value(\n row: DLCSRecord, field_name: str, config: typing.Dict\n) -> typing.Any:\n mapping: mapper.MappigDictValue = mapper.FIELD_MAPPING[field_name]\n\n if mapping is None:\n return None\n\n if callable(mapping):\n return mapping(row)\n\n if isinstance(mapping, str):\n mapping = [mapping]\n\n if not isinstance(mapping, typing.Collection):\n raise TypeError(\n f\"FIELD_MAPPING[field_name] must be iterable, unless it is None, Callable, or a string.\"\n )\n\n output: typing.List[str] = []\n for csv_field in mapping:\n input_value = row.get(csv_field)\n if input_value:\n if isinstance(input_value, str):\n output.extend(input_value.split(\"|~|\"))\n else:\n output.append(input_value)\n\n bare_field_name = get_bare_field_name(field_name)\n if bare_field_name in config.get(\"controlled_fields\", {}):\n terms = config[\"controlled_fields\"][bare_field_name][\"terms\"]\n output = [terms.get(value, value) for value in output]\n\n return [value for value in output if value] # remove untruthy values like ''", "def ConvertRow(self, row):\n i = 0\n data = []\n for entry in row['f']:\n data.append(self.Convert(entry['v'], self.schema[i]))\n i += 1\n return tuple(data)", "def 
parse_row(input_row, parsers):\n\n return [parser(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]", "def _parse_row(row: str):\n final_row = []\n for char in row:\n\n # any number N expands into N spaces\n if char in \"12345678\":\n for i in range(int(char)):\n final_row.append(EMPTY_SPACE)\n else:\n final_row.append(char)\n\n return final_row", "def _convert_row(self, row) :\n\n self.row_id += 1\n data = [self.row_id]\n\n if type(row) == type({}) :\n data.extend(row.get(col, None) for col in self.cols[1:])\n elif type(row) in [type([]), type(())] :\n data.extend(row)\n elif type(row) == RowReference :\n data.extend(row.values())\n else :\n raise Exception(\n 'Don''t know how to add row from: %s ' % str(row)\n )\n\n if len(data) != len(self.cols) :\n raise Exception(\n 'Wrong number of values for new row with cols %s: %s' % \n (str(self.cols), str(data))\n \n )\n\n return data", "def csv_line(value_parser):\n def convert(string):\n return list(map(value_parser, string.split(',')))\n return convert", "def lst_to_field(table, field, lst):\n if len(lst) == 0:\n message(\"No values to add to '{}'.\".format(field))\n elif field_exists(table, field): \n with arcpy.da.UpdateCursor(table, [field]) as cursor:\n # For row in cursor:\n for i, row in enumerate(cursor):\n row[0] = lst[i]\n cursor.updateRow(row)\n else:\n message(\"{} field not found in {}\".format(field, table))", "def rebuild_row(lst, is_collocation):\n split_list = lst[0].split(\"\\t\")\n if is_collocation:\n return [split_list[0] + \" \" + split_list[1], \"1\"]\n return [split_list[0] + \" \" + split_list[1], \"0\"]", "def _read_delimited_field(d):\n val = []\n val.append(next(d))\n while val[-1] != FIELD_DELIMITER:\n try:\n val.append(next(d))\n except StopIteration: break\n\n modlogger.debug( \"read:%s\"%val[:-1])\n return field_ctor(val[:-1])", "def parse_row(input_row, parsers):\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]", "def expand_row(\n row: Sequence[Union[str, Sequence[Union[str, Sequence[str]]]]]\n) -> List[List[str]]:\n elems_as_lists = []\n for elem in row:\n if isinstance(elem, list):\n elems_as_lists.append(elem)\n else:\n elems_as_lists.append([elem])\n aligned = [list(i) for i in zip_longest(*elems_as_lists, fillvalue=\"\")]\n return aligned", "def fake_clean_row(row):\n\treturn row", "def rows_to_list(records):\n raw_list = []\n for record in records:\n items = record.items()\n raw_list.append({i[0]: i[1].rstrip() if type(\n i[1]) == str else i[1] for i in items})\n\n # Process data for compounding flag to be boolean since SQLite does not have a boolean type\n processed_list = []\n for row in raw_list:\n if 'compounding_flag' in row:\n if row['compounding_flag'] == '1':\n row['compounding_flag'] = True\n else:\n row['compounding_flag'] = False\n processed_list.append(row)\n\n # If processed list is empty, no processing was done, just assign raw list\n if not processed_list:\n processed_list = raw_list\n\n return processed_list", "def format_row(row):\n assert isinstance(row,list)\n \n data_row=[0]*len(header) #Formatted data row to be output and appeneded to 'data'\n \n for i in [0,1,11,13,14,15,16,17,19,20,21,28,31,45,46,47,48]: data_row[i]=row[i] #emptry string will NOT return None\n for i in [2,3,12,18]: data_row[i]=type_cast(lambda x: int(float(x)),row[i])\n for i in [6,7,8,9,10,23,24,25,26,27,29,30]: data_row[i]=type_cast(float,row[i])\n for i in [4,5,22]: 
data_row[i]=type_cast(datetime.strptime,row[i],'%Y-%m-%d %H:%M:%S')\n for i in range(32,45):\n if row[i]=='False': data_row[i]=False #bool('False') returns True!\n elif row[i]=='True': data_row[i]=True\n else: data_row[i]=None\n return data_row", "def transform(input):\n transformed_file = []\n\n for row in input:\n names = row['name'].split()\n row['fname'] = names[0]\n row['lname'] = names[1]\n del row['name']\n transformed_file.append(row)\n return transformed_file", "def parse_row(input_row, parsers):\n\n return [try_or_none(parser)(value) if parser is not None else value\n for value, parser in zip(input_row, parsers)]", "def parse_rows(self, rows):\r\n rows = [\r\n (row_id, parse_date(created), student_module_id)\r\n for row_id, created, student_module_id in rows\r\n ]\r\n return rows", "def _convert_field_type(row):\n return row", "def process_row(self, row: Union[List[dict], dict]) -> List[dict]:\n rows = listify(row)\n rows = self.do_pre_row(rows=rows)\n row_return = [{\"internal_axon_id\": row[\"internal_axon_id\"]} for row in rows]\n rows = self.do_row(rows=rows)\n self.write_rows(rows=rows)\n del rows, row\n return row_return", "def process_row(self, table, row):\n for index, column in enumerate(table.columns):\n hash_key = hash(frozenset(column.items()))\n column_type = self.column_types[hash_key] if hash_key in self.column_types else self.column_type(column)\n if row[index] == None and ('timestamp' not in column_type or not column['default']):\n row[index] = '\\N'\n elif row[index] == None and column['default']:\n if self.tz:\n row[index] = '1970-01-01T00:00:00.000000' + self.tz_offset\n else:\n row[index] = '1970-01-01 00:00:00'\n elif 'bit' in column_type:\n row[index] = bin(ord(row[index]))[2:]\n elif isinstance(row[index], (str, unicode, basestring)):\n if column_type == 'bytea':\n row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]\n elif 'text[' in column_type:\n row[index] = '{%s}' % ','.join('\"%s\"' % v.replace('\"', r'\\\"') for v in row[index].split(','))\n else:\n row[index] = row[index].replace('\\\\', r'\\\\').replace('\\n', r'\\n').replace(\n '\\t', r'\\t').replace('\\r', r'\\r').replace('\\0', '')\n elif column_type == 'boolean':\n # We got here because you used a tinyint(1), if you didn't want a bool, don't use that type\n row[index] = 't' if row[index] not in (None, 0) else 'f' if row[index] == 0 else row[index]\n elif isinstance(row[index], (date, datetime)):\n if isinstance(row[index], datetime) and self.tz:\n try:\n if row[index].tzinfo:\n row[index] = row[index].astimezone(self.tz).isoformat()\n else:\n row[index] = datetime(*row[index].timetuple()[:6], tzinfo=self.tz).isoformat()\n except Exception as e:\n print e.message\n else:\n row[index] = row[index].isoformat()\n elif isinstance(row[index], timedelta):\n row[index] = datetime.utcfromtimestamp(_get_total_seconds(row[index])).time().isoformat()\n else:\n row[index] = AsIs(row[index]).getquoted()", "def __parse_string_for_delimiter__(self, data):\n parsed = []\n for row in data:\n row = self.__remove_break_line__(row)\n row = self.__split_for_delimiter__(row)\n parsed.append(row)\n return parsed", "def process_data(data, enc=None, delim=None):\n if enc is None:\n enc = detect_encoding(data)\n if delim is None:\n delim = csv_sniff(data[0], enc)\n csv_data = []\n if sys.version_info.major < 3:\n csv_obj = csv.reader(data, delimiter=delim.encode(enc))\n for row in csv_obj:\n row = [str(x, enc) for x in row]\n csv_data.append(row)\n else:\n data = [i.decode(enc) for i in 
data]\n csv_obj = csv.reader(data, delimiter=delim)\n for row in csv_obj:\n csv_data.append(row)\n return pad_data(csv_data)", "def tidy_split(df, column, sep='|', keep=False):\r\n indexes = list()\r\n new_values = list()\r\n df = df.dropna(subset=[column])\r\n for i, presplit in enumerate(df[column].astype(str)):\r\n values = presplit.split(sep)\r\n if keep and len(values) > 1:\r\n indexes.append(i)\r\n new_values.append(presplit)\r\n for value in values:\r\n indexes.append(i)\r\n new_values.append(value)\r\n new_df = df.iloc[indexes, :].copy()\r\n new_df[column] = new_values\r\n return new_df", "def _parsecsv(x):\n for line in x:\n # decode as utf-8, whitespace-strip and split on delimiter\n yield line.decode('utf-8').strip().split(config.DELIMITER)", "def tidy_split(df, column='Members', sep=', '):\n\n indexes = []\n new_values = []\n for i, presplit in enumerate(df[column].astype(str)):\n for value in presplit.split(sep):\n indexes.append(i)\n new_values.append(value)\n new_df = df.iloc[indexes, :].copy() # the .copy() Prevents a warning\n new_df[column] = new_values\n df = new_df.reset_index(drop=True)\n return df" ]
[ "0.6390672", "0.61461806", "0.61275697", "0.6075597", "0.6021947", "0.60119367", "0.5930786", "0.5849591", "0.5666509", "0.56609106", "0.562658", "0.56255656", "0.5619078", "0.5576439", "0.55647933", "0.55612415", "0.5549309", "0.5546282", "0.5542975", "0.5528512", "0.55209035", "0.550323", "0.5479354", "0.5450724", "0.5448862", "0.5436596", "0.5429882", "0.54264444", "0.5417858", "0.53455025" ]
0.63998896
0
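The expand_rows document above builds the cartesian product of the values found in each delimited field. A reduced, standalone illustration of that expansion step follows; the field names (tax_lot, pm_id) are made up for the example.

import copy
import itertools

def expand_rows_simple(row, delimited_fields):
    # One list of (field, value) choices per delimited field...
    choices = []
    for field in delimited_fields:
        values = [v.strip() for v in str(row.get(field, "")).split(";") if v.strip()]
        choices.append([(field, v) for v in values] or [(field, row.get(field))])
    # ...then one output row per combination of those choices.
    new_rows = []
    for combo in itertools.product(*choices):
        new_row = copy.deepcopy(row)
        for field, value in combo:
            new_row[field] = value
        new_rows.append(new_row)
    return new_rows

rows = expand_rows_simple({"id": 1, "tax_lot": "100;101", "pm_id": "A;B"}, ["tax_lot", "pm_id"])
assert len(rows) == 4  # 2 tax-lot values x 2 property ids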
Apply mapping of row data to model.
def map_row(row, mapping, model_class, extra_data_fields=[], cleaner=None, **kwargs): initial_data = kwargs.get('initial_data', None) model = model_class() # _log.debug("map_row's mappings {}".format(mapping)) # If there are any initial states we need to set prior to mapping. if initial_data: model = apply_initial_data(model, initial_data) # concat is not used as of 2016-09-14 # concat = _set_default_concat_config(concat) for raw_field, value in row.items(): is_extra_data = True if raw_field in extra_data_fields else False # Save the value if is is not None, keep empty fields. if value is not None: model = apply_column_value(raw_field, value, model, mapping, is_extra_data, cleaner) return model
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def applyMapping(self):\n pass", "def map(self, function=lambda item: item):\n for i, row in enumerate(self):\n for j, item in enumerate(row):\n row[j] = function(item)", "def map_transfer_to_row(self, transfer):\n pass", "def _do_mapping(self):\n pass", "def Map(dataset, map_func, input_columns=None):\n return dataset.map(map_func)", "def extract_mapping(self) -> DatasetMapping:\n # store fields\n fields = []\n for col in self.data.columns:\n #get field label\n label = col\n #get field type using PANDAS_TYPE (see apps.utils.utils)\n col_type = self.data[col].dtype\n field_type = PANDAS_TYPE[col_type]\n #set field\n field = FieldMapping(label=label, type=field_type)\n fields.append(field)\n self.mapping.append(label)\n return DatasetMapping(fields=fields)", "def mapfn(k, v):\n for row in v:\n # completar\n pass", "def places_process_rows(self):\n\n for index in range(len(self.table)):\n row_rdf = self.places_map_row_to_rdf(self.table.iloc[index])\n if row_rdf is not None:\n self.data += row_rdf", "def map_items(self) -> None:\n self.__attribute_columns = list(self.__DataFrame.columns)\n self.__attribute_columns.remove(self.__surv_col_name)\n self.__attribute_columns.remove(self.__status_col_name)\n\n mapped_int = 0\n\n for attribute in self.__attribute_columns:\n for value in self.__DataFrame[attribute].unique():\n item_reference = (attribute, value)\n self.__item_map[item_reference] = mapped_int\n self.items_list.append(item_reference)\n mapped_int += 1", "def mapRow(this_row, header_dict, precursors_mapping, sequences_mapping, protein_mapping):\n\n if \"FullPeptideName\" in header_dict:\n\n peptide_name = this_row[header_dict[\"FullPeptideName\"]]\n\n transitions = []\n pr_transitions = []\n if \"aggr_Fragment_Annotation\" in header_dict:\n transitions = this_row[ header_dict[\"aggr_Fragment_Annotation\"] ].split(\";\")\n if \"aggr_prec_Fragment_Annotation\" in header_dict:\n pr_transitions = this_row[ header_dict[\"aggr_prec_Fragment_Annotation\"] ].split(\";\")\n\n # Skip row if there are no transitions\n if len(transitions) == 0:\n return\n\n if len(transitions[-1]) == 0:\n transitions = transitions[:-1]\n if len(pr_transitions) > 0 and len(pr_transitions[-1]) == 0:\n pr_transitions = pr_transitions[:-1]\n\n # Get charge state (may be absent)\n charge_state = \"0\"\n if \"Charge\" in header_dict:\n charge_state = this_row[header_dict[\"Charge\"]]\n\n if charge_state == \"NA\" or charge_state == \"\":\n charge_state = \"0\"\n\n key = peptide_name + \"/\" + charge_state\n prkey = peptide_name + \"/\" + charge_state + \"_pr\"\n precursors_mapping [ key ] = transitions\n precursors_mapping [ prkey ] = pr_transitions\n mapped_precursors = sequences_mapping.get( peptide_name, [] )\n mapped_precursors.extend([key, prkey])\n sequences_mapping[peptide_name] = mapped_precursors # = [ key, prkey ]\n\n if \"ProteinName\" in header_dict:\n protein_name = this_row[header_dict[\"ProteinName\"]]\n\n tmp = protein_mapping.get(protein_name, [])\n if peptide_name not in tmp:\n tmp.append(peptide_name)\n protein_mapping[protein_name] = tmp", "def process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup=False, no_pubmed_lookup=False,\n no_doaj_lookup=False, no_title_lookup=False, round_monetary=False,\n offsetting_mode=None, orig_file_path=None, crossref_max_retries=3):\n if len(row) != num_required_columns:\n msg = \"Line %s: \" + MESSAGES[\"num_columns\"]\n logging.error(msg, row_num, len(row), num_required_columns)\n 
return row\n\n empty_row = True\n for elem in row:\n if has_value(elem):\n empty_row = False\n break\n else:\n msg = \"Line %s: \" + MESSAGES[\"empty_row\"]\n logging.warning(msg, row_num)\n\n current_row = {}\n record_type = None\n\n # Copy content of identified columns and apply special processing rules\n for csv_column in column_map.values():\n index, column_type = csv_column.index, csv_column.column_type\n if empty_row:\n current_row[column_type] = \"\"\n continue\n if column_type == \"euro\" and index is not None:\n current_row[\"euro\"] = _process_euro_value(row[index], round_monetary, row_num, index, offsetting_mode)\n elif column_type == \"period\" and index is not None:\n current_row[\"period\"] = _process_period_value(row[index], row_num)\n elif column_type == \"is_hybrid\" and index is not None:\n current_row[\"is_hybrid\"] = _process_hybrid_status(row[index], row_num)\n elif column_type == \"institution\" and index is not None:\n current_row[\"institution\"] = _process_institution_value(row[index], row_num, orig_file_path, offsetting_mode)\n else:\n if index is not None and len(row[index]) > 0:\n current_row[column_type] = row[index]\n else:\n current_row[column_type] = \"NA\"\n\n doi = current_row[\"doi\"]\n if not has_value(doi) and not empty_row:\n msg = (\"Line %s: No DOI found\")\n logging.info(msg, row_num)\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # lookup the book title in Crossref\n lookup_title = current_row[\"book_title\"]\n if has_value(lookup_title):\n msg = (\"Line %s: Trying to look up the book title ('%s') in Crossref...\")\n logging.info(msg, row_num, lookup_title)\n book_doi = title_lookup(lookup_title, [\"book\", \"monograph\", \"reference-book\"])\n if book_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = book_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if has_value(doi):\n # Normalise DOI\n norm_doi = get_normalised_DOI(doi)\n if norm_doi is not None and norm_doi != doi:\n current_row[\"doi\"] = norm_doi\n msg = MESSAGES[\"doi_norm\"].format(doi, norm_doi)\n logging.info(msg)\n doi = norm_doi\n # include crossref metadata\n if not no_crossref_lookup:\n crossref_result = get_metadata_from_crossref(doi)\n retries = 0\n while not crossref_result[\"success\"] and crossref_result[\"error_msg\"].startswith(\"HTTPError: 504\"):\n if retries >= crossref_max_retries:\n break\n # retry on gateway timeouts, crossref API is quite busy sometimes\n msg = \"%s, retrying...\"\n logging.warning(msg, crossref_result[\"error_msg\"])\n retries += 1\n crossref_result = 
get_metadata_from_crossref(doi)\n if not crossref_result[\"success\"]:\n exc = crossref_result[\"exception\"]\n # check if a preprint lookup is possible\n if not no_title_lookup and type(exc) == UnsupportedDoiTypeError and exc.doi_type == \"posted-content\":\n msg = (\"Line %s: Found a DOI with type 'posted_content' (%s). This might \" +\n \"be a case of a preprint DOI, trying to find the final version of the article...\")\n logging.info(msg, row_num, doi)\n if not exc.crossref_title:\n msg = \"Line %s: Preprint lookup failed, no title could be extracted.\"\n logging.warning(msg, row_num)\n else:\n article_doi = title_lookup(exc.crossref_title, [\"journal-article\"])\n if article_doi:\n logging.info(\"New DOI integrated, restarting enrichment for current line...\")\n index = column_map[\"doi\"].index\n row[index] = article_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n if crossref_result[\"success\"]:\n data = crossref_result[\"data\"]\n record_type = data.pop(\"doi_type\")\n logging.info(\"Crossref: DOI resolved: \" + doi + \" [\" + record_type + \"]\")\n current_row[\"indexed_in_crossref\"] = \"TRUE\"\n for key, value in data.items():\n new_value = _process_crossref_results(current_row, row_num, key, value)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Crossref: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, crossref_result[\"error_msg\"])\n current_row[\"indexed_in_crossref\"] = \"FALSE\"\n # lookup ISBNs in crossref and try to find a correct DOI\n additional_isbns = [row[i] for i in additional_isbn_columns]\n found_doi, r_type = _isbn_lookup(current_row, row_num, additional_isbns, doab_analysis.isbn_handling)\n if r_type is not None:\n record_type = r_type\n if found_doi is not None:\n # integrate DOI into row and restart\n logging.info(\"New DOI integrated, restarting enrichment for current line.\")\n index = column_map[\"doi\"].index\n row[index] = found_doi\n return process_row(row, row_num, column_map, num_required_columns, additional_isbn_columns,\n doab_analysis, doaj_analysis, no_crossref_lookup, no_pubmed_lookup,\n no_doaj_lookup, no_title_lookup, round_monetary, offsetting_mode, orig_file_path)\n # include pubmed metadata\n if not no_pubmed_lookup and record_type == \"journal-article\":\n pubmed_result = get_metadata_from_pubmed(doi)\n if pubmed_result[\"success\"]:\n logging.info(\"Pubmed: DOI resolved: \" + doi)\n data = pubmed_result[\"data\"]\n for key, value in data.items():\n if value is not None:\n new_value = value\n else:\n new_value = \"NA\"\n msg = \"WARNING: Element %s not found in in response for doi %s.\"\n logging.debug(msg, key, doi)\n old_value = current_row[key]\n current_row[key] = column_map[key].check_overwrite(old_value, new_value)\n else:\n msg = \"Line %s: Pubmed: Error while trying to resolve DOI %s: %s\"\n logging.error(msg, row_num, doi, pubmed_result[\"error_msg\"])\n\n # lookup in DOAJ. 
try the EISSN first, then ISSN and finally print ISSN\n if not no_doaj_lookup and not empty_row:\n issns = []\n new_value = \"NA\"\n if current_row[\"issn_electronic\"] != \"NA\":\n issns.append(current_row[\"issn_electronic\"])\n if current_row[\"issn\"] != \"NA\":\n issns.append(current_row[\"issn\"])\n if current_row[\"issn_print\"] != \"NA\":\n issns.append(current_row[\"issn_print\"])\n for issn in issns:\n lookup_result = doaj_analysis.lookup(issn)\n if lookup_result:\n msg = \"DOAJ: Journal ISSN (%s) found in DOAJ offline copy ('%s').\"\n logging.info(msg, issn, lookup_result)\n new_value = \"TRUE\"\n break\n else:\n msg = \"DOAJ: Journal ISSN (%s) not found in DOAJ offline copy.\"\n new_value = \"FALSE\"\n logging.info(msg, issn)\n old_value = current_row[\"doaj\"]\n current_row[\"doaj\"] = column_map[\"doaj\"].check_overwrite(old_value, new_value)\n if record_type != \"journal-article\" and not empty_row:\n collected_isbns = []\n for isbn_field in [\"isbn\", \"isbn_print\", \"isbn_electronic\"]:\n # test and split all ISBNs\n current_row[isbn_field] = _process_isbn(row_num, current_row[isbn_field], doab_analysis.isbn_handling)\n if has_value(current_row[isbn_field]):\n collected_isbns.append(current_row[isbn_field])\n additional_isbns = [row[i] for i in additional_isbn_columns]\n for isbn in additional_isbns:\n result = _process_isbn(row_num, isbn, doab_analysis.isbn_handling)\n if has_value(result):\n collected_isbns.append(result)\n if len(collected_isbns) == 0:\n logging.info(\"No ISBN found, skipping DOAB lookup.\")\n current_row[\"doab\"] = \"NA\"\n else:\n record_type = \"book\"\n logging.info(\"Trying a DOAB lookup with the following values: \" + str(collected_isbns))\n for isbn in collected_isbns:\n doab_result = doab_analysis.lookup(isbn)\n if doab_result is not None:\n current_row[\"doab\"] = \"TRUE\"\n msg = 'DOAB: ISBN %s found in normalized DOAB (%s, \"%s\")'\n logging.info(msg, isbn, doab_result[\"publisher\"], doab_result[\"book_title\"])\n if current_row[\"indexed_in_crossref\"] == \"TRUE\":\n msg = \"Book already found in Crossref via DOI, those results take precedence\"\n logging.info(msg)\n else:\n for key in doab_result:\n current_row[key] = doab_result[key]\n if not has_value(current_row[\"isbn\"]):\n current_row[\"isbn\"] = isbn\n break\n else:\n current_row[\"doab\"] = \"FALSE\"\n msg = \"DOAB: None of the ISBNs found in DOAB\"\n logging.info(msg)\n if offsetting_mode:\n current_row[\"agreement\"] = offsetting_mode\n record_type = \"journal-article_transagree\"\n\n if record_type is None:\n msg = \"Line %s: Could not identify record type, using default schema 'journal-article'\"\n logging.warning(msg, row_num)\n record_type = \"journal-article\"\n\n result = []\n for field in COLUMN_SCHEMAS[record_type]:\n result.append(current_row[field])\n\n return (record_type, result)", "def _transform_map_data(self):\n WARD_FMT = '%s-%s'\n self.map_data_trans = []\n lookup = {i.column: ''.join(filter(lambda x: x.isdigit(), i.value)) for i in self.sht[1]}\n\n #skip over header\n rs = iter(self.sht.rows)\n next(rs)\n next(rs)\n for r in rs:\n pka = r[0].value\n for c in r[1:]:\n if c.value is None:\n c.value = 0\n\n self.map_data_trans.append((WARD_FMT%(pka, lookup[c.column]), c.value))", "def applymap(self, func, *args, **kwargs):\n return DataFrameDefault.register(pandas.DataFrame.applymap)(\n self, func, *args, **kwargs\n )", "def iter_rows_raw(self, *args):\n\n for row in super().iter_rows_raw(*args):\n row[0] = row[1] # sequential catalog index not right in this 
case; overwrite to match finder id\n yield row", "def _map(self, p_input:Element, p_output:Element):\r\n \r\n self._sl_model.eval()\r\n\r\n # Input pre processing\r\n input = self.input_preproc(p_input)\r\n\r\n # Make prediction\r\n output = self.forward(input)\r\n\r\n # Output post processing\r\n output = self.output_postproc(output)\r\n\r\n # Set list to Element\r\n p_output.set_values(output)", "def mapfn(k, v):\n for row in v:\n # rellenar el codigo\n pass", "def prepopulate(self, model, exclude=[]):\n for col in model.columns():\n if col not in exclude and hasattr(self, col):\n setattr(getattr(self, col), 'data', getattr(model, col))", "def mutate_row(self, row_key: bytes, column_family_id: str, val_dict: dict,\n time_stamp: Optional[datetime.datetime] = None\n ) -> bigtable.row.Row:\n row = self.table.row(row_key)\n\n for column, value in val_dict.items():\n row.set_cell(column_family_id=column_family_id, column=column,\n value=value, timestamp=time_stamp)\n return row", "def map(self,Affine,i):\n map_x = np.zeros([self.num,self.d])\n for k in range(self.num):\n map_x[k,:] = Affine.apply(i,self.pick(k))\n Mapped = Model_Points(map_x)\n return Mapped", "def map (a_data,a_column,a_old,a_new) :\n loc_new_data = a_data\n a_data[a_column].replace(a_old,a_new,inplace=True)", "def create_row_processor(self, context, path, reduced_path, mapper, \n row, adapter):\n\n return None, None, None", "def mapper(record):\n matrix, row, col, value = record\n if matrix == A_MATRIX:\n # For all A(i,j) emit key (j, k) for k=1 to number of columns in B\n for k in range(0, B_COLS):\n mr.emit_intermediate((row, k), [matrix, col, value])\n else:\n # For all B(j, k) emit key (j, i) for i=1 to number of rows in B\n for i in range(0, A_ROWS):\n mr.emit_intermediate((i, col), [matrix, row, value])", "def create_row_processor(self, context, path, reduced_path, \n mapper, row, adapter):\n return None, None, None", "def map(self, obj):\n if isinstance(obj, np.ndarray) and obj.ndim >= 2 and obj.shape[0] in (2,3):\n return fn.transformCoordinates(self, obj)\n else:\n return QtGui.QMatrix4x4.map(self, obj)", "def get_model(self, payload):\n return super(BulkEntryTransformer, self).to_model(payload)", "def map_data(self, obj: object):\n pass", "def update(self, data: dict):\n for key in data:\n model_att = getattr(self.__class__, key, None)\n value = data.get(key)\n\n setattr(self, key, type(model_att.type.python_type())(value))\n\n self.commit()\n return self", "def apply_fn(self,fn):\r\n \r\n self.check_Data()\r\n for split,data_ in self.processed_data.items():\r\n x = data_['x']\r\n x = np.array([fn(xi) for xi in x])\r\n data_['x'] = x", "def _add_from_dict(self, row) :\n\n data = [row.get(col, None) for col in self.cols]\n self._insert_internal(self.cols, data)", "def apply_remap(self):\n\n if not has_remap():\n return self\n\n newdata = self.copy()\n newdata._partial_remap()\n return newdata" ]
[ "0.66845304", "0.62534523", "0.61838084", "0.604524", "0.59644157", "0.5678246", "0.56550837", "0.5597702", "0.5595955", "0.5552257", "0.55398905", "0.54507", "0.54413307", "0.542242", "0.54172045", "0.54105145", "0.5367758", "0.5321824", "0.5305091", "0.5284699", "0.52551997", "0.52550423", "0.52549005", "0.5245833", "0.52254117", "0.5220396", "0.52153003", "0.52046514", "0.5201628", "0.51888216" ]
0.77035165
0
Updates stats inside mod_stats_map with data gathered from the file.
def get_file_mod_stats_for_upstream_refs(file_name, mod_stats_map): with open(file_name) as f: lines = f.readlines() upstream_ref = None upstream_start_line = None for line_number, line in enumerate(lines): if REGION_START_TAG in line: tag, ref_name = _extract_tag_and_ref_name_from_line(line, False) if REGION_UPSTREAM_TAG in tag: upstream_ref = ref_name upstream_start_line = line_number elif REGION_END_TAG in line and upstream_ref: mod_stats = mod_stats_map[upstream_ref] mod_stats.mod_count += 1 mod_stats.line_count += line_number - upstream_start_line - 1 upstream_ref = None upstream_start_line = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))", "def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)", "def update_stats(self):\n modEnum = Mod_Enum()\n self.attack = self.baseAttack + self.get_stat(modEnum.MOD_ATTACK)\n self.defense = self.baseDefense + self.get_stat(modEnum.MOD_DEFENSE)\n self.hp[1] = 100 * ( 1 + self.get_stat(modEnum.MOD_HP) )\n self.absorbtion = self.get_stat(modEnum.MOD_ABSORB)\n self.regen = 0.00 + self.get_stat(modEnum.MOD_REGEN)\n self.lifeLeech = 0.00 + self.get_stat(modEnum.MOD_LEECH)\n self.crit = 0.00 + self.get_stat(modEnum.MOD_CRIT)\n self.attackSpeedMultiplier = 1.0 + self.get_stat(modEnum.MOD_ATTACK_SPEED)\n self.moveSpeedMultiplier = 1.0 + self.get_stat(modEnum.MOD_MOVE_SPEED)\n #cap move speed\n if self.moveSpeedMultiplier > 4.0:\n self.moveSpeedMultiplier = 4.0 + 0.25 * (self.moveSpeedMultiplier - 4.0)\n \n self.speed = self.baseSpeed * self.moveSpeedMultiplier", "def readPlayerFileAndFillStats(players_data_filename, game_stats):\n\tplayer_stats = {}\n\tteam_stats = {}\n\tgame_stats_clean = {}\n\tgame_stats = fillGameStats(players_data_filename, game_stats)\n\n\twith open(players_data_filename) as csvfile:\n\t reader = csv.DictReader(csvfile)\n\t for row in reader:\n\t \tgame_id = row['game_id']\n\n\t \tif isGameStatsValid(game_stats[game_id]):\n\t\t \tplayer_id = row['player_id']\n\t\t \tteam_id = row['team_id']\n\t\t \tkills = row['kill']\n\t\t \tdeaths = row['death']\n\t\t \tassists = row['assists']\n\t\t \tgold = row['gold_earned']\n\n\t\t \tif not game_stats.get(game_id):\n\t\t \t\tprint('no game id')\n\n\t\t \tkills = int(kills)\n\t\t \tdeaths = int(deaths)\n\t\t \tassists = int(assists)\n\t\t \tgold = int(gold)\n\n\t\t \tif not game_stats_clean.get(game_id):\n\t \t\t\tgame_stats_clean[game_id] = game_stats[game_id]\n\n\t\t \twin = 0\n\t\t \tif game_stats[game_id]['winner_team_id'] == team_id:\n\t\t \t\twin = 1\n\t\t \t\n\n\t\t \tif not team_stats.get(team_id):\n\t \t\t\tteam_stats[team_id] = {'games_played': 1, 'wins': 0, 'loses': 0, 'kills': 0, 'deaths': 0, 'assists': 0, 'gold': 0, 'player_ids': Set([]), 'game_ids': Set([]), 'player_stats': []}\n\t \t\t\n\t \t\tteam_stats[team_id]['wins'] += win/5\n\t \t\tteam_stats[team_id]['loses'] += (1 - win)/5\n\t\t \tteam_stats[team_id]['kills'] += kills\n\t\t \tteam_stats[team_id]['deaths'] += deaths\n\t\t \tteam_stats[team_id]['assists'] += assists\n\t\t \tteam_stats[team_id]['gold'] += gold\n\t\t \tteam_stats[team_id]['player_ids'].add(player_id)\n\t\t \tteam_stats[team_id]['game_ids'].add(game_id)\n\t\t \tteam_stats[team_id]['games_played'] = len(team_stats[team_id]['game_ids'])\n\n\n\t\t \tif not player_stats.get(player_id):\n\t\t \t\tplayer_stats[player_id] = {'games_played': 1, 'wins': win, 'loses': 1 - win, 'kills': kills, 'deaths': deaths, 'assists': assists, 'gold': gold, 'team_ids': Set([team_id])}\n\t\t \telse:\n\t\t \t\tplayer_stats[player_id]['games_played'] += 1\n\t\t \t\tplayer_stats[player_id]['wins'] += win\n\t\t \t\tplayer_stats[player_id]['loses'] += 1 - win\n\t\t \t\tplayer_stats[player_id]['kills'] += kills\n\t\t \t\tplayer_stats[player_id]['deaths'] += deaths\n\t\t 
\t\tplayer_stats[player_id]['assists'] += assists\n\t\t \t\tplayer_stats[player_id]['gold'] += gold\n\t\t \t\tplayer_stats[player_id]['team_ids'].add(team_id)\n\n\t\t \t#team_stats[team_id]['player_stats'].append({player_id: player_stats[player_id]})\n\n\treturn game_stats_clean, team_stats, player_stats", "def update_stats():\r\n\turl = \"https://www.pathofexile.com/\" + \"api/trade/data/stats\"\r\n\tsave_path = \"data/stats.json\"\r\n\tr = requests.get(url)\r\n\twith open(save_path, \"w\") as fileID:\r\n\t\tfileID.write(r.text)", "def UpdateFile(self, modID = None):\n if modID is None:\n modID = self.modActive\n\n source = self.modules[modID][1]\n filename = self.modules[modID][2]\n\n try:\n file = open(filename, \"wt\")\n file.write(source)\n finally:\n file.close()", "def updateFileData(self):\n with open(pagePath(self.pageName)) as f:\n self.fileData = f.read()\n self.lastUpdated = time.time()", "def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)", "def update_status(self):\n\n # Memory information can be found in status and statm /proc/PID files\n # status file VmRSS equivalent to top's RES column\n # statm disagrees with status VmRSS, I think it may not include\n # sub-processes\n # From: man proc\n # * VmPeak: Peak virtual memory size.\n # * VmSize: Virtual memory size.\n # * VmHWM: Peak resident set size (\"high water mark\").\n # * VmRSS: Resident set size.\n\n # status_fields should be ordered as in the status file\n fields = iter(self.status_fields)\n field = next(fields)\n with open(self.status_path) as f:\n for line in f:\n if line.startswith(field):\n # separated by white-space, 2nd element is value\n # 3rd is units e.g. 
kB\n # At the moment all fields are ints\n self.status[field] = int(line.split()[1])\n\n try:\n field = next(fields)\n except StopIteration:\n # Just found the last field in status_fields\n break", "def update(self):\n try:\n with open(self._file_path, encoding=\"utf-8\") as file_data:\n for line in file_data:\n data = line\n data = data.strip()\n except (IndexError, FileNotFoundError, IsADirectoryError, UnboundLocalError):\n _LOGGER.warning(\n \"File or data not present at the moment: %s\",\n os.path.basename(self._file_path),\n )\n return\n\n if self._val_tpl is not None:\n self._state = self._val_tpl.async_render_with_possible_json_value(\n data, None\n )\n else:\n self._state = data", "def loadMetaChunkToServerMap (fileName):\n if not os.path.exists(fileName):\n print \"File \", fileName, \" does not exists\"\n sys.exit(1)\n\n infile = open (fileName, \"r\")\n count = 0\n while infile:\n count = count + 1\n line = infile.readline()\n if not line:\n break\n print \"DEBUGME : processing line %s, %d\" % (line, count)\n lineParts = line.split(' ')\n gChunkMap[lineParts[0]] = ChunkInfo(lineParts[0], lineParts[1], lineParts[2])\n # Add a ChunkHostInfo\n numServers = int(lineParts[2])\n for i in range(numServers):\n i = i * 3\n gChunkMap[lineParts[0]].addChunkHostInfo(ChunkHostInfo(lineParts[i+3], lineParts[i+4], lineParts[i+5]))", "def update_stats():\n list_db = get_list_database()\n\n list_db.group_stats_force_update()\n transaction_commit(None, 'GroupStatsUpdate')\n\n list_db.user_stats_force_update()\n transaction_commit(None, 'UserStatsUpdate')", "def map(item):\n user_services.update_dashboard_stats_log(item.id)", "def _update_base_stats(self, base_stats):\n self.total_samples += base_stats[\"sample_size\"]\n self.sample = base_stats[\"sample\"]\n self._empty_line_count += base_stats[\"empty_line_count\"]\n self.memory_size += base_stats[\"memory_size\"]", "def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)", "def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg", "def update_statistics(status):\n if not os.path.isfile(CONFIG['stats_file']):\n current_stats = {}\n else:\n current_stats = json.loads(open(CONFIG['stats_file'], 'r').read())\n # current_stats = delete_old_statistics(current_stats)\n\n current_key = int(datetime.datetime.now().strftime('%Y%m%d%H%M'))\n for host, state in ((h['host'], h['status']) for h in status):\n if host not in current_stats:\n current_stats[host] = {}\n\n # get newest entry of host\n newest_state = None, None\n for key, entry in current_stats[host].items():\n if newest_state[0] is None or int(key) > int(newest_state[0]):\n newest_state = key, entry\n if newest_state[1] != state:\n # state has changed. 
Write it.\n current_stats[host][current_key] = state\n\n # write stats\n open(CONFIG['stats_file'], 'w').write(json.dumps(current_stats))", "def refreshMTimes(self):\n del self.mtimesReset[:]\n for fileName, fileInfo in self.data.items():\n oldMTime = self.mtimes.get(fileName,fileInfo.mtime)\n self.mtimes[fileName] = oldMTime\n #--Reset mtime?\n if fileInfo.mtime != oldMTime and oldMTime != -1:\n fileInfo.setMTime(oldMTime)\n self.mtimesReset.append(fileName)", "def load_from_file(self, file):\n\n if (args.replacetopip): #create list of IP addresses and the number of times they occur\n with open(args.dirty) as dirty_file:\n for line in dirty_file:\n ip = self._extract_by_key(line, self._attr_key)\n if (self.ip_dict.has_key(ip)):\n self.ip_dict[ip] += 1\n else:\n self.ip_dict[ip] = 1\n #sort list\n self.top_ip = sorted(self.ip_dict.items(), key=operator.itemgetter(1), reverse=True)\n count = 0\n with open(file) as ip_file:\n for line in ip_file:\n if (args.replacetopip): #replace top IP addresses from the sorted list with new ones from the file\n ip_old = self.top_ip[count][0]\n ip_new = line.strip()\n count += 1\n else:\n ip_old,ip_new = line.split(\",\")\n self._insts[ip_old] = ip_new.strip()", "def fromfile(self,file):\n self.d.update(params_file(file))", "def update_stats(self, idx, key):\n\n stats = self.stats\n if not stats.has_key(idx):\n stats[idx] = {}\n if stats[idx].has_key(key):\n stats[idx][key] += 1\n else:\n stats[idx][key] = 1", "def stats(self, stats):\n self._stats = stats", "def map_file(self, map_file):\n\n self._map_file = map_file", "def get_member_stats(self):\n self.mstats = {}\n # add in members from expanded_def (which includes any merges)\n for qid in self.expanded_def.keys():\n # check for trailing quantity specifier (!, *, +, ?). Not for name space.\n # ! - required (default), * - 0 or more, + - 1 or more, ? - 0 or 1\n id, qty = self.file.parse_qty(qid, \"!\")\n if id in self.mstats.keys():\n print \"** Error, duplicate (%s) id in group\" % id\n traceback.print_stack()\n sys.exit(1)\n type = 'group' if id.endswith('/') else 'dataset'\n self.mstats[id] = { 'ns': self.sdef['ns'], 'qty': qty,\n 'df': self.expanded_def[qid], 'created': [], 'type': type }\n # add in members from any includes\n # print \"** processing includes\"\n for qidq in self.includes:\n qid, qty = self.file.parse_qty(qidq, \"!\")\n # print \"processing include\", qid\n sdef = self.file.get_sdef(qid, self.sdef['ns'], \"Referenced in include\")\n # print \"obtained sdef:\"\n # pp.pprint(sdef)\n modifiers = self.includes[qidq]\n if len(modifiers) > 0:\n # need to incorporate modifications to definition of included child members\n df = copy.deepcopy(sdef['df'])\n # self.modify(df, modifiers)\n self.merge(df, modifiers) # merges modifiers into definition\n # print \"df after merging modifiers:\"\n else:\n df = sdef['df']\n # print \"df after copy:\"\n id = sdef['id']\n type = sdef['type']\n # pp.pprint(df)\n # qty = '!' 
# assume includes are required\n if id in self.mstats.keys():\n print \"** Error, duplicate (%s) id in group, referenced by include\" % id\n traceback.print_stack()\n sys.exit(1)\n self.mstats[id] = { 'ns': self.sdef['ns'], 'qty': qty,\n 'df': df, 'created': [], 'type': type }\n # print \"after processing all includes, mstats is:\"\n # pp.pprint(self.mstats)", "def load_stats(self, result, **kwargs):\n p_stat = result.get('player_stats').get('stats')\n if not p_stat:\n raise ValueError('No stats for player')\n stat_list = p_stat.get('stat')\n for item in stat_list:\n key = item.get('stat_id')\n value = int(item.get('value'))\n self.stat_data[key] = value", "def loadRatingScoreMappingFromFile(file):\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda line: ((line[0], line[1]), line[2]))\n\t , partial(takewhile, lambda line: len(line) > 2 and line[0] != '')\n\t , lambda t: t[1]\n\t , lambda lines: (pop(lines), lines)\n\t , fileToLines\n \t , partial(join, getDataDirectory())\n\t)(file)", "def updateRequestStats(self, request, stats):\n # this stats dict will be validated on the server side\n self.updateRequestProperty(request, stats)", "def update_freq_dist(filename):\r\n pass", "def updateAll(data):\n if (data.updatePositions):\n data.groups.player.update(data)\n data.groups.projectiles.update(data)\n data.groups.monsters.update(data)\n data.groups.spawners.update(data)", "def update_from_file(self, filename):\n ns = {}\n with open(filename) as handle:\n code = compile(handle.read(), filename, 'exec')\n exec(code, ns)\n values = {\n key: value\n for key, value in ns.items()\n if not key.startswith('_')\n }\n self.__dict__.update(values)" ]
[ "0.63213795", "0.61827755", "0.57400024", "0.5537577", "0.5465735", "0.5393763", "0.5373763", "0.5370881", "0.53650606", "0.5338288", "0.5331641", "0.52704084", "0.52475107", "0.5246663", "0.51833093", "0.5156864", "0.51413554", "0.51362735", "0.5133316", "0.51267594", "0.5123115", "0.51035655", "0.5100884", "0.5093227", "0.5091473", "0.5091051", "0.508375", "0.50814193", "0.5079507", "0.50784093" ]
0.63252497
0
Find the tracking file for the given file. Returns the last path mentioned in the file via a tracking tag or the equivalent thirdparty path given the file's path. If there is no file in the default path and no files mentioned within the file exist, returns None. Normally the thirdparty path must exist. Passing |check_exist|=False will bypass this check when it is not desired. An additional check is enabled by passing |check_uses_tag|=True. In this case the given file MUST use either a file track tag or another modification tag, before a tracking_path is returned. stats is a variable for keeping track of the status of the analyzer, which can be None.
def compute_tracking_path(stats, our_path, our_lines, do_lint_check=False, check_exist=True, check_uses_tags=False): tracking_path = staging.get_default_tracking_path(our_path) base_matcher = re.compile(re.escape(FILE_TRACK_TAG) + r' "([^\"]+)"') tag_matcher = re.compile(re.escape(REGION_START_TAG)) uses_any_tags = False next_lineno = 1 for line in our_lines: if stats: stats['lineno'] = next_lineno match = base_matcher.search(line) if match: tracking_path = match.group(1) if not os.path.exists(tracking_path) and stats: show_error(stats, 'Mod tracking path does not exist:\n' + line) if next_lineno > MAX_ARC_TRACK_SEARCH_LINES: show_error(stats, 'Tracking not allowed on line > %d' % MAX_ARC_TRACK_SEARCH_LINES) uses_any_tags = True break elif not uses_any_tags and tag_matcher.search(line): uses_any_tags = True next_lineno += 1 if (not do_lint_check and (uses_any_tags or not check_uses_tags) and next_lineno > MAX_ARC_TRACK_SEARCH_LINES): break if not tracking_path: return None if check_uses_tags and not uses_any_tags: return None if check_exist and not os.path.exists(tracking_path): return None return tracking_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None", "def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None", "def _resolve_file_or_none(context_dir, conf, conf_file, has_args=False):\n if not conf:\n return None\n base1 = os.path.expanduser(context_dir)\n base2 = os.path.expanduser(conf)\n path = os.path.join(base1, base2)\n path = os.path.abspath(path) # This resolves \"/../\"\n if not os.path.exists(path):\n raise Exception(\"File does not exist: '%s'. This was \"\n \"referenced in the file '%s'.\" % (path, conf_file))\n return path", "def lookup(file, category='undefined'):\n path = os.path.join(self.base_path, doc, file)\n existing_path = os.path.exists(path) and path\n link = doc+'/'+file\n self.log.debug(' %s file %s' % (category, existing_path or\n path+\" (not found)\"))\n return existing_path, link", "def find_cue_path(self, path, verbose=False):\n meta = {}\n if('.flaccuesplit.' not in path and not os.path.exists(path)):\n try:\n path, meta = self._track_cache[path]\n except (AttributeError, NameError, TypeError, KeyError):\n # Not caching or not yet cached.\n raw_path = path\n dir_path = self.clean_path(os.path.dirname(path))\n files = os.listdir(dir_path)\n for cue_file in files:\n if(os.path.splitext(cue_file)[1] == '.cue'):\n try:\n # Don't use verbose here. 
Overly spammy.\n to_add, metadata, to_remove = self.get_cue_files(os.path.join(dir_path, cue_file))\n base_path = os.path.basename(path)\n if(base_path in to_add):\n path = to_add[base_path]\n meta = metadata[base_path]\n break\n except Exception:\n print(f'Error parsing {cue_file}:', file=sys.stderr, flush=True)\n import traceback\n traceback.print_exc()\n try:\n self._track_cache[raw_path] = (path, meta)\n except (AttributeError, NameError, TypeError):\n # Not caching.\n pass\n if(verbose):\n print(f'{raw_path} -> {path}', flush=True)\n return path, meta", "def file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)", "def _getFileLocalOrPath(filename, pathenv):\n if os.path.exists(filename):\n log.info( \"Using local file %s\", filename)\n return filename\n\n pathlist = os.getenv(pathenv,'').split(os.pathsep)\n resolvedfilename = FindFile(filename, pathlist, os.R_OK)\n if resolvedfilename:\n return resolvedfilename\n\n log.fatal(\"No file %s found locally nor in %s\" % (filename, os.getenv('CORAL_DBLOOKUP_PATH')) )\n return None", "def find_file(filename):\n for i in list(_ctx.include_paths) + [ os.path.dirname(_ctx.filename) ]:\n full_path = os.path.join(i, filename)\n if os.path.exists(full_path):\n return full_path\n return filename # failure gets handled later on", "def _findfile(self, path):\n return DataSource._findfile(self, self._fullpath(path))", "def test_get_track_path_returns_file_path_if_track_is_file(self, example_group, monkeypatch):\n monkeypatch.setattr(\"src.music.utils.get_track_list_root_directory\", MagicMock(return_value=\"root/dir/\"))\n monkeypatch.setattr(\"src.music.utils.os.path.isfile\", lambda x: True)\n track_list = example_group.track_lists[0]\n track = track_list.tracks[0]\n track.file = \"file.mp3\"\n path = utils.get_track_path(example_group, track_list, track)\n assert path == \"root/dir/file.mp3\"", "def checkFilePath(self, filename, searchpath=[]):\n\t\tif filename is None:\n\t\t\treturn None\n\t\telif os.path.isfile(filename):\n\t\t\treturn filename\n\t\telse:\n\t\t\t# Append current dir to searchpath and try each in turn\n\t\t\tsearchpath.append(os.path.dirname(__file__))\n\t\t\t# print(searchpath)\n\t\t\tfor folder in searchpath:\n\t\t\t\tfilepath = os.path.join(folder, filename)\n\t\t\t\tif os.path.isfile(filepath):\n\t\t\t\t\treturn filepath\n\n\t\t# File not found\n\t\treturn None", "def locate(tgt_fpath, survey):\n flen = os.stat(tgt_fpath).st_size\n fpaths = survey.get(flen, ())\n if not fpaths:\n return None\n\n for fbase_path in fpaths:\n # print(' '*5, tgt_fpath, fbase_path)\n if not filecmp.cmp(tgt_fpath, fbase_path, shallow=True):\n continue # early reject, try other candidates\n if filecmp.cmp(tgt_fpath, fbase_path, shallow=False):\n # identically equal\n return fbase_path\n\n return None", "def find(self, relative_path):\n found = list(self.grep(relative_path, lazy=True))\n if found:\n return found[0]\n\n return None", "def find_file(file_name):\n if (pathlib.Path(file_name).resolve()):\n file_name = str(file_name)\n logging.info(f' found {file_name}.')\n return file_name\n else:\n logging.error(f' no file {file_name} found for processing.')\n sys.exit()", "def _find_config_file(self) -> str or None:\n import os\n\n for path in self.paths:\n path = os.path.expanduser(path)\n for extension in self.file_extensions:\n for file_name in self.file_names:\n file_path = os.path.join(path, \"{}.{}\".format(file_name, extension))\n if 
os.path.isfile(file_path):\n return file_path\n\n return None", "def stat_file(self, path, info):\n return {}", "def storage_find_report_file(self, report_id, filename):\n return self._get_queryset(report_id=report_id, filename=filename).get()", "def locate_file(self, filename):\n return locate_file(filename, self.observatory)", "def get_track_info(dirpath, f):\n filepath = os.path.join(dirpath, f)\n track = mutagen.File(filepath)\n if not track:\n if filepath.endswith('.mp3') or filepath.endswith('.m4a'):\n raise ValueError('Skipped an mp3 or an m4a')\n return None\n\n cover = find_cover(dirpath)\n if isinstance(track.tags, mutagen.id3.ID3):\n return get_track_info_mp3(filepath, track.tags, track.info, cover)\n if isinstance(track.tags, mutagen.mp4.MP4Tags):\n return get_track_info_mp4(filepath, track.tags, track.info, cover)\n if isinstance(track, mutagen.oggopus.OggOpus):\n return get_track_info_opus(filepath, track.tags, track.info, cover)\n raise ValueError(\"No parser for file format\")", "def GetResultFile(self):\n\n file_path = self.configfile.map['ResultFilePath']\n\n # Check if several entrie\n if file_path is not None:\n if len(file_path) > 1:\n warning(\n 'Many path for the result file are setted ({}), I will take the first one'\n .format(file_path))\n file_path = file_path[0]\n\n # If the storing file is elsewhere\n if file_path != \"#\":\n sys.path.insert(0, file_path)\n base = DBASE.open('Anna')\n\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None\n\n else:\n base = DBASE.open('Anna')\n if base is not None:\n return base\n else:\n error(\n 'Cannot find Anna file in {}'\n .format(file_path))\n return None", "def get_reffile(self, refs, detector):\n for key in refs:\n if detector in key:\n return refs[key]\n self.logger.error(\"WARNING: no file found for detector {} in {}\"\n .format(detector, refs))", "def file_stat(self, file_path):", "def find_file(file_path=None, args=None, locations=DEFAULT_LOCATIONS,\n file_name='weewx.conf'):\n\n # Start by searching args (if available)\n if file_path is None and args:\n for i in range(len(args)):\n if not args[i].startswith('-'):\n file_path = args[i]\n del args[i]\n break\n\n if file_path is None:\n for directory in locations:\n # If this is a relative path, then prepend with the\n # directory this file is in:\n if not directory.startswith('/'):\n directory = os.path.join(os.path.dirname(__file__), directory)\n candidate = os.path.abspath(os.path.join(directory, file_name))\n if os.path.isfile(candidate):\n return candidate\n\n if file_path is None:\n raise IOError(\"Unable to find file '%s'. 
Tried directories %s\" %\n (file_name, locations))\n elif not os.path.isfile(file_path):\n raise IOError(\"%s is not a file\" % file_path)\n\n return file_path", "def _get_existing_path(self, file_path):\n test_files_location = self._resource_config.test_files_location\n search_order = [\n os.path.join(test_files_location or \"\", file_path),\n os.path.join(test_files_location or \"\", self.reservation_id, file_path),\n file_path,\n ]\n for path in search_order:\n if os.path.exists(path):\n return path\n raise BPRunnerException(\n self.__class__.__name__,\n 'File {} does not exists or \"Test Files Location\" attribute was not specified'.format(file_path),\n )", "def _find_tif_file(self):\n name = self.results_file.name[:-12] + \".tif\"\n try:\n tif_file = next(self.results_file.parent.glob(name))\n return tif_file\n except StopIteration:\n print(f\"Tif not found for {name}\")\n return None", "def _real_stat(self, path, _exception_for_missing_path=True):\n # Save for error message.\n original_path = path\n # Most code in this method is used to detect recursive link structures.\n visited_paths = set()\n while True:\n # Stat the link if it is one, else the file/directory.\n lstat_result = self._real_lstat(path, _exception_for_missing_path)\n if lstat_result is None:\n return None\n # If the file is not a link, the `stat` result is the same as the\n # `lstat` result.\n if not stat.S_ISLNK(lstat_result.st_mode):\n return lstat_result\n # If we stat'ed a link, calculate a normalized path for the file\n # the link points to.\n dirname, _ = self._path.split(path)\n path = self._path.join(dirname, lstat_result._st_target)\n path = self._path.abspath(self._path.normpath(path))\n # Check for cyclic structure.\n if path in visited_paths:\n # We had seen this path already.\n raise ftputil.error.RecursiveLinksError(\n \"recursive link structure detected for remote path '{}'\".format(\n original_path\n )\n )\n # Remember the path we have encountered.\n visited_paths.add(path)", "def try_stat(path: str) -> Optional[os.stat_result]:\n result = Stat._result(path, throw=False)\n if isinstance(result, BaseException):\n return None\n return result", "def find_file(path):\n return NotImplemented", "def _resolve_relative_path(filepath: str):\n if not filepath:\n return None\n\n inf_path = os.path.join(os.path.dirname(__file__), filepath)\n\n return inf_path", "def get_file(_file):\n _file = pathlib.Path(_file)\n if not _file.is_file():\n _file = None\n return _file" ]
[ "0.54929686", "0.5405405", "0.5390188", "0.538945", "0.52806234", "0.52451473", "0.524215", "0.5137974", "0.5047577", "0.502459", "0.50199705", "0.49906838", "0.4970432", "0.49497023", "0.4910103", "0.4907254", "0.48910886", "0.48726743", "0.48697725", "0.4835479", "0.48274845", "0.48164198", "0.4797407", "0.4785708", "0.47653246", "0.47639638", "0.47590125", "0.47358558", "0.47353184", "0.47322643" ]
0.718714
0
Compute the notices object as if the two paths were properly staged. analyze_diffs needs to be independent of staging. Staging might not have been run, or might be out of date from when analyze_diffs is run. So we make a best attempt to reconstruct the notices that would have occurred poststaging.
def _compute_staged_notices(mods_path, third_party_path): mods_notices = notices.Notices() if mods_path: mods_notices.add_sources([mods_path]) third_party_notices = notices.Notices() if third_party_path: third_party_notices.add_sources([third_party_path]) # If there are mods and third_party notices, pick the one that is more # specific to the file, which is the one that has a deeper path. if (_count_directory_levels_in_license_root(third_party_notices) > _count_directory_levels_in_license_root(mods_notices)): return third_party_notices else: return mods_notices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n continue\n\n prev = history[i]\n curr = history[i+1]\n\n #print(prev['subject'], \"==>\", curr['subject'])\n #print(curr['changes'])\n for c in curr['changes']:\n \n path = c['path']\n\n # Skip the metadata file\n if c['path'].endswith('datapackage.json'): \n continue \n\n # Find a handler for this kind of file...\n handler = None \n for r in representations: \n if r.can_process(path): \n handler = r \n break \n \n if handler is None: \n continue \n\n # print(path, \"being handled by\", handler)\n\n v1_hex = prev['commit']\n v2_hex = curr['commit']\n\n temp1 = tempfile.mkdtemp(prefix=\"dgit-diff-\") \n \n try: \n for h in [v1_hex, v2_hex]: \n filename = '{}/{}/checkout.tar'.format(temp1, h)\n try:\n os.makedirs(os.path.dirname(filename))\n except:\n pass \n extractcmd = ['git', 'archive', '-o', filename, h, path]\n output = run(extractcmd)\n if 'fatal' in output: \n raise Exception(\"File not present in commit\") \n with cd(os.path.dirname(filename)): \n cmd = ['tar', 'xvf', 'checkout.tar']\n output = run(cmd) \n if 'fatal' in output: \n print(\"Cleaning up - fatal 1\", temp1)\n shutil.rmtree(temp1)\n continue \n\n # Check to make sure that \n path1 = os.path.join(temp1, v1_hex, path) \n path2 = os.path.join(temp1, v2_hex, path) \n if not os.path.exists(path1) or not os.path.exists(path2): \n # print(\"One of the two output files is missing\") \n shutil.rmtree(temp1)\n continue \n\n #print(path1, path2) \n\n # Now call the handler\n diff = handler.get_diff(path1, path2)\n\n # print(\"Inserting diff\", diff)\n c['diff'] = diff\n\n except Exception as e: \n #traceback.print_exc() \n #print(\"Cleaning up - Exception \", temp1)\n shutil.rmtree(temp1)", "def apply_decisions(base, decisions):\n\n merged = copy.deepcopy(base)\n prev_path = None\n parent = None\n last_key = None\n resolved = None\n diffs = None\n # clear_parent actions should override other decisions on same obj, so\n # we need to track it\n clear_parent_flag = False\n for md in decisions:\n path, line = split_string_path(merged, md.common_path)\n # We patch all decisions with the same path in one op\n if path == prev_path:\n # Same path as previous, collect entry\n if clear_parent_flag:\n # Another entry will clear the parent, all other decisions\n # should be dropped\n pass\n else:\n if md.action == \"clear_parent\":\n clear_parent_flag = True\n # Clear any exisiting decsions!\n diffs = []\n ad = resolve_action(resolved, md)\n if line:\n ad = push_path(line, ad)\n diffs.extend(ad)\n\n else:\n # Different path, start a new collection\n if prev_path is not None:\n # First, apply previous diffs\n if parent is None:\n # Operations on root create new merged object\n merged = patch(resolved, diffs)\n else:\n # If not, overwrite entry in parent (which is an entry in\n # merged). 
This is ok, as no paths should point to\n # subobjects of the patched object\n parent[last_key] = patch(resolved, diffs)\n\n prev_path = path\n # Resolve path in base and output\n resolved = merged\n parent = None\n last_key = None\n for key in path:\n parent = resolved\n resolved = resolved[key] # Should raise if key missing\n last_key = key\n diffs = resolve_action(resolved, md)\n if line:\n diffs = push_path(line, diffs)\n clear_parent_flag = md.action == \"clear_parent\"\n # Apply the last collection of diffs, if present (same as above)\n if prev_path is not None:\n if parent is None:\n merged = patch(resolved, diffs)\n else:\n parent[last_key] = patch(resolved, diffs)\n\n merged = nbformat.from_dict(merged)\n return merged", "def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r", "def diff_report(self) -> str:\n graph_a = self.graph_a\n graph_b = self.graph_b\n\n graph_a_str = str(graph_a)\n graph_b_str = str(graph_b)\n\n if graph_a_str == graph_b_str:\n return \"\"\n\n graph_diff = difflib.ndiff(\n graph_a_str.splitlines(True), graph_b_str.splitlines(True)\n )\n graph_diff_report = [\"Graph diff:\", self._indent(\"\".join(graph_diff))]\n\n for node_a, node_b in itertools.zip_longest(graph_a.nodes(), graph_b.nodes()):\n if str(node_a) != str(node_b):\n graph_diff_report.append(\"First diverging operator:\")\n node_diff = difflib.ndiff(\n str(node_a).splitlines(True), str(node_b).splitlines(True)\n )\n source_printout = [\"node diff:\", self._indent(\"\".join(node_diff))]\n\n stack_a = node_a.sourceRange() if node_a else None\n if stack_a:\n source_printout.extend(\n [\"Former source location:\", self._indent(str(stack_a))]\n )\n stack_b = node_b.sourceRange() if node_b else None\n if stack_b:\n source_printout.extend(\n [\"Latter source location:\", self._indent(str(stack_b))]\n )\n\n graph_diff_report.extend(source_printout)\n\n break\n\n return \"\\n\".join(graph_diff_report)", "def compare_readbacks(golden_path,\n readback_path):\n\n errors_cram = 0\n seu_01 = 0\n seu_10 = 0\n mbu_pos = 0\n mbu_neg = 0\n mbu_delta = []\n\n golden = open(golden_path, \"rb\")\n readback = open(readback_path, \"rb\")\n\n golden_array = golden.read()\n readback_array = readback.read()\n print(len(golden_array))\n print(len(readback_array))\n\n for i in range(0, len(golden_array)):\n if golden_array[i] != readback_array[i]:\n gold_byte, = struct.unpack(\"B\", golden_array[i])\n gold_byte_ones = bin(gold_byte).count(\"1\")\n readback_byte, = struct.unpack(\"B\", readback_array[i])\n readback_byte_ones = 
bin(readback_byte).count(\"1\")\n\n delta = gold_byte_ones - readback_byte_ones\n\n if delta == -1:\n seu_01 += 1\n elif delta == 1:\n seu_10 += 1\n elif delta > 1:\n mbu_pos += 1\n mbu_delta.append(delta)\n print(\"\\n\\n\\n\\n\\n DUPA \\n\\n\\n\\n\\n\")\n elif delta < -1:\n mbu_neg += 1\n mbu_delta.append(delta)\n print(\"\\n\\n\\n\\n\\n DUPA \\n\\n\\n\\n\\n\")\n\n print(gold_byte,\n readback_byte,\n delta)\n\n errors_cram += 1\n\n print(\"\\n\\nseu_01: {0}\\nseu_10: {1}\\nmbu_01: {2}\\nmbu_10: {3}\".format(seu_01, seu_10, mbu_neg, mbu_pos))\n print(mbu_delta)\n golden.close()\n readback.close()\n\n return errors_cram", "def PostProcessDiff(self, diff):\r\n return diff", "def analyze_data():\n attack_free_1 = load_messages(\"data/csv/Attack_free_dataset.csv\", verbose=True)\n\n impersonation_1 = load_messages(\"data/csv/170907_impersonation.csv\", verbose=True)\n impersonation_2 = load_messages(\"data/csv/170907_impersonation_2.csv\", verbose=True)\n impersonation_3 = load_messages(\"data/csv/Impersonation_attack_dataset.csv\", verbose=True)\n\n information = {\n \"Mean time between normal messages\":\n get_mean_time_between_normal_messages(attack_free_1),\n \"Mean time between split messages\":\n get_mean_time_between_split_messages(attack_free_1),\n \"Sum of removed intervals in '170907_impersonation.csv'\":\n get_sum_of_removed_intervals(impersonation_1, 250),\n \"Sum of removed intervals in '170907_impersonation_2.csv'\":\n get_sum_of_removed_intervals(impersonation_2, 250),\n \"Sum of removed intervals in 'Impersonation_attack_dataset.csv'\":\n get_sum_of_removed_intervals(impersonation_3, 250),\n \"Index of split in '170907_impersonation.csv'\":\n get_index_before_time(impersonation_1, 250 - 23.434627056121826),\n \"Index of split in '170907_impersonation_2.csv'\":\n get_index_before_time(impersonation_2, 250 - 20.980855226516724),\n \"Index of split in 'Impersonation_attack_dataset.csv'\":\n get_index_before_time(impersonation_3, 250 - 2.1056361198425293)\n }\n\n return information", "def compare_old_and_new_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"statusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,\"status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"statusfiles\",recipedir,\"status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n myidx=0\n while myidx < len(scratchstatusfile.data):\n oldline = controlstatusfile.data[myidx]\n newline = scratchstatusfile.data[myidx]\n if \"#\" in oldline:\n pass\n else:\n ingred = oldline.split(\":\")[0].strip()\n oldstatus = oldline.split(\":\")[1].strip()\n newstatus = newline.split(\":\")[1].strip()\n if (oldstatus == \"P\") and (newstatus == \"P\"):\n rdict[recipedir][ingred]=\"AVOID\"\n elif (oldstatus == \"C\") and (newstatus == \"C\"):\n rdict[recipedir][ingred]=\"AVOID\"\n else:\n rdict[recipedir][ingred]=\"send\"\n myidx = myidx + 1\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict", "def _get_diff_data(views_index, src_data, ea_index, ddi_data):\n\n def _add_and_del():\n \"\"\"Handles the add's and del import's.\"\"\"\n for add_or_del_row in src_data:\n # Add Check.\n if 'add' 
in add_or_del_row[0]:\n if add_or_del_row[1] in \\\n ddi_data[views_index[add_or_del_row[15]]]:\n errored_list.append(add_or_del_row)\n continue\n else:\n import_add.append(add_or_del_row)\n continue\n\n # delete check\n if 'del' in add_or_del_row[0] and add_or_del_row[1] in \\\n ddi_data[views_index[add_or_del_row[15]]][\n add_or_del_row[1]]:\n import_delete.append([add_or_del_row[15],\n add_or_del_row[1],\n add_or_del_row[14]])\n continue\n unused_list.append(add_or_del_row)\n\n def _ea_in_disposition_col0_and_empty_ipr_d_col():\n \"\"\"Disposition col0 check and an empty ipr disposition column.\"\"\"\n for disposition_row in unused_list:\n # Check disposition\n ddi_index = views_index[disposition_row[15]]\n # Checks disposition column value and checks for IPR D value.\n # If no IPR D in extattrs dict stores the src data for updates.\n if disposition_row[0] in ea_ipr_d_values and 'IPR Designation' not\\\n in ddi_data[ddi_index][disposition_row[1]]['extattrs']:\n import_merge_disposition.append(\n [disposition_row[15],\n disposition_row[1],\n disposition_row[14],\n disposition_row[0]])\n\n def _comment_check():\n \"\"\"Function for checking ipam comment attribute.\"\"\"\n for comment_row in unused_list:\n ddi_index = views_index[comment_row[15]]\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]]\\\n and comment_row[12] == '':\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if 'comment' not in ddi_data[ddi_index][comment_row[1]] and \\\n comment_row[12] != '':\n import_merge.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if comment_row[12] != \\\n ddi_data[ddi_index][comment_row[1]]['comment']:\n import_override.append([comment_row[15],\n comment_row[1],\n comment_row[14],\n {'comment': comment_row[12]}])\n continue\n\n def _non_listed_ea_columns_check():\n \"\"\"Checks non-listable ea columns.\"\"\"\n for ea_row in unused_list:\n # dup Check in disposition\n ddi_index = views_index[ea_row[15]]\n for key, value in ea_index.items():\n # ea attributes that could be listed.\n if key == 'Datacenter' or key == 'IPR Designation':\n continue\n # Checks for empty src value and empty ddi data value.\n # Continues if True.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Checks a non-empty src value and updates if an\n # empty ddi data value.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n\n def _listed_ea_column_check():\n \"\"\"Checks non-listable ea columns.\"\"\"\n for ea_row in unused_list:\n ddi_index = views_index[ea_row[15]]\n # This check is performed in\n # _ea_in_disposition_col0_and_empty_ipr_d_col\n if ea_row[0] in ea_ipr_d_values and \\\n 'IPR Designation' not in \\\n ddi_data[ddi_index][ea_row[1]]['extattrs']:\n continue\n # Update IPR D src column with ea_row[0] for processing.\n # WORK IN 
PROGRESS\n elif ea_row[0] in ea_ipr_d_values and 'IPR Designation' \\\n in ddi_data[ddi_index][ea_row[1]]['extattrs']:\n pass\n # Processing listable columns.\n for key, value in ea_index.items():\n # Skip's unused keys.\n if key not in ['Datacenter', 'IPR Designation']:\n continue\n # Check for blank column and blank source column.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] and \\\n ea_row[value] in ['', 'DDI']:\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n if key == 'IPR Designation':\n if ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] in ea_ipr_d_values:\n ea_row[16] = ea_row[16] + ',' + ea_row[0]\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[16]}])\n continue\n # Check for Disposition col, check for comma not in IPR D col\n # value, check value in IPR D col to ea ipr d attribute list,\n # check IPR D col value eq ddi value.\n # On not listed IPR D values.\n elif ea_row[0] in ea_ipr_d_values \\\n and ',' not in ea_row[16] \\\n and ea_row[16] not in ea_ipr_d_values:\n import_override.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[0]}])\n continue\n# # Check Disposition col. and if IPR D listed value needs\n# # updating. On listed IPR D values.\n# if ea_row[0].lower().strip() in ea_ipr_d_values \\\n# and ',' in ea_row[16]:\n# temp_list = ea_row[16].split(',')\n# temp_list = [x.strip() for x in temp_list]\n# if ea_row[0].lower().strip() in temp_list:\n# continue\n# else:\n# temp_list.append(ea_row[0].lower().strip())\n# temp_dict_override.update({key: temp_list})\n# import_override.append([ea_row[15].strip(),\n# ea_row[1].strip(),\n# ea_row[14].strip(),\n# temp_dict_override])\n# continue\n\n # Builds dataset for non-listed values. 
Final Step.\n # If key not in ddi data and src value is not none.\n # Assign to merge.\n if key not in ddi_data[ddi_index][ea_row[1]]['extattrs'] \\\n and ea_row[value] not in ['', 'DDI']:\n import_merge.append([ea_row[15].strip(),\n ea_row[1].strip(),\n ea_row[14].strip(),\n {key: ea_row[value]}])\n continue\n # Checks diff against src value and a populated value in the\n # ddi data and replaces with src value.\n if ea_row[value] != \\\n ddi_data[ddi_index][\n ea_row[1]]['extattrs'][key]['value']:\n import_override.append([ea_row[15],\n ea_row[1],\n ea_row[14],\n {key: ea_row[value]}])\n continue\n\n # Local scope variables.\n import_add = []\n import_delete = []\n import_merge = []\n import_override = []\n import_merge_disposition = []\n unused_list = []\n errored_list = []\n # Check for extensible attribute in Disposition column[0].\n # If found and IPR D column is empty append for writing.\n ea_ipr_d_values = ['leaf', 'dup', 'followup', 'decom', 'adv', 'divest',\n 'ignore', 're-ip', 'parent', 'drop reserve']\n _add_and_del()\n _ea_in_disposition_col0_and_empty_ipr_d_col()\n _comment_check()\n _non_listed_ea_columns_check()\n _listed_ea_column_check()\n return import_add, \\\n import_delete, \\\n import_merge_disposition, \\\n import_merge, \\\n import_override", "def analyze_state_changes(self):\n graph = self._graph\n lost_chunks = set(self._lost_chunks)\n op_states = self._op_states\n\n # mark lost virtual nodes as lost when some preds are lost\n for n in graph:\n if not isinstance(n.op, VirtualOperand) \\\n or op_states.get(n.op.key) == OperandState.UNSCHEDULED:\n continue\n if any(pred.key in lost_chunks for pred in graph.iter_predecessors(n)):\n lost_chunks.add(n.key)\n\n # collect operands with lost data\n op_key_to_chunks = defaultdict(list)\n lost_ops = set()\n for n in graph:\n op_key_to_chunks[n.op.key].append(n)\n if n.key in lost_chunks:\n lost_ops.add(n.op.key)\n\n # check data on finished operands. when data lost, mark the operand\n # and its successors as affected.\n affected_op_keys = set()\n for op_key in lost_ops:\n affected_op_keys.add(op_key)\n for n in op_key_to_chunks[op_key]:\n affected_op_keys.update(succ.op.key for succ in graph.iter_successors(n))\n\n # scan the graph from bottom and reassign new states\n new_states = dict()\n for chunk in graph.topological_iter(reverse=True):\n op_key = chunk.op.key\n if chunk.op.key not in affected_op_keys:\n continue\n\n can_be_ready = True\n stop_spread_states = (OperandState.RUNNING, OperandState.FINISHED)\n for pred in graph.iter_predecessors(chunk):\n pred_op_key = pred.op.key\n # mark affected, if\n # 1. data of the operand is lost\n # 2. 
state does not hold data, or data is lost,\n # for instance, operand is freed.\n if pred.key in lost_chunks or op_states.get(pred_op_key) not in stop_spread_states:\n affected_op_keys.add(pred_op_key)\n can_be_ready = False\n\n # update state given data preservation of prior nodes\n chunk_op_state = op_states.get(op_key)\n if can_be_ready and chunk_op_state != OperandState.READY:\n new_states[op_key] = OperandState.READY\n elif not can_be_ready and chunk_op_state != OperandState.UNSCHEDULED:\n new_states[op_key] = OperandState.UNSCHEDULED\n\n op_states.update(new_states)\n return new_states", "def __diff_internal(self):\n assert self.p > 0, \"order of Bspline must be > 0\" # we already handle the other case in diff()\n\n # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html\n #\n t = self.knot_vector\n p = self.p\n bi = BsplineBasis(t[:-1], p - 1)\n bip1 = BsplineBasis(t[1:], p - 1)\n\n numer1 = +p\n numer2 = -p\n denom1 = t[p:-1] - t[:-(p + 1)]\n denom2 = t[(p + 1):] - t[1:-p]\n\n with np.errstate(divide='ignore', invalid='ignore'):\n ci = np.where(denom1 != 0., (numer1 / denom1), 0.)\n cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.)\n\n return (ci, bi), (cip1, bip1)", "def comparison():\n path = \"Data/data_fronts/\"\n path1 = \"Results/labelled_images1010/fronts/\"\n\n #computes the areas for the first frame in order to normalize the other areas\n pol0 = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.0.png.txt\",sep =' '))\n #makes an object polygon in order to compute the area\n pol0 = np.array(pol0)\n pol0 = Polygon(pol0)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_1_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n pol = pd.DataFrame(pd.read_csv(path1 + \"fronts_labelled.m.\"+str(i)+\".png.txt\",sep =' '))\n pol = np.array(pol)\n pol = Polygon(pol)\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",sep ='\\t'))\n polsx.columns = [\"y\",\"x\"]\n poldx = pd.DataFrame(pd.read_csv(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",sep ='\\t'))\n poldx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def fast_comparison(path = \"Data/data_fronts/\",path1 = \"Results/modified_images/fronts/\"):\n #computes the areas for the first frame in order to normalize the other areas\n pol0dx = grid(path1+\"m_0.png_dx.txt\")\n pol0dx.columns = [\"y\",\"x\"]\n pol0sx = grid(path1+\"m_0.png_sx.txt\")\n pol0sx.columns = [\"y\",\"x\"]\n if pol0dx[\"x\"][0]>100:\n pol0dx = pol0dx.reindex(index=pol0dx.index[::-1])\n if pol0sx[\"x\"][0]<100:\n pol0sx = pol0sx.reindex(index=pol0sx.index[::-1])\n pol0sx = pol0sx.append(pol0dx)\n pol0sx = 
np.array(pol0sx)\n pol0 = Polygon(pol0sx)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_1_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_1_dx.txt\",l = 633,delimiter ='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n #makes an object polygon in order to compute the area\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n pol1 = Polygon(polsx)\n\n\n areas = []\n areas_hand = []\n #computes the areas for all the frames\n for i in range(42):\n poldx = grid(path1+\"m_\"+str(i)+\".png_dx.txt\")\n poldx.columns = [\"y\",\"x\"]\n polsx = grid(path1+\"m_\"+str(i)+\".png_sx.txt\")\n polsx.columns = [\"y\",\"x\"]\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n #makes an object polygon in order to compute the area\n\n pol = Polygon(polsx)\n\n #normalize the areas with respect to the area of the first frame\n areas.append(pol.area/pol0.area)\n\n polsx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_sx.txt\",l = 633,delimiter ='\\t')\n polsx.columns = [\"y\",\"x\"]\n polsx[\"y\"] =polsx[\"y\"]/844*1600\n polsx[\"x\"] =polsx[\"x\"]/633*1200\n poldx = grid(path + \"Sham_8-2-18_Field 5_\"+str(i+1)+\"_dx.txt\",l = 633,delimiter='\\t')\n poldx.columns = [\"y\",\"x\"]\n poldx[\"y\"] =poldx[\"y\"]/844*1600\n poldx[\"x\"] =poldx[\"x\"]/633*1200\n if poldx[\"x\"][0]>100:\n poldx = poldx.reindex(index=poldx.index[::-1])\n if polsx[\"x\"][0]<100:\n polsx = polsx.reindex(index=polsx.index[::-1])\n polsx = polsx.append(poldx)\n polsx = np.array(polsx)\n\n pol2 = Polygon(polsx)\n #normalize the areas with respect to the area of the first frame\n areas_hand.append(pol2.area/pol1.area)\n #returns the two arrays with the normalized areas\n return np.array(areas) , np.array(areas_hand)", "def _analyze(self):\r\n if self.value is None or self.value == self.previous:\r\n pass\r\n elif self._operation == \"add\":\r\n self._additions = self.value\r\n elif self._operation == \"remove\":\r\n self._removals = self.value\r\n elif self.previous is None:\r\n self._assignments = self.value\r\n else:\r\n # partial update time\r\n self._additions = (self.value - self.previous) or None\r\n self._removals = (self.previous - self.value) or None\r\n self._analyzed = True", "def process_traces(subdirs,dates,load_path):\n\n N = 60*60*24*len(dates)*10\n\n firing_rates_storage = np.zeros((N))\n var_storage = np.zeros((N))\n position_storage = np.zeros((N,2))\n firing_rates_storage[:] = np.nan\n var_storage[:] = np.nan\n timestamps = np.zeros((N))\n clusters = np.zeros((N))\n pk_max = 0\n n=0\n\n for subdir,date in zip(subdirs,dates):\n \n dpk = pk_max \n path = load_path+'/%s/'%subdir\n file = [i for i in os.listdir(path) if '.pkl' in i] \n \n if len(file) == 0:\n continue\n \n pd_ob = pkl.load(open(path+file[0],'rb'))\n \n positions = pd_ob['positions']\n sts = pd_ob['sts']\n isis = pd_ob['isis']\n fsts = pd_ob['fsts']\n fisis = pd_ob['fisis']\n et = pd_ob['et']\n ep = pd_ob['ep']\n \n max_time = 0\n for k,v in sts.items():\n max_time = max(max_time,np.max(v))\n \n for t in np.arange(0,np.floor(max_time)):\n\n for i,pk in 
enumerate(sts.keys()):\n if np.count_nonzero((sts[pk]>t) & (sts[pk]<(t+1))) > 1:\n\n p = positions[pk][:-1]\n\n x = sts[pk]\n y = isis[pk]\n fx = fsts[pk]\n fy = fisis[pk]\n\n firing_rates_storage[n] = np.nanmean(y[(x>t) & (x<t+1)])\n var_storage[n] = np.nanvar(y[(x>t) & (x<t+1)])\n position_storage[n] = np.nanmean(p[(x>t) & (x<t+1)],axis=0)\n timestamps[n] = (date + timedelta(0,int(t))).timestamp()\n clusters[n] = pk + dpk\n n=n+1\n pk_max = max(pk_max,pk+dpk)\n\n firing_rates_storage = firing_rates_storage[:n]\n var_storage = var_storage[:n]\n position_storage = position_storage[:n]\n timestamps = timestamps[:n]\n clusters = clusters[:n]\n\n np.savez(load_path+'processed_traces.npz',frs=firing_rates_storage,vs=var_storage,pos=position_storage,ts=timestamps,cl=clusters)\n return 0", "def test_calculate_indicates_removal_of_unrelated_files(self, m_free):\n # files are unrelated to backup\n walk_paths = {'/dst': [('/dst', ['/a'], ['x0.txt']),\n ('/dst/a', [], ['x1.txt'])]}\n copied_indexes = []\n reconciler = keepfilesreconciler.KeepFilesReconciler(self.resolver, self.options)\n with filesystemhelpers.mock_walk(walk_paths):\n filepaths = reconciler.calculate(self.copyfiles, copied_indexes)\n assert filepaths == {'/dst/a/x1.txt', '/dst/x0.txt'}", "def compare_old_and_new_change_status_files():\n rdict=dict()\n mastcontrol=dirutil.get_mast_control_path()\n mastscratch=dirutil.get_mast_scratch_path()\n recipedirs=dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\"))\n for recipedir in recipedirs:\n mystatus=\"unknown\"\n rdict[recipedir]=dict()\n changelist=list()\n if not os.path.exists(os.path.join(mastscratch,recipedir)):\n mystatus=\"archived\"\n else:\n ingreddirs = dirutil.immediate_subdirs(os.path.join(mastcontrol,\"changestatusfiles\",recipedir))\n for ingreddir in ingreddirs:\n scratchstatusfile = MASTFile(os.path.join(mastscratch,recipedir,ingreddir,\"change_status.txt\"))\n controlstatusfile = MASTFile(os.path.join(mastcontrol,\"changestatusfiles\",recipedir,ingreddir,\"change_status.txt\"))\n if scratchstatusfile.data == controlstatusfile.data:\n mystatus=\"unchanged\"\n else:\n mystatus=\"changed\"\n rdict[recipedir][ingreddir]=\"send\"\n rdict[recipedir][\"MAIN\"]=mystatus\n return rdict", "def check_unstaged_changes(self):\n pass", "def _average_plan_diffs(self, server_config: dict, cycle):\n logging.info(\"start diffs averaging!\")\n logging.info(\"cycle: %s\" % str(cycle))\n logging.info(\"fl id: %d\" % cycle.fl_process_id)\n _model = model_manager.get(fl_process_id=cycle.fl_process_id)\n logging.info(\"model: %s\" % str(_model))\n model_id = _model.id\n logging.info(\"model id: %d\" % model_id)\n _checkpoint = model_manager.load(model_id=model_id)\n logging.info(\"current checkpoint: %s\" % str(_checkpoint))\n model_params = model_manager.unserialize_model_params(_checkpoint.values)\n logging.info(\"model params shapes: %s\" % str([p.shape for p in model_params]))\n\n # Here comes simple hardcoded avg plan\n # it won't be always possible to retrieve and unserialize all diffs due to memory constrains\n # needs some kind of iterative or streaming approach,\n # e.g.\n # for diff_N in diffs:\n # avg = avg_plan(avg, N, diff_N)\n # and the plan is:\n # avg_next = (avg_current*(N-1) + diff_N) / N\n reports_to_average = self._worker_cycles.query(\n cycle_id=cycle.id, is_completed=True\n )\n diffs = [\n model_manager.unserialize_model_params(report.diff)\n for report in reports_to_average\n ]\n\n # Again, not sure max_workers == number of diffs to avg\n diffs = 
random.sample(diffs, server_config.get(\"max_workers\"))\n\n raw_diffs = [\n [diff[model_param] for diff in diffs]\n for model_param in range(len(model_params))\n ]\n logging.info(\"raw diffs lengths: %s\" % str([len(row) for row in raw_diffs]))\n\n sums = [reduce(th.add, param) for param in raw_diffs]\n logging.info(\"sums shapes: %s\" % str([sum.shape for sum in sums]))\n\n diff_avg = [th.div(param, len(diffs)) for param in sums]\n logging.info(\"diff_avg shapes: %s\" % str([d.shape for d in diff_avg]))\n\n # apply avg diff!\n _updated_model_params = [\n model_param - diff_param\n for model_param, diff_param in zip(model_params, diff_avg)\n ]\n logging.info(\n \"_updated_model_params shapes: %s\"\n % str([p.shape for p in _updated_model_params])\n )\n\n # make new checkpoint\n serialized_params = model_manager.serialize_model_params(_updated_model_params)\n _new_checkpoint = model_manager.save(model_id, serialized_params)\n logging.info(\"new checkpoint: %s\" % str(_new_checkpoint))\n\n # mark current cycle completed\n cycle.is_completed = True\n self._cycles.update()\n\n completed_cycles_num = self._cycles.count(\n fl_process_id=cycle.fl_process_id, is_completed=True\n )\n logging.info(\"completed_cycles_num: %d\" % completed_cycles_num)\n max_cycles = server_config.get(\"num_cycles\")\n if completed_cycles_num < max_cycles:\n # make new cycle\n _new_cycle = self.create(cycle.fl_process_id, cycle.version)\n logging.info(\"new cycle: %s\" % str(_new_cycle))\n else:\n logging.info(\"FL is done!\")", "def compute_error_metrics(original, altered, results, converterOpts=None):\n import math\n from tempfile import TemporaryDirectory\n\n import large_image_source_tiff\n import numpy as np\n import packaging\n import skimage.metrics\n\n lastlog = 0\n with TemporaryDirectory() as tempDir:\n # TODO: check if the original is geospatial; if so appropriate options\n tempPath = os.path.join(tempDir, os.path.basename(original) + '.tiff')\n orig = large_image_converter.convert(original, tempPath, compression='lzw')\n tsOrig = large_image_source_tiff.open(orig)\n numFrames = len(tsOrig.getMetadata().get('frames', [0]))\n tsAlt = large_image_source_tiff.open(altered)\n mse = 0\n ssim = 0\n ssim_count = 0\n maxval = 0\n maxdiff = 0\n sum = 0\n count = 0\n tileSize = 2048\n for frame in range(numFrames):\n tiAlt = tsAlt.tileIterator(tile_size=dict(width=tileSize), frame=frame)\n for tileOrig in tsOrig.tileIterator(tile_size=dict(width=tileSize), frame=frame):\n tileAlt = next(tiAlt)\n do = tileOrig['tile']\n da = tileAlt['tile']\n if do.dtype != da.dtype and da.dtype == np.uint8:\n da = da.astype(int) * 257\n do = do.astype(int)\n da = da.astype(int)\n maxval = max(maxval, do.max(), da.max())\n if do.shape[2] > da.shape[2]:\n do = do[:, :, :da.shape[2]]\n if da.shape[2] > do.shape[2]:\n da = da[:, :, :do.shape[2]]\n diff = np.absolute(do - da)\n maxdiff = max(maxdiff, diff.max())\n sum += diff.sum()\n count += diff.size\n last_mse = np.mean(diff ** 2)\n mse += last_mse * diff.size\n last_ssim = 0\n try:\n kwargs = {}\n if (packaging.version.parse(skimage.__version__) >=\n packaging.version.parse('0.19')):\n kwargs['channel_axis'] = 2 if len(do.shape) > 2 else None\n else:\n kwargs['multichannel'] = len(do.shape) > 2\n last_ssim = skimage.metrics.structural_similarity(\n do.astype(float), da.astype(float),\n data_range=255 if tileOrig['tile'].dtype == np.uint8 else 65535,\n gaussian_weights=True, sigma=1.5, use_sample_covariance=False,\n **kwargs)\n ssim += last_ssim * diff.size\n ssim_count += diff.size\n 
except ValueError:\n pass\n if time.time() - lastlog >= 10 and ssim_count:\n logger.debug(\n 'Calculating error (%d/%d): rmse %4.2f ssim %6.4f '\n 'last rmse %4.2f ssim %6.4f',\n tileOrig['tile_position']['position'] + 1 +\n tileOrig['iterator_range']['position'] * frame,\n tileOrig['iterator_range']['position'] * numFrames,\n (mse / count) ** 0.5, ssim / ssim_count,\n last_mse ** 0.5, last_ssim)\n lastlog = time.time()\n results['maximum_error'] = maxdiff\n results['average_error'] = sum / count\n results['rmse'] = (mse / count) ** 0.5\n results['psnr'] = 10 * math.log10(\n maxval ** 2 / (mse / count)) if mse else None\n if ssim_count:\n results['ssim'] = ssim / ssim_count\n logger.debug('Calculated error: rmse %4.2f psnr %3.1f ssim %6.4f',\n results['rmse'], results['psnr'] or 0, results['ssim'])", "def get_resulting_diffs():\n diff_dirpath = application.join_abs_path(\n EMPTY_TEST_DIR, application.OUTPUT_DIR_NAME)\n diffleft_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_LEFT_FILENAME)\n diffright_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_RIGHT_FILENAME)\n\n diff_left = read_gzip_file_lines_into_set(diffleft_filename)\n diff_right = read_gzip_file_lines_into_set(diffright_filename)\n\n return diff_left, diff_right", "def avg_metric(sharp_path, deblurred_path): # TODO1 do multiprocessing in those methods\n sum_psnr = 0\n sum_mse = 0\n sum_ssim = 0\n\n # List all files\n files_orig = [f for f in listdir(sharp_path) if isfile(join(sharp_path, f))]\n files_deb = [f for f in listdir(deblurred_path) if isfile(join(deblurred_path, f))]\n\n count = 0\n for orig, deb in zip(files_orig, files_deb):\n orig_fn = join(sharp_path, orig)\n deb_fn = join(deblurred_path, deb)\n # Load images\n orig_img = cv2.imread(orig_fn)\n deb_img = cv2.imread(deb_fn)\n orig_img = np.divide(orig_img, 255)\n deb_img = np.divide(deb_img, 255)\n\n # Compute metrics\n sum_psnr += peak_signal_noise_ratio(orig_img, deb_img)\n sum_mse += mean_squared_error(orig_img, deb_img)\n sum_ssim += structural_similarity(orig_img, deb_img, multichannel=True)\n\n count += 1\n print('Analyzed: {}/{}'.format(count, len(files_orig)))\n\n # Average\n avg_psnr = sum_psnr/len(files_orig)\n avg_mse = sum_mse/len(files_orig)\n avg_ssim = sum_ssim/len(files_orig)\n\n return avg_mse, avg_psnr, avg_ssim", "def check_error(self):\n refine_results = {}\n for phase_path, phase in self.phases.items():\n refine_results[phase_path] = {}\n\n # Save the original grid to the refine results\n tx = phase.options['transcription']\n gd = tx.grid_data\n num_nodes = gd.subset_num_nodes['all']\n numseg = gd.num_segments\n\n refine_results[phase_path]['num_segments'] = numseg\n refine_results[phase_path]['order'] = gd.transcription_order\n refine_results[phase_path]['segment_ends'] = gd.segment_ends\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n refine_results[phase_path]['error'] = np.zeros(numseg, dtype=float)\n\n if isinstance(tx, dm.RungeKutta):\n continue\n\n outputs = phase.list_outputs(units=False, out_stream=None)\n\n out_values_dict = {k: v['value'] for k, v in outputs}\n\n prom_to_abs_map = phase._var_allprocs_prom2abs_list['output']\n\n num_scalar_states = 0\n for state_name, options in phase.state_options.items():\n shape = options['shape']\n size = np.prod(shape)\n num_scalar_states += size\n\n x = np.zeros([num_nodes, num_scalar_states])\n f = np.zeros([num_nodes, num_scalar_states])\n c = 0\n\n # Obtain the solution on the current grid\n for 
state_name, options in phase.state_options.items():\n prom_name = f'timeseries.states:{state_name}'\n abs_name = prom_to_abs_map[prom_name][0]\n rate_source_prom_name = f\"timeseries.state_rates:{state_name}\"\n rate_abs_name = prom_to_abs_map[rate_source_prom_name][0]\n x[:, c] = out_values_dict[prom_name].ravel()\n f[:, c] = out_values_dict[rate_source_prom_name].ravel()\n c += 1\n\n # Obtain the solution on the new grid\n # interpolate x at t_hat\n new_order = gd.transcription_order + 1\n # Gauss-Lobatto does not allow even orders so increase order by 2 instead\n if gd.transcription == 'gauss-lobatto':\n new_order += 1\n new_grid = GridData(numseg, gd.transcription, new_order, gd.segment_ends, gd.compressed)\n left_end_idxs = new_grid.subset_node_indices['segment_ends'][0::2]\n left_end_idxs = np.append(left_end_idxs, new_grid.subset_num_nodes['all'] - 1)\n\n L = interpolation_lagrange_matrix(gd, new_grid)\n I = integration_matrix(new_grid)\n\n # Call the ODE at all nodes of the new grid\n x_hat, x_prime = self.eval_ode(phase, new_grid, L, I)\n E = {}\n e = {}\n err_over_states = {}\n for state_name, options in phase.state_options.items():\n E[state_name] = np.absolute(x_prime[state_name] - x_hat[state_name])\n for k in range(0, numseg):\n e[state_name] = E[state_name]/(1 + np.max(x_hat[state_name][left_end_idxs[k]:left_end_idxs[k + 1]]))\n err_over_states[state_name] = np.zeros(numseg)\n\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n err_over_states[state_name][k] = np.max(e[state_name][left_end_idxs[k]:left_end_idxs[k + 1]])\n\n self.error[phase_path] = np.zeros(numseg)\n refine_results[phase_path]['error'] = np.zeros(numseg)\n refine_results[phase_path]['need_refinement'] = np.zeros(numseg, dtype=bool)\n\n # Assess the errors in each state\n for state_name, options in phase.state_options.items():\n for k in range(0, numseg):\n if err_over_states[state_name][k] > self.error[phase_path][k]:\n self.error[phase_path][k] = err_over_states[state_name][k]\n refine_results[phase_path]['error'][k] = err_over_states[state_name][k]\n if self.error[phase_path][k] > phase.refine_options['tolerance']:\n refine_results[phase_path]['need_refinement'][k] = True\n\n return refine_results", "def errdump_analysis(errdump_df, switchshow_df, switch_params_aggregated_df, \n portshow_aggregated_df, project_constants_lst):\n \n # imported project constants required for module execution\n project_steps_df, max_title, io_data_names_df, _, report_headers_df, report_columns_usage_sr, *_ = project_constants_lst\n\n # data titles obtained after module execution (output data)\n # data titles which module is dependent on (input data)\n data_names, analyzed_data_names = dfop.list_from_dataframe(io_data_names_df, 'errorlog_analysis_out', 'errorlog_analysis_in')\n # module information\n meop.show_module_info(project_steps_df, data_names)\n # read data from database if they were saved on previos program execution iteration\n data_lst = dbop.read_database(project_constants_lst, *data_names)\n \n # force run when any output data from data_lst is not found in database or \n # procedure execution explicitly requested (force_run flag is on) for any output or input data \n force_run = meop.verify_force_run(data_names, data_lst, project_steps_df, \n max_title, analyzed_data_names)\n if force_run:\n # data imported from init file (regular expression patterns) to extract values from data columns\n pattern_dct, _ = sfop.regex_pattern_import('raslog_split', max_title)\n 
raslog_message_details_df = sfop.dataframe_import('raslog_details', max_title)\n raslog_message_id_details_df = sfop.dataframe_import('raslog_id_details', max_title, columns=['Message_ID', 'Details', 'Recommended_action'])\n\n # current operation information string\n info = f'Counting RASLog messages'\n print(info, end =\" \")\n\n # get aggregated DataFrames\n errdump_aggregated_df = errdump_aggregated(errdump_df, switchshow_df, switch_params_aggregated_df, \n portshow_aggregated_df, pattern_dct)\n # count how many times event appears during one month for the last six months \n raslog_counter_df, raslog_frequent_df = errdump_statistics(errdump_aggregated_df, raslog_message_details_df, raslog_message_id_details_df)\n # after finish display status\n meop.status_info('ok', max_title, len(info)) \n # partition aggregated DataFrame to required tables\n raslog_report_df = raslog_report(raslog_frequent_df, data_names, report_headers_df, report_columns_usage_sr)\n\n # create list with partitioned DataFrames\n data_lst = [errdump_aggregated_df, raslog_counter_df, raslog_report_df]\n # writing data to sql\n dbop.write_database(project_constants_lst, data_names, *data_lst)\n # verify if loaded data is empty and replace information string with empty DataFrame\n else:\n data_lst = dbop.verify_read_data(max_title, data_names, *data_lst)\n errdump_aggregated_df, raslog_counter_df, *_ = data_lst\n # save data to service file if it's required\n for data_name, data_frame in zip(data_names, data_lst):\n report.dataframe_to_excel(data_frame, data_name, project_constants_lst)\n return errdump_aggregated_df, raslog_counter_df", "def __getHoldingsTransferred(self, dirPath=None):\n trsfD = {}\n insD = {}\n dirPath = dirPath if dirPath else self.__sandboxPath\n\n try:\n fp = os.path.join(dirPath, \"status\", \"theoretical_model_obsolete.tsv\")\n lineL = self.__mU.doImport(fp, \"list\") # pylint: disable=no-member\n #\n obsDateD = {}\n obsIdD = {}\n for line in lineL:\n fields = line.split(\"\\t\")\n if len(fields) < 3:\n continue\n entryId = str(fields[0]).strip().upper()\n obsDateD[entryId] = dateutil.parser.parse(fields[2]) if self.__assignDates else fields[2]\n if len(fields) > 3 and len(fields[3]) > 3:\n obsIdD[entryId] = str(fields[3]).strip().upper()\n logger.debug(\"Read %d obsolete insilico id codes\", len(obsDateD))\n # --------- --------- --------- --------- --------- --------- ---------\n fp = os.path.join(dirPath, \"status\", \"model-archive-PDB-insilico-mapping.list\")\n lineL = self.__mU.doImport(fp, \"list\")\n #\n trD = {}\n for line in lineL:\n fields = line.split(\":\")\n if len(fields) < 2:\n continue\n entryId = str(fields[1]).strip().upper()[:4]\n maId = str(fields[0]).strip()\n trD[entryId] = maId\n logger.debug(\"Read %d model archive id codes\", len(trD))\n #\n # --------- --------- --------- --------- --------- --------- ---------\n fp = os.path.join(dirPath, \"status\", \"theoretical_model_v2.tsv\")\n lineL = self.__mU.doImport(fp, \"list\")\n #\n logger.debug(\"Read %d insilico id codes\", len(lineL))\n for line in lineL:\n fields = str(line).split(\"\\t\")\n if len(fields) < 6:\n continue\n depDate = dateutil.parser.parse(fields[2]) if self.__assignDates else fields[2]\n relDate = None\n if len(fields[3]) >= 10 and not fields[3].startswith(\"0000\"):\n relDate = dateutil.parser.parse(fields[3]) if self.__assignDates else fields[3]\n\n statusCode = \"TRSF\" if fields[1] == \"REL\" else fields[1]\n\n entryId = str(fields[0]).upper()\n title = fields[4]\n #\n auditAuthors = [t.strip() for 
t in fields[5].split(\";\")]\n repId = None\n repName = None\n if entryId in trD:\n repName = \"Model Archive\"\n repId = trD[entryId]\n\n #\n dD = {\n \"status_code\": statusCode,\n \"deposit_date\": depDate,\n \"repository_content_types\": [\"coordinates\"],\n \"title\": title,\n \"audit_authors\": auditAuthors,\n }\n #\n if relDate:\n dD[\"release_date\"] = relDate\n #\n if repId:\n dD[\"remote_accession_code\"] = repId\n dD[\"remote_repository_name\"] = repName\n if statusCode == \"TRSF\":\n trsfD[entryId] = dD\n #\n #\n dD = {\"status_code\": statusCode, \"deposit_date\": depDate, \"title\": title, \"audit_authors\": auditAuthors}\n #\n if relDate:\n dD[\"release_date\"] = relDate\n #\n if entryId in obsDateD:\n dD[\"remove_date\"] = relDate\n #\n if entryId in obsIdD:\n dD[\"id_codes_replaced_by\"] = [obsIdD[entryId]]\n #\n insD[entryId] = dD\n #\n logger.info(\"Transferred entries %d - insilico models %d\", len(trsfD), len(insD))\n #\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n\n return trsfD, insD", "def main_pipeline(self, image):\n # detection\n t0 = datetime.now()\n bbox_list, score_list, label_list = self.det.inference(image)\n t1 = datetime.now()\n logging.info('main pipeline (det): {}'.format(get_tdiff(t0, t1)))\n \n # estimation\n t0 = datetime.now()\n disp = self.est.inference(image)\n depth_list = self.est.calc_depth(bbox_list)\n t1 = datetime.now()\n logging.info('main pipeline (est): {}'.format(get_tdiff(t0, t1)))\n \n # tracker predict\n t0 = datetime.now()\n for t in self.t_list:\n t.predict()\n t1 = datetime.now()\n logging.info('main pipeline (trk_pred): {}'.format(get_tdiff(t0, t1)))\n \n # associate\n t0 = datetime.now()\n matched_pair, unmatched_bbox_list, _ = associate(bbox_list, label_list, self.t_list)\n t1 = datetime.now()\n logging.info('main pipeline (da_solver): {}'.format(get_tdiff(t0, t1)))\n \n t0 = datetime.now()\n # update trackers for matched_pair\n for m in matched_pair:\n t = self.t_list[m[1]]\n bbox = bbox_list[m[0]]\n depth = depth_list[m[0]]\n est_dict = {\n 'label': label_list[m[0]],\n 'score': score_list[m[0]]}\n t.update(self.frame_idx, bbox, depth, est_dict)\n \n # update in-track status of all trackers\n for t in self.t_list:\n t.update_status(self.frame_idx)\n \n # purge out dead trackers\n self.t_list = [t for t in self.t_list if t.get_status()]\n\n # create new trackers for unmatched_bbox_list\n for b_idx in unmatched_bbox_list:\n bbox = bbox_list[b_idx]\n depth = depth_list[b_idx]\n est_dict = {\n 'label': label_list[b_idx],\n 'score': score_list[b_idx]}\n self.t_list.append(tracker(self.t_cfg, self.tid_new, bbox, depth, est_dict))\n self.tid_new += 1\n\n t1 = datetime.now()\n logging.info('main pipeline (trk_upd): {}'.format(get_tdiff(t0, t1)))\n\n # disparity map for display\n return disp", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = 
[info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def compute_differences(pts_important, jparams):\n print(\"=== Computing differences ===\")\n start = time.time()\n print(\"start measuring time of compute_differences\")\n input_data = rasterio.open(jparams[\"input-file\"])\n out_profile = input_data.profile\n out_profile['dtype'] = 'float32'\n raw_data = input_data.read()\n PixelSizeX = input_data.transform[0]\n PixelSizeY = -input_data.transform[4]\n ###\n nodata_value = input_data.nodata\n ncols = input_data.width\n nrows = input_data.height\n shape = input_data.shape\n ###\n raster_pts = np.array(generate_raster_points(nrows, ncols, raw_data, nodata_value, PixelSizeX, PixelSizeY,0))\n ### generate the simplified TIN\n dt_2d = scipy.spatial.Delaunay([i[0:2] for i in pts_important])\n ###now let's compare them\n outlist = []\n linelist = []\n # print(ncols,nrows)\n # print(shape)\n # print(len(raster_pts))\n # print(len(raw_data[0][1]))\n col_counter = 0\n row_counter = 0\n for point in raster_pts:\n if point[2] == nodata_value:\n linelist.append(nodata_value)\n else:\n triangle_idx = dt_2d.find_simplex(point[0:2])\n if triangle_idx == -1:\n print(\"!!! 
WARNING: point outside convex hull of simplified dataset !!!\")\n linelist.append(nodata_value)\n else:\n interpolation = TIN_interpolator(pts_important, dt_2d, triangle_idx, point)\n linelist.append(point[2] - interpolation)\n #index counters\n col_counter +=1\n if col_counter == ncols:\n col_counter = 0\n outlist.append(linelist)\n linelist = []\n #print(diff_raster)\n #let's write the output file reusing the settings of the input file\n outputter = rasterio.open(jparams[\"output-file-differences\"], 'w', **out_profile)\n outputter.write(np.array([outlist]).astype(rasterio.float32))\n \n end = time.time()\n print(\"compute_differences takes \",end - start)", "def make_diff(file_before, file_after, file_output_name):\n if os.path.exists(file_output_name):\n shutil.rmtree(file_output_name)\n os.mkdir(file_output_name)\n psd_diff = diff(file_before, file_after)\n diff_content = {}\n for attr in [\"header\", \"layer\"]:\n diff_content[attr] = getattr(psd_diff, attr)\n with open(os.path.join(file_output_name, \"diff.json\"), \"w\") as diff_file:\n json.dump(diff_content, diff_file, indent=4)\n saved_files = []\n for layer_id in psd_diff.layer.keys():\n if len(psd_diff.layer_image[layer_id]) > 1:\n output_image = os.path.join(file_output_name, layer_id)\n psd_diff.layer_image[layer_id][\"before\"].save(output_image + \".before.png\")\n psd_diff.layer_image[layer_id][\"after\"].save(output_image + \".after.png\")\n diff_image_before = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"before\"].size)\n diff_image_before_data = diff_image_before.load()\n diff_image_after = Image.new(\"RGBA\", psd_diff.layer_image[layer_id][\"after\"].size)\n diff_image_after_data = diff_image_after.load()\n width, height = diff_image_before.size\n pixel_index = 1\n for y in xrange(height):\n for x in xrange(width):\n if str(pixel_index) in diff_content[\"layer\"][layer_id][\"pixel\"]:\n diff_image_before_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"before\"])\n diff_image_after_data[x, y] = tuple(diff_content[\"layer\"][layer_id][\"pixel\"][str(pixel_index)][\"after\"])\n else:\n diff_image_before_data[x, y] = (0, 0, 0, 0)\n diff_image_after_data[x, y] = (0, 0, 0, 0)\n pixel_index += 1\n diff_image_before.save(output_image + \".before.diff.png\", \"PNG\")\n diff_image_after.save(output_image + \".after.diff.png\", \"PNG\")\n saved_files.append(output_image + \".before.png\")\n saved_files.append(output_image + \".before.diff.png\")\n saved_files.append(output_image + \".after.diff.png\")\n saved_files.append(output_image + \".after.png\")\n saved_files.append(file_output_name + \"/diff.json\")\n return saved_files", "def update_22(db, filename_persist, snapshots_dir, snapshots_reference_dir):\n data = {\n # 'fail'\n 'test/test_pyglet_vb.py' : {\n 'st': 'fail', 'diag': 'incomplete grossini rendition at first frame'},\n\n # 'error'\n 'test/test_text_movement.py' : {\n 'st': 'error',\n 'diag': 'position should be set at the node level, not at the element level'},\n\n 'test/test_schedule_interval.py' : {\n 'st':'error', 'diag': 'bad timestamps, repeated snapshots'},\n\n 'test/test_transitions_with_pop_recipe.py' : {\n 'st':'error', 'diag': 'bad timestamps, repeated snapshots'},\n\n 'test/test_SequenceScene.py' : {\n 'st':'error', 'diag': 'bad timestamps, black frame'},\n\n 'test/test_camera_orbit.py' : {\n 'st':'error', 'diag': 'alternate snapshots are pure black'},\n\n 'test/test_jumptiles3d.py' : {\n 'st':'error', 'diag': \"snpshots don't folow changes in scene\"},\n\n 
'test/test_transition_zoom.py' : {\n 'st':'error', 'diag': 'bad timestamps, repeated snapshots'},\n }\n\n ren_key = {'st':'testrun_success', 'diag':'testrun_diagnostic'}\n testrun_props_by_candidate = {}\n for name in data:\n testrun_props_by_candidate[name] = dict([(ren_key[k], data[name][k]) for k in data[name]])\n \n hl.update_testrun__bad(db, filename_persist, testrun_props_by_candidate,\n snapshots_dir, snapshots_reference_dir)" ]
[ "0.5009308", "0.50050104", "0.47927547", "0.47090858", "0.4646092", "0.46367276", "0.4600246", "0.45939845", "0.4547362", "0.45332745", "0.45131713", "0.45066488", "0.44925582", "0.44843617", "0.44840214", "0.4477567", "0.4472213", "0.44629148", "0.4452782", "0.4435036", "0.44328246", "0.44308645", "0.44265833", "0.4421612", "0.44146124", "0.4413403", "0.4410658", "0.44066277", "0.4400675", "0.43965662" ]
0.5701247
0
Update dictionary from a collection of documents. Each document is a list of tokens.
def add_document_lists(self, docs):\n    for sent in docs:\n        sent = map(self.process_token, sent)\n        self._token_count.update(sent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword])\n self._token_count.update(sent)\n\n else:\n sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword]))\n self._token_count.update(sent)", "def add_to_dict(self, tokens):\n# TODO: ?add normalization of a token?\n for token in tokens:\n if self.embedding_words and (token not in self.embedding_words):\n continue\n self.freq[token] += 1\n if token not in self.tok2ind:\n index = len(self.tok2ind)\n self.tok2ind[token] = index\n self.ind2tok[index] = token", "def update(tokens):\n global TOKENS\n\n for token_id in tokens:\n\n if token_id not in TOKENS:\n TOKENS[token_id] = {}\n\n if isinstance(tokens, dict):\n token_info = tokens[token_id]\n if token_info is None:\n token_info = {}\n\n alias = token_info.get(\"alias\")\n if alias is not None:\n TOKENS[token_id][\"alias\"] = alias\n\n decimals = token_info.get(\"decimals\")\n if decimals is not None:\n TOKENS[token_id][\"decimals\"] = decimals", "def intern_documents(documents: Dict[str, List[List[str]]], word_interner: Dict[str, int], unk_token: str):\n ret = dict()\n unk = word_interner[unk_token]\n for docid, sentences in documents.items():\n ret[docid] = [[word_interner.get(w, unk) for w in s] for s in sentences]\n return ret", "def update_existing(doc_data_tples):\n def per_doc(doc, data_tples):\n def per_field(data_tple):\n field, datas = data_tple\n map(_do_append_field(doc, field), datas)\n map(per_field, data_tples)\n return doc\n\n __docs = ( (per_doc(doc, data_tples), data_tples) for doc,data_tples in doc_data_tples )\n return __docs", "def updateDocumentAll(self, documents):\n docs = []\n for document in documents:\n if isinstance(document, couch.Document):\n document = document.getData()\n\n # these are required params\n if \"_id\" not in document or \"_rev\" not in document:\n raise Exception(\"Both _id & _rev fields are required!\")\n\n docs.append(document)\n\n return self.client.post(self.name +\"/_bulk_docs\", None,\n {\"docs\": docs}).getBodyData()", "def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)", "def preprocess(self, documents):\n\n # A dict storing the frequency of each word\n word_freq = {}\n\n # Iterate for each document\n for doc in documents:\n # Split the document into a list of words and iterate on it\n for w in extract_words(doc):\n # Update word frequencies\n '''YOUR CODE HERE'''\n if w not in word_freq.keys():\n word_freq[w] = 1\n else:\n word_freq[w] += 1\n\n ''' END CODE FOR THIS LOOP '''\n\n\n # A set of words with frequency less than 'self.min_freq'\n remove_words = set()\n\n # Check frequency of each word and add to 'remove_words'\n # if it's frequency is below self.min_freq\n\n ''' YOUR CODE HERE '''\n for w in word_freq.keys():\n if word_freq[w] < self.min_freq:\n remove_words.add(w)\n\n # Delete the words in 'remove_words' from 'word_freq'\n for w in remove_words:\n del word_freq[w]\n\n # Fill 'self.word_to_idx' and 'self.idx_to_word' for\n # each word in 'word_freq' (dicts are explained above)\n\n i = 0\n for w in word_freq.keys():\n self.word_to_idx[w] = i\n self.idx_to_word[i] = w \n i += 1\n\n ''' END YOUR CODE HERE '''", 
"def fit(self, documents):\n # Get a list of all the unique tokens that appear\n vocab = list({\n token for doc in documents\n for token in self.tokenizer(doc)\n if token not in self._word2index\n })\n\n # This is UNK, START, END, and PAD.\n nb_special_tokens = 4\n\n # First, we map token -> ID, leaving the first slots for special tokens\n self._word2index.update({\n word: idx\n for idx, word in enumerate(vocab, nb_special_tokens)\n })\n\n # Next, we invert this map, which we can do since it was built from\n # unique vocabulary elements and is by definition bijective.\n self._index2word.update({\n idx: word\n for word, idx in self._word2index.items()\n })\n\n return self", "def multiple_document_processing(self) -> List:\n batch_list = []\n for doc, idx in self.__documents:\n entities_idx = {'idx': idx}\n entities_result = self.create_entity(document=doc)\n word_cleaned = self.clean_words(doc)\n entities_idx[self.key_spacy_text] = str(word_cleaned)\n entities_idx.update(entities_result)\n batch_list.append(entities_idx)\n return batch_list", "def get_doc_dicts(self, doc_ids):\n pass", "def updateMultipleDocuments(cred, payload):\n\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n data['writes'].append(pathData)\n \n makeRequest(cred, url, 'POST', data)", "def add_doc_in_posting_list(word_posting_list, docs):\n for doc_score in docs:\n if doc_score[\"doc\"] in word_posting_list.keys():\n word_posting_list[doc_score[\"doc\"]] = int(doc_score[\"score\"]) + int(word_posting_list[doc_score[\"doc\"]])\n else:\n word_posting_list[doc_score[\"doc\"]] = doc_score[\"score\"]", "def load(self, documents, uniquify=False):\n assert documents, \"missing list of documents, text single doc per line\"\n assert isinstance(documents, list), \"documents must be list\"\n assert isinstance(documents[0], list), \"each document is also a list\"\n #--------------------------------------------------------------------------------------------\n\n def _get_new_counts(document):\n return Counter(document) if not uniquify else Counter(list(set(document)))\n\n for idx, document in enumerate(documents):\n new_counter = _get_new_counts(document)\n self.counter.update(new_counter)\n if idx % 1000 == 0:\n print(\"load: {}\\r\".format(idx), end='')\n return self", "def parse_doc(self, doc_as_list):\n\n tweet_id = doc_as_list[0]\n tweet_date = doc_as_list[1]\n full_text = doc_as_list[2]\n url = doc_as_list[3]\n indice = doc_as_list[4]\n retweet_text = doc_as_list[5]\n retweet_url = doc_as_list[6]\n retweet_indice = doc_as_list[7]\n quote_text = doc_as_list[8]\n quote_url = doc_as_list[9]\n quoted_indice = doc_as_list[10]\n retweet_quoted_text = doc_as_list[11]\n retweet_quoted_url = doc_as_list[12]\n retweet_quoted_indice = doc_as_list[13]\n\n term_dict = {}\n\n tokenized_text = self.parse_sentence(full_text)\n tokenized_quote = self.parse_sentence(quote_text)\n # tokenized_url = self.handle_url(url)\n\n\n doc_length = len(tokenized_text) # after text operations - length of full_text\n\n new_tokenized_text = tokenized_text + tokenized_quote\n\n # spell checker\n # new_tokenized_text = self.spell.update(new_tokenized_text)\n\n for term in new_tokenized_text:\n if term is not \"\": # or (term.isalpha() and len(term) == 1)\n if term not in term_dict:\n term_dict[term] = 1\n else:\n term_dict[term] += 1\n\n document = Document(tweet_id, tweet_date, full_text, url, retweet_text, retweet_url, 
quote_text,\n quote_url, term_dict, doc_length)\n return document", "def prepare_dictionary_from_docs(self):\n if os.path.exists(self.DICT_PATH):\n return True\n self.logger.info(\"START PREPARING DICT\")\n for fn in os.listdir(self.wiki_path):\n self.logger.info(\"dict update {0}\".format(fn))\n content = self.get_processed_content(fn)\n self.dictionary.add_documents([content])\n self.dictionary.filter_extremes(no_below=20, no_above=0.1, keep_n=100000)\n self.dictionary.compactify()\n self.dictionary.save(self.DICT_PATH)\n return True", "def get_dict(cleaned_docs):\n data = []\n for doc in cleaned_docs:\n data += doc\n return list(set(data))", "def transform(self, docs):\n return [doc for doc in docs]", "def doc2bow(self, document, allow_update=False, return_missing=False):\n\n doc=[t.text for t in document.tokens]\n\n if isinstance(doc, string_types):\n raise TypeError(\"doc2bow expects an array of unicode tokens on input, not a single string\")\n\n # Construct (word, frequency) mapping.\n counter = defaultdict(int)\n for w in doc:\n counter[w if isinstance(w, str) else str(w, 'utf-8')] += 1\n\n token2id = self.token2id\n if allow_update or return_missing:\n missing = sorted(x for x in iteritems(counter) if x[0] not in token2id)\n if allow_update:\n for w, _ in missing:\n # new id = number of ids made so far;\n # NOTE this assumes there are no gaps in the id sequence!\n token2id[w] = len(token2id)\n result = {token2id[w]: freq for w, freq in iteritems(counter) if w in token2id}\n\n if allow_update:\n self.num_docs += 1\n self.num_pos += sum(itervalues(counter))\n self.num_nnz += len(result)\n # keep track of document and collection frequencies\n for tokenid, freq in iteritems(result):\n self.cfs[tokenid] = self.cfs.get(tokenid, 0) + freq\n self.dfs[tokenid] = self.dfs.get(tokenid, 0) + 1\n\n # return tokenids, in ascending id order\n result = sorted(iteritems(result))\n if return_missing:\n return result, dict(missing)\n else:\n return result", "def _build_token_dict(self, corpus: List[List[str]]):\n self.token2idx = self.load_from_vocab_file(self.vocab_path)\n self.idx2token = dict([(value, key)\n for key, value in self.token2idx.items()])\n logging.debug(f\"build token2idx dict finished, contains {len(self.token2idx)} tokens.\")\n self.dataset_info['token_count'] = len(self.token2idx)", "def docs2ids(self):\n self.docs = [ [self.vocab[word] for word in doc] for doc in self.docs]", "def update_all(cls, documents: List[dict]) -> (List[dict], List[dict]):\n if not documents:\n raise ValidationFailed([], message=\"No data provided.\")\n\n if not isinstance(documents, list):\n raise ValidationFailed(documents, message=\"Must be a list.\")\n\n new_documents = copy.deepcopy(documents)\n\n errors = cls.validate_and_deserialize_update(new_documents)\n if errors:\n raise ValidationFailed(documents, errors)\n\n try:\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Updating {new_documents}...\")\n previous_documents, updated_documents = cls._update_many(new_documents)\n if cls.logger.isEnabledFor(logging.DEBUG):\n cls.logger.debug(f\"Documents updated to {updated_documents}.\")\n return (\n [cls.serialize(document) for document in previous_documents],\n [cls.serialize(document) for document in updated_documents],\n )\n except pymongo.errors.DuplicateKeyError:\n raise ValidationFailed(\n [cls.serialize(document) for document in documents],\n message=\"One document already exists.\",\n )", "def load_documents(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:\n 
if os.path.exists(os.path.join(data_dir, 'docs.jsonl')):\n assert not os.path.exists(os.path.join(data_dir, 'docs'))\n return load_documents_from_file(data_dir, docids)\n\n docs_dir = os.path.join(data_dir, 'docs')\n res = dict()\n if docids is None:\n docids = sorted(os.listdir(docs_dir))\n else:\n docids = sorted(set(str(d) for d in docids))\n for d in docids:\n with open(os.path.join(docs_dir, d), 'r') as inf:\n lines = [l.strip() for l in inf.readlines()]\n lines = list(filter(lambda x: bool(len(x)), lines))\n tokenized = [list(filter(lambda x: bool(len(x)), line.strip().split(' '))) for line in lines]\n res[d] = tokenized\n return res", "def load_documents_from_file(data_dir: str, docids: Set[str] = None) -> Dict[str, List[List[str]]]:\n docs_file = os.path.join(data_dir, 'docs.jsonl')\n documents = load_jsonl(docs_file)\n documents = {doc['docid']: doc['document'] for doc in documents}\n res = dict()\n if docids is None:\n docids = sorted(list(documents.keys()))\n else:\n docids = sorted(set(str(d) for d in docids))\n for d in docids:\n lines = documents[d].split('\\n')\n tokenized = [line.strip().split(' ') for line in lines]\n res[d] = tokenized\n return res", "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def _create_dictionary(self, document_set):\n words = self._normalize_words(document_set.words)\n unique_words = frozenset(words)\n return dict((word, idx) for idx, word in enumerate(unique_words))", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret", "def clean_docs(self,docs):\n\n # Remove numbers, but not words that contain numbers.\n docs = [[token for token in doc if not token.isnumeric()] for doc in docs]\n\n # Remove words that are only one character.\n docs = [[token for token in doc if len(token) > 1 and token not in stop_words] for doc in docs]\n\n # lemmatizer = WordNetLemmatizer()\n # docs = [[lemmatizer.lemmatize(token) for token in doc] for doc in docs]\n\n # Add bigrams and trigrams to docs (only ones that appear 20 times or more).\n bigram = Phrases(docs, min_count=20)\n for idx in range(len(docs)):\n for token in bigram[docs[idx]]:\n if '_' in token:\n # Token is a bigram, add to document.\n docs[idx].append(token)\n\n # Create a dictionary representation of the documents.\n dictionary = Dictionary(docs)\n\n # Filter out words that occur less than 20 documents, or more than 50% of the documents.\n dictionary.filter_extremes(no_below=20, no_above=0.5)\n\n # Bag-of-words representation of the documents.\n corpus = [dictionary.doc2bow(doc) for doc in docs]\n\n return docs,dictionary,corpus", "def add_new_doc(self, document, 
documents_list_length=10000):\n\n try:\n document_dictionary = document.term_doc_dictionary\n # self.countDoc += 1\n for term in document_dictionary.keys():\n if self.stemming == 'y':\n my_stemmer = Stemmer()\n term = my_stemmer.stem_term(term)\n # Update inverted index and posting\n if term not in self.inverted_idx.keys():\n self.inverted_idx[term] = [1, [\n (document_dictionary[term], document.tweet_id)]] # amount of doc, freq in the doc, doc id.\n\n else:\n self.inverted_idx[term][0] += 1 # amount of doc\n self.inverted_idx[term][1].append((document_dictionary[term],\n document.tweet_id)) # freq in the doc # doc id\n\n if term not in self.postingDict.keys():\n self.postingDict[term] = [(document.tweet_id, document_dictionary[term])]\n else:\n self.postingDict[term].append((document.tweet_id, document_dictionary[term]))\n # self.countTweet -= 1\n\n if document.tweet_id not in self.tweet_dict.keys():\n self.tweet_dict[document.tweet_id] = [[term, document_dictionary[term]], 1,\n 0] # [term,freq in tweet], amount of unique terms in tweet, amount of terms in tweet\n elif document_dictionary[term] > self.tweet_dict[document.tweet_id][0][\n 1]: # tweet exist, compering between freq in two terms\n if self.tweet_dict[document.tweet_id][0][\n 1] == 1: # before change term check if the last term is unique\n self.tweet_dict[document.tweet_id][\n 1] += 1 # last term is unique: add to the amount of uniqe terms in tweet\n self.tweet_dict[document.tweet_id][0] = [term,\n document_dictionary[term]] # change between the terms\n self.tweet_dict[document.tweet_id][2] += 1\n elif document_dictionary[term] == 1: # tweet exist, not most common, check if unique\n self.tweet_dict[document.tweet_id][1] += 1\n self.tweet_dict[document.tweet_id][2] += 1\n except:\n # print('problem in indexer : add_new_doc')\n # print(traceback.print_exc())\n pass" ]
[ "0.68998206", "0.67045903", "0.63965476", "0.6270014", "0.6202376", "0.61885744", "0.6151995", "0.60434496", "0.60377157", "0.59981596", "0.59295934", "0.58968425", "0.5878697", "0.5846581", "0.5829846", "0.57702076", "0.57415456", "0.5731184", "0.571159", "0.56422395", "0.56206626", "0.56158304", "0.56098443", "0.5605549", "0.56044024", "0.5601467", "0.5599388", "0.5561202", "0.5560279", "0.55516464" ]
0.6872754
1
Get the list of token_id given doc.
def doc2id(self, doc):\n    if isinstance(doc, string_types):\n        raise TypeError("doc2idx expects an array of unicode tokens on input, not a single string")\n    doc = map(self.process_token, doc)\n    return [self.token_to_id(token) for token in doc]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def doc2id(self, doc):\n doc = map(self.process_token, doc)\n return [self.token_to_id(token) for token in doc]", "def get_tokens(self, document):\n raise NotImplementedError()", "def doc2token(self, doc):\n return [self.word2idx[word] if self.word2idx.__contains__(word)\n else self.word2idx['UNK'] for word in doc]", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def get_doc_ids(self):\n cursor = self.connection.cursor()\n cursor.execute(\"SELECT id FROM documents\")\n results = [r[0] for r in cursor.fetchall()]\n cursor.close()\n return results", "def id2doc(self, ids):\n return [self.id_to_token(idx) for idx in ids]", "def id2doc(self, ids):\n return [self.id_to_token(idx) for idx in ids]", "def get_ids(self) -> List[str]:", "def tokens(self):\n return self.rpc.call(MsfRpcMethod.AuthTokenList)['tokens']", "def keys(self):\n return list(self.token2id.values())", "def get_ids(self,tokens, tokenizer, max_seq_length):\n token_ids = tokenizer.convert_tokens_to_ids(tokens,)\n input_ids = token_ids + [0] * (max_seq_length-len(token_ids))\n return input_ids", "def convert_ids_to_tokens(self, tok_ids):\n result = []\n for tok in tok_ids:\n word = self.itos(tok)\n result.append(word)\n return result", "def tok2idx_data(token2id, tok_data):\n idx_data = []\n for toks in tok_data:\n idx_lst = [\n token2id[tok] if tok in token2id else UNK_IDX for tok in toks]\n idx_data.append(idx_lst)\n return idx_data", "def convert_tokens_to_ids(self, tokens):\n ids = []\n for token in tokens:\n ids.append(self.vocab[token])\n if len(ids) > self.max_len:\n logger.warning('Token indices sequence length is longer than the specified maximum sequence length for this BERT model ({} > {}). 
Running this sequence through BERT will result in indexing errors'.format(len(ids), self.max_len))\n return ids", "def list_ids(token):\n\n init_tenant_context(token, db)\n\n data = []\n LOGGER.debug(f\" Fetching list with known devices\")\n for id in db.session.query(Device.id).all():\n data.append(id[0])\n return data", "def extarct_id_tf(docs):\n\n if len(docs) == 0:\n return []\n docs = docs.split(',')\n ret = []\n for doc in docs:\n doc = doc.split('|')\n # doc_id, tf\n ret.append((int(doc[0]), int(doc[1])))\n return ret", "def list_tokens(user):\n return AppSpecificAuthToken.select().where(AppSpecificAuthToken.user == user)", "def list(uid: int):\n\n return Token.list(uid)", "def convert_tokens_to_ids(self, tokens):\n ids = []\n if isinstance(tokens, str):\n if tokens in self.special_tokens:\n return self.special_tokens[tokens]\n else:\n return self.encoder.get(tokens, self.unk_id)\n for token in tokens:\n if token in self.special_tokens:\n ids.append(self.special_tokens[token])\n else:\n ids.append(self.encoder.get(token, self.unk_id))\n return ids", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_tokens(self) -> List[str]:\n return self.tokens", "def getReviewsWithToken(self, token):\n\n wordid = self.find_word_in_dictionary(token)\n # word is not in the dictionary\n if wordid == -1:\n print(\"Token is not in the dictionary\")\n return 0\n\n with open(self.doc_to_words_path, 'rb') as bin:\n tup = []\n while bin.tell() != os.fstat(bin.fileno()).st_size:\n # get wordid:\n docid_in_file = int.from_bytes(bin.read(4), 'big')\n # get frequency:\n frequency = int.from_bytes(bin.read(4), 'big')\n # count words:\n count = 0\n for i in range(frequency):\n wordid_in_file = int.from_bytes(bin.read(4), 'big')\n if wordid == wordid_in_file:\n count += 1\n tup.append(docid_in_file)\n tup.append(count)\n return tuple(tup)", "def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:\n token_list = self.tokenizer.ids_to_tokens(tokens)\n return token_list", "def _get_vocab_id_list(self, json_obj):\n return json_obj", "def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self._vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids", "def get_document_tags(self, docid):\n return [(key, json.loads(value))\n for key, value\n in self.sql_session.query(Feature)\n .filter(Feature.document == docid)\n .values(Feature.key, Feature.value)]", "def get_token_list():\n token_list = []\n tokens_dir_path = os.path.join(BASE_DIR, TOKENS_DIR)\n for dir, dirs, files in os.walk(tokens_dir_path):\n for file_name in files:\n file = open(os.path.join(tokens_dir_path, file_name), 'r')\n token_list.append(file.read().strip())\n file.close()\n return token_list", "def convert_ids_to_tokens(self, ids):\n tokens = []\n for i in ids:\n tokens.append(self.ids_to_tokens[i])\n return tokens", "def _get_doc_ids(dir_id, docname):\n cur = conn.cursor(cursor_factory=pgx.RealDictCursor)\n querystring = ('select id, source_docid, target_docid from {} where dir_id = %s and docname = %s;'\n result = execute_query(querystring.format(TABLES[3])), (dir_id, docname))\n if result:\n return result['id'], result['source_docid'], result['target_docid']\n return None, None, None", "def get_extent_token_ids(self, 
**kwargs):\n token_span = self.get_extent_tokens(**kwargs)\n return [t.index for t in token_span]" ]
[ "0.71084183", "0.6638949", "0.64065033", "0.6308615", "0.6308615", "0.6248695", "0.6248695", "0.616698", "0.61223453", "0.60448986", "0.6016193", "0.6015829", "0.6009144", "0.59384847", "0.5869318", "0.5861415", "0.5859866", "0.5852833", "0.58410364", "0.5834465", "0.5816366", "0.58008885", "0.58006424", "0.5793585", "0.5777246", "0.5777149", "0.5776924", "0.57598704", "0.5750446", "0.5728729" ]
0.6642477
1
Get the token_id of given token.
def token_to_id(self, token): token = self.process_token(token) return self.token2id.get(token, len(self.token2id) - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def token_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"token_id\")", "def map_token_to_id(self, token: str):\n if token not in self._token_to_id:\n token = self._unk_token\n return self._token_to_id[token]", "def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")", "def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(str(token))", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(token)", "def _convert_token_to_id(self, token):\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n spm_id = self.sp_model.PieceToId(token)\n\n # Need to return unknown token if the SP model returned 0\n return spm_id + self.fairseq_offset if spm_id else self.unk_token_id", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def try_get_user_id_from_token(token):\n dot_index = token.find('.')\n if (dot_index > 0):\n token_base64 = token[:dot_index]\n \n try:\n token_string = b64decode(token_base64)\n except Base64DecodeError:\n user_id = 0\n else:\n try:\n user_id = int(token_string)\n except ValueError:\n user_id = 0\n else:\n user_id = 0\n \n return user_id", "def token(cls, token):\n user_db = User.get_by('token', token)\n if not user_db:\n raise ValueError('Sorry, your token is either invalid or expired.')\n return token", "def get_token(self):\n token = self._session.token\n return token", "def get_user_id(jwt_token):\n return (\n jwt_token.payload[\"user\"].get(\"id\")\n if jwt_token.payload.get(\"user\")\n else jwt_token.payload[\"session_id\"]\n )", "def token(self):\n return self[\"token\"]", "def LookupToken(self, dmtoken):\n self.ReadClientStateFile()\n return self._registered_tokens.get(dmtoken, None)", "def _get_auth_token(self):\n\n __logger__.debug(\"Getting auth Token\")\n return self.keystone_client.auth_ref['token']['id']", "def vpp_token_id(self):\n if \"vppTokenId\" in self._prop_dict:\n return self._prop_dict[\"vppTokenId\"]\n else:\n return None", "def get_token(self):\n token_model = TokenModel.find_by_user_id(self.id)\n return token_model.token if token_model else None", "def verify_token(self, token: str) -> str:\n return decode(self.rd.hget(\"auth:by_token\", token))", "def get_token_id(self):\n return f\"{self.document_title}_{self.index}\"", "def get_current_uid():\n # TODO: Find a better way to access the token\n return request.token['id']", "def get_token(self):\n\n return self._token", "def get_token(self):\n return self.__token", "def get_token(self):\n return self.__token", "def get(uid: int, token_id: int):\n\n token = Token.get(uid, token_id).as_dto().to_primitive()\n\n if token:\n return token.to_primitive()\n else:\n raise NotFound(\"Token Not Found\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token():\n return os.environ.get('TOKEN', None)" ]
[ "0.8310672", "0.7980398", "0.79235566", "0.7840283", "0.7578476", "0.730748", "0.71591955", "0.7151534", "0.7099397", "0.6803577", "0.66393685", "0.65554255", "0.6520694", "0.64726806", "0.64626735", "0.6437027", "0.6431865", "0.6408412", "0.63829017", "0.6313626", "0.62993306", "0.6289849", "0.62813705", "0.62648714", "0.62648714", "0.62607694", "0.62452286", "0.62452286", "0.62452286", "0.6183551" ]
0.8298625
1
tokenid to token (string).
def id_to_token(self, idx): return self._id2token[idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_id_to_token(self, id: int):\n return self._id_to_token[id]", "def token_to_id(self, token):\r\n return self.encoder.get(token, self.encoder.get(self.unk_token))", "def token_to_id(self, token):\n token = self.process_token(token)\n return self.token2id.get(token, len(self.token2id) - 1)", "def _convert_id_to_token(self, index):\n if index in self.fairseq_ids_to_tokens:\n return self.fairseq_ids_to_tokens[index]\n return self.sp_model.IdToPiece(index - self.fairseq_offset)", "def token_to_id(self, token):\n token = self.process_token(token)\n return self._token2id.get(token, len(self._token2id) - 1)", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(str(token))", "def id_to_token(self, index):\r\n return self.decoder.get(index)", "def _convert_id_to_token(self, index, return_unicode=True):\n token = self.sp_model.IdToPiece(index)\n return token", "def token(self) -> str:", "def _convert_id_to_token(self, index):\n return self.reverse_vocab.get(index, self.unk_token)", "def _convert_id_to_token(self, index, return_unicode=True):\n token = self.sp_model.IdToPiece(index)\n if six.PY2 and return_unicode and isinstance(token, str):\n token = token.decode('utf-8')\n return token", "def token_id_hex(self) -> str: # this is *ALSO* a MINT property\n return self.token_id.hex()", "def _convert_token_to_id(self, token):\n return self.sp_model.PieceToId(token)", "def get_token(self, token_id):\n raise exception.NotImplemented() # pragma: no cover", "def process_id_to(self):\r\n return self._tokens[3]", "def generate_token_string(token):\n if JWT_AUTH:\n return 'JWT {}'.format(token)\n else:\n return 'Token {}'.format(token)", "def token_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"token_id\")", "def _convert_token_to_id(self, token):\n return self.vocab.get(token, self.vocab.get(self.unk_token))", "def token2String(self,tokens):\n return self._support.token2String(tokens)", "def make_token(self, data: object) -> str:\n return self.serializer.dumps(data)", "def get_token_id(self):\n return f\"{self.document_title}_{self.index}\"", "def token(self, id):\r\n return Token(self, id)", "def make_token():\n return secrets.token_urlsafe(36)", "def map_token_to_id(self, token: str):\n if token not in self._token_to_id:\n token = self._unk_token\n return self._token_to_id[token]", "def token(db):\n user = User.find_by_identity('[email protected]')\n return user.serialize_token()", "def tostr(token):\n if token is True:\n return '#t'\n if token is False:\n return '#f'\n if isa(token, Symbol):\n return token\n if isa(token, str):\n import json\n return json.dumps(token)\n if isa(token, complex):\n result = str(token).replace('j', 'i')\n if result.find('(') < 0:\n return result\n return result[1:-1]\n if isa(token, list):\n return '(' + ' '.join(map(tostr, token)) + ')'\n return str(token)", "def _convert_token_to_id(self, token):\n if token in self.fairseq_tokens_to_ids:\n return self.fairseq_tokens_to_ids[token]\n spm_id = self.sp_model.PieceToId(token)\n\n # Need to return unknown token if the SP model returned 0\n return spm_id + self.fairseq_offset if spm_id else self.unk_token_id", "def odb_token():\n return genToken()", "def token(self) -> str:\n return pulumi.get(self, \"token\")", "def token(self) -> str:\n return pulumi.get(self, \"token\")" ]
[ "0.7235237", "0.69436103", "0.6893987", "0.6749872", "0.67123127", "0.6693682", "0.6675571", "0.6672998", "0.6606152", "0.66021", "0.65837336", "0.6532288", "0.6484542", "0.64636326", "0.6327243", "0.62979484", "0.6255819", "0.6240919", "0.6239636", "0.6181812", "0.6127101", "0.61171526", "0.60637593", "0.6061362", "0.6053765", "0.60517514", "0.6048818", "0.6044519", "0.6042805", "0.6042805" ]
0.7267592
1
Delete the current trigger.
def delete(self): request = self.triggers_service.delete(path=self._path) request.execute()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _delTrigger(self, message: IRCMessage) -> IRCResponse:\n triggerName = message.parameterList[1]\n if triggerName in self.storage:\n del self.storage[triggerName]\n return IRCResponse(f\"Trigger {triggerName} deleted!\", message.replyTo)\n else:\n return IRCResponse(f\"No trigger named {triggerName} exists.\", message.replyTo)", "def delete_trigger(self, trigger_id):\n self._delete(path=\"triggers/{}\".format(trigger_id))", "def create_delete_trigger(self):\n self.execute(self.commands.delete_function(\n dest_table=self.name,\n pk_col=self.primary_key_column\n ))\n\n self.execute(self.commands.delete_trigger(\n self.triggers['DELETE'],\n self.source.name,\n self.name\n ))", "def clear(self):\n if os.path.isfile(self._trigger_file):\n os.remove(self._trigger_file)\n logger.debug(\"Removed preview update trigger: %s\", self._trigger_file)", "def delete(self):\n\t\tdel self.scheduler.find(self)\n\t\tdel self", "def delete(self, trigger_id):\n try:\n self._client.delete(self._full_path(trigger_id))\n return False\n except InvalidJSONError:\n return True", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n self.current_revision.delete()", "def deleteOrDelay(self):\n self.delete()", "def delete(self):\n self._instance.delete()\n self._instance = None\n self._data_defs = []", "def __macroDelete(self):\n self.activeWindow().macroDelete()", "def test_remove_trigger(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death, name='on_death')\n self.client.add_trigger(trigger)\n self.assertEqual(len(self.client.triggers), 1)\n self.client.remove_trigger('on_death')\n self.assertEqual(len(self.client.triggers), 0)\n with self.assertRaises(KeyError):\n self.client.remove_trigger('does_not_exist')", "def delete(self):\n # type: () -> BoundAction\n return self._client.delete(self)", "def drop_trigger(self, trig):\n self.vr_trig_queue.put((trig,'done'))", "def delete(self):\n ...", "def delete(self):\n self._client.delete(self)", "def delete(self):\n with self.locked():\n self.path.delete()", "def delete(self):\n with self.locked():\n self.path.delete()", "def delete(self) -> None:\n self.pop()", "def delete():", "def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')", "def delete(self):\n os.system(\"rm \"+self._name)", "def delete(self):\n\t\t#self.log.info(\"Deleting file {}\".format(self._filepath))\n\t\tos.remove(self._filepath)", "def delete(self):\r\n delete_tracks(self.project, [self])", "def delete_at_index(self, idx):\n del self.timeseries[idx]\n del self.freq[idx]\n del self.ch_name[idx]\n del self.units[idx]\n\n if self.trigger_idx == idx:\n LGR.warning(\"Removing trigger channel - are you sure you are doing\" \"the right thing?\")\n self.trigger_idx = 0", "def delete(self):\n # exit contains our clean up code\n self.exit()\n GenericAnimatedProp.GenericAnimatedProp.delete(self)", "def delete_template(self):\n try:\n os.remove(self.path)\n except Exception:\n pass" ]
[ "0.7167729", "0.70552254", "0.6779274", "0.6583904", "0.655758", "0.64790803", "0.6461914", "0.6461914", "0.6461914", "0.6461914", "0.645271", "0.64262265", "0.62824124", "0.6266517", "0.62515134", "0.62503153", "0.62478745", "0.62462974", "0.6191452", "0.61563873", "0.61563873", "0.61363184", "0.6109357", "0.6104194", "0.6091601", "0.60879666", "0.60828376", "0.60796404", "0.6074667", "0.60692894" ]
0.8082834
0
Create and return a D > D0 pi Selection object.
def makeDstar2D0Pi( name , config , DecayDescriptor , inputSel ) : daugCuts = "(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)" % locals()['config'] combCuts = "((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)" % locals()['config'] dstarCuts = "(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)" \ "& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)" % locals()['config'] _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor , DaughtersCuts = { "pi+" : daugCuts } , CombinationCut = combCuts , MotherCut = dstarCuts ) return Selection( name+'Sel', Algorithm = _Dstar, RequiredSelections = inputSel )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_selection ( self ,\n tag , \n algotype ,\n inputs , \n *args ,\n **kwargs ) :\n sel_tag = '%s_Selection' % tag\n sel_name = 'Sel%sFor%s' % ( tag , self.name() )\n #\n ## check existing selection\n #\n sel = self._selection ( sel_tag )\n if sel : return sel \n\n #\n ## adjust a bit the arguments\n if not kwargs.has_key('Preambulo') :\n kwargs ['Preambulo' ] = self['Preambulo']\n\n if not kwargs.has_key( 'ParticleCombiners' ) :\n kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' } \n \n # \n ## use \"simple-selection\"\n #\n from PhysSelPython.Wrappers import SimpleSelection\n sel = SimpleSelection (\n sel_name ,\n algotype ,\n inputs , \n *args ,\n **kwargs )\n # \n return self._add_selection( sel_tag , sel )", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def create(cls, selection):\n\t\t\n\t\treturn cls({ true_selector: selection, false_selector: Selection.invert(selection) })", "def from_selection(\n class_,\n selection,\n item_class=None,\n ):\n import abjad\n pitch_segment = abjad.PitchSegment.from_selection(selection)\n return class_(\n pitch_segment,\n item_class=item_class,\n )", "def makeDefault(name,inputSel) :\n from Configurables import OfflineVertexFitter\n Detached4mu = CombineParticles(\"Combine\"+name)\n Detached4mu.DecayDescriptor = \"B_s0 -> mu+ mu- mu+ mu-\"\n # Set the OfflineVertexFitter to keep the 4 tracks and not the J/Psi Kstar:\n Detached4mu.addTool( OfflineVertexFitter )\n Detached4mu.ParticleCombiners.update( { \"\" : \"OfflineVertexFitter\"} )\n Detached4mu.OfflineVertexFitter.useResonanceVertex = False\n Detached4mu.ReFitPVs = True\n Detached4mu.DaughtersCuts = { \"mu+\" : \"(TRCHI2DOF < 2.5 ) \"\\\n \" & (MIPCHI2DV(PRIMARY)> 9.)\"}\n \n Detached4mu.CombinationCut = \"(ADAMASS('B_s0')<1000*MeV) \"\\\n \"& (AMAXDOCA('')<0.3*mm)\"\n Detached4mu.MotherCut = \"(VFASPF(VCHI2/VDOF)<9) \"\\\n \"& (BPVDIRA > 0) \"\\\n \"& (BPVVDCHI2>100)\"\\\n \" & (M>4366.3) & (M<6366.3)\"\\\n \"& (BPVIPCHI2()< 25) \"\n \n\n return Selection (name,\n Algorithm = Detached4mu,\n RequiredSelections = inputSel)", "def make_odorant_selector(name):\n return dcc.Input(\n id=\"cid_%s\" % name,\n placeholder=\"Enter a PubChem ID number...\",\n type=\"number\",\n value=None,\n )", "def makePseudoPsi( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n _daugCuts = \"(PT> %(D0PtLoose)s*MeV)\" % locals()['config']\n _combCuts = \"(APT> %(D0PtLoose)s*MeV)\" % locals()['config']\n\n _Psi = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"D0\": _daugCuts }\n , CombinationCut = _combCuts\n , MotherCut = \"(VFASPF(VCHI2PDOF) < 10000)\"\n )\n\n return Selection( name+'Sel',\n Algorithm = _Psi,\n RequiredSelections = inputSel\n )", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def __init__(self,initial_v,v_select=0,max_dev_semitones=1):\n self.v=initial_v\n self.v_select=v_select\n self.max_dev_semitones=max_dev_semitones", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Geometric'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, pvID, pvP, pvQ, pvDescriptor):\n\n # TODO: implement this", "def 
makeDstarPartial( name\n , config\n , DecayDescriptor\n , inputSel\n ) :\n\n daugCuts = \"(TRCHI2DOF < %(Daug_TRCHI2DOF_MAX)s)\" % locals()['config']\n combCuts = \"((AM - AM1) < %(Dstar_AMDiff_MAX)s* MeV)\" % locals()['config']\n dstarCuts = \"(VFASPF(VCHI2/VDOF) < %(Dstar_VCHI2VDOF_MAX)s)\" \\\n \"& ((M - M1) < %(Dstar_MDiff_MAX)s* MeV)\" % locals()['config']\n\n _Dstar = CombineParticles( DecayDescriptor = DecayDescriptor\n , DaughtersCuts = { \"pi+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = dstarCuts\n )\n\n return Selection( name+'Sel',\n Algorithm = _Dstar,\n RequiredSelections = inputSel\n )", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def createSelector2(self,type='select',speed=2.0):\n self.selector2 = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector2.hide()\n ival = self.selector2.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def __init__(self):\n super().__init__()\n self.p = 0.0\n self.type = 'Bernoulli'\n self.distType = 'Discrete'\n self.lowerBound = 0.0\n self.upperBound = 1.0\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def __init__(self, P, I, D, dt):\n\n\t\tself._Kp = P\n\t\tself._Ki = I\n\t\tself._Kd = D\n\t\tself._dt = dt", "def curve_through_selection(*args):\n sel = cmds.ls(sl=True, fl=True)\n if not sel or len(sel)==1:\n cmds.warning(\"You need to select multiple things to create curve through!\")\n return()\n\n pList = []\n crvType = cmds.radioButtonGrp(widgets[\"crvSelRBG\"], q=True, sl=True)\n\n for obj in sel:\n if cmds.objectType(obj) in [\"transform\"]:\n pos = cmds.xform(obj, q=True, ws=True, rp=True)\n pList.append(pos)\n elif obj in cmds.filterExpand(sm=[28, 30, 31, 32, 34, 46]):\n pos = cmds.pointPosition(obj)\n pList.append(pos)\n\n #add points if only 2 (cv, ep) or 3 (cv) are given, and create the curve\n if crvType == 1:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n if len(pList) == 3:\n vec1 = [pList[1][0]-pList[0][0], pList[1][1]-pList[0][1], pList[1][2]-pList[0][2]]\n newPt1 =[pList[0][0] + (vec1[0]*0.05), pList[0][1] + (vec1[1]*0.05), pList[0][2] + (vec1[2]*0.05)]\n vec2 = [pList[1][0] - pList[2][0], pList[1][1] - pList[2][1], pList[1][2] - pList[2][2]]\n newPt2= [pList[2][0] + (vec2[0]*0.05), pList[2][1] + (vec2[1]*0.05), pList[2][2] + (vec2[2]*0.05)]\n pList.insert(1, newPt1)\n pList.insert(3, newPt2)\n crv = cmds.curve(d=3, p=pList, name=\"newCurve\")\n\n if crvType == 2:\n if len(pList) == 2:\n f = [float(sum(x)/2) for x in zip(*pList)]\n pList.insert(1, f)\n crv = cmds.curve(d=3, ep=pList, name=\"newCurve\")\n\n return(crv)", "def __init__(\n self, name: str, values: List[Dict], index: Optional[int] = 0,\n label: Optional[str] = None, help: Optional[str] = None,\n default: Optional[bool] = None, required: Optional[bool] = False,\n group: Optional[str] = None\n ):\n super(Select, self).__init__(\n dtype=PARA_SELECT,\n name=name,\n index=index,\n label=label,\n help=help,\n 
default=default,\n required=required,\n group=group\n )\n self.values = values", "def build_selection_spec(client_factory, name):\r\n sel_spec = client_factory.create('ns0:SelectionSpec')\r\n sel_spec.name = name\r\n return sel_spec", "def __init__(self):\n super().__init__()\n self.n = 0.0\n self.p = 0.0\n self.type = 'Binomial'\n self.hasInfiniteBound = True\n self.distType = 'Discrete'\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def pdos_select(self, atoms=None, spin=None, l=None, m=None):\n valid_m_values = {'s': [],\n 'p': ['x', 'y', 'z'],\n 'd': ['xy', 'yz', 'z2-r2', 'xz', 'x2-y2'],\n 'f': ['y(3x2-y2)', 'xyz', 'yz2', 'z3', 'xz2', 'z(x2-y2)', 'x(x2-3y2)']}\n if not atoms:\n atom_idx = list(range(self.number_of_atoms))\n else:\n atom_idx = atoms\n to_return = self.pdos_raw[atom_idx, :, :, :]\n if not spin:\n spin_idx = list(range(self.ispin))\n elif spin == 'up':\n spin_idx = [0]\n elif spin == 'down':\n spin_idx = [1]\n elif spin == 'both':\n spin_idx = [0, 1]\n else:\n raise ValueError\n to_return = to_return[:, :, :, spin_idx]\n\n if not l:\n channel_idx = list(range(self.number_of_channels))\n elif l == 's':\n channel_idx = [0]\n elif l == 'p':\n if not m:\n channel_idx = [1, 2, 3]\n else:\n channel_idx = [1 + i for i, v in enumerate(valid_m_values['p']) if v in m]\n elif l == 'd':\n if not m:\n channel_idx = [4, 5, 6, 7, 8]\n else:\n channel_idx = [4 + i for i, v in enumerate(valid_m_values['d']) if v in m]\n elif l == 'f':\n if not m:\n channel_idx = [9, 10, 11, 12, 13, 14, 15]\n else:\n channel_idx = [9 + i for i, v in enumerate(valid_m_values['f']) if v in m]\n else:\n raise ValueError\n\n return to_return[:, :, channel_idx, :]", "def select(self):\n\t\tbest_num_components = self.n_constant\n\t\treturn self.base_model(best_num_components)", "def __init__(self, parent):\n # parent is the main frame of PyCorrFit\n self.parent = parent\n ## MYID\n # This ID is given by the parent for an instance of this class\n self.MyID = None\n ## Wrapping\n curvedict, labels = self.GetCurvedict()\n self.labels = labels\n self.Selector = UserSelectCurves(parent, curvedict,\n wrapper=self, labels=labels)\n # This is necessary for parent to deselect and select the tool\n # in the tools menu.\n self.Bind = self.Selector.Bind\n if self.parent.notebook.GetPageCount() == 0:\n self.Selector.sp.Disable()", "def __init__(self, p) -> None:\n self._p = p\n self._delegate = TwoQubitAsymmetricDepolarizingChannel(p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15,\n p / 15, p / 15, p / 15)", "def _an_element_(self):\n from sage.rings.integer_ring import ZZ\n return self(self.realization_of().PD().get_point(ZZ.zero()))", "def 选择项目(self, n): # real signature unknown; restored from __doc__\n return self.Select(n)", "def generate_kdtree(self):\n if self.method==2:\n coordinates = self.unassigned_data[0:3,:]\n else:\n coordinates = self.unassigned_data[0:2,:]\n tree = cKDTree(coordinates.T)\n\n return tree", "def _select_single(self, disc):\n return QuadraticFieldClassNumbersTable._select_single(self, -disc)", "def __init__(self, r=1, p=3):\n self.p = p\n self.r = r", "def make_selection(self, num):\n other_doors = None\n if num is 1:\n other_doors = [str(2), str(3)]\n elif num is 2:\n other_doors = [str(1), str(3)]\n elif num is 3:\n other_doors = [str(1), str(2)]\n\n reveal = str(random.choice(other_doors))\n other_doors.remove(reveal)\n third_door = random.choice(other_doors)\n 
other_doors.remove(third_door)\n\n main_door = getattr(self, 'door' + str(num) + '_counter')\n door_second = getattr(self, 'door' + reveal + '_counter')\n door_third = getattr(self, 'door' + third_door + '_counter')\n main_door_reveal = getattr(self, 'door'+str(num)+'_reveal')\n\n if (main_door is 0 and door_second is 0\n and door_third is 0):\n self.ids['door'+reveal].source = \\\n getattr(self, 'door'+reveal+'_reveal')\n self.ids['button'+reveal].disabled = True\n inc = getattr(self, 'door' + str(num) + '_counter')\n setattr(self, 'door' + str(num) + '_counter', inc + 1)\n elif main_door is 1 and door_second is 0 and door_third is 0:\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()\n elif main_door is 0 and (door_second is 1 or door_third is 1):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n getattr(self, 'door' + str(i) + '_reveal')\n self.ids['button'+str(i)].disabled = True\n if main_door_reveal in ['door-money.png']:\n self.win_popup()\n else:\n self.lose_popup()" ]
[ "0.60390633", "0.5895539", "0.55829453", "0.5348063", "0.5305103", "0.5261155", "0.5167734", "0.51543236", "0.5077886", "0.5058524", "0.50535107", "0.50340015", "0.5013713", "0.49337313", "0.49296355", "0.48771095", "0.48653218", "0.48559302", "0.48356175", "0.48229763", "0.47938704", "0.47937343", "0.47884813", "0.4786957", "0.47820017", "0.47796422", "0.4728191", "0.47270483", "0.4726566", "0.47224525" ]
0.6516377
0
Load all quest handlers here
def load_quests(self): raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def _handler_init(self):\r\n\t\tself._handlers[\"player-join\"] = FunctionDelegate()\r\n\t\tself._handlers[\"player-quit\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-start\"] = FunctionDelegate()\r\n\t\tself._handlers[\"game-stop\"] = FunctionDelegate()", "def __luanch_handlers(self):\n\n self.__updater = Updater(self.__token, use_context=True)\n self.__dp = self.__updater.dispatcher\n # on different commands - answer in Telegram\n self.__dp.add_handler(CommandHandler(\"start\", self.start_message))\n self.__dp.add_handler(CommandHandler(\"help\", self.help))\n self.__dp.add_handler(CommandHandler(\"history\", self.history))\n self.__dp.add_handler(CommandHandler(\"request\", self.request))\n self.__dp.add_handler(CommandHandler(\"cancel\", self.cancel))\n self.__dp.add_handler(CommandHandler(\"show\", self.show))\n self.__dp.add_handler(CommandHandler(\"promote\", self.promote))\n self.__dp.add_handler(CommandHandler(\"demote\", self.demote))\n self.__dp.add_handler(CommandHandler(\"checkadmin\", self.check_admin))\n self.__dp.add_handler(CommandHandler(\"kick\", self.kick))\n self.__dp.add_handler(CommandHandler(\"stop\", self.stop_all))\n self.__dp.add_handler(CommandHandler(\"whatsmyid\", self.__whatsmyid))\n self.__updater.start_polling()", "def loadTreeHandlers(self):\n #\n # Paths for key folders\n plugin_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\")\n self.handler_path = handler_path = g.os_path_join(g.app.loadDir, \"..\", \"plugins\", \"trees\")\n #\n if not g.os_path_isdir(handler_path):\n g.es(\"No tree handler folder found\", color=\"red\")\n else:\n g.es(\"Scanning for tree handlers\", color=\"blue\")\n #\n # Add folder locations to path\n old_path = sys.path[:]\n sys.path.insert(0, plugin_path)\n sys.path.insert(0, handler_path)\n #@+<< Get plugin manager module >>\n #@+node:ekr.20050329082101.135: *4* << Get plugin manager module >>\n # Get the manager\n try:\n self.plugin_manager = __import__(\"plugin_manager\")\n except ImportError as err:\n g.es(\"Autotrees did not load plugin manager: %s\" % (err,), color=\"red\")\n self.plugin_manager = None\n #@-<< Get plugin manager module >>\n #@+<< Find all handlers >>\n #@+node:ekr.20050329082101.136: *4* << Find all handlers >>\n # Find all handlers\n for filename in glob.glob(g.os_path_join(handler_path, \"*.py\")):\n handler_name = g.os_path_splitext(g.os_path_split(filename)[1])[0]\n g.es(\"... looking in %s\" % handler_name, color=\"blue\")\n try:\n self.loadHandlersFrom(handler_name)\n except BadHandler as err:\n g.es(\"... unable to load '%s' handler: %s\" % (handler_name, err), color=\"red\")\n #@-<< Find all handlers >>\n # Restore\n sys.path = old_path", "def handle_loadall(bot, ievent):\n plugs.loadall(plugin_packages, force=True)\n ievent.done()", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n #\n # the list of handlers (excluding base. 
Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' + m)\n except:\n pass\n #print(dir(mod))", "def load(self):\n\n self.commands = {\n # Usual text commands (e.g. \"/echo 123\")\n 'user': {},\n 'owner': {\n 'load': self.load,\n 'modprobe': self.modprobe,\n 'rmmod': self.rmmod\n },\n # Modules for bot's reaction to a different message types\n 'text': {},\n 'photo': {},\n 'audio': {},\n 'video': {},\n 'sticker': {},\n 'voice': {}\n }\n\n for file in os.listdir('modules'):\n if file.endswith('.py'):\n command_type, command = file.split('_', 1)\n self.modprobe(self, command[:-3])", "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def loadAllCommand(self, player):\n for eachCmd in self.commands.keys():\n player.addCommand(eachCmd, self.commands[eachCmd]())", "def init_bot(self):\n dispatcher = self.updater.dispatcher\n\n dispatcher.add_handler(CommandHandler(\"start\", self.on_bot_start))\n dispatcher.add_handler(CommandHandler(\"help\", self.on_bot_help))\n dispatcher.add_handler(CommandHandler(\"about\", self.on_bot_about))\n dispatcher.add_handler(CommandHandler(\"vreausaajut\", self.on_bot_offer_to_help))\n dispatcher.add_handler(CommandHandler(\"status\", self.on_status))\n dispatcher.add_handler(CommandHandler(\"Da\", self.on_accept))\n dispatcher.add_handler(CommandHandler(\"Nu\", self.on_reject))\n\n dispatcher.add_handler(CallbackQueryHandler(self.negotiate_time, pattern=\"^eta.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_dispatch, pattern=\"^caution.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_handle, pattern=\"^handle.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wellbeing, pattern=\"^state.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_symptom, pattern=\"^symptom.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_wouldyou, pattern=\"^wouldyou.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_further, pattern=\"^further.*\"))\n dispatcher.add_handler(CallbackQueryHandler(self.confirm_activities, pattern=\"^assist.*\"))\n\n dispatcher.add_handler(MessageHandler(Filters.photo, self.on_photo))\n dispatcher.add_handler(MessageHandler(Filters.contact, self.on_contact))\n dispatcher.add_handler(MessageHandler(Filters.text, self.on_text_message))\n dispatcher.add_error_handler(self.on_bot_error)", "def load():\r\n global database\r\n global ranks\r\n \r\n osPlatform = (\"Windows\" if os.name == \"nt\" else \"Linux\" if os.name == \"posix\" else os.name)\r\n debug.write('Log file started at %s' % time.strftime(\"%A %d %B %Y - %H:%M:%S\"), 0, False)\r\n debug.write('\\n*******************************************************', 0, False)\r\n debug.write('[SourceRPG]: Turning your Server 
into a Role Playing Game', 0, False)\r\n debug.write('[SourceRPG]: Current Version - %s' % info.version, 0, False)\r\n debug.write('[SourceRPG]: Made by %s' % info.author, 0, False)\r\n debug.write('\\nSystem Info:', 0, False)\r\n debug.write('\\tOS: %s' % osPlatform, 0, False)\r\n debug.write('\\tEventscripts Version: %s' % es.ServerVar('eventscripts_ver'), 0, False)\r\n debug.write('\\tCorelib Version: %s' % es.ServerVar('es_corelib_ver'), 0, False)\r\n debug.write('\\tEventscript Tools Version: %s' % es.ServerVar('est_version'), 0, False)\r\n debug.write('\\tEventscripts Noisy: %s' % es.ServerVar('eventscripts_noisy'), 0, False)\r\n debug.write('\\tPopuplib version: %s' % popuplib.info.version, 0, False) \r\n \r\n cmdlib.registerSayCommand(\"rpgmenu\", sayCommands.mainMenu, \"Opens the rpg main menu\")\r\n cmdlib.registerSayCommand(\"rpgupgrade\", sayCommands.upgradeMenu, \"Opens the upgrade menu\")\r\n cmdlib.registerSayCommand(\"rpgsell\", sayCommands.sellMenu, \"Opens the sell menu\")\r\n cmdlib.registerSayCommand(\"rpghelp\", sayCommands.helpMenu, \"Opens the help menu\")\r\n cmdlib.registerSayCommand(\"rpgstats\", sayCommands.stats, \"Opens the stats menu for the user or another player\")\r\n cmdlib.registerSayCommand(\"rpgrank\", sayCommands.rank, \"Tells the player their rank or another player's rank\")\r\n cmdlib.registerSayCommand(\"rpgpopup\", sayCommands.togglePopup, \"Tells the player their rank or another player's rank\")\r\n cmdlib.registerSayCommand(\"rpgtop10\", sayCommands.top10, \"Sends the player the last updated top 10 scores\")\r\n \r\n es.server.cmd(\"exec sourcerpg/skill_loader.cfg\")\r\n \r\n es.server.cmd(\"exec sourcerpg/addon_loader.cfg\")\r\n \r\n skillConfig.write(True)\r\n skillConfig.execute(True, True)\r\n\r\n debug.write('[SourceRPG] Starting the popup creation', 0, False)\r\n\r\n \"\"\" Create the default popups which aren't unique to players \"\"\"\r\n rpgmenu = popuplib.easymenu(\"sourcerpg_rpgmenu\", \"_popup_choice\", popups.rpgmenu)\r\n rpgmenu.settitle(\"=== %s Menu ===\" % prefix)\r\n rpgmenu.addoption(1, \"Upgrade Skills\")\r\n rpgmenu.addoption(2, \"Sell Skills\")\r\n rpgmenu.addoption(3, \"RPG Help\")\r\n rpgmenu.addoption(4, \"RPG Stats\")\r\n rpgmenu.addoption(5, \"Reset Skills\")\r\n \r\n helpMenu = popuplib.easymenu('sourcerpg_help', '_popup_choice', popups.helpmenu)\r\n helpMenu.settitle('=== %s Help ===' % prefix)\r\n helpMenu.addoption(1, 'About SourceRPG')\r\n helpMenu.addoption(2, 'List of Commands')\r\n helpMenu.addoption(3, 'About SourceRPG Skills')\r\n helpMenu.addoption(4, 'Credit')\r\n helpMenu.submenu(10, \"sourcerpg_rpgmenu\")\r\n \r\n confirmation = popuplib.easymenu('sourcerpg_confirm', '_popup_choice', popups.confirm)\r\n confirmation.settitle(\"=== %s Reset Stats ===\" % prefix)\r\n confirmation.setdescription(\"\"\"Are you sure you want to remove\r\nyour skills? There is no chance\r\nor recovering them again!\"\"\")\r\n confirmation.addoption(True, \"Yes\")\r\n confirmation.addoption(False, \"No\")\r\n \r\n about = popuplib.create('sourcerpg_about')\r\n about.addline('=== About %s ===' % prefix)\r\n about.addline('-' * 30)\r\n about.addline('SourceRPG is a python coded mod')\r\n about.addline('for EventScripts 2+. It enables')\r\n about.addline('players to gain Levels, by gaining')\r\n about.addline('XP from certain events, such as')\r\n about.addline('planting the bomb, or killing')\r\n about.addline('another player. 
Each level gives')\r\n about.addline('%s Credits, which allows you to' % creditsReceived)\r\n about.addline('buy certain skills which aid you')\r\n about.addline('in killing other players.')\r\n about.addline('-' * 30)\r\n about.addline('->8. Back')\r\n about.addline('0. Cancel')\r\n about.submenu(8, 'sourcerpg_help')\r\n \r\n commandspopup = popuplib.create('sourcerpg_commands')\r\n commandspopup.addline(\"=== %s Commands ===\" % prefix)\r\n commandspopup.addline(\"-\" * 30)\r\n commandspopup.addline(\"rpghelp - displays the help menu\")\r\n commandspopup.addline(\"rpgmenu - displays the main menu\")\r\n commandspopup.addline(\"rpgrank - displays your RPG rank\")\r\n commandspopup.addline(\"rpgpopup - toggles on / off automatic popup display\")\r\n commandspopup.addline(\"rpgupgrade - upgrade skills\")\r\n commandspopup.addline(\"rpgsell - sell skills\")\r\n commandspopup.addline(\"rpgstats - display your stats\")\r\n commandspopup.addline(\"-\" * 30)\r\n commandspopup.addline(\"->8. Back\")\r\n commandspopup.addline(\"0. Cancel\")\r\n commandspopup.submenu(8, 'sourcerpg_help')\r\n \r\n creditmenu = popuplib.create('sourcerpg_creditmenu') \r\n creditmenu.addline('=== %s Credits ===' % prefix)\r\n creditmenu.addline('-' * 30)\r\n creditmenu.addline(info.author)\r\n creditmenu.addline(' Script Creator')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('SumGuy14 and Murphey')\r\n creditmenu.addline(' Letting me use their Long Jump code')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('SuperDave')\r\n creditmenu.addline(' He turned my failing SmogNade code into')\r\n creditmenu.addline(' a working code! Thank him for that skill.')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('JoeyT2008 (Jordan Thomas)')\r\n creditmenu.addline(' Awesome scripter who made the database conversion')\r\n creditmenu.addline(' ')\r\n creditmenu.addline('EventScripts Community')\r\n creditmenu.addline(' Help and support, and such a good plugin.')\r\n creditmenu.addline('-' * 30)\r\n creditmenu.addline('8. Back')\r\n creditmenu.addline('0. Cancel')\r\n creditmenu.submenu(8, 'sourcerpg_help')\r\n \r\n debug.write('[SourceRPG] Popups created', 0, False)\r\n \r\n \r\n if int(turboMode):\r\n database = DATABASE_STORAGE_METHOD(\":memory:\")\r\n else:\r\n database = DATABASE_STORAGE_METHOD(databasePath)\r\n \r\n ranks = RankManager()\r\n \r\n \"\"\" If the script is loaded late then make sure all players are inserted \"\"\"\r\n if es.getplayercount():\r\n for player in es.getUseridList():\r\n players.addPlayer( player )\r\n \r\n es.server.queuecmd('mp_restartgame 1')\r\n\r\n if str( es.ServerVar('eventscripts_currentmap')):\r\n es_map_start({})\r\n\r\n \"\"\" If we want to save by intervals then create a repeat to save the database \"\"\"\r\n if str( saveType ) == \"intervals\":\r\n gamethread.delayedname(float(saveLength), 'sourcerpg_databasesave', saveDatabase)\r\n \r\n debug.write('[SourceRPG]: Finished Loading... 
Enjoy your stay!', 0, False)\r\n debug.write('*******************************************************\\n', 0, False)", "def makeHandlers(self):\n\n yield self.loadGrids.start(funcSelf=self)\n yield self.updateClientWatchedGrids.start(funcSelf=self)\n logger.debug(\"RPCs started\")", "def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action", "def loadPlayerCommands(self, player):\n player.addCommand('get', self.commands['get']())\n player.addCommand('drop', self.commands['drop']())\n player.addCommand('go', self.commands['go']())\n player.addCommand('say', self.commands['say']())\n player.addCommand('look', self.commands['look']())\n player.addCommand('quit', self.commands['quit']())\n player.addCommand('commands', self.commands['commands']())\n player.addCommand('color', self.commands['color']())", "def loadallskills(self):\r\n for skill in os.listdir( os.path.join( es.getAddonPath( info.basename ), \"skills\" )):\r\n es.load(\"%s/skills/%s\" % (info.basename, skill))", "def loadHandlersFrom(self, name):\n try:\n module = __import__(name)\n except Exception as err:\n raise BadHandler(\"Failed import: %s\" % err)\n #\n # Look for handler classes\n for cls_name in dir(module):\n object = getattr(module, cls_name)\n try:\n is_handler = issubclass(object, BaseTreeHandler)\n except TypeError:\n is_handler = False\n if is_handler:\n g.es(\"... 
found handler '%s'\" % (cls_name,), color=\"blue\")\n self.handlers[cls_name.lower()] = object", "def u2handlers(self):\n return []", "def __setupCommandHandlerTypes(self):\n # dict saving all command handler types\n self.__commandHandlers = {'channel': {}, 'query': {}, 'not_authed_dcc': {}, 'authed_dcc': {}}", "def load_data(self):\n super(MudderyNPC, self).load_data()\n \n data = self.get_data_record()\n if not data:\n return\n\n # set NPC's default dialogues.\n self.set_dialogue(data.dialogue)", "def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def setup(cls):\n super().setup()\n cls.default_dialogues = cast(\n DefaultDialogues, cls._skill.skill_context.default_dialogues\n )\n cls.tac_dialogues = cast(TacDialogues, cls._skill.skill_context.tac_dialogues)\n cls.oef_search_dialogues = cast(\n OefSearchDialogues, cls._skill.skill_context.oef_search_dialogues\n )", "def load_shutit_modules(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif self.loglevel <= logging.DEBUG:\n\t\t\tself.log('ShutIt module paths now: ',level=logging.DEBUG)\n\t\t\tself.log(self.host['shutit_module_path'],level=logging.DEBUG)\n\t\tfor shutit_module_path in self.host['shutit_module_path']:\n\t\t\tself.load_all_from_path(shutit_module_path)", "def quests(self, quests):\n\n self._quests = quests", "def register_handlers(path = EXPLOIT_FOLDER):\n\n exploit_folder = './{}/{}'.format(os.path.dirname(__file__), path)\n handlers = []\n\n for module in os.listdir(exploit_folder):\n\n if not module.endswith(\".py\") or module == \"__init__.py\":\n continue\n\n # Execute the script\n # We assume that each executed script registers himself to the handlers dictionary.\n try:\n execfile('./{}/{}'.format(path, module))\n except Exception as e:\n log.failure(\"Could not register handler '{}' : {}\".format(module, e))\n\n log.info(\"Registered {} handler(s).\".format(len(handlers)))\n for handler in handlers:\n\n handler_name = handler.__name__\n log.info(\"- Registered '{}' handler\".format(handler_name))\n\n return handlers", "def get_handlers(self):\n raise NotImplementedError()", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers = []\n next_handler = None\n for handler in 
reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def request_plugins(self):", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)" ]
[ "0.6526081", "0.6394275", "0.60561556", "0.59975696", "0.596935", "0.5927591", "0.59075165", "0.5878692", "0.569096", "0.5556501", "0.5527142", "0.5512644", "0.55046976", "0.54805523", "0.54044414", "0.5394727", "0.5393246", "0.5391057", "0.5354453", "0.5347178", "0.53144395", "0.5286961", "0.5270832", "0.52531284", "0.5250341", "0.5240783", "0.52251697", "0.52249855", "0.5177247", "0.51489383" ]
0.71654475
0
Add a quest handler to the aiohttp app
def add_quest(self, method: str, route: str, handler): self.aiohttp.router.add_route(method, route, handler)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def _response_handler(self):", "async def startup_handler(app):\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)", "def launch_request_handler(handler_input):\n speech_text = \"Hello! Are you looking to connect and play with others?\"\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello! Are you looking to connect and play with others?\", speech_text)).set_should_end_session(False)\n return handler_input.response_builder.response", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = f\"Yo yo yo what's popping. Come checkout what is up with your Monzo\"\n\n handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Hello World\", speech_text)).set_should_end_session(\n False)\n return handler_input.response_builder.response", "async def init_server() -> aiohttp.web.Application:\n app = aiohttp.web.Application(\n middlewares=[\n swift_browser_ui.common.common_middleware.add_cors, # type: ignore\n swift_browser_ui.common.common_middleware.check_db_conn, # type: ignore\n swift_browser_ui.common.common_middleware.handle_validate_authentication, # type: ignore\n swift_browser_ui.common.common_middleware.catch_uniqueness_error, # type: ignore\n swift_browser_ui.common.common_middleware.error_handler, # type: ignore\n ]\n )\n\n async def on_prepare(\n _: aiohttp.web.Request, response: aiohttp.web.StreamResponse\n ) -> None:\n \"\"\"Modify Server headers.\"\"\"\n response.headers[\"Server\"] = \"Swift Browser Request\"\n\n # add custom response headers\n app.on_response_prepare.append(on_prepare)\n\n app.add_routes(\n [\n aiohttp.web.get(\"/health\", handle_health_check),\n ]\n )\n\n app.add_routes(\n [\n aiohttp.web.options(\n \"/request/user/{user}/{container}\",\n swift_browser_ui.common.common_handlers.handle_delete_preflight,\n ),\n aiohttp.web.post(\n \"/request/user/{user}/{container}\", handle_share_request_post\n ),\n aiohttp.web.delete(\n \"/request/user/{user}/{container}\", handle_user_share_request_delete\n ),\n aiohttp.web.get(\"/request/user/{user}\", handle_user_made_request_listing),\n aiohttp.web.get(\"/request/owner/{user}\", handle_user_owned_request_listing),\n aiohttp.web.get(\n \"/request/container/{container}\", handle_container_request_listing\n ),\n ]\n )\n\n app.add_routes(\n [\n aiohttp.web.options(\n \"/token/{project}/{id}\",\n swift_browser_ui.common.common_handlers.handle_delete_preflight,\n ),\n aiohttp.web.post(\"/token/{project}/{id}\", handle_user_add_token),\n aiohttp.web.delete(\"/token/{project}/{id}\", handle_user_delete_token),\n aiohttp.web.get(\"/token/{project}\", handle_user_list_tokens),\n ]\n )\n\n app.on_startup.append(resume_on_start)\n app.on_startup.append(swift_browser_ui.common.common_util.read_in_keys)\n app.on_shutdown.append(graceful_shutdown)\n\n return app", "def cli(app, aiohttp_client):\n return asyncio.get_event_loop().run_until_complete(aiohttp_client(app.app))", "def cli(loop, aiohttp_client, known_domain_data):\n app = web.Application()\n\n async def get_handler(request):\n return web.json_response(known_domain_data)\n\n async def 
bad_get_handler(request):\n return web.json_response(\n {'errors': [{'code': '50004', 'detail': 'URL is not found.'}]},\n status=500\n )\n\n async def post_handler(request):\n json_data = await request.json()\n response_dict = known_domain_data\n response_dict.update(json_data)\n if request.query:\n response_dict['query_args'] = dict(request.query)\n return web.json_response(response_dict)\n\n async def put_handler(request):\n json_data = await request.json()\n response_dict = known_domain_data\n response_dict.update(json_data)\n if request.query:\n response_dict['query_args'] = dict(request.query)\n return web.json_response(response_dict)\n\n app.router.add_get(path='/cli-test', handler=get_handler)\n app.router.add_post(path='/cli-test', handler=post_handler)\n app.router.add_put(path='/cli-test', handler=put_handler)\n app.router.add_get(path='/cli-test-bad', handler=bad_get_handler)\n\n return loop.run_until_complete(aiohttp_client(app))", "async def serve(app, flow: http.HTTPFlow):\n\n scope = make_scope(flow)\n done = asyncio.Event()\n received_body = False\n sent_response = False\n\n async def receive():\n nonlocal received_body\n if not received_body:\n received_body = True\n return {\n \"type\": \"http.request\",\n \"body\": flow.request.raw_content,\n }\n else: # pragma: no cover\n # We really don't expect this to be called a second time, but what to do?\n # We just wait until the request is done before we continue here with sending a disconnect.\n await done.wait()\n return {\"type\": \"http.disconnect\"}\n\n async def send(event):\n if event[\"type\"] == \"http.response.start\":\n flow.response = http.Response.make(\n event[\"status\"], b\"\", event.get(\"headers\", [])\n )\n flow.response.decode()\n elif event[\"type\"] == \"http.response.body\":\n assert flow.response\n flow.response.content += event.get(\"body\", b\"\")\n if not event.get(\"more_body\", False):\n nonlocal sent_response\n sent_response = True\n else:\n raise AssertionError(f\"Unexpected event: {event['type']}\")\n\n try:\n await app(scope, receive, send)\n if not sent_response:\n raise RuntimeError(f\"no response sent.\")\n except Exception:\n logger.error(f\"Error in asgi app:\\n{traceback.format_exc(limit=-5)}\")\n flow.response = http.Response.make(500, b\"ASGI Error.\")\n finally:\n done.set()", "def on_startup():\n\n async def startup_handler(app):\n \"\"\"Run all initialization tasks.\n These are tasks that should be run after the event loop has been started but before the HTTP\n server has been started.\n \"\"\"\n\n spotify_client_id = os.environ.get(SPOTIFY_CLIENT_ID)\n spotify_client_secret = os.environ.get(SPOTIFY_CLIENT_SECRET)\n\n # Save dependencies in the HTTP app.\n http.register_dependency(app, SPOTIFY_CLIENT_ID, spotify_client_id)\n http.register_dependency(app, SPOTIFY_CLIENT_SECRET, spotify_client_secret)\n\n async def cleanup(app):\n \"\"\"Perform required cleanup on shutdown\"\"\"\n # await client_session.close()\n\n app.on_shutdown.append(cleanup)\n\n return startup_handler", "def _run_aiohttp(port):\n loop = aio.get_event_loop()\n aio_app = init_app(loop)\n handler = aio_app.make_handler()\n srv = loop.run_until_complete(\n loop.create_server(\n handler,\n '0.0.0.0',\n port,\n ))\n print(\"serving on\", srv.sockets[0].getsockname())\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n finally:\n srv.close()\n loop.run_until_complete(srv.wait_closed())\n loop.run_until_complete(aio_app.shutdown())\n loop.run_until_complete(handler.shutdown(_HANDLER_SHUTDOWN_SEC))\n 
loop.run_until_complete(aio_app.cleanup())\n loop.close()", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Merriam-Webster Dictionary. What word can I look up for you?\"\n reprompt = \"You can say: definition of word, example of word, or synonym of word.\"\n\n handler_input.response_builder.speak(speech).ask(reprompt)\n return handler_input.response_builder.response", "def config(self):\n\n # Set up on_startup listener for connecting to the server\n self.aiohttp.on_startup.append(self.ws.connect)\n\n # Await websocket and client session termination\n async def shutdown(app):\n await self.ws.close()\n await self.client.close()\n\n # Set up on_shutdown listeners for graceful shutdown\n self.aiohttp.on_shutdown.append(shutdown)\n\n # Add a default route\n self.aiohttp.router.add_route('*', '/', lambda request: web.json_response({ \"msg\": \"I'm alive\" }))\n\n # Load user defined quests\n self.load_quests()", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In LaunchRequestHandler\")\n lang = handler_input.request_envelope.request.locale\n try:\n speech = welcome_speech[lang]\n except:\n speech = \"Language \" + lang + \" is not supported.\"\n\n handler_input.response_builder.speak(\n speech).ask(help_text)\n return handler_input.response_builder.response", "def apiai_hook():\n\n route = {\n 'artist_bio': artist_bio,\n 'artist_top_tracks': artist_top_tracks,\n 'artist_similar': artist_similar,\n 'track_similar': track_similar,\n }\n\n req = request.get_json(silent=True, force=True)\n response = {}\n try:\n response = route[req.get('result').get('action')](req)\n except (KeyError, AttributeError) as e:\n logger.error('Invalid action specified, error=\"{0}\".'.format(e))\n return jsonify(response)\n\n return response", "def on_intent(request, session):\n\n intent = request['intent']\n\n print(\"on_intent:\", intent)\n\n if intent[\"name\"] == \"AntwortIntent\":\n return handle_answer_request(intent, session)\n elif intent[\"name\"] == \"DontKnowIntent\":\n return handle_answer_request(intent, session)\n elif intent['name'] == \"AMAZON.RepeatIntent\":\n return handle_repeat_request(intent, session)\n elif intent['name'] == \"AMAZON.StopIntent\" or intent['name'] == \"AMAZON.CancelIntent\":\n return handle_finish_session_request(intent, session)\n elif intent['name'] == \"AMAZON.HelpIntent\":\n return get_help(intent, session)\n elif intent['name'] == \"StartQuizIntent\" or intent['name'] == \"AMAZON.StartoverIntent\":\n if session[\"new\"] == False:\n return get_welcome_message(restart=True)\n #if no intent is identified:\n return get_help(intent, session)", "async def handle_async(req):\n return await logic_async(req)", "def launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech = \"Welcome to the Alexa Skills Kit color session sample.\"\n\n handler_input.response_builder.speak(\n speech + \" \" + help_text).ask(help_text)\n return handler_input.response_builder.response", "async def test_async_handler(dm):\n assert not dm.called_async_handler\n request = create_request(\"domain\", \"async\")\n response = create_responder(request)\n result = await dm.apply_handler(request, response)\n assert dm.called_async_handler\n assert result.dialogue_state == \"async_handler\"\n assert len(result.directives) == 1\n assert result.directives[0][\"name\"] == \"reply\"\n assert result.directives[0][\"payload\"] == {\"text\": \"this is the async handler\"}", "def 
launch_request_handler(handler_input):\n # type: (HandlerInput) -> Response\n speech_text = \"Welcome to the Transit Time skill, ask when the next bus is coming!\"\n\n return handler_input.response_builder.speak(speech_text).set_card(\n SimpleCard(\"Transit Time\", speech_text)).set_should_end_session(\n False).response", "def add_handler(self, handler):\n pass", "def on_intent(event):\n\n intent = event[\"request\"][\"intent\"][\"name\"]\n\n if intent in (\"AMAZON.CancelIntent\", \"AMAZON.StopIntent\", \"AMAZON.NoIntent\"):\n return handle_session_end_request()\n\n if intent == \"AMAZON.YesIntent\":\n if \"attributes\" in event[\"session\"] and \"previousIntent\" in \\\n event[\"session\"][\"attributes\"]:\n\n if event[\"session\"][\"attributes\"][\"previousIntent\"] == \"AMAZON.HelpIntent\":\n return main_handler(event)\n\n speech_output = event[\"session\"][\"attributes\"][\"nextStations\"]\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n speech_output = \"Sorry, something went wrong.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)\n\n if intent == \"isBikesAvailable\":\n return main_handler(event)\n\n if intent == \"AMAZON.HelpIntent\":\n return handle_help_intent()\n\n speech_output = \"Sorry, I don\\'t know that.\"\n resp = build_speechlet_response(CARD_TITLE, speech_output, True)\n return build_response(resp)", "async def main():\n bot = triogram.make_bot()\n async with bot, trio.open_nursery() as nursery:\n nursery.start_soon(bot)\n nursery.start_soon(echo, bot)\n nursery.start_soon(echo_once, bot)", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n handler_input.response_builder.speak(help_text).ask(help_text)\n return handler_input.response_builder.response", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n handler_input.response_builder.speak(help_text).ask(help_text)\n return handler_input.response_builder.response", "def help_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n handler_input.response_builder.speak(help_text).ask(help_text)\n return handler_input.response_builder.response", "def unhandled_intent_handler(handler_input):\n # type: (HandlerInput) -> Response\n intent_name = get_intent_name(handler_input)\n if intent_name == 'ChallengeBossIntent':\n speech_text = 'You need to be in the boss room to challenge the boss. '\n elif intent_name == 'EnterMazeIntent':\n speech_text = 'You already have a maze in progress. Would you like to resume the maze or discard the maze? '\n elif intent_name == 'ResumeMazeIntent' or intent_name == 'DiscardMazeIntent':\n speech_text = 'You are already in a maze or you don\\'t have a maze in progress. Say enter the maze or discard the maze. '\n elif intent_name == 'LocationIntent':\n speech_text = 'You need to be in a maze to locate yourself. Say enter the maze or resume the maze. '\n elif intent_name == 'MoveIntent':\n speech_text = 'You need to be in a maze to take a move. Say enter the maze or resume the maze. '\n else:\n speech_text = 'I am not sure what you are saying. 
'\n\n handler_input.response_builder.speak(\n speech_text).set_should_end_session(False)\n return handler_input.response_builder.response", "def launch_request_handler(handler_input: HandlerInput) -> Response:\n day = events.get_date()\n text = events.for_day(day)\n log.info(f\"launch: events for {day} = {text}\")\n return (\n handler_input.response_builder.speak(text)\n .set_card(SimpleCard(f\"Hillbrook events for {day.strftime('%A')}:\\n{text}\"))\n .set_should_end_session(True)\n .response\n )", "async def app(scope, receive, send):\n html = b\"\"\"\n <!doctype html>\n <html>\n <head>\n <title>Hello ASGI!</title>\n </head>\n <body>\n <main>\n <h1>Hello ASGI!</h1>\n </main>\n </body>\n </html>\n \"\"\"\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": 200,\n \"headers\": [[b\"content-type\", b\"text/html\"], [b\"content-length\", b\"269\"],],\n }\n )\n await send(\n {\"type\": \"http.response.body\", \"body\": html, \"more_body\": False,}\n )", "async def handle_request(self, request: aioweb.request.Request):", "def event_handler(self, response):\n pass" ]
[ "0.5878627", "0.5799518", "0.57429504", "0.57207054", "0.5717911", "0.5691735", "0.5681638", "0.5614569", "0.55652535", "0.5559621", "0.5534234", "0.5512763", "0.5455151", "0.5423512", "0.5397953", "0.5394697", "0.5374808", "0.5370889", "0.53686166", "0.53405356", "0.5336915", "0.5332253", "0.53307897", "0.53307897", "0.53307897", "0.53082174", "0.53078187", "0.52974576", "0.5297153", "0.52573425" ]
0.6682835
0
Representation of the linked list
def __repr__(self): return "LinkedList([{}],{}/{})".format(self.cur_node, self.cur_pos, self.length)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __repr__(self):\n\n return \"LinkedList created\"", "def __repr__(self):\r\n return \"ListNode({})\".format(self.data)", "def __repr__(self):\n return 'LinkedList({!r})'.format(self.items())", "def __repr__(self):\n return \"{}\".format(self._head)", "def __init__(self):\n self.head = ListNode()", "def __init__(self):\n node = ListNode(0) # dummy\n self.head = node\n self.tail = node\n self.len = 0", "def simple_ll():\n ll = LinkedList()\n ll.push(20)\n ll.push(4)\n ll.push(15)\n ll.push(85)\n return ll", "def __init__(self):\n\n self.head = linkNode()\n self.tail = None\n # print(self.head.val)", "def __init__(self, head: ListNode):\n self.head = head\n self.list = []\n while head:\n self.list.append(head.val)\n head = head.next", "def __init__(self, linked_list: object):\n self.current_node = linked_list._head", "def __repr__(self):\n return '<List %r>' % (self.name)", "def __init__(self, head: ListNode):\n self.l = []\n while head:\n self.l.append(head.val)\n head = head.next", "def l1():\n head = l1 = ListNode(3)\n l1.next = ListNode(4)\n l1.next.next = ListNode(5)\n return head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self, head: ListNode):\n self.head = head", "def __init__(self):\n self.head = None\n self.tail = None\n self.current_node = None", "def __init__(self, lst=[]):\r\n self.__length = 0 # current length of the linked list\r\n self.__head = None # pointer to the first node in the list\r\n self.__last = None # pointer to the last node in the list\r\n lst.reverse() # reverse to ensure elements will appear in same order\r\n for e in lst: # add elements of input list lst one by one\r\n self.add(e)", "def __repr__(self):\n\n nodes = []\n current = self.head\n\n while current:\n if current is self.head:\n nodes.append('[Head: %s]' % current.data)\n elif current.next_node is None:\n nodes.append('[Tail: %s]' % current.data)\n else:\n nodes.append('[%s]' % current.data)\n current = current.next_node\n\n return '-> '.join(nodes)", "def get_list_node(self):\n return self.list_node", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self):\n self.head = None\n self.tail = None", "def __init__(self):\n self.head = None\n self.tail = self.head", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self):\n self.head = None\n self.length = 0", "def __init__(self, head: ListNode):\n self.nodes = []\n\n while(head):\n self.nodes.append(head)\n head = head.next", "def __init__(self):\n self.head = None\n self.tail = None\n self.size = 0", "def __init__(self):\n\t\tself.current = None\n\t\tself.head = None" ]
[ "0.77007776", "0.76717454", "0.7663137", "0.7242428", "0.72094405", "0.7121683", "0.7095792", "0.6991777", "0.6974106", "0.69680715", "0.69224", "0.68925244", "0.6854505", "0.6827989", "0.6827989", "0.6827989", "0.6827989", "0.6827989", "0.6827928", "0.67615014", "0.6742917", "0.67423606", "0.67416453", "0.67416453", "0.67233217", "0.667861", "0.667861", "0.66432977", "0.6643191", "0.66429615" ]
0.76858366
1
Print the linked list
def list_print(self): node = self.cur_node # cant point to ll! while node: print(node.data) node = node.next
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_list(self) -> None:\n cur_node = self.head\n while cur_node:\n print(cur_node.data)\n cur_node = cur_node.next", "def print_list(self):\n\n current = self.head\n\n while current is not None:\n print current.data\n current = current.next", "def travel_print(self):\n if self.is_empty():\n print(\"Linked list's length is 0\")\n else:\n node = self.head\n print(\"head -->\", node.data, end=' ')\n while node.next:\n node = node.next\n print(\"-->\", node.data, end=' ')\n print(\" \")", "def printList(self): \r\n aux = self.head \r\n while(aux): \r\n print(aux.data , end = ' ') \r\n aux = aux.next", "def print(self):\n temp = self.head\n while temp.next!=None:\n temp = temp.next\n \n print(temp.value, end= ' ')\n print(\"\")", "def print_list(self):\n p = self.head\n i = 0\n\n while i < self.size():\n print(p.data)\n i += 1\n p = p.next_node", "def show(self):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = self.head\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return", "def print(self):\n current = self.head.next\n for i in range(0,self.count):\n print(current.item)\n current = current.next", "def show(self):\n\n traverse = self.head\n\n if self.head == None:\n print(\"Linked List is empty\")\n return\n\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n\n print(traverse.data)", "def showListFromNode(self, node):\n if self.empty():\n return \"Linked List is Empty\"\n\n l = node\n while l is not None:\n print(l.data, end=\" ----> \")\n l = l.next\n print()\n return", "def display(self):\n printval = self.head \n while printval.next is not None:\n print (printval.__repr__(), end=\"\")\n printval = printval.next\n else:\n print (printval.__repr__())", "def show(self):\n traverse = self.head\n while traverse.next != None:\n print(traverse.data)\n traverse = traverse.next\n print(traverse.data)", "def print_list(self):\r\n head = self\r\n tail = self.__next # go to my next node\r\n if tail is not None: # as long as the end of the list has not been reached\r\n print(head, end=\" \") # print my head\r\n tail.print_list() # recursively print remainder of the list\r\n else: # print the last element\r\n print(head, end=\" \")", "def display(self):\n\n current = self.head\n\n while current is not None:\n print(current.data)\n\n current = current.next", "def show(self):\n current = self._head\n print(current._data)\n while current._next:\n current = current._next\n print(current._data)", "def display(self):\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tprint pointer.state + \"\\t\" + pointer.info\t\n\t\t\tpointer = pointer.next", "def print_linked_list(head):\n while head != None:\n print head.val, \n head = head.sibling\n print", "def show(self):\n\n if self.front == None:\n print(\"Linked List is empty\")\n return\n\n while self.front.next != None:\n print(self.front.data)\n self.front = self.front.next\n\n print(self.front.data)", "def print_list(self):\r\n pass", "def print_list(self):\n node = self.head\n\n string = '['\n while node:\n if node.next:\n string += str(node.value) + ' -> '\n else:\n string += str(node.value)\n node = node.next\n string += ']'\n return string", "def printList(head) :\n \n # Iterate through the list, printing all values\n ptr = head\n while ptr :\n print(ptr.data, end=\" \")\n ptr = ptr.next\n print()", "def printList(head):\n print(deconstructList(head))", "def dump(self, mark='----'):\n print(mark)\n node = self.head\n while node:\n print(node, \" \", end='')\n node 
= node.next\n print()", "def print_list(head=None):\n print head\n if(head.next):\n print_list(head.next)", "def print(self):\n output_string = \"Printing List of Nodes.\\n\"\n print(\"Printing List of Nodes\")\n for node in self.nodes:\n if node:\n output_string += str(node)\n node.print()\n return output_string", "def dump(self) -> NoReturn:\n index = self._head\n while index:\n print(index.data, end=\" \")\n index = index.next", "def displayNode(self):\n for x in self.__node:\n print(x)", "def display(self):\r\n elems = [] #create a list of elements we've seen\r\n current_node = self.head\r\n while current_node.next!=None:\r\n current_node = current_node.next\r\n elems.append(current_node.data)\r\n print(elems)", "def printLR(headNode):\n node = headNode\n \n while node is not None:\n print(node.item, end = \"\\t\")\n node = node.rightL\n\n print(\"end of linked list\")", "def print(self, index):\n count=0\n start = self.head\n while start:\n if count==index:\n print(count, ' : ', start.getMember())\n break\n start=start.getLink()\n count+=1" ]
[ "0.8673001", "0.86067754", "0.85906494", "0.8569578", "0.85571915", "0.8512224", "0.84492445", "0.8400473", "0.82561886", "0.8178204", "0.8120253", "0.8110577", "0.809658", "0.7893961", "0.7872987", "0.7842627", "0.78114355", "0.77788997", "0.77637863", "0.7760473", "0.77422404", "0.77183205", "0.758904", "0.7575949", "0.74362737", "0.74153066", "0.73767954", "0.73752975", "0.7250266", "0.7212496" ]
0.88183177
0
Get the data of the next node
def get_next(self): return self.cur_node.next.data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next(node):\n return node['next']", "def data(self):\n return self.first_node.data", "def get_data(node):\n return node['data']", "def node_data(self):\n return self.node_data_", "def _next(self):\n node = self.head\n while node != None:\n yield node.data\n node = node.right", "def next_data(self):\n return self.data.pop()", "def next_node(self):\n return self.__next_node", "def next_node(self):\n return self.__next_node", "def next_node(self):\n return self.__next_node", "def get_data(self):\n self._zmq_request.send(b'next')\n return msgpack.loads(self._zmq_request.recv())", "def next_node(self):\n\n return self.__next_node", "def get_next(self):\n return self.next", "def get_next(self):\n return self.next", "def get_next(self) -> dict:\n raise NotImplementedError", "def traverse_forward(self) -> str:\n data = \"\"\n current = self.head\n while current is not None:\n data += f\"{current.get_data()} \"\n current = current.get_next_node()\n return data", "def get_next_node_address(self):\n result = self.other_nodes[self.current_node]\n self.current_node = (self.current_node + 1) % self.other_nodes_len\n return result", "def __init__(self, data, next_node=None): #self.next_node ??\n self.data = data\n self.next_node = next_node", "def getNext(self):\n\t\t\treturn self.next", "def getNext(self):\n return self.__nextListNode", "def peek(self):\n return self.head.data", "def get_next(node, offset):\n row, column = node\n row_offset, column_offset = offset\n return row + row_offset, column + column_offset", "def get_next_item(self):\n pass", "def get(self, index):\r\n if index >= self.length():\r\n print(\"ERROR\")\r\n return None\r\n current_index = 0\r\n current_node = self.head\r\n while True:\r\n current_node = current_node.next\r\n if current_index == index: return current_node.data\r\n current_index += 1", "def read_index(self, index):\n current = self.head\n if index == 0:\n return current.data\n elif index >= self.size() :\n return None\n else:\n position = 0\n while position < index:\n current = current.next_node\n position += 1\n return current.data", "def getNext(self):", "def next(self):\n return self.first_node.next", "def getNodeRRDData(self,node):\n data = self.connect('get','nodes/%s/rrddata' % (node),None)\n return data", "def getNext(self):\n return self.__next", "def _get_next_nodes(self):\n next_nodes = self.data[5] if not is_nan(self.data[5]) else \"eos\"\n if is_nan(next_nodes):\n next_nodes = \"eos\"\n return next_nodes", "def first(self):\r\n if self.head == None: #check if first(head) node is empty\r\n return 'null' #if yes, then return null\r\n else: #if it is not empty\r\n return self.head.data #return the data of head node\r" ]
[ "0.7340955", "0.72560537", "0.71013474", "0.70286286", "0.70277715", "0.6898941", "0.67155683", "0.67155683", "0.67155683", "0.6683656", "0.6547143", "0.65334487", "0.65334487", "0.64254665", "0.6382568", "0.63816816", "0.63672584", "0.63516414", "0.630895", "0.6291807", "0.6252606", "0.62454075", "0.6210823", "0.619794", "0.61662865", "0.6164937", "0.61575896", "0.61573625", "0.6126518", "0.6120131" ]
0.8156502
0
Representation of the spinlock
def __repr__(self): return "Spinlock({})".format(self.stepforward)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lock_control(self):\n raise NotImplementedError('PlatformService: Implementation incomplete')", "def spinlocks(self):\n return self._spinlocks", "def lock(self):\n raise NotImplementedError", "def lock(self):\r\n return self._lock", "def f_lock(self):\n self._locked = True", "def lock(*args):", "def i_am_locking(self):\r\n pass", "def lock_blocks(self) -> int:", "def v_locked(self):\n return self._locked", "def action_lock(self):\n self.state = 'locked'", "def lock(self, item_type):", "def lock(self):\n print(\"DEPRECATED lock\")\n return self._operations.lock()", "def _lock_key(self):\n return hash_string_64bit('dirbs-listgen')", "def is_locked(self):\r\n pass", "def get_spin(self, i):\n \n return 1 if self.spins[i] else -1", "def lock(self):\n return self._lock", "def break_lock(self):\r\n pass", "def getSpinControl(*args):", "def _b_spin_changed(self):\n self.bLine.setValue(self.bSpin.value())", "def lock_object(self):\n return gevent.thread.allocate_lock()", "def pilotLock (self):\n return self.unlock()", "def lock (self):\n self.locked = True\n self._changed = False", "def lock_clock(self):\n self.sem.acquire()", "def __getattr__(self, name):\n return getattr(self._lock, name)", "def locked(self):\n\t\treturn self.__locked", "def __enter__(self):\n return self._lock.__enter__()", "def unlocked():\r\n return Lock(None)", "def read_lock_bits(self):\n self.writecmd(self.APP, self.WRITE3_READ1, 4, [0x54, 0x00, 0x00, 0x00])\n return [(ord(self.data[0]) >> x) & 1 for x in range(5)]", "def __getstate__(self):\n state = self.__dict__\n state['_lock'] = None\n return state", "def spin(self, *args, **kwargs) -> Any:\n pass" ]
[ "0.6281956", "0.61268324", "0.60570246", "0.60079235", "0.5999252", "0.5991468", "0.5932551", "0.5889748", "0.57721347", "0.5762535", "0.5744118", "0.57419753", "0.5740945", "0.5726452", "0.57194465", "0.56717205", "0.56714046", "0.56600225", "0.5617723", "0.5606084", "0.5601709", "0.5601595", "0.55989456", "0.5584011", "0.55203074", "0.55102026", "0.54959446", "0.5492584", "0.548023", "0.5472547" ]
0.71650106
0
Given the tile location (x,y) and zoom level z, fetch the corresponding tile from the server and save it to the location specfied in fpath. Note, this saves just one tile; usually, want to use `positive_dataset` instead.
def save_tile(x,y,z,fpath): UA = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0" tile_url = f"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png" # cmd = f"wget --user-agent='please download' -O {fpath} {url}" if os.path.exists(fpath): print(f"Already have tile {fpath}!") return 0 if os.path.isdir(fpath): raise ValueError(f"requested path {fpath} exists and is a directory!") try: res = rq.get( url=tile_url, headers={'User-Agent': UA} ) status = res.status_code if status == 200: with open(fpath,'wb') as of: of.write(res.content) return 0 else: print(f"Error: response {status} from server:\n{res.reason}") return status except Exception as e: print(f"Error getting tile: {e}") return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_tile(map_layer, zoom, x, y):\n try:\n tile_url = map_layer.get_tile_url(zoom, x, y)\n tmp_file, headers = urllib.request.urlretrieve(tile_url)\n return (x, y), tmp_file\n except URLError as e:\n app.logger.info(\"Error downloading tile x={}, y={}, z={} for layer {}: {}\".format(\n x, y, zoom, map_layer, e.reason))\n return (x, y), pkg_resources.resource_filename(\"geos\", \"static/empty_tile.png\")", "def tile(self, z, x, y_tms):\n logger.debug(_(\"Download tile %s\") % ((z, x, y_tms),))\n # Render each keyword in URL ({s}, {x}, {y}, {z}, {size} ... )\n size = self.tilesize\n s = self.tiles_subdomains[(x + y_tms) % len(self.tiles_subdomains)];\n y_osm = (2**int(z) - 1) - int(y_tms)\n try:\n url = self.tiles_url.format(**locals())\n except KeyError, e:\n raise DownloadError(_(\"Unknown keyword %s in URL\") % e)\n logger.debug(_(\"Retrieve tile at %s\") % url)\n r = DOWNLOAD_RETRIES\n sleeptime = 1\n while r > 0:\n try:\n request = urllib2.Request(url)\n for header, value in self.headers.items():\n request.add_header(header, value)\n stream = urllib2.urlopen(request)\n assert stream.getcode() == 200\n return stream.read()\n except (AssertionError, IOError), e:\n logger.debug(_(\"Download error, retry (%s left). (%s)\") % (r, e))\n r -= 1\n time.sleep(sleeptime)\n # progressivly sleep longer to wait for this tile\n if (sleeptime <= 10) and (r % 2 == 0):\n sleeptime += 1 # increase wait\n raise DownloadError(_(\"Cannot download URL %s\") % url)", "def download_tile(self, xtile, ytile):\n location = 'http://maps.six.nsw.gov.au/arcgis/rest/services/public/NSW_Imagery/MapServer/tile/'\n destination = 'downloaded_tiles/'\n save_name = str(self.zoom_level) + '_' + str(xtile) + '_' + str(ytile)\n tile_url = location + save_name.replace('_', '/')\n tile = requests.get(tile_url, stream=True)\n with open(destination + save_name + '.png', 'wb') as out_file:\n tile.raw.decode_content = True\n shutil.copyfileobj(tile.raw, out_file)\n tilepng = png.Reader(file=tile.raw)\n # shutil.copyfileobj(tilepng, out_file)\n del tile", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n # logger.info(_(\"TilesManager.tile calling sources.tile: \") )\n pass\n output = self.reader.tile(z, x, y)\n if output is None:\n return None\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output", "def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))", "def get_tile(tilefile,level,x,y):\n\t\n\ttf=file(tilefile,\"r\")\n\t\n\ttd=pickle.load(tf)\n\ta=td[(level,x,y)]\n\t\n\ttf.seek(a[0],1)\n\tret=tf.read(a[1])\n\t\n\ttf.close()\n\treturn ret", "def run(tile_x, tile_y, zoom, mbtiles_file):\n conn = sqlite3.connect(mbtiles_file)\n c = conn.cursor()\n c.execute(\n (\"SELECT tile_data FROM tiles WHERE \"\n \"zoom_level=? AND tile_column=? 
AND tile_row=?\"),\n (zoom, tile_x, tile_y))\n mvt_content = c.fetchone()[0]\n return mvt_content", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def tile_to_url(tile_x, tile_y, tile_z):\n subdomain = random.choice([\"a\", \"b\", \"c\"])\n resource_url = \"https://{0}.tile.openstreetmap.org/{1}/{2}/{3}.png\"\n return resource_url.format(subdomain, tile_z, tile_x, tile_y)", "def get_tile(geojson, base_url):\n # open geojson and get tile index\n with open(geojson, 'r') as data:\n tile_geojson = json.load(data)\n features = tile_geojson[\"features\"]\n # get the tile index as x, y, z formats.\n xyz = [features[i]['properties']['tiles'] for i in range(len(features))]\n\n # create tile folder\n tiles_folder = op.splitext(geojson)[0]\n if not op.isdir(tiles_folder):\n makedirs(tiles_folder)\n\n # download and get the list of tiles\n tiles = list()\n for i in range(len(xyz)):\n x=str(xyz[i][0])\n y=str(xyz[i][1])\n z=str(xyz[i][2])\n url = base_url.replace('{x}', x).replace('{y}', y).replace('{z}', z)\n o = urlparse(url)\n _, image_format = op.splitext(o.path)\n tile_bn =\"{}-{}-{}{}\".format(z, x, y,image_format)\n r = requests.get(url)\n tile= op.join(tiles_folder, tile_bn)\n tiles.append(tile)\n with open(tile, 'wb')as w:\n w.write(r.content)\n return tiles", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def tile(self, (z, x, y)):\n output = self.cache.read((z, x, y))\n if output is None:\n output = self.reader.tile(z, x, y)\n # Blend layers\n if len(self._layers) > 0:\n logger.debug(_(\"Will blend %s layer(s)\") % len(self._layers))\n output = self._blend_layers(output, (z, x, y))\n # Apply filters\n for f in self._filters:\n image = f.process(self._tile_image(output))\n output = self._image_tile(image)\n # Save result to cache\n self.cache.save(output, (z, x, y))\n self.rendered += 1\n return output", "def get_tile(self, tile, as_png=False, overwrite=True):\n zoom, row, col = tile\n output_path = self.config[\"output_name\"]\n zoomdir = os.path.join(output_path, str(zoom))\n rowdir = os.path.join(zoomdir, str(row))\n image_path = os.path.join(rowdir, str(col)+\".png\")\n if os.path.isfile(image_path):\n return send_file(image_path, mimetype='image/png')\n else:\n try:\n self.save_tile(tile)\n except:\n print \"tile not available\", tile\n size = self.tile_pyramid.tile_size\n empty_image = Image.new('RGBA', (size, size))\n return empty_image.tobytes()\n return send_file(image_path, mimetype='image/png')", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, 
upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def tile_coords_zoom_and_tileserver_to_url(\n tile_x: int, tile_y: int, tile_z: int, tile_server: dict\n) -> str:\n\n if tile_server[\"name\"] == \"bing\":\n quadKey = tile_coords_and_zoom_to_quadKey(tile_x, tile_y, tile_z)\n url = quadKey_to_Bing_URL(quadKey, tile_server[\"apiKey\"])\n elif tile_server[\"name\"] == \"sinergise\":\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n layer=tile_server[\"wmtsLayerName\"],\n )\n elif \"maxar\" in tile_server[\"name\"]:\n # maxar uses not the standard TMS tile y coordinate,\n # but the Google tile y coordinate\n # more information here:\n # https://www.maptiler.com/google-maps-coordinates-tile-bounds-projection/\n tile_y = int(math.pow(2, tile_z) - tile_y) - 1\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n elif \"{-y}\" in tile_server[\"url\"]:\n # this uses not the standard TMS tile y coordinate,\n # but the Google tile y coordinate\n # more information here:\n # https://www.maptiler.com/google-maps-coordinates-tile-bounds-projection/\n tile_y = int(math.pow(2, tile_z) - tile_y) - 1\n url = tile_server[\"url\"].replace(\"{-y}\", \"{y}\")\n url = url.format(\n 
key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n else:\n url = tile_server[\"url\"].format(\n key=tile_server[\"apiKey\"],\n x=tile_x,\n y=tile_y,\n z=tile_z,\n )\n\n return url", "def save_tiles(df,output_dir,namefunc = None):\n if not isinstance(df,pd.core.frame.DataFrame):\n raise TypeError(\"df must be a pandas DataFrame!\")\n if any(e not in df.columns for e in ('z','x','y')):\n raise ValueError(\"df must have columns x, y, and z\")\n if namefunc is None:\n def namefunc(x,y,z):\n return f'{z}_{x}_{y}.png'\n\n opath = os.path.abspath(os.path.expanduser(output_dir))\n Path(opath).mkdir(parents=True, exist_ok=True)\n L = df.shape[0]\n flocs = [''] * L\n for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])):\n x,y,z = xyz\n print(f\"({i+1} of {L})...\")\n sleep(0.75)\n outloc = os.path.join(opath,namefunc(x,y,z))\n if save_tile(x,y,z,outloc) == 0:\n flocs[i] = outloc\n df = df.assign(file_loc = flocs)\n return df[df['file_loc'] != '']", "def render_tile(self, filename, tile_x, tile_y, zoom):\n print 'Rendering %s' % (filename)\n\n # Calculate pixel positions of bottom-left & top-right\n half_width = self.width / 2\n half_height = self.height / 2\n px0 = (tile_x * self.width, (tile_y + 1) * self.height)\n px1 = ((tile_x + 1) * self.width, tile_y * self.height)\n\n # Convert tile coords to LatLng\n ll0 = self.tile_projection.fromPixelToLL(px0, zoom);\n ll1 = self.tile_projection.fromPixelToLL(px1, zoom);\n \n # Convert LatLng to map coords\n c0 = self.map_projection.forward(mapnik2.Coord(ll0[0], ll0[1]))\n c1 = self.map_projection.forward(mapnik2.Coord(ll1[0], ll1[1]))\n\n # Create bounding box for the render\n bbox = mapnik2.Box2d(c0.x, c0.y, c1.x, c1.y)\n\n self.mapnik_map.zoom_to_box(bbox)\n self.mapnik_map.buffer_size = max([half_width, half_height]) \n\n # Render image with default Agg renderer\n image = mapnik2.Image(self.width, self.height)\n mapnik2.render(self.mapnik_map, image)\n image.save(filename, self.filetype)", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = 
[\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def open_tile(filename):\n geoimg = gippy.GeoImage(filename, True)\n z, x, y = map(int, geoimg.basename().split('-')[0:4])\n tile = Tile.from_google(google_x=x, google_y=y, zoom=z)\n geoimg.set_srs('EPSG:3857')\n minpt = tile.bounds[0].meters\n maxpt = tile.bounds[1].meters\n affine = np.array(\n [\n minpt[0], (maxpt[0]-minpt[0])/geoimg.xsize(), 0.0,\n maxpt[1], 0.0, -(maxpt[1]-minpt[1])/geoimg.ysize()\n ])\n geoimg.set_affine(affine)\n geoimg.set_nodata(-1)\n return geoimg", "def tile(\n sceneid, tile_x, tile_y, tile_z, bands=(\"04\", \"03\", \"02\"), tilesize=256, **kwargs\n):\n scene_params = _sentinel_parse_scene_id(sceneid)\n\n if not isinstance(bands, tuple):\n bands = tuple((bands,))\n\n for band in bands:\n if band not in scene_params[\"valid_bands\"]:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n preview_file = os.path.join(\n scene_params[\"aws_bucket\"],\n scene_params[\"aws_prefix\"],\n scene_params[\"preview_file\"],\n )\n with rasterio.open(preview_file) as src:\n bounds = transform_bounds(src.crs, \"epsg:4326\", *src.bounds, densify_pts=21)\n\n if not utils.tile_exists(bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(tile_z, tile_x, tile_y)\n )\n\n mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)\n tile_bounds = mercantile.xy_bounds(mercator_tile)\n\n path_prefix = os.path.join(scene_params[\"aws_bucket\"], scene_params[\"aws_prefix\"])\n if scene_params[\"processingLevel\"] == \"L2A\":\n bands = [_l2_prefixed_band(b) for b in bands]\n else:\n bands = [\"B{}\".format(b) for b in bands]\n\n def _read_tile(path):\n with rasterio.open(path) as src_dst:\n return utils.tile_read(\n src_dst, bounds=tile_bounds, tilesize=tilesize, nodata=0, **kwargs\n )\n\n addresses = [\"{}/{}.jp2\".format(path_prefix, band) for band in bands]\n with futures.ThreadPoolExecutor(max_workers=MAX_THREADS) as executor:\n data, masks = zip(*list(executor.map(_read_tile, addresses)))\n mask = np.all(masks, axis=0).astype(np.uint8) * 255\n\n return np.concatenate(data), mask", "def get_tile(self, x, y):\n\n try:\n # if tile in cache, return it from there\n return self.tile_cache[(x,y)]\n except KeyError:\n # else not in cache: get image, cache and return it\n # exceptions are normally slow,\n # but we are reading a file if we get exception, so ...\n img_name = os.path.join(self.tile_level_dir,\n 'tile_%d_%d.png' % (x, y))\n\n# 
Optimization\n# removed since we *know* tiles are there, we generated them!\n# don't need to do filesystem operation.\n# maybe put back if tiles come from internet?\n# if not os.path.exists(img_name):\n# # if tile not there, use 'missing tile' file\n# img_name = os.path.join(self.tile_dir, MissingTileFilename)\n\n img = wx.Image(img_name, wx.BITMAP_TYPE_ANY)\n pic = img.ConvertToBitmap()\n self.tile_cache[(x,y)] = pic\n return pic", "def getTileFromEmptyDirectory(self, x, y, z, **kwargs):\n basez = z\n scale = 1\n dirlist = self._tiffDirectories\n frame = self._getFrame(**kwargs)\n if frame > 0 and hasattr(self, '_frames'):\n dirlist = self._frames[frame]['dirs']\n while dirlist[z] is None:\n scale *= 2\n z += 1\n while z - basez > self._maxSkippedLevels:\n z -= self._maxSkippedLevels\n scale = int(scale / 2 ** self._maxSkippedLevels)\n tile = PIL.Image.new('RGBA', (\n min(self.sizeX, self.tileWidth * scale), min(self.sizeY, self.tileHeight * scale)))\n maxX = 2.0 ** (z + 1 - self.levels) * self.sizeX / self.tileWidth\n maxY = 2.0 ** (z + 1 - self.levels) * self.sizeY / self.tileHeight\n for newX in range(scale):\n for newY in range(scale):\n if ((newX or newY) and ((x * scale + newX) >= maxX or\n (y * scale + newY) >= maxY)):\n continue\n subtile = self.getTile(\n x * scale + newX, y * scale + newY, z,\n pilImageAllowed=True, numpyAllowed=False,\n sparseFallback=True, edge=False, frame=frame)\n if not isinstance(subtile, PIL.Image.Image):\n subtile = PIL.Image.open(io.BytesIO(subtile))\n tile.paste(subtile, (newX * self.tileWidth,\n newY * self.tileHeight))\n return tile.resize((self.tileWidth, self.tileHeight),\n getattr(PIL.Image, 'Resampling', PIL.Image).LANCZOS)", "def _download_tile_wrapper(args):\n return download_tile(*args)", "def burn_tiles(region, zone, train_tier = 1, zoom_level = 19):\n \n os.system(f'cat ../../data/raw/train_tier_{train_tier}/{region}/{zone}/{zone}.json | supermercado burn {zoom_level} | mercantile shapes | fio collect > ../../data/raw/train_tier_{train_tier}/{region}/{zone}/tiles_{region}_{zone}_{zoom_level}.geojson')\n os.system(f'echo done with {region}_{zone}_{zoom_level}')", "def getTile(self, lat, lon):\r\n if self.childFileListDownload is not None and self.childFileListDownload.is_alive():\r\n '''print \"Getting file list\"'''\r\n return 0\r\n elif not self.filelist:\r\n '''print \"Filelist download complete, loading data\"'''\r\n data = open(self.filelist_file, 'rb')\r\n self.filelist = pickle.load(data)\r\n\r\n try:\r\n continent, filename = self.filelist[(int(lat), int(lon))]\r\n except KeyError:\r\n '''print \"here??\"'''\r\n return 0\r\n\r\n if not os.path.exists(os.path.join(self.cachedir, filename)):\r\n if self.childTileDownload is None or not self.childTileDownload.is_alive():\r\n self.childTileDownload = multiprocessing.Process(target=self.downloadTile, args=(continent, filename))\r\n self.childTileDownload.start()\r\n '''print \"Getting Tile\"'''\r\n return 0\r\n elif self.childTileDownload is not None and self.childTileDownload.is_alive():\r\n '''print \"Still Getting Tile\"'''\r\n return 0\r\n # TODO: Currently we create a new tile object each time.\r\n # Caching is required for improved performance.\r\n try:\r\n return SRTMTile(os.path.join(self.cachedir, filename), int(lat), int(lon))\r\n except InvalidTileError:\r\n return 0", "def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = 
data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory", "def tile_at(self, zoom, position):\n x, y = self.project_pixels(position, zoom)\n return (zoom, int(x/self.tileSize), int(y/self.tileSize))", "def renderMetaTile(z, x, y, ntiles, hypsoreliefMap, landcoverreliefMap, areasMap, oceanMap, contoursMap, featuresMap):\n hypsorelief = renderLayer('hypsorelief', z, x, y, ntiles, hypsoreliefMap, 'png')\n landcoverrelief = renderLayer('landcoverrelief', z, x, y, ntiles, landcoverreliefMap, 'png')\n areas = renderLayer('areas', z, x, y, ntiles, areasMap, 'png')\n ocean = renderLayer('ocean', z, x, y, ntiles, oceanMap, 'png', True)\n contours = renderLayer('contours', z, x, y, ntiles, contoursMap, 'png', True)\n features = renderLayer('features', z, x, y, ntiles, featuresMap, 'png', True)\n base_h = getComposite((hypsorelief, areas, ocean))\n base_l = getComposite((landcoverrelief, ocean))\n composite_h = getComposite((base_h, contours, features))\n composite_l = getComposite((base_l, contours, features))\n saveTiles(z, x, y, ntiles, 'composite_h', composite_h)\n saveTiles(z, x, y, ntiles, 'composite_l', composite_l)\n if SAVE_JPEG_COMPOSITE:\n basename = 'jpeg' + str(JPEG_COMPOSITE_QUALITY)\n saveTiles(z, x, y, ntiles, basename+'_h', composite_h, 'jpg', basename)\n saveTiles(z, x, y, ntiles, basename+'_l', composite_l, 'jpg', basename)\n if SAVE_INTERMEDIATE_TILES:\n saveTiles(z, x, y, ntiles, 'base_h', base_h)\n saveTiles(z, x, y, ntiles, 'base_l', base_l)\n saveTiles(z, x, y, ntiles, 'contours', contours)\n saveTiles(z, x, y, ntiles, 'hypsorelief', hypsorelief)\n saveTiles(z, x, y, ntiles, 'landcoverrelief', landcoverrelief)\n saveTiles(z, x, y, ntiles, 'areas', areas)\n saveTiles(z, x, y, ntiles, 'ocean', ocean)\n saveTiles(z, x, y, ntiles, 'features', features)", "def getTile( self, url, pathname ):\n \n # retry counters\n tries = 1; max_tries = 3\n while tries <= max_tries:\n\n try:\n\n # setup curl object - include ssl certificates\n curl = pycurl.Curl()\n curl.setopt(pycurl.CAINFO, certifi.where())\n curl.setopt(pycurl.URL, url )\n\n # write binary data to file\n fp = open( pathname, \"wb\" )\n curl.setopt(pycurl.WRITEDATA, fp)\n curl.perform()\n\n # close object and file\n curl.close()\n fp.close()\n\n print ( '{}: {} -> {}'. 
format( self._idx, url, pathname ))\n break\n\n except Exception as e:\n\n # increment retry counter - wait for random interval\n print ( 'Download Exception {}: {} -> {}'.format( str( e ), url, pathname ) )\n tries += 1\n time.sleep ( random.randrange( 5 ) )\n\n # delete file if download failed \n if tries > max_tries:\n os.remove( pathname )\n\n return", "def tile(sceneid, tile_x, tile_y, tile_z, bands=None, tilesize=256, **kwargs):\n if not bands:\n raise InvalidBandName(\"bands is required\")\n\n if not isinstance(bands, tuple):\n bands = tuple((bands,))\n\n for band in bands:\n if band not in SENTINEL_BANDS:\n raise InvalidBandName(\"{} is not a valid Sentinel band name\".format(band))\n\n scene_params = _sentinel_parse_scene_id(sceneid)\n sentinel_address = \"{}/{}/measurement\".format(SENTINEL_BUCKET, scene_params[\"key\"])\n\n mercator_tile = mercantile.Tile(x=tile_x, y=tile_y, z=tile_z)\n tile_bounds = mercantile.xy_bounds(mercator_tile)\n\n addresses = [\n \"{}/{}-{}.tiff\".format(sentinel_address, scene_params[\"beam\"].lower(), band)\n for band in bands\n ]\n\n def _s1_tiler(src_path):\n with rasterio.open(src_path) as src_dst:\n with WarpedVRT(\n src_dst,\n src_crs=src_dst.gcps[1],\n src_transform=transform.from_gcps(src_dst.gcps[0]),\n src_nodata=0,\n ) as vrt_dst:\n if not utils.tile_exists(vrt_dst.bounds, tile_z, tile_x, tile_y):\n raise TileOutsideBounds(\n \"Tile {}/{}/{} is outside image bounds\".format(\n tile_z, tile_x, tile_y\n )\n )\n\n return utils._tile_read(vrt_dst, bounds=tile_bounds, tilesize=tilesize)\n\n with futures.ThreadPoolExecutor() as executor:\n data, masks = zip(*list(executor.map(_s1_tiler, addresses)))\n mask = numpy.all(masks, axis=0).astype(numpy.uint8) * 255\n\n return numpy.concatenate(data), mask" ]
[ "0.7046244", "0.6987471", "0.6816313", "0.6353406", "0.6337127", "0.63041425", "0.6291108", "0.6263484", "0.6197", "0.6188584", "0.6176481", "0.6158082", "0.60673577", "0.60509396", "0.60040617", "0.58420604", "0.5838847", "0.58381325", "0.5794229", "0.5779506", "0.57553935", "0.57083213", "0.5698197", "0.56559926", "0.56326807", "0.562034", "0.5591855", "0.5576654", "0.55550414", "0.55212045" ]
0.7838271
0
Save the tiles whose coordinates are in the input DataFrame, defined by columns x, y, and z
def save_tiles(df,output_dir,namefunc = None): if not isinstance(df,pd.core.frame.DataFrame): raise TypeError("df must be a pandas DataFrame!") if any(e not in df.columns for e in ('z','x','y')): raise ValueError("df must have columns x, y, and z") if namefunc is None: def namefunc(x,y,z): return f'{z}_{x}_{y}.png' opath = os.path.abspath(os.path.expanduser(output_dir)) Path(opath).mkdir(parents=True, exist_ok=True) L = df.shape[0] flocs = [''] * L for i,xyz in enumerate(zip(df['x'],df['y'],df['z'])): x,y,z = xyz print(f"({i+1} of {L})...") sleep(0.75) outloc = os.path.join(opath,namefunc(x,y,z)) if save_tile(x,y,z,outloc) == 0: flocs[i] = outloc df = df.assign(file_loc = flocs) return df[df['file_loc'] != '']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_coords(self, coords, output_dir):\n new_rows = []\n for i, (lat, lon) in enumerate(coords):\n row = {\n 'tile_id': i,\n 'lat':lat,\n 'long':lon,\n 'side_length': self.side_len \n }\n\n new_rows.append(row)\n\n coord_df = pd.DataFrame(new_rows)\n coord_df.to_csv(f\"{output_dir}/coordinate_map.csv\", index=False)\n print(\"done saving coordinates!\")", "def saveTiles(z, x, y, ntiles, mapname, image, suffix = 'png', imgtype = None):\n for dx in range(0, ntiles):\n tilex = x*ntiles + dx\n ensureDirExists(getTileDir(mapname, z, tilex))\n for dy in range(0, ntiles): \n tiley = y*ntiles + dy\n offsetx = BORDER_WIDTH + dx*TILE_SIZE\n offsety = BORDER_WIDTH + dy*TILE_SIZE\n view = image.view(offsetx, offsety, TILE_SIZE, TILE_SIZE)\n if imgtype:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix), imgtype)\n else:\n view.save(getTilePath(mapname, z, tilex, tiley, suffix))", "def batch_save_tile_mask(tiles_gdf, label_poly_series, tile_size, region, zone, save_path, channels=3, display=False):\n \n import warnings; warnings.simplefilter('ignore')\n\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n tile_poly = get_specific_tile(idx, tiles_gdf)\n save_tile_mask(label_poly_series, tile_poly, tile['xyz'], tile_size, dataset,\n region, zone, save_path, channels, display)", "def find_tiles(x_index = None, y_index = None):\n db_cursor2 = self.db_connection.cursor()\n\n sql = \"\"\"-- Check for any existing tiles\nselect\n tile_id,\n x_index,\n y_index,\n tile_type_id,\n tile_pathname,\n dataset_id,\n tile_class_id,\n tile_size\nfrom tile_footprint\ninner join tile using(x_index, y_index, tile_type_id)\nwhere (%(x_index)s is null or x_index = %(x_index)s)\n and (%(y_index)s is null or y_index = %(y_index)s)\n and tile_type_id = %(tile_type_id)s\n and dataset_id = %(fc_dataset_id)s\n\n and ctime is not null -- TODO: Remove this after reload\n;\n\"\"\"\n params = {'x_index': x_index,\n 'y_index': y_index,\n 'tile_type_id': tile_type_info['tile_type_id'],\n 'fc_dataset_id': dataset_info['fc_dataset_id']}\n \n log_multiline(logger.debug, db_cursor2.mogrify(sql, params), 'SQL', '\\t')\n db_cursor2.execute(sql, params)\n tile_info = {}\n for record in db_cursor2:\n tile_info_dict = {\n 'x_index': record[1],\n 'y_index': record[2],\n 'tile_type_id': record[3],\n 'tile_pathname': record[4],\n 'dataset_id': record[5],\n 'tile_class_id': record[6],\n 'tile_size': record[7]\n }\n tile_info[record[0]] = tile_info_dict # Keyed by tile_id\n \n log_multiline(logger.debug, tile_info, 'tile_info', '\\t')\n return tile_info", "def to_xyz_tiles(\n self, root: str, tile_size: int, zoom_levels: list, driver=\"GTiff\", **kwargs\n ):\n mName = os.path.normpath(os.path.basename(root))\n\n def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n def tile_window(shape, px):\n \"\"\"Yield (left, upper, width, height).\"\"\"\n nr, nc = shape\n lu = product(range(0, nc, px), range(0, nr, px))\n\n ## create the window\n for l, u in lu:\n h = min(px, nr - u)\n w = min(px, nc - l)\n yield (l, u, w, h)\n\n vrt_fn = None\n prev = 0\n nodata = self.nodata\n obj = self._obj.copy()\n zls = {}\n for zl in zoom_levels:\n diff = zl - prev\n pxzl = tile_size * (2 ** (diff))\n\n # read data from previous zoomlevel\n if vrt_fn is not None:\n obj = xr.open_dataarray(vrt_fn, engine=\"rasterio\").squeeze(\n \"band\", drop=True\n )\n x_dim, y_dim = obj.raster.x_dim, obj.raster.y_dim\n obj = obj.chunk({x_dim: pxzl, y_dim: pxzl})\n dst_res = abs(obj.raster.res[-1]) * (2 ** (diff))\n\n if 
pxzl > min(obj.shape):\n logger.warning(\n f\"Tiles at zoomlevel {zl} smaller than tile_size {tile_size}\"\n )\n\n # Write the raster paths to a text file\n sd = join(root, f\"{zl}\")\n create_folder(sd)\n txt_path = join(sd, \"filelist.txt\")\n file = open(txt_path, \"w\")\n\n for l, u, w, h in tile_window(obj.shape, pxzl):\n col = int(np.ceil(l / pxzl))\n row = int(np.ceil(u / pxzl))\n ssd = join(sd, f\"{col}\")\n\n create_folder(ssd)\n\n # create temp tile\n temp = obj[u : u + h, l : l + w]\n if zl != 0:\n temp = temp.coarsen(\n {x_dim: 2**diff, y_dim: 2**diff}, boundary=\"pad\"\n ).mean()\n temp.raster.set_nodata(nodata)\n\n if driver == \"netcdf4\":\n path = join(ssd, f\"{row}.nc\")\n temp = temp.raster.gdal_compliant()\n temp.to_netcdf(path, engine=\"netcdf4\", **kwargs)\n elif driver in gis_utils.GDAL_EXT_CODE_MAP:\n ext = gis_utils.GDAL_EXT_CODE_MAP.get(driver)\n path = join(ssd, f\"{row}.{ext}\")\n temp.raster.to_raster(path, driver=driver, **kwargs)\n else:\n raise ValueError(f\"Unkown file driver {driver}\")\n\n file.write(f\"{path}\\n\")\n\n del temp\n\n file.close()\n # Create a vrt using GDAL\n vrt_fn = join(root, f\"{mName}_zl{zl}.vrt\")\n gis_utils.create_vrt(vrt_fn, file_list_path=txt_path)\n prev = zl\n zls.update({zl: float(dst_res)})\n del obj\n\n # Write a quick data catalog yaml\n yml = {\n \"crs\": self.crs.to_epsg(),\n \"data_type\": \"RasterDataset\",\n \"driver\": \"raster\",\n \"path\": f\"{mName}_zl{{zoom_level}}.vrt\",\n \"zoom_levels\": zls,\n }\n with open(join(root, f\"{mName}.yml\"), \"w\") as f:\n yaml.dump({mName: yml}, f, default_flow_style=False, sort_keys=False)", "def xyz_from_grid(x,y,z, pnts_out):\n\tx_flt=x.flatten()\n\ty_flt=y.flatten()[::-1]\n\tz_flt=z.flatten()\n\n\tutil.check_output_dir(pnts_out)\n\tfout = open(pnts_out, 'w')\n\tfout.write(\"x,y,z\\n\")\n\n\tprint(\"Writing out %i xyz triples to %s\" %(len(z_flt),pnts_out))\n\tfor i in range(0, len(z_flt)):\n\t\tif not np.isnan(z_flt[i]):\n\t\t\tfout.write(\"%.6f,%.6f,%.2f\\n\" %(x_flt[i], y_flt[i], z_flt[i]))\n\n\tfout.close()", "def savez(d,file):\n np.savez(file,row=d.row,col=d.col,data=d.data,shape=d.shape)", "def shapely_tileset(processed_query,min_ovp = 0,max_ovp = 1,\n n_neg = None,buffer = 0):\n types, xx, yy, qual, tags = [],[],[],[],[]\n z = processed_query['zoom']\n for elem in processed_query['elements']:\n for tile in elem['tiles']:\n qq = tile[1]\n if qq >= min_ovp and qq <= max_ovp:\n x,y,_ = find_tile_coords(tile[0],z)\n xx.append(x)\n yy.append(y)\n qual.append(tile[1])\n tags.append(json.dumps(elem['tags']))\n types.append(elem['type'])\n \n pos_df = pd.DataFrame({\n 'z': z, 'x' : xx, 'y': yy, \n 'entity': types,\n 'overlap': qual,'tags': tags,\n 'placename': processed_query['query_info']['placename']\n }) \\\n .drop_duplicates(subset = ['x','y']) \\\n .sort_values(by = ['x','y'])\n if n_neg is None: n_neg = pos_df.shape[0]\n negt = sample_complement(pos_df['x'],pos_df['y'],n_neg,buffer)\n neg_df = pd.DataFrame({'z': z,'x': negt[0],'y': negt[1]}) \\\n .sort_values(by = ['x','y'])\n return { \n 'positive': add_latlon(pos_df),\n 'negative': add_latlon(neg_df)\n }", "def batch_save_tile_img(tiles_gdf, tif, tile_size, region, zone, save_path, display=False):\n for idx, tile in tqdm(tiles_gdf.iterrows()):\n dataset = tile['dataset']\n save_tile_img(tif, tile['xyz'], dataset, tile_size, region, zone, save_path, display=False)", "def save(self,filepath):\n d = self.X.tocoo(copy=False)\n v = self.col_view.tocoo(copy=False)\n 
np.savez(filepath,row=d.row,col=d.col,data=d.data,shape=d.shape,\n v_row=v.row,v_col=v.col,v_data=v.data,v_shape=v.shape)", "def basic_tileset(geo_dict, zooms, buffer = 0,n_neg = None):\n if not len(geo_dict['elements']):\n raise ValueError(\"The query is empty - cannot continue!\")\n if type(zooms) is int:\n zooms = [zooms]\n if any(z < 2 or z > 19 for z in zooms):\n raise ValueError(\"all zoom levels must be between 2 and 19\")\n \n nodes = atomize_features(geo_dict)\n points_list = [(node['lat'],node['lon']) for node in nodes]\n pos_DFs, neg_DFs = [], []\n\n for zoom in zooms:\n\n zxy = [(zoom,*deg2num(x,y,zoom)) for x,y in points_list]\n pos_df = pd.DataFrame.from_records(zxy,columns = ['z','x','y'])\\\n .drop_duplicates(subset = ['x','y'])\n num_neg = pos_df.shape[0] if n_neg is None else int(n_neg)\n neg_x, neg_y = sample_complement(pos_df['x'],pos_df['y'],num_neg,buffer)\n neg_df = pd.DataFrame({'z': zoom,'x': neg_x,'y': neg_y}).sort_values(by = ['z','x','y'])\n pos_DFs.append(pos_df)\n neg_DFs.append(neg_df)\n \n out_pos = add_latlon(pd.concat(pos_DFs,axis = 0))\n out_neg = add_latlon(pd.concat(neg_DFs,axis = 0))\n\n common_row = pd.merge(out_pos,out_neg,on = ['z','x','y']).shape[0]\n if common_row > 0:\n raise RuntimeError(f\"Somehow there are {common_row} common rows!\")\n return {'positive': out_pos, 'negative': out_neg }", "def save_tile_img(tif, xyz, dataset, tile_size, region, zone, save_path, display=False):\n \n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tile, mask = rt_main.tile(tif, x,y,z, tilesize=tile_size)\n if display: \n plt.imshow(np.moveaxis(tile,0,2))\n plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}.png',np.moveaxis(tile,0,2), check_contrast=False)", "def read_cells(filename):\n\n import pandas as pd\n\n min_x = -1.77\n min_y = 174.0\n min_z = -183.0\n\n size_x = 0.972\n size_y = 3.69\n size_z = 0.976\n\n frame = pd.read_csv(filename, skiprows=3)\n # frame = pd.read_csv(filename)\n\n# print(\"X range:\",min(frame['Position X']), max(frame['Position X']), \"dynamic range:\", max(frame['Position X'])-min(frame['Position X']))\n# print(\"Y range:\",min(frame['Position Y']), max(frame['Position Y']), \"dynamic range:\", max(frame['Position Y'])-min(frame['Position Y']))\n# print(\"Z range:\",min(frame['Position Z']), max(frame['Position Z']), \"dynamic range:\", max(frame['Position Z'])-min(frame['Position Z']))\n#\n # will need to check IMARIS for correspondence between exported um files and pixel values\n # X and Z on csv files are my X and Y on resliced images\n\n frame[\"Pixel X\"] = (frame['Position X'] - min_x) / size_x\n frame[\"Pixel X\"] = frame[\"Pixel X\"].round().astype(int)\n\n frame[\"Pixel Y\"] = (frame['Position Z'] - min_z) / size_z\n frame[\"Pixel Y\"] = frame[\"Pixel Y\"].round().astype(int)\n\n frame[\"Pixel Z\"] = (frame['Position Y'] - min_y) / size_y\n frame[\"Pixel Z\"] = frame[\"Pixel Z\"].round().astype(int)\n\n print(\"X pixel range:\", min(frame[\"Pixel X\"]), max(\n frame[\"Pixel X\"]), \"dynamic range:\", max(frame[\"Pixel X\"]) - min(frame[\"Pixel X\"]))\n print(\"Y pixel range:\", min(frame[\"Pixel Y\"]), max(\n frame[\"Pixel Y\"]), \"dynamic range:\", max(frame[\"Pixel Y\"]) - min(frame[\"Pixel Y\"]))\n print(\"Z pixel range:\", min(frame[\"Pixel Z\"]), max(\n frame[\"Pixel Z\"]), \"dynamic range:\", max(frame[\"Pixel Z\"]) - min(frame[\"Pixel Z\"]))\n# print(frame)\n frame.to_csv(\"frame.csv\")\n return frame", "def save_tile_mask(label_poly_series, tile_poly, xyz, tile_size, dataset, region, zone, 
save_path, channels = 3, display=False):\n \n \n\n prefix = f'{region}{zone}{dataset}_'\n x,y,z = xyz\n tfm = from_bounds(*tile_poly.bounds, tile_size, tile_size) \n \n cropped_polys = [poly for poly in label_poly_series if poly.intersects(tile_poly)]\n cropped_polys_gdf = gpd.GeoDataFrame(geometry=cropped_polys, crs={'init': 'epsg:4326'})\n \n fbc_mask = burn_mask(cropped_polys_gdf, tfm, tile_size, channels)\n # fbc_mask = sol.vector.mask.df_to_px_mask(df=cropped_polys_gdf,\n # channels=['footprint', 'boundary', 'contact'],\n # affine_obj=tfm, shape=(tile_size,tile_size),\n # boundary_width=5, boundary_type='inner', contact_spacing=5, meters=True)\n \n if display: \n plt.imshow(fbc_mask); plt.show()\n \n skimage.io.imsave(f'{save_path}/{prefix}{z}_{x}_{y}_mask.png',fbc_mask, check_contrast=False)", "def save_tile(x,y,z,fpath):\n UA = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/77.0\"\n tile_url = f\"https://{random.choice('abc')}.tile.openstreetmap.org/{z}/{x}/{y}.png\"\n # cmd = f\"wget --user-agent='please download' -O {fpath} {url}\"\n if os.path.exists(fpath):\n print(f\"Already have tile {fpath}!\")\n return 0\n if os.path.isdir(fpath):\n raise ValueError(f\"requested path {fpath} exists and is a directory!\")\n try:\n res = rq.get(\n url=tile_url,\n headers={'User-Agent': UA}\n )\n status = res.status_code\n if status == 200:\n with open(fpath,'wb') as of:\n of.write(res.content)\n return 0\n else:\n print(f\"Error: response {status} from server:\\n{res.reason}\")\n return status\n except Exception as e:\n print(f\"Error getting tile: {e}\")\n return 1", "def write_towhee_coord(self, filename):\n with open(filename, 'w') as f:\n df = self.contents[['X', 'Y', 'Z']].copy()\n np.savetxt(f, df.values, fmt=\" %20.15f\"*3)", "def tile(self, z, x, y):\n logger.debug(_(\"Render tile %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,tilesize,[z])\n return self.render(mercator.tile_bbox((z, x, y)))", "def positions(self, tileID, numSamples):", "def grid(self, z, x, y, fields, layer):\n logger.debug(_(\"Render grid %s\") % ((z, x, y),))\n mercator = GlobalMercator(False,self.tilesize,[z])\n return self.render_grid(mercator.tile_bbox((z, x, y)), fields, layer)", "def save_geotiff(df, data_col, crs, x_col='x', y_col='y', time_col=None, nfiles='many', export_path='geotiff.tif', grid_res=None):\n\n ### create the xy coordinates\n if time_col is None:\n xy1 = df[[x_col, y_col]]\n else:\n time = df[time_col].sort_values().unique()\n xy1 = df.loc[df[time_col] == time[0], [x_col, y_col]]\n if any(xy1.duplicated()):\n raise ValueError('x and y coordinates are not unique!')\n\n ### Determine grid res\n if grid_res is None:\n res_df1 = (xy1.loc[0] - xy1).abs()\n res_df2 = res_df1.replace(0, nan).min()\n x_res = res_df2[x_col]\n y_res = res_df2[y_col]\n elif isinstance(grid_res, int):\n x_res = y_res = grid_res\n else:\n raise ValueError('grid_res must either be None or an integer.')\n\n ### Make the affline transformation for Rasterio\n trans2 = transform.from_origin(xy1[x_col].min() - x_res/2, xy1[y_col].max() + y_res/2, x_res, y_res)\n\n ### Make the rasters\n if time_col is None:\n z = df.set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset = ras_open(export_path, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=1, dtype=df[data_col].dtype, crs=convert_crs(crs, pass_str=True), transform=trans2)\n new_dataset.write(z, 1)\n new_dataset.close()\n else:\n if nfiles == 'one':\n new_dataset = 
ras_open(export_path, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=len(time), dtype=df[data_col].dtype, crs=convert_crs(crs), transform=trans2)\n for i in range(1, len(time)+1):\n z = df.loc[df[time_col] == time[i - 1]].set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset.write(z, i)\n new_dataset.close()\n elif nfiles == 'many':\n file1 = path.splitext(export_path)[0]\n for i in time:\n str_date = to_datetime(i).strftime('%Y-%m-%d_%H')\n file2 = file1 + '_' + str_date + '.tif'\n z = df.loc[df[time_col] == i].set_index([y_col, x_col])[data_col].unstack().values[::-1]\n new_dataset = ras_open(file2, 'w', driver='GTiff', height=len(xy1[y_col].unique()), width=len(xy1[x_col].unique()), count=1, dtype=df[data_col].dtype, crs=convert_crs(crs), transform=trans2)\n new_dataset.write(z, 1)\n new_dataset.close()", "def write_base_tile(self, tx, ty, tz, xyzzy):\n\n data_bands = range(1, self.data_bands_count+1)\n data = self.out_ds.ReadRaster(xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize,\n xyzzy.wxsize, xyzzy.wysize, band_list=data_bands)\n\n image_format = self.get_base_tile_format(tx, ty, tz, xyzzy)\n\n if image_format is None:\n return\n else:\n num_bands = self.get_num_bands(image_format)\n\n if self.verbose:\n print \"\\tReadRaster Extent: \", (xyzzy.rx, xyzzy.ry, xyzzy.rxsize, xyzzy.rysize),\n print 'z =',tz,' ; x =',tx,' ; y =',ty, (xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize)\n\n dstile = self.mem_drv.Create('', self.tile_size, self.tile_size, num_bands)\n\n path = self.get_full_path(tx, ty, tz, format_extension[image_format])\n\n # Query is in 'nearest neighbour' but can be bigger in then the tilesize\n # We scale down the query to the tilesize by supplied algorithm.\n if self.tile_size == xyzzy.querysize:\n self.fill_init_dest(dstile)\n\n # Use the ReadRaster result directly in tiles ('nearest neighbour' query)\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dstile.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha, band_list=[num_bands])\n\n gdal_write(path, dstile, image_format)\n\n # Note: For source drivers based on WaveLet compression (JPEG2000, ECW, MrSID)\n # the ReadRaster function returns high-quality raster (not ugly nearest neighbour)\n # TODO: Use directly 'near' for WaveLet files\n else:\n # Big ReadRaster query in memory scaled to the tilesize - all but 'near' algo\n dsquery = self.mem_drv.Create('', xyzzy.querysize, xyzzy.querysize, num_bands)\n self.fill_init_dest(dsquery)\n\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, data, band_list=data_bands)\n if image_format == \"PNG\":\n dsquery.WriteRaster(xyzzy.wx, xyzzy.wy, xyzzy.wxsize, xyzzy.wysize, self.alpha,band_list=[num_bands])\n\n self.resampler(path, dsquery, dstile, image_format)\n\n self.alpha = None", "def store(self, dataFrame, filename):\n columns = [\"longitude\", \"latitude\", \"elevation\", \"noise_mean_day\", \"noise_mean_evening\", \"noise_mean_night\", \"noise_weighted_24h\", \"noise_mean_24h\"]\n self.store_in_csv(dataFrame, filename=filename, columns=columns)\n\n columns.insert(0, \"id\") # pandas adds a id in the front\n self.store_in_database(filename=filename, columns=columns)", "def query_image_tile(self, coord):", "def save_tile_data(tile_summary):\n\n time = Time()\n\n csv = summary_title(tile_summary) + \"\\n\" + summary_stats(tile_summary)\n\n csv += \"\\n\\n\\nTile Num,Row,Column,Tissue %,Tissue 
Quantity,Col Start,Row Start,Col End,Row End,Col Size,Row Size,\" + \\\n \"Color Factor,S and V Factor,Quantity Factor,Score\\n\"\n\n for t in tile_summary.tiles:\n line = \"%d,%d,%d,%4.2f,%s,%d,%d,%d,%d,%d,%d,%4.0f,%4.2f,%4.2f,%0.4f\\n\" % (\n t.tile_num, t.r, t.c, t.tissue_percentage, t.tissue_quantity().name, t.c_s, t.r_s, t.c_e, t.r_e, t.c_e - t.c_s,\n t.r_e - t.r_s,t.color_factor,\n t.s_and_v_factor, t.quantity_factor, t.score)\n csv += line\n\n data_path = slide.get_tile_data_path(tile_summary.slide_name)\n csv_file = open(data_path, \"w\")\n csv_file.write(csv)\n csv_file.close()\n\n print(\"%-20s | Time: %-14s Name: %s\" % (\"Save Tile Data\", str(time.elapsed()), data_path))", "def write_datastore(df, data_store, columns=None):\n if columns is None:\n columns = df.columns\n if 'hemisphere' in columns:\n columns.remove('hemisphere')\n df = df.sort_index()\n df[columns].to_pickle(data_store)\n log.info('saved data store: {}'.format(data_store))", "def output_grid_information():\n # translate = [-74.26, 40.50]\n # scale = [0.02, 0.02]\n # step = 1\n\n translate = [0, 0]\n scale = [1, 1]\n step = 0.02\n\n lon_limits = [(-74.26 - translate[0]) / scale[0], (-73.76 - translate[0]) / scale[0]]\n lat_limits = [(40.48 - translate[1]) / scale[1], (40.94 - translate[1]) / scale[1]]\n\n lons = np.arange(lon_limits[0], lon_limits[1] - step, step)\n lats = np.arange(lat_limits[0], lat_limits[1] - step, step)\n\n all_json = {\n \"type\": \"FeatureCollection\"\n }\n\n gr_id = 0\n grid_df = pd.DataFrame(columns=['gr_id', 'c_lat', 'c_lon', 's_lon', 'w_lat', 'n_lon', 'e_lat'])\n features = []\n\n for lat in lats:\n for lon in lons:\n w_lon = lon\n e_lon = lon + step\n s_lat = lat\n n_lat = lat + step\n\n c_lon = lon + step / 2\n c_lat = lat + step / 2\n\n grid_df = grid_df.append(pd.DataFrame({\"gr_id\": [gr_id],\n \"c_lon\": [c_lon], \"c_lat\": [c_lat],\n \"w_lon\": [w_lon], \"s_lat\": [s_lat],\n \"e_lon\": [e_lon], \"n_lat\": [n_lat]}))\n\n coor = [[[s_lat, w_lon], [n_lat, w_lon], [n_lat, e_lon],\n [s_lat, e_lon], [s_lat, w_lon]]]\n\n feature = {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": coor\n },\n \"properties\": {\n \"id\": str(gr_id)\n }\n }\n\n features.append(feature)\n\n gr_id += 1\n\n all_json['features'] = features\n\n with open(BaseDir + '/grid.geojson', 'w') as f:\n json.dump(all_json, f)\n\n grid_df.to_csv(BaseDir + '/grid_locs.csv', index=False)", "def create_tiles(self, zoom):\n # Compute the tile x-y-z index range for the rasterlayer for this zoomlevel\n bbox = self.rasterlayer.extent()\n indexrange = tiler.tile_index_range(bbox, zoom)\n\n # Compute scale of tiles for this zoomlevel\n tilescale = tiler.tile_scale(zoom)\n\n # Count the number of tiles that are required to cover the raster at this zoomlevel\n nr_of_tiles = (indexrange[2] - indexrange[0] + 1) * (indexrange[3] - indexrange[1] + 1)\n\n # Create destination raster file\n self.log('Snapping dataset to zoom level {0}'.format(zoom))\n\n bounds = tiler.tile_bounds(indexrange[0], indexrange[1], zoom)\n sizex = (indexrange[2] - indexrange[0] + 1) * self.tilesize\n sizey = (indexrange[3] - indexrange[1] + 1) * self.tilesize\n dest_file = os.path.join(self.tmpdir, 'djangowarpedraster' + str(zoom) + '.tif')\n\n snapped_dataset = self.dataset.warp({\n 'name': dest_file,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'width': sizex,\n 'height': sizey,\n })\n\n self.log('Creating {0} tiles for zoom {1}.'.format(nr_of_tiles, zoom))\n\n counter = 0\n for tilex in 
range(indexrange[0], indexrange[2] + 1):\n for tiley in range(indexrange[1], indexrange[3] + 1):\n # Log progress\n counter += 1\n if counter % 250 == 0:\n self.log('{0} tiles created at zoom {1}'.format(counter, zoom))\n\n # Calculate raster tile origin\n bounds = tiler.tile_bounds(tilex, tiley, zoom)\n\n # Construct band data arrays\n pixeloffset = (\n (tilex - indexrange[0]) * self.tilesize,\n (tiley - indexrange[1]) * self.tilesize\n )\n\n band_data = [\n {\n 'data': band.data(offset=pixeloffset, size=(self.tilesize, self.tilesize)),\n 'nodata_value': band.nodata_value\n } for band in snapped_dataset.bands\n ]\n\n # Add tile data to histogram\n if zoom == self.max_zoom:\n self.push_histogram(band_data)\n\n # Warp source raster into this tile (in memory)\n dest = GDALRaster({\n 'width': self.tilesize,\n 'height': self.tilesize,\n 'origin': [bounds[0], bounds[3]],\n 'scale': [tilescale, -tilescale],\n 'srid': WEB_MERCATOR_SRID,\n 'datatype': snapped_dataset.bands[0].datatype(),\n 'bands': band_data,\n })\n\n # Store tile\n RasterTile.objects.create(\n rast=dest,\n rasterlayer=self.rasterlayer,\n tilex=tilex,\n tiley=tiley,\n tilez=zoom\n )\n\n # Store histogram data\n if zoom == self.max_zoom:\n bandmetas = RasterLayerBandMetadata.objects.filter(rasterlayer=self.rasterlayer)\n for bandmeta in bandmetas:\n bandmeta.hist_values = self.hist_values[bandmeta.band].tolist()\n bandmeta.save()\n\n # Remove snapped dataset\n self.log('Removing snapped dataset.', zoom=zoom)\n snapped_dataset = None\n os.remove(dest_file)", "def find_tile(loc, dir):\n #returns the integer tile number\n \n # should be looking in the directory with supergrid data (probably \"fix\" directory)\n filename_pattern = '*grid.tile*.nc'\n \n #find all supergrid files in the directory\n grid_fnames = []\n for f_name in os.listdir(dir):\n if fnmatch.fnmatch(f_name, filename_pattern):\n grid_fnames.append(f_name)\n if not grid_fnames:\n message = 'No filenames matching the pattern {0} found in {1}'.format(filename_pattern,dir)\n logging.critical(message)\n raise Exception(message)\n \n #non-polar tiles can use traditional 2D point-in-polygon methods; if a point is not in a non-polar tile,\n #it is in one of the polar tiles, and the tile can be distinguished by the sign of latitude of the point\n polar_tile_filenames = []\n found_tile = False\n for f_name in grid_fnames:\n if not found_tile:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n longitude = np.array(nc_file['x']).swapaxes(0,1)\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n adj_long = False \n #look for reversal of longitude; if found, adjust longitude so that 0-360 transition doesn't exist\n for row in longitude:\n if not (np.all(np.diff(row) >= 0) or np.all(np.diff(row) <= 0)):\n adj_long = True\n if adj_long:\n longitude[longitude < 180] += 360\n \n #get lon/lat pairs for all edges of the tiles\n \n edge_1_lon = longitude[0,:]\n edge_1_lat = latitude[0,:]\n edge_1 = list(zip(edge_1_lon, edge_1_lat))\n \n edge_2_lon = longitude[:,-1]\n edge_2_lat = latitude[:,-1]\n edge_2 = list(zip(edge_2_lon, edge_2_lat))\n \n edge_3_lon = longitude[-1,:]\n edge_3_lat = latitude[-1,:]\n edge_3 = list(zip(edge_3_lon, edge_3_lat))\n edge_3.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n edge_4_lon = longitude[:,0]\n edge_4_lat = latitude[:,0]\n edge_4 = list(zip(edge_4_lon, edge_4_lat))\n edge_4.reverse() #need to reverse the direction of this edge to form a regular polygon\n \n polygon_points = edge_1 + edge_2 + 
edge_3 + edge_4\n \n tile_polygon = Polygon(polygon_points)\n tile_polygon = tile_polygon.simplify(0)\n \n if tile_polygon.is_valid: #this will be True unless the tile is a polar tile, which will not form a regular polygon in Cartesian space using lon/lat data\n temp_loc = copy.deepcopy(loc)\n if adj_long:\n if loc[0] < 180:\n temp_loc[0] += 360\n loc_point = Point(temp_loc)\n if tile_polygon.contains(loc_point):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n else:\n polar_tile_filenames.append(f_name)\n \n #if the tile hasn't been found by this point, it must be contained within a polar tile\n for f_name in polar_tile_filenames:\n nc_file = Dataset('{0}/{1}'.format(dir,f_name))\n latitude = np.array(nc_file['y']).swapaxes(0,1)\n nc_file.close()\n \n #if the sign of the mean latitude of the tile is the same as that of the point, the tile has been found\n if np.sign(np.mean(latitude)) == np.sign(loc[1]):\n found_tile = True\n return f_name.split('tile')[1].split('.nc')[0] \n return -1", "def save_GRID( self , filename ):\n self._fwrite_GRID( filename )", "def save_tiles(self, tiles, output_dir):\n save_path = f\"{output_dir}/tiles.npy\"\n tiles_np = np.asarray(tiles)\n np.save(save_path, tiles_np)\n print(\"done saving .npy!\")" ]
[ "0.6151705", "0.60504067", "0.5954003", "0.576589", "0.5702577", "0.5660171", "0.56481516", "0.5625405", "0.55865616", "0.5577034", "0.5532618", "0.55072516", "0.5442371", "0.542948", "0.5415182", "0.5410632", "0.54070646", "0.53605324", "0.5332602", "0.5332529", "0.5324701", "0.5306444", "0.5279124", "0.521914", "0.51857334", "0.51820284", "0.51386905", "0.51168895", "0.5113415", "0.51119965" ]
0.7190305
0
add latitude/longitude values to a dataframe
def add_latlon(df):
    LLs = [num2deg(x,y,z) for x,y,z in zip(df['x'],df['y'],df['z'])]
    LLdf = pd.DataFrame.from_records(LLs,columns = ['latitude','longitude'])
    return pd.concat([df.reset_index(drop=True),LLdf],axis = 1)
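A minimal usage sketch (not part of the dataset record above). It assumes num2deg is the standard slippy-map tile-to-lat/lon conversion used by the surrounding tile utilities; a stand-in is included so the sketch runs on its own, and the tile indices are placeholder values.

import math
import pandas as pd

def num2deg(x, y, z):
    # stand-in for the num2deg helper assumed by add_latlon()
    n = 2.0 ** z
    lon_deg = x / n * 360.0 - 180.0
    lat_deg = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * y / n))))
    return lat_deg, lon_deg

tiles = pd.DataFrame({"z": [15, 15], "x": [17599, 17600], "y": [10756, 10756]})
print(add_latlon(tiles))  # original columns plus latitude/longitude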
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_demo_location_history() -> geopandas.GeoDataFrame:\n np.random.seed(123)\n\n time = pd.date_range(start=datetime.fromtimestamp(1624241116), end=datetime.now(), freq=\"1min\").values\n\n center_point = (-36.875990410695394, 174.76398830024274)\n lat = np.random.normal(loc=center_point[0], scale=0.01, size=len(time))\n lon = np.random.normal(loc=center_point[1], scale=0.01, size=len(time))\n\n geometry = [Point(lon, lat) for lon, lat in zip(lon, lat)]\n return geopandas.GeoDataFrame(pd.DataFrame(dict(time=time, lat=lat, lon=lon)), geometry=geometry)", "def convert_to_geopandas(df):\n df['geometry'] = [Point(xy) for xy in zip(df.latitude, df.longitude)]\n crs = {'init': 'epsg:4326'}\n df = gpd.GeoDataFrame(df, crs=crs, geometry=df['geometry'])\n\n return df", "def add_ll(project_data):\n assert isinstance(project_data, pd.DataFrame)\n \n search = uszipcode.SearchEngine() #Set up SearchEngine() function from uszipcode\n location_list = list(project_data['Location']) #Get list of each report\n longitude_list = [] #Create list to store longitude\n latitude_list = [] #Create list to store latitude\n zip_list = [] #Create list to store zip code\n\n #Iterate through every location and update longitude, latitude, zip code lists\n for location in location_list:\n lo = (re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", location))[0] #Extract longitude from Location string\n la = (re.findall(r\"[-+]?\\d*\\.\\d+|\\d+\", location))[1] #Extract latitude from Location string\n zp = search.by_coordinates(float(la), float(lo), returns=1)[0].zipcode #Get zip code for coordinate\n longitude_list.append(lo)\n latitude_list.append(la)\n zip_list.append(zp)\n \n #Add the Longitude, Latitude, Zip Code data in new columns in dataframe\n project_data.insert(len(project_data.columns)-1, \"Longitude\", longitude_list, True)\n project_data.insert(len(project_data.columns)-1, \"Latitude\", latitude_list, True)\n project_data.insert(len(project_data.columns)-1, \"Zip\", zip_list, True)\n \n return project_data", "def createCoordTuples(data):\n data['xy'] = None\n for i, row in data.iterrows():\n data['xy'][i] = [np.round(row['geometry'].x, decimals=5), np.round(row['geometry'].y, decimals=5)]\n return data", "def _add_coordinate_data(self, df, geom_col):\n x = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='x',\n axis=1)\n\n y = df.apply(self._get_coords,\n geom_col=geom_col,\n coord_type='y',\n axis=1)\n return x, y", "def map_coord_transformer(df, proj_string, lat_column_name, long_column_name):\n logging.info('Generating coordinate reference systems... 
')\n #generate coordinate reference system objects for details of how this works \n from_crs = pyproj.CRS.from_string(proj_string)\n from_proj = pyproj.Proj(from_crs)\n gps_proj = pyproj.Proj('epsg:4326')\n original_coordinates_to_latlong_obj = pyproj.Transformer.from_proj(from_proj, gps_proj)\n logging.info('Defining transformation functions...')\n def original_coordinates_to_latlong(adf):\n (lat,long) = original_coordinates_to_latlong_obj.transform(adf[lat_column_name], adf[long_column_name])\n return lat, long\n \n #apply converter to generate series\n logging.info('Converting coordinates...')\n latlong_series = df.apply(original_coordinates_to_latlong, axis=1)\n \n #get calculated values and put back into df.\n logging.info('Splitting series...')\n lat_series = latlong_series.copy().apply(lambda x: x[0])\n long_series = latlong_series.copy().apply(lambda x: x[1])\n \n #return the values as \n logging.info('Preparing to return calc_lat and calc_long...')\n df.loc[:,'calc_lat'] = lat_series.copy()\n df.loc[:,'calc_long'] = long_series.copy()\n \n return df", "def find_center_points(df, lat1, long1, lat2, long2):\n df['center_latitude'] = (df[lat1].values + df[long2].values) / 2\n df['center_longitude'] = (df[long1].values + df[lat2].values) / 2\n\n return df", "def add_odometer(df, lat, lon):\n import pandas as pd\n import math\n df_use = df.loc[:, [(lat), (lon)]]\n df_use['prev_LAT'] = df_use.loc[:, (lat)].shift(periods=1)\n df_use['prev_LON'] = df_use.loc[:, (lon)].shift(periods=1)\n df_use['distance2'] = df_use.apply(lambda row: haversine(row['prev_LAT'], row['prev_LON'], row[(lat)], row[(lon)]),\n axis=1)\n df_use = df_use.reset_index(drop=True)\n df_use.loc[:, 'distance'] = df_use.apply(lambda x: nanthing(x.distance2), axis=1)\n df_use['prev_dist'] = df_use.loc[:, 'distance'].shift(periods=1)\n df_use['odometer'] = df_use['distance'].cumsum()\n df_use['prevod'] = df_use.loc[:, 'odometer'].shift(periods=1)\n df_use['dif'] = df_use.apply(lambda x: x.odometer - x.prevod, axis=1)\n df_use['dif'] = df_use.apply(lambda x: nanthing(x.dif), axis=1)\n return (pd.merge(df, df_use.loc[:, [(lat), (lon), 'odometer', 'distance']], on=[(lat), (lon)]))", "def geolocate_address(self):\n self.geolocator = Nominatim(user_agent=\"fundaft\")\n\n # If latitude / longitude are missing, try to geocode them on the basis\n # of the address \n self.coords = [self.get_coords(address) if np.isnan(lat)\n else (lat, lon) for address, lat, lon in\n zip(self.df_ads['property_title'], \n self.df_ads['latitude'], \n self.df_ads['longitude'])]\n \n df = pd.DataFrame(self.coords, columns=['latitude', 'longitude'])\n \n # If new coordinates are not in Dublin, change to na again\n df = self.is_in_dublin(df)\n\n self.df_ads[[\"latitude\",\"longitude\"]] = df", "def add_loc_cols(df):\r\n\r\n\tdf['STATE'] = [int(i[1:3]) for i in df.gisjoin]\r\n\tdf['COUNTY'] = [int(i[4:7]) for i in df.gisjoin]\r\n\tdf['TRACT'] = [int(i[7:-4]) for i in df.gisjoin]\r\n\tdf['BLOCK'] = [int(i[-4:]) for i in df.gisjoin]\r\n\r\n\tif df.STATE[0] > 9:\r\n\t\traise Exception(\"Warning! 
Code might be incorrect for states with fips code > 9\")\r\n\r\n\treturn df", "def add_loc_ocean2df(df=None, LatVar='lat', LonVar='lon'):\n from geopandas.tools import sjoin\n # Get the shapes for the ocean\n featurecla = 'ocean'\n group = get_shapes4oceans(rtn_group=True, featurecla=featurecla)\n # Turn the dataframe into a geopandas dataframe\n gdf = geopandas.GeoDataFrame(\n df, geometry=geopandas.points_from_xy(df[LonVar], df[LatVar]))\n # Work out if any of the points are within the polygons\n pointInPolys = sjoin(gdf, group, how='left')\n # Check how many were assigned to a region\n Nnew = float(pointInPolys['name'].dropna().shape[0])\n N = float(df.shape[0])\n if N != Nnew:\n pstr = 'WARNING: Only {:.2f}% assigned ({} of {})'\n print(pstr.format((Nnew/N)*100, int(Nnew), int(N)))\n # Add the ocean assingnment back into the orginal dataframe\n df[featurecla] = pointInPolys['name'].values\n return df", "def wgs84_to_mercator(df, lon, lat):\n k = 6378137\n df[\"x\"] = df[lon] * (k * np.pi/180.0)\n df[\"y\"] = np.log(np.tan((90 + df[lat]) * np.pi/360.0)) * k\n return df", "def _point_in_mbr(self, df):\n if df.empty:\n return df\n df = df[(df[\"lat\"] >= self._min_lat) &\n (df[\"lat\"] <= self._max_lat) &\n (df[\"lon\"] >= self._min_lon) &\n (df[\"lon\"] <= self._max_lon)\n ]\n return df", "def _prepare_geocode_result(results):\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in iteritems(results):\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n if address is None:\n address = np.nan\n\n d['geometry'].append(p)\n d['address'].append(address)\n index.append(i)\n\n df = gpd.GeoDataFrame(d, index=index)\n df.crs = from_epsg(4326)\n\n return df", "def get_time_series_at_location(data, lat, lon, feature):\n\n ts = data.sel(lat=lat, lon=lon, method='nearest', drop=True).to_series()\n index = ts.index.get_level_values('time')\n values = ts.values\n\n return pd.DataFrame({'Date': index.values, feature: values})", "def build_turbine_loc(turbine_x, turbine_y):\n turbineLoc = pd.DataFrame({'x': turbine_x, 'y': turbine_y})\n return turbineLoc", "def add_shortest_route(df):\n\n df['gmaps_dist'] = df.apply(lambda row: gmaps.getTotDist((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)\n df['gmaps_dur'] = df.apply(lambda row: gmaps.getTotDur((row['pick_lon'], row['pick_lat']), (row['drop_lon'], row['drop_lat'])), axis=1)", "def create_pseudo_epsg4326_coordinates(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['lat'] = 180*(pi/2 - np.arccos(self.df_attributes['coord_z']))/pi\n self.df_attributes['lon'] = 180*np.arctan2(self.df_attributes['coord_y'], self.df_attributes['coord_x'])/pi", "def build_geoseries(self, dataframe):\n geo_list = []\n with click.progressbar(dataframe.iterrows(), label='Pulling site plans and geographic title data', length=len(dataframe)) as d:\n for index, row in d:\n geo_list.append(self.map_property(row['linc']))\n\n geo_series = gpd.GeoSeries([Point(mark) for mark in geo_list], index=dataframe.index)\n\n return geo_series", "def map_points(df, lat_col='latitude', lon_col='longitude', zoom_start=11, \\\n plot_points=False, pt_radius=15, \\\n draw_heatmap=False, heat_map_weights_col=None, \\\n heat_map_weights_normalize=True, heat_map_radius=15):\n\n ## center map in the middle of points center in\n middle_lat = df[lat_col].median()\n middle_lon = df[lon_col].median()\n\n curr_map 
= folium.Map(location=[middle_lat, middle_lon],\n zoom_start=zoom_start)\n\n # add points to map\n if plot_points:\n for _, row in df.iterrows():\n folium.CircleMarker([row[lat_col], row[lon_col]],\n radius=pt_radius,\n popup=row['name'],\n fill_color=\"#3db7e4\", # divvy color\n ).add_to(curr_map)\n\n # add heatmap\n if draw_heatmap:\n # convert to (n, 2) or (n, 3) matrix format\n if heat_map_weights_col is None:\n cols_to_pull = [lat_col, lon_col]\n else:\n # if we have to normalize\n if heat_map_weights_normalize:\n df[heat_map_weights_col] = \\\n df[heat_map_weights_col] / df[heat_map_weights_col].sum()\n\n cols_to_pull = [lat_col, lon_col, heat_map_weights_col]\n\n stations = df[cols_to_pull].as_matrix()\n curr_map.add_children(plugins.HeatMap(stations, radius=heat_map_radius))\n\n return curr_map", "def split_df(data: pd.DataFrame, start: int, stop: int) -> pd.DataFrame:\n df = data.iloc[start:stop, :].copy()\n df['coord'] = df.apply(lambda x: f\"{x['stopLat']}, {x['stopLon']}\", axis=1)\n return df.sort_values('coord')", "def insert_row(self, row_value, index):\n row = pd.DataFrame(row_value, columns=['lat', 'long', 'alt', 'descr'])\n self.df = pd.concat([self.df.iloc[:index], row, self.df.iloc[index:]]).reset_index(drop=True)", "def _add_latlon(ds, n=50):\n\n nx = ncols(ds)\n ny = nrows(ds)\n src_crs = get_crs(ds)\n dst_crs = CRS(init='epsg:4326')\n idx_x = np.linspace(0, nx - 1, n, dtype=int)\n idx_y = np.linspace(0, ny - 1, n, dtype=int)\n xs = ds.x[idx_x]\n ys = ds.y[idx_y]\n xgrid, ygrid = np.meshgrid(xs, ys)\n lon, lat = rasterio.warp.transform(src_crs, dst_crs, xgrid.flatten(),\n ygrid.flatten())\n lon_sparse = np.empty((ny, nx))\n lat_sparse = np.empty((ny, nx))\n lon_sparse[:] = np.nan\n lat_sparse[:] = np.nan\n # idx_y needs to be a column vector\n lon_sparse[idx_y[:, None], idx_x] = np.array(lon).reshape((n, n))\n lat_sparse[idx_y[:, None], idx_x] = np.array(lat).reshape((n, n))\n ds.coords['lat'] = (('y', 'x'), lat_sparse)\n ds.coords['lon'] = (('y', 'x'), lon_sparse)", "def distance_coord(df):\n temp_list_distance=[]\n list_distance=[]\n for i in range(len(df)-1):\n coord1 = (df['lat'][i], df['lon'][i])\n coord2 = (df['lat'][i+1], df['lon'][i+1])\n dist = geopy.distance.geodesic(coord1, coord2).km\n temp_list_distance.append(dist)\n list_distance.append(sum(temp_list_distance)) \n return(list_distance)", "def geocode(df, col):\r\n pass", "def _add_location_id(df: pd.DataFrame):\n if CommonFields.LOCATION_ID in df.columns:\n raise ValueError(\"location_id already in DataFrame\")\n df[CommonFields.LOCATION_ID] = df[CommonFields.FIPS].apply(pipeline.fips_to_location_id)", "def insert_df_xy(\n df: DataFrame,\n name: str,\n ws: str = \"memory\",\n spatial_reference: int = 3857) -> None:\n fields = _df_to_fields(df, 2)\n rows = df.collect()\n insert_rows_xy(rows, name, fields, ws, spatial_reference)", "def lat_lng(row):\r\n lat = row[\"latitude\"]\r\n lng = row[\"longitude\"]\r\n n = int(lat/GRANULARITY)\r\n nlat_start = n * GRANULARITY\r\n nlat_end = nlat_start + GRANULARITY\r\n nlg=int(lng/GRANULARITY)\r\n nlng_start = nlg * GRANULARITY\r\n nlng_end = nlng_start + GRANULARITY\r\n latlng=[(nlat_start,nlng_start), (nlat_start,nlng_end), (nlat_end,nlng_end), (nlat_end,nlng_start)]\r\n return latlng", "def build_polling_location_txt(self):\n self.base_df['address_line'] = self.base_df.apply(\n lambda row: self.get_address_line(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['directions'] = 
self.base_df.apply(\n lambda row: self.get_directions(), axis=1)\n #\n self.base_df['hours'] = self.base_df.apply(\n lambda row: self.get_hours(row['index'],row['start_time'], row['end_time']), axis=1)\n\n self.base_df['photo_uri'] = self.base_df.apply(\n lambda row: self.get_photo_uri(), axis=1)\n\n self.base_df['hours_open_id'] = self.base_df.apply(\n lambda row: self.create_hours_open_id(row['index'], row['address1'], row['address2'], row['city'],\n row['state'], row['zip_code']), axis=1)\n\n self.base_df['is_drop_box'] = self.base_df.apply(\n lambda row: self.is_drop_box(), axis=1)\n\n self.base_df['is_early_voting'] = self.base_df.apply(\n lambda row: self.is_early_voting(), axis=1)\n\n self.base_df['latitude'] = self.base_df.apply(\n lambda row: self.get_latitude(), axis=1)\n\n self.base_df['longitude'] = self.base_df.apply(\n lambda row: self.get_longitude(), axis=1)\n\n self.base_df['latlng_source'] = self.base_df.apply(\n lambda row: self.get_latlng_source(), axis=1)\n\n self.base_df['id'] = self.base_df.apply(\n lambda row: self.create_id(row['index'], row['ocd_division'],row['address1'], row['address2'],\n row['city'], row['state'], row['zip_code']), axis=1)\n\n return self.base_df", "def raster_to_geodataframe(*a, **kw) -> gpd.GeoDataFrame:\n kw[\"geo\"] = True\n return raster_to_dataframe(*a, **kw)" ]
[ "0.63746387", "0.63202995", "0.60851926", "0.6078423", "0.6073065", "0.60551715", "0.60244256", "0.59730685", "0.59513515", "0.5890462", "0.5884484", "0.58564293", "0.58458704", "0.58379817", "0.58235085", "0.5796834", "0.5791018", "0.5752113", "0.5700064", "0.5698395", "0.56887555", "0.5678902", "0.5663754", "0.56479895", "0.56412184", "0.56067526", "0.55637765", "0.5534452", "0.55316406", "0.55287206" ]
0.79349303
0
Infinite sequence of integers.
def integers():
    i = 1
    while True:
        yield i
        i = i + 1
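A short consumption sketch (not part of the dataset record above); it assumes the integers() definition directly above is in scope and uses itertools.islice to take a finite slice of the infinite stream.

import itertools

first_five = list(itertools.islice(integers(), 5))
print(first_five)  # [1, 2, 3, 4, 5]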
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def int_to_seq(i):\n\ts = []\n\tprime = xprimes()\n\twhile i != 1:\n\t\ts.append(0)\n\t\tp = next(prime)\n\t\twhile i % p == 0:\n\t\t\ts[-1] += 1\n\t\t\ti /= p\n\treturn s", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2", "def simple_range(limit):\n i = 0\n while i < limit:\n yield i\n i += 1", "def xrange1(value):\n try:\n i = int(value)\n return [x+1 for x in xrange(i)]\n except:\n return []", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def int_to_inv_seq(i, n):\n\tinv_seq = []\n\twhile n > 1:\n\t\tfct = factorial(n - 1)\n\t\tinv_seq.append(i / fct)\n\t\ti %= fct\n\t\tn -= 1\n\tinv_seq.append(0)\n\treturn inv_seq", "def xrange0(value):\n try:\n i = int(value)\n return list(xrange(i))\n except:\n return []", "def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1", "def local_seq():\n return st.integers(min_value=1)", "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def generate_sequence(n):\n\n sequence = []\n\n # generate sequence\n while n != 1:\n sequence.append(n)\n n = next_integer(n)\n\n # append 1 to sequence since all sequences assumed to end in 1\n sequence.append(1)\n\n return sequence", "def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def numbers():\n for number in range(1, 76):\n yield number", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def simple_seq(seq):\n for i in seq:\n yield i", "def iseq(start=0, stop=None, inc=1):\n if stop is None: # allow isequence(3) to be 0, 1, 2, 3\n # take 1st arg as stop, start as 0, and inc=1\n stop = start; start = 0; inc = 1\n return range(start, stop+inc, inc)", "def to_int(a):\n i = 0\n while a:\n i += 1\n a = a.next\n return i", "def natural_numbers():\n \n acc=0\n for x in range(1000): \n if x%3==0 or x%5==0:\n acc=acc+x\n return acc", "def fib(limit):\n a, b = 0, 1\n while a <= limit:\n yield a\n a, b = b, a + b", "def range() -> List[int]:\n pass", "def section_4_7():\n import itertools\n\n def test1():\n def count(n):\n while True:\n yield n\n n += 1\n\n c = count(0)\n for x in itertools.islice(c, 10, 20):\n print(x)\n\n test1()", "def get_integers(bitwidth: int, unsigned: bool, limit: int = 0) -> Generator:\n if unsigned:\n start, stop = 0, ((1 << bitwidth) - 1)\n else:\n start, stop = (-(1 << bitwidth - 1)), (1 << (bitwidth - 1) - 1)\n\n for num in _fuzzdb_integers(limit):\n if num >= start and num <= stop:\n yield num", "def renumber():\n\n counter = itertools.count(1)\n while True:\n yield 's%s'%counter.next()", "def fibonacci() -> Iterator[int]:\n a, b = 0, 1\n while True:\n yield a\n a, b = b, a + b", "def just(n, 
seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def fibonacci():\n\ta, b = 0, 1\n\tyield 0\n\twhile True:\n\t\ta, b = b, a + b\n\t\tyield a" ]
[ "0.7470997", "0.7343358", "0.6424951", "0.6344632", "0.63421345", "0.62673986", "0.60785764", "0.60572314", "0.60339946", "0.6026884", "0.5996939", "0.59359825", "0.5839094", "0.58256906", "0.58099836", "0.5782609", "0.57588905", "0.5748371", "0.5706836", "0.5637029", "0.5626966", "0.56118757", "0.5589854", "0.5553567", "0.55522346", "0.555092", "0.5545826", "0.5543459", "0.5538988", "0.5504037" ]
0.7472032
1
Returns first n values from the given sequence.
def take(n, seq):
    seq = iter(seq)
    result = []
    try:
        for i in range(n):
            result.append(next(seq))
    except StopIteration:
        pass
    return result
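A minimal usage sketch (not part of the dataset record above), assuming the take() definition above is in scope. Note that take() does not raise when the sequence is shorter than n; it simply returns what is available.

print(take(3, [10, 20, 30, 40]))  # [10, 20, 30]
print(take(5, [1, 2]))            # [1, 2]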
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def take(n, seq):\n return itertools.islice(seq, n)", "def nth(n, seq):\n try:\n return seq[n]\n except TypeError:\n return next(itertools.islice(seq, n, None))", "def take(iterable, n):\n return list(itertools.islice(iterable, n))", "def lookahead(n, iterable):\n for value in islice(copy.copy(iterable), n, None):\n return value\n raise IndexError(n)", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def take(n, iterable):\n return list(islice(iterable, n))", "def take(n, iterable):\n return list(islice(iterable, n))", "def first_n(n):\r\n return Quantifier(\"first_{}\".format(n),\r\n isom=False, cons=True, lcons=False, rmon=True, lmon=None,\r\n fn=lambda seq: first_n_ver(seq, n),\r\n gen_fn=lambda verify_fn, truth_value, max_length: first_n_gen(n, verify_fn, truth_value, max_length))", "def look_ahead(self, n: int = 1):\n return self.data[self.end:self.end+n]", "def firstn(reader, n):\n\n # TODO(yuyang18): Check if just drop the reader, could clean the opened\n # resource or not?\n\n def firstn_reader():\n for i, item in enumerate(reader()):\n if i == n:\n break\n yield item\n\n return firstn_reader", "def first_n_ver(seq, n):\r\n # TODO: more complicated presupposition handling instead of just false?\r\n if len(seq) < n:\r\n return Quantifier.F\r\n\r\n num_AB = 0\r\n for item in seq:\r\n if num_AB >= n:\r\n return Quantifier.T\r\n # if an A-not-B found before n ABs are, return F\r\n if np.array_equal(item, Quantifier.AnotB) and num_AB < n:\r\n return Quantifier.F\r\n elif np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n\r\n if num_AB >= n:\r\n return Quantifier.T\r\n\r\n # there are less than n ABs in total\r\n return Quantifier.F", "def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def next_n(iterator, N):\n try:\n items = []\n for _ in range(N):\n items.append(next(iterator))\n return items\n except StopIteration:\n if items:\n return items\n return None", "def take(n, iterable, islice=islice):\n return islice(iterable, n)", "def top_n(values, first_n=10):\n values = iter(values)\n top = [val for val in islice(values, first_n)]\n if len(top) < first_n:\n return top\n heapq.heapify(top)\n for val in values:\n heapq.heappushpop(top, val)\n return top", "def drop(n, seq):\n return itertools.islice(seq, n, None)", "def take(iterable, n):\n\n def taking(iterable_):\n for i, e in enumerate(iterable_):\n if i < n:\n yield e\n\n return taking(iterable)", "def nth(n, iterable, default = None):\n return next(islice(iterable, n, None), default)", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def genslices(n):\n def range_with_none():\n yield None\n yield from range(-n, n+1)\n\n for t in product(range_with_none(), range_with_none(), range_with_none()):\n s = slice(*t)\n if s.step != 0:\n yield s", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return 
next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def window(seq, n):\n seq_it = iter(seq)\n result = tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def take(self, n): # noqa: N805\n return List(_islice(self, n))" ]
[ "0.7429517", "0.6876003", "0.6727532", "0.67118037", "0.66985476", "0.66985476", "0.66961426", "0.6665714", "0.6665714", "0.66652995", "0.6647308", "0.64990807", "0.6490624", "0.6476307", "0.6448948", "0.6416278", "0.6414325", "0.64115244", "0.6388718", "0.63765794", "0.6330259", "0.63142514", "0.6313415", "0.6304847", "0.6304847", "0.6304847", "0.63014895", "0.62594324", "0.6253964", "0.6246031" ]
0.7366886
1
Report Method to Get Work Order Details.
def get_work_order_detail(self, date_range):
    work_order_obj = self.env["task.line"]
    start = datetime.strptime(date_range.get("date_from"), "%Y-%m-%d")
    end = datetime.strptime(date_range.get("date_to"), "%Y-%m-%d")
    step = timedelta(days=1)
    workorder_detail = []
    while start <= end:
        sdate = str(
            datetime.strptime(
                str(start.date()) + " 00:00:00", DEFAULT_SERVER_DATETIME_FORMAT
            )
        )
        edate = str(
            datetime.strptime(
                str(start.date()) + " 23:59:59", DEFAULT_SERVER_DATETIME_FORMAT
            )
        )
        work_order_ids = work_order_obj.search(
            [("date_issued", ">=", sdate), ("date_issued", "<=", edate)]
        )
        if work_order_ids:
            parts_data = {}
            parts_value = []
            for parts_line in work_order_ids:
                if (
                    parts_line.fleet_service_id
                    and parts_line.fleet_service_id.state == "done"
                ):
                    parts_dict = {
                        "wo_name": parts_line.fleet_service_id
                        and parts_line.fleet_service_id.name
                        or "",
                        "vehicle_id": parts_line.fleet_service_id
                        and parts_line.fleet_service_id.vehicle_id
                        and parts_line.fleet_service_id.vehicle_id.name
                        or "",
                        "part_no": parts_line.product_id
                        and parts_line.product_id.default_code
                        or "",
                        "part_name": parts_line.product_id
                        and parts_line.product_id.name
                        or "",
                        "vehicle_make": parts_line.vehicle_make_id
                        and parts_line.vehicle_make_id.name
                        or "",
                        "qty": parts_line.qty or 0.0,
                        "uom": parts_line.product_uom
                        and parts_line.product_uom.name
                        or "",
                        "old_part_return": parts_line.old_part_return
                        and "Yes"
                        or "No",
                        "issued_by": parts_line.issued_by
                        and parts_line.issued_by.name
                        or "",
                        "remarks": parts_line.fleet_service_id
                        and parts_line.fleet_service_id.note
                        or "",
                    }
                    parts_value.append(parts_dict)
            if parts_value:
                parts_value = sorted(parts_value, key=lambda k: k["wo_name"])
                parts_data = {"date": start.date(), "value": parts_value}
                workorder_detail.append(parts_data)
        start += step
    return workorder_detail
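As a reading aid (not part of the dataset record above), this is the shape of the value returned by get_work_order_detail(): one entry per day with completed work orders. Field names are taken from the code; the values below are placeholders.

example_result = [
    {
        "date": "2023-01-02",  # a datetime.date in the real output
        "value": [
            {
                "wo_name": "WO/00012",
                "vehicle_id": "Truck-01",
                "part_no": "FLT-113",
                "part_name": "Oil filter",
                "vehicle_make": "Toyota",
                "qty": 2.0,
                "uom": "Unit(s)",
                "old_part_return": "Yes",
                "issued_by": "Storekeeper",
                "remarks": "Routine service",
            },
        ],
    },
]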
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def order_report():", "def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass", "def get_order_detail(orderid): \n data = order_obj.get_order_detail(orderid)\n return data", "def open_workorders(self, cr, uid, ids, context=None):\n context = context or {}\n models_data = self.pool.get('ir.model.data')\n data = self.browse(cr, uid, ids[0])\n wo_ids = self._make_query_result(cr, uid, data, context=context)\n\n # Get workorder views\n dummy, form_view = models_data.get_object_reference(cr, uid, 'l10n_in_mrp_subcontract', 'mrp_production_workcenter_form_cost_report')\n dummy, tree_view = models_data.get_object_reference(cr, uid, 'l10n_in_mrp_subcontract', 'mrp_production_workcenter_tree_view_cost_report')\n\n context.update({'group_by':'production_id'})\n\n return {\n 'domain': \"[('id','in',[\"+','.join(map(str, wo_ids))+\"])]\",\n 'name': _('WorkOrder Cost Analysis'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'context':context,\n 'res_model': 'mrp.production.workcenter.line',\n 'views': [(tree_view or False, 'tree'), (form_view or False, 'form')],\n 'type': 'ir.actions.act_window',\n }", "def getOrderInfo(self):\n return self.__orderinfo", "def order_item_details(self) -> 'outputs.OrderItemDetailsResponse':\n return pulumi.get(self, \"order_item_details\")", "def get_wo_mthly_smry(self, workorder_browse):\n wo_summary_data = []\n wo_check_dict = {}\n no = 0\n if workorder_browse:\n for work_rec in workorder_browse:\n if work_rec.state and work_rec.state == \"done\":\n no += 1\n identification = \"\"\n repair_line_data = \"\"\n if work_rec.vehicle_id:\n identification += work_rec.vehicle_id.name\n if work_rec.vehicle_id.f_brand_id:\n identification += \" \" + work_rec.vehicle_id.f_brand_id.name\n if work_rec.vehicle_id.model_id:\n identification += \" \" + work_rec.vehicle_id.model_id.name\n for repaire_line in work_rec.repair_line_ids:\n if repaire_line.complete is True:\n if (\n repaire_line.repair_type_id\n and repaire_line.repair_type_id.name\n ):\n repair_line_data += (\n repaire_line.repair_type_id.name + \", \"\n )\n if work_rec.parts_ids:\n for parts_line in work_rec.parts_ids:\n if work_rec.id in wo_check_dict.keys():\n parts_data = {\n \"no\": -1,\n \"location\": \"\",\n \"type\": \"\",\n \"wo\": \"\",\n \"identification\": \"\",\n \"vin\": \"\",\n \"plate_no\": \"\",\n \"work_performed\": \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n wo_check_dict[work_rec.id] = work_rec.id\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n \"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"part\": parts_line.product_id\n and parts_line.product_id.default_code\n or \"\",\n \"qty\": parts_line.qty or 0.0,\n \"uom\": parts_line.product_uom\n and parts_line.product_uom.name\n or \"\",\n }\n wo_summary_data.append(parts_data)\n else:\n parts_data = {\n \"no\": no,\n \"location\": work_rec.team_id\n and work_rec.team_id.name\n or \"\",\n \"type\": work_rec.main_type or \"\",\n \"wo\": work_rec.name or \"\",\n 
\"identification\": identification or \"\",\n \"vin\": work_rec.vehicle_id\n and work_rec.vehicle_id.vin_sn\n or \"\",\n \"plate_no\": work_rec.vehicle_id\n and work_rec.vehicle_id.license_plate\n or \"\",\n \"work_performed\": repair_line_data\n and repair_line_data[:-2]\n or \"\",\n \"vehicle_make\": \"\",\n \"qty\": \"\",\n \"uom\": \"\",\n }\n wo_summary_data.append(parts_data)\n if not wo_summary_data:\n msg = _(\n \"Warning! \\n\\\n No data Available for selected work order.\"\n )\n raise UserError(msg)\n return wo_summary_data", "def get_details(self):", "def get_order_details(game_id: int, user_id: int, start_time: float = None, end_time: float = None):\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n query = \"\"\"\n SELECT\n o.id as order_id,\n relevant_orders.status,\n relevant_orders.order_status_id,\n symbol,\n relevant_orders.timestamp,\n buy_or_sell,\n quantity,\n order_type,\n time_in_force,\n price,\n relevant_orders.clear_price\n FROM orders o\n INNER JOIN (\n SELECT os_full.id,\n os_full.timestamp,\n os_full.order_id,\n os_full.clear_price,\n os_full.status,\n os_relevant.order_status_id\n FROM order_status os_full\n INNER JOIN (\n SELECT os.order_id, grouped_os.max_id as order_status_id\n FROM order_status os\n INNER JOIN\n (SELECT order_id, max(id) as max_id\n FROM order_status\n GROUP BY order_id) grouped_os\n ON\n os.id = grouped_os.max_id\n WHERE os.status NOT IN ('cancelled', 'expired')\n ) os_relevant\n ON os_relevant.order_id = os_full.order_id\n ) relevant_orders\n ON relevant_orders.order_id = o.id\n WHERE game_id = %s AND user_id = %s AND relevant_orders.timestamp >= %s AND relevant_orders.timestamp <= %s;\"\"\"\n\n with engine.connect() as conn:\n df = pd.read_sql(query, conn, params=[game_id, user_id, start_time, end_time])\n\n df = pivot_order_details(df)\n df[\"status\"] = \"fulfilled\"\n df.loc[df[\"timestamp_fulfilled\"].isna(), \"status\"] = \"pending\"\n return df", "def __str__(self):\n return f'Order: {self.size} {self.drink_name} from {self.shop}\\n' \\\n f'Details: {self.details}\\n' \\\n f'Location: {self.location}\\n' \\\n f'Contact Info: {self.customer_name}, {self.customer_number}'", "def test_get_order_address(self):\n pass", "def test_get_order(self):\n pass", "def work_order_receipt_retrieve(self, work_order_id, id=None):\n if work_order_id is None or not is_hex(work_order_id):\n logging.error(\"Work order id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Worker id is empty or Invalid\")\n\n json_rpc_request = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"WorkOrderReceiptRetrieve\",\n \"id\": id,\n \"params\": {\n \"workOrderId\": work_order_id\n }\n }\n response = self.__uri_client._postmsg(json.dumps(json_rpc_request))\n return response", "def get_order(self, order_id):\n request = OrdersGetRequest(order_id)\n # 3. Call PayPal to get the transaction\n response = self.client.execute(request)\n # 4. Save the transaction in your database. 
Implement logic to save transaction to your database for future reference.\n print('Status Code: ', response.status_code)\n print('Status: ', response.result.status)\n print('Order ID: ', response.result.id)\n print('Intent: ', response.result.intent)\n print('Links:')\n for link in response.result.links:\n print('\\t{}: {}\\tCall Type: {}'.format(\n link.rel, link.href, link.method))\n print('Gross Amount: {} {}'.format(response.result.purchase_units[0].amount.currency_code,\n response.result.purchase_units[0].amount.value))", "def ZeusOrderDetails(request):\n\n\tif request.method == \"GET\":\n\t\t\n\t\tform = ZeusOrderDetailsForm(request.GET)\n\n\t\tif form.is_valid():\n\t\t\t\n\t\t\ttry:\n\t\t\t\t# Get the Data of the Order being viewed\n\t\t\t\torder_data = Orders.objects.get(hash_key=form.cleaned_data.get(\"order\"))\n\n\t\t\t\torder_data.total = \"{:,.2f}\".format(float(order_data.subtotal) + float(order_data.shipping_cost))\n\t\t\t\torder_data.subtotal = \"{:,.2f}\".format(order_data.subtotal)\n\t\t\t\torder_data.shipping_cost = \"{:,.2f}\".format(order_data.shipping_cost)\n\n\t\t\t\t# Get the data needed for the cart product\n\t\t\t\tfor a in range(len(order_data.cart_data)):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tproduct = Products.objects.get(hash_key=order_data.cart_data[a][\"product_id\"])\n\t\t\t\t\t\torder_data.cart_data[a][\"image_0\"] = (product.image_0.url).replace(\"&export=download\", \"\") if product.image_0.url else None\n\t\t\t\t\t\torder_data.cart_data[a][\"price\"] = product.price\n\t\t\t\t\t\torder_data.cart_data[a][\"discount_per\"] = order_data.discount_per\n\t\t\t\t\t\torder_data.cart_data[a][\"d_price\"] = \"{:,.2f}\".format((product.price * (100 - order_data.discount_per[\"user_discount\"]) / 100) * (100 - order_data.discount_per[\"coupon_discount\"]) / 100 if order_data.discount_per else product.price * (100 - order_data.discount_per[\"coupon_discount\"]) / 100)\n\t\t\t\t\t\torder_data.cart_data[a][\"card_color\"] = product.card_color\n\t\t\t\t\texcept Products.DoesNotExist:\n\t\t\t\t\t\torder_data.cart_data[a][\"price\"] = \"N/A\"\n\n\t\t\t\thtml_content = {\n\t\t\t\t\t\"order_data\": order_data\n\t\t\t\t}\n\t\t\t\treturn render(request, \"lost-empire/site_templates/zeus/orders/order_details.html\", html_content)\n\t\t\texcept Orders.DoesNotExist:\n\t\t\t\tmessages.error(request, \"Order is not available in the Database.\")\n\t\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\telse:\n\t\t\t# Handle errors if form is invalid\n\t\t\tform_error_catcher(request, form, [\"order\"])\n\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\n\telif request.method == \"POST\":\n\n\t\t# Validate the inputs\n\t\tform = ZeusOrderDetailsForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\t\n\t\t\t# Check if the order is being completed\n\t\t\tif request.GET.get(\"p\") == \"order_completed\":\n\t\t\t\t\n\t\t\t\t# Shipping Company name is required even tho in forms.py is set to False\n\t\t\t\tif not form.cleaned_data.get(\"shippingcompany\"):\n\t\t\t\t\tmessages.warning(request, \"Shipping company is required. 
Please provide the name of the shipping company.\")\n\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\telse:\n\n\t\t\t\t\t# Check if the order is still in the Database\n\t\t\t\t\ttry:\n\t\t\t\t\t\t# Get the Data of that order\n\t\t\t\t\t\torder_data = Orders.objects.get(hash_key=form.cleaned_data.get(\"order\"))\n\n\t\t\t\t\t\t# Set it to completed\n\t\t\t\t\t\torder_data.order_status = \"COMPLETED\"\n\n\t\t\t\t\t\t# Add the Shipping company name\n\t\t\t\t\t\torder_data.shipping_company = form.cleaned_data.get(\"shippingcompany\")\n\n\t\t\t\t\t\t# Check if the tracker code/id is available\n\t\t\t\t\t\tif form.cleaned_data.get(\"trackercode\"):\n\t\t\t\t\t\t\t# Add it to the orders data\n\t\t\t\t\t\t\torder_data.tracker_id = form.cleaned_data.get(\"trackercode\")\n\n\t\t\t\t\t\t# Commit to the Database (Save the changes to the Database)\n\t\t\t\t\t\torder_data.save()\n\n\t\t\t\t\t\tmessages.success(request, \"Order has been completed.\")\n\t\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\t\texcept Orders.DoesNotExist:\n\t\t\t\t\t\tmessage.error(request, \"The order is no longer available in the Database. Most likely it has been removed\")\n\t\t\t\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\t\t\n\t\t\t# Check if the order is being denied\n\t\t\telif request.GET.get(\"p\") == \"denied_order\":\n\t\t\t\t\n\t\t\t\t# Check if the order is still in the Database\n\t\t\t\ttry:\n\t\t\t\t\t# Get the Data of that order\n\t\t\t\t\torder_data = Orders.objects.get(hash_key=form.cleaned_data.get(\"order\"))\n\n\t\t\t\t\t# Set it to denied\n\t\t\t\t\torder_data.order_status = \"DENIED\"\n\n\t\t\t\t\t# Add the Shipping company name\n\t\t\t\t\tif form.cleaned_data.get(\"deniedmessage\"):\n\t\t\t\t\t\torder_data.denied_msg = form.cleaned_data.get(\"deniedmessage\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tmessages.error(request, \"A message of denial is required to successfully deny an order\")\n\t\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\n\t\t\t\t\t# Check if refund is enabled\n\t\t\t\t\tif form.cleaned_data.get(\"refund_order_checkbox\"):\n\t\t\t\t\t\torder_data.refund_amount = order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"amount\"][\"value\"]\n\t\t\t\t\t\trefund_status = RefundOrder(order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"id\"], refund_amount=\"{:.2F}\".format(float(order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"amount\"][\"value\"])), currency_code=order_data.paypal_data[\"purchase_units\"][0][\"payments\"][\"captures\"][0][\"amount\"][\"currency_code\"])\n\n\t\t\t\t\t\t# Check if the ReFund was successful\n\t\t\t\t\t\tif not refund_status:\n\t\t\t\t\t\t\tmessages.error(request, \"Refund failed. 
Please go to the Merchant's PayPal Account and check the status of refund for this order.\")\n\t\t\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\t\telse:\n\t\t\t\t\t\torder_data.refund_amount = 0\n\t\t\t\t\t\t\n\t\t\t\t\t# Commit to the Database (Save the changes to the Database)\n\t\t\t\t\torder_data.save()\n\n\t\t\t\t\tmessages.success(request, \"Order has been denied.\")\n\t\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\t\t\texcept Orders.DoesNotExist:\n\t\t\t\t\tmessage.error(request, \"The order is no longer available in the Database. Most likely it has been removed\")\n\t\t\t\t\treturn HttpResponseRedirect(reverse(\"ZeusOrders\"))\n\t\t\t\n\t\t\t# Else tell the user that the option p is missing\n\t\t\telse:\n\t\t\t\tmessages.error(request, \"Missing p option.\")\n\t\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")\n\t\telse:\n\t\t\t# Handle errors if form is invalid\n\t\t\tform_error_catcher(request, form, [\"shippingcompany\", \"trackercode\", \"deniedmessage\"])\n\t\t\treturn HttpResponseRedirect(f\"/zeus/orders/order_details?order={form.cleaned_data.get('order')}\")", "def order_details(request, order_id, **kwargs):\n order = Order.objects.get(pk=order_id)\n if order.receiver != request.user and request.user.shipper_info.shipper_type != ShipperInfo.ShipperType.FRIENDSHIP_BIDDER:\n messages.error(request, 'You do not have permission to view this page.')\n return redirect('friendship:index')\n\n actions = OrderAction.objects.filter(order=order)\n\n # default currency to USD\n if \"currency\" not in request.session:\n request.session[\"currency\"] = Money.Currency.USD\n\n # calculate subtotal\n currency = request.session[\"currency\"]\n\n subtotal = 0\n min_bid = get_min_bid(order)\n \n if min_bid:\n if min_bid.retail_price:\n subtotal += min_bid.retail_price.get_value(currency)\n if min_bid.service_fee:\n subtotal += min_bid.service_fee.get_value(currency)\n\n data_dict = {}\n if len(order.url) > 50:\n order_url = order.url[0:47] + \"...\"\n else:\n order_url = order.url\n\n us_tracking = TrackingNumber.objects.filter(\n order=order\n ).filter(\n shipping_stage=TrackingNumber.ShippingStage.MERCHANT_TO_SHIPPER\n )\n thai_tracking = TrackingNumber.objects.filter(\n order=order\n ).filter(\n shipping_stage=TrackingNumber.ShippingStage.DOMESTIC_TO_RECEIVER\n )\n\n data_dict.update({\n 'us_tracking': us_tracking[0] if us_tracking else None,\n 'thai_tracking': thai_tracking[0] if thai_tracking else None\n })\n\n if min_bid:\n thb_total = math.ceil(min_bid.get_total(currency=Money.Currency.THB))\n else:\n thb_total = 0\n\n data_dict.update({\n 'order': order,\n 'order_url': order_url,\n 'actions': reversed(actions),\n 'latest_action': order.latest_action,\n 'min_bid': min_bid,\n 'subtotal': Money.format_value(subtotal, currency),\n 'usd': Money.Currency.USD,\n 'thb': Money.Currency.THB,\n 'usd_str': str(Money.Currency.USD).upper(),\n 'thb_str': str(Money.Currency.THB).upper(),\n 'thb_total': str(thb_total),\n 'currency': currency,\n 'manual_wire_transfer_form': ManualWireTransferForm(),\n })\n data_dict.update(kwargs)\n\n data_dict.update({ k : v.value\n for (k,v)\n in OrderAction.Action._member_map_.items()\n })\n\n new_val = math.ceil(thb_total - thb_total * settings.MANUAL_BANK_TRANSFER_DISCOUNT)\n\n # Manual bank transfer discount\n data_dict[\"manual_bank_transfer_total_str\"] = 
\"\\u0E3F{}\".format(\n new_val\n )\n data_dict[\"discount_str\"] = \"-\\u0E3F{}\".format(thb_total - new_val)\n\n # Braintree Setup\n if settings.DEBUG:\n env = \"sandbox\"\n else:\n env = \"production\"\n\n gateway = braintree.BraintreeGateway(access_token=settings.BRAINTREE_ACCESS_TOKEN)\n client_token = gateway.client_token.generate()\n client = \"{\" + \\\n f\"{env}: '{client_token}'\" + \\\n \"}\"\n data_dict[\"braintree_client\"] = client\n data_dict[\"payment_env\"] = env\n\n return render(request, 'friendship/order_details.html', data_dict)", "def get_order(self, walletId, orderId):\n return", "def get_order(self):\n #store the orders for the current cycle inside the class\n self.orders = self.firebase.get_data(\"orders\")", "def test_get_orders(self):\n pass", "def returnOrderTrades(self, order_number):", "def show_order_detail(self, order_id):\n\n data = cur.execute(\"\"\"SELECT productid, productname, quantity, location FROM orderitems WHERE orderid = ?\"\"\",\n (order_id,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Quantity\", \"Location\"]))", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def GetOrder(order_id): \n\t\"\"\"Method to get order\"\"\"\n\trequest = OrdersGetRequest(order_id)\n\tresponse = client.execute(request)\n\treturn response.result.__dict__[\"_dict\"]", "def __str__(self):\n return self.order_id", "def test_get_order_buyer_info(self):\n pass", "def trackOrderRequest(self):\n\t\tstart_dat=datetime.today()\n\t\tstart_date = start_dat - timedelta( hours=start_dat.time().hour,minutes=start_dat.time().minute,seconds=start_dat.time().second ) \n\t\tend_date=start_dat\n\t\tans=None\n\t\t#print start_dat.time().hour\n\t\tprint end_date\n\t\tans=Order.objects.filter(date_of_order__range=(start_date,end_date))\n\t\tlst=[]\n\t\tfor b in ans:\n\t\t\towneradd=b.owner_id.address\n\t\t\tuseradd=b.userid.address\n\t\t\tusername=b.userid.email\n\t\t\townername=b.owner_id.email\n\t\t\tuserphone=b.userid.contact_no\n\t\t\townerphone=b.owner_id.contact_no\n\t\t\tbookname=b.bookid.title\n\t\t\tstatus=b.paymentid.ispending\n\t\t\tbook=b.__dict__\n\t\t\tbook['owneradd']=owneradd\n\t\t\tbook['useradd']=useradd\n\t\t\tbook['username']=username\n\t\t\tbook['ownername']=ownername\n\t\t\tbook['userphone']=userphone\n\t\t\tbook['ownerphone']=ownerphone\n\t\t\tbook['name']=bookname\n\t\t\tif status==True:\n\t\t\t\tbook['status']=\"Pending\"\n\t\t\telse:\n\t\t\t\tbook['status']=\"Delivered\"\n\t\t\tlst.append(book)\n\t\t#print ans\n\t\t\n\t\treturn lst", "def work_order_receipt_create(self, work_order_id, worker_id,\n worker_service_id,\n requester_id,\n receipt_create_status,\n work_order_request_hash,\n id=None):\n pass", "def work_order_receipt_lookup(self, worker_service_id,\n worker_id,\n requester_id,\n receipt_status, id=None):\n pass" ]
[ "0.68440074", "0.6475144", "0.6159926", "0.6083596", "0.608028", "0.59921384", "0.5808397", "0.58080214", "0.5693121", "0.5688188", "0.56651086", "0.5659879", "0.5623415", "0.56180996", "0.5615256", "0.5562105", "0.55497396", "0.55432594", "0.55332184", "0.5496018", "0.54649526", "0.54066366", "0.54066366", "0.54066366", "0.53959954", "0.53732836", "0.5371083", "0.53640014", "0.53638804", "0.5361521" ]
0.70237696
0
Generate an xlsx-format print report.
def generate_xlsx_report(self, workbook, data, parts_data): worksheet = workbook.add_worksheet("daily_parts_issuance_wizard") worksheet.set_column(0, 0, 10) worksheet.set_column(1, 1, 15) worksheet.set_column(2, 2, 20) worksheet.set_column(3, 3, 15) worksheet.set_column(4, 4, 10) worksheet.set_column(5, 5, 12) worksheet.set_column(6, 6, 10) worksheet.set_column(7, 7, 10) worksheet.set_column(8, 8, 15) worksheet.set_column(9, 9, 10) worksheet.set_column(10, 10, 15) worksheet.set_column(11, 11, 10) worksheet.set_column(12, 12, 20) worksheet.set_column(13, 13, 5) worksheet.set_column(14, 14, 5) worksheet.set_column(15, 15, 5) bold = workbook.add_format( {"bold": True, "font_name": "Arial", "font_size": "10"} ) tot = workbook.add_format( {"border": 2, "bold": True, "font_name": "Arial", "font_size": "10"} ) border = workbook.add_format( {"border": 2, "font_name": "Arial", "font_size": "10"} ) merge_format = workbook.add_format({"border": 2, "align": "center"}) format1 = workbook.add_format( {"border": 2, "bold": True, "font_name": "Arial", "font_size": "10"} ) format1.set_bg_color("gray") date = workbook.add_format({"num_format": "dd/mm/yy"}) worksheet.merge_range("C3:F3", "Merged Cells", merge_format) row = 0 row += 1 row += 1 worksheet.write(row, 2, "DAILY PARTS ISSUANCE", tot) row += 1 worksheet.write(row, 2, "Date From:", tot) worksheet.write(row, 3, data["form"]["date_from"] or "", border) worksheet.write(row, 4, "To:", tot) worksheet.write(row, 5, data["form"]["date_to"] or "", border) row += 2 worksheet.write(row, 0, "CMF", bold) row = 3 for objec in self.get_work_order_detail(data["form"]): row += 3 worksheet.write(row, 0, "DATE ISSUED :", bold) worksheet.write(row, 1, objec.get("date") or "", date) row += 2 worksheet.write(row, 0, "NO.", format1) worksheet.write(row, 1, "WO NO.", format1) worksheet.write(row, 2, "VEHICLE ID", format1) worksheet.write(row, 3, "PART NO.", format1) worksheet.write(row, 4, "PART NAME", format1) worksheet.write(row, 5, "VEHICLE MAKE", format1) worksheet.write(row, 6, "USED", format1) worksheet.write(row, 7, "UNIT TYPE", format1) worksheet.write(row, 8, "OLD PART RETURND", format1) worksheet.write(row, 9, "ISSUED BY", format1) worksheet.write(row, 10, "REMARKS", format1) line_row = row + 1 line_col = 0 counter = 1 for obj in objec.get("value"): worksheet.write(line_row, line_col, counter, border) line_col += 1 worksheet.write(line_row, line_col, obj.get("wo_name") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("vehicle_id") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("part_no") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("part_name") or "", border) line_col += 1 worksheet.write( line_row, line_col, obj.get("vehicle_make") or "", border ) line_col += 1 worksheet.write(line_row, line_col, obj.get("qty") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("uom") or "", border) line_col += 1 worksheet.write( line_row, line_col, obj.get("old_part_return") or "", border ) line_col += 1 worksheet.write(line_row, line_col, obj.get("issued_by") or "", border) line_col += 1 worksheet.write(line_row, line_col, obj.get("remarks") or "", border) line_col = 0 line_row += 1 counter += 1 worksheet.write(line_row, line_col, "********", border)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = 
STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n 
host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n 
host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n 
detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, 
detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()", "def printreport():\n report = createreport()\n print(report[0])\n print(report[1])\n print(report[2])", "def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def print_report(stocks_to_print):\n\n print(\"=========== REPORT ============\")\n for stock in stocks_to_print:\n stock.print_one_line_report()", "def generate_service_odometer_xlsx_report(self, res, next_service):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"next_service_by_odometer\")\n worksheet.col(0).width = 5000\n worksheet.col(1).width = 12500\n worksheet.col(2).width = 10000\n worksheet.col(3).width = 6000\n worksheet.col(4).width = 7500\n worksheet.col(5).width = 7500\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 7500\n worksheet.col(8).width = 10000\n\n font = xlwt.Font()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n border = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n format1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"Scheduled Maintenance By Mileage\", format1)\n row += 3\n worksheet.write(row, 7, \"Date :\", format1)\n worksheet.write(row, 8, time.strftime(\"%d-%B-%Y\"), format1)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"VEHICLE ID\", format1)\n worksheet.write(row, 2, \"VIN NO.\", format1)\n worksheet.write(row, 3, \"MAKE\", format1)\n worksheet.write(row, 4, \"MODEL\", format1)\n worksheet.write(row, 5, \"LAST SERVICE DATE\", format1)\n worksheet.write(row, 6, \"LAST MILEAGE\", format1)\n worksheet.write(row, 7, \"NEXT 
MILEAGE\", format1)\n worksheet.write(row, 8, \"REGISTRATION STATE\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in next_service:\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.name or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.vin_sn or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.f_brand_id and obj.f_brand_id.name or \"\", border\n )\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.model_id and obj.model_id.name or \"\", border\n )\n line_col += 1\n date = \"\"\n if obj.last_service_date:\n date = format_date(\n self.env,\n obj.last_service_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(line_row, line_col, date or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.odometer or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.due_odometer or \"\", border)\n line_col += 1\n # worksheet.write(line_row, line_col,\n # obj.vechical_location_id and\n # obj.vechical_location_id.name or '', border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res", "def _generate_report(self):\n raise NotImplementedError", "def generate_waiter_financial_report_excel_file(self, staff_info, period, month_report, path):\n try:\n workbook = xlw.Workbook(path)\n worksheet = workbook.add_worksheet()\n\n file_header_format = workbook.add_format({\n 'font_size':20,\n 'align': 'center',\n 'valign': 'vcenter'\n })\n table_header_format = workbook.add_format({\n 'bold': 1,\n 'border': 1,\n 'align': 'center',\n 'valign': 'vcenter',\n 'font_size': 12,\n 'fg_color': '#C0C0C0'})\n cell_format = workbook.add_format({\n 'font_size': 12,\n 'align':'center',\n 'valign':'vcenter'\n })\n sum_format = workbook.add_format({\n 'font_size': 12,\n 'align': 'center',\n 'valign': 'vcenter',\n 'fg_color': '#99FF99'\n })\n\n worksheet.set_column('A:A', 10)\n worksheet.set_column('B:B', 30)\n worksheet.set_column('C:C', 20)\n worksheet.set_column('D:D', 20)\n worksheet.set_column('E:E', 20)\n worksheet.set_column('F:F', 10)\n worksheet.set_column('G:G', 15)\n\n worksheet.merge_range('A1:G2', f'{staff_info[3]} {staff_info[1]} {period}', file_header_format)\n\n row = 4\n column = 0\n\n for line in month_report:\n for item in line:\n if row == 4:\n worksheet.write(row, column, item.__str__(), table_header_format)\n else:\n if month_report.index(line) == len(month_report)-1 and line.index(item) == len(line)-1:\n worksheet.write(row, column, item.__str__(), sum_format)\n else:\n worksheet.write(row, column, item.__str__(), cell_format)\n column += 1\n row += 1\n column = 0\n\n workbook.close()\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n 
self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def to_xls(self,ws,start_row = 0,start_col = 0,width_ratio = 1): \n if self.col_width_dict: \n for c in range(self.no_of_columns()):\n ws.col(start_col+c).width = int(35*self.col_width(c)*width_ratio); \n \n boldstyle = xlwt.XFStyle()\n boldstyle.font.bold = True\n \n for r in range(self.no_of_rows()):\n for c in range(self.no_of_columns()):\n if r == 0:\n ws.write(start_row + r,start_col + c,self.cell(r,c),boldstyle)\n else:\n ws.write(start_row + r,start_col + c,self.cell(r,c))", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def pdf_report_generate(self, cnx, mysql=False, postgres=False):\n\n # Instantiating the controller helper class.\n aux = ControllerHelper()\n\n ret = aux._EXIT_SUCCESS\n\n # Instantiating the model class.\n model = ReporterModel()\n\n # Retrieving a list of all data items stored in the database.\n# (hdr_set, row_set) = model.get_all_data_items(cnx, mysql)\n\n # Retrieving a list of data items for a given date period.\n (hdr_set, row_set) = model.get_data_items_by_date(self.FROM, self.TO,\n cnx, mysql, postgres)\n\n # In case of getting an empty result set, informing the user.\n if (not(row_set)):\n ret = aux._EXIT_FAILURE\n\n print(__name__ + aux._COLON_SPACE_SEP + aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP + aux._ERROR_NO_DATA)\n\n return ret\n\n # ---------------------------------------------------------------------\n # --- Debug output - Begin --------------------------------------------\n # ---------------------------------------------------------------------\n dbg_output = PrettyTable(hdr_set)\n\n # Populating table rows.\n # Note: For PostgreSQL and SQLite databases the following simple loop\n # between dash separators is quite sufficient,\n # but for MySQL database it needs to decode\n # row_set cells.\n i = 0\n\n # ---------------------------------------------------------------------\n if (not(mysql)):\n # Simply traversing through row_set rows.\n while (i < len(row_set)):\n dbg_output.add_row(row_set[i])\n\n i += 1\n # ---------------------------------------------------------------------\n else:\n # Traversing through row_set rows with cells post-processing.\n while (i < len(row_set)):\n row_ary = row_set[i]\n\n j = 0\n\n # Decoding row_set cells.\n while (j < len(hdr_set)):\n if ((j != 4) and (j != 5)):\n row_ary[j] = row_ary[j].decode()\n\n j += 1\n\n 
dbg_output.add_row(row_ary)\n\n i += 1\n\n # Left-aligning table columns.\n dbg_output.align=\"l\"\n\n print(dbg_output)\n\n print(str(len(row_set)) + self._ROWS_IN_SET_FOOTER + aux._NEW_LINE)\n # ---------------------------------------------------------------------\n # --- Debug output - End ----------------------------------------------\n # ---------------------------------------------------------------------\n\n time.sleep(1) # <== Waiting one second... just for fun... :-)... -- OK.\n\n # ---------------------------------------------------------------------\n # --- Generating the PDF report - Begin -------------------------------\n # ---------------------------------------------------------------------\n pdf_report_path = self._get_pdf_report_path(__file__, aux)\n\n report = canvas.Canvas(pdf_report_path,\n pagesize=A4, # <== 210 x 297 mm.\n pdfVersion=(1, 4), # <== PDF version 1.4.\n # --- Page boxes ------------------------------------------------------\n# cropBox=( (10 / self.MM), (10 / self.MM), (200 / self.MM), (287 / self.MM)),\n# artBox=( (15 / self.MM), (15 / self.MM), (195 / self.MM), (282 / self.MM)),\n# trimBox=((210 / self.MM), (297 / self.MM) ),\n#bleedBox=( (5 / self.MM), (5 / self.MM), (205 / self.MM), (292 / self.MM))\n )\n\n # --- Report metadata -------------------------------------------------\n report.setTitle (self._REPORT_TITLE )\n report.setAuthor (self._REPORT_AUTHOR )\n report.setSubject (self._REPORT_SUBJECT )\n report.setKeywords(self._REPORT_KEYWORDS)\n report.setCreator (self._REPORT_CREATOR )\n\n # --- Page body (data) x MAX_PAGES ------------------------------------\n i = 0\n\n while (i < self.MAX_PAGES):\n ret = self._page_body_draw(report, hdr_set, row_set)\n\n if (ret == aux._EXIT_FAILURE):\n print(__name__ + aux._COLON_SPACE_SEP+aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP+aux._ERROR_NO_REPORT_GEN)\n\n return ret\n\n report.showPage()\n\n i += 1\n\n # Trying to save the report.\n try:\n report.save()\n except Exception as e:\n ret = aux._EXIT_FAILURE\n\n print(__name__ + aux._COLON_SPACE_SEP + aux._ERROR_PREFIX\n + aux._COLON_SPACE_SEP + str(e))\n\n return ret\n\n print(self._PDF_REPORT_SAVED_MSG + aux._COLON_SPACE_SEP\n + pdf_report_path)\n # ---------------------------------------------------------------------\n # --- Generating the PDF report - End ---------------------------------\n # ---------------------------------------------------------------------\n\n return ret", "def create_sheet(self):\n workbook = xlwt.Workbook()\n borders = Borders()\n header_border = Borders()\n header_border.left,header_border.right,header_border.top,header_border.bottom = Borders.THIN,Borders.THIN,Borders.THIN,Borders.THICK\n borders.left,borders.right,borders.top,borders.bottom = Borders.THIN,Borders.THIN,Borders.THIN,Borders.THIN\n header_bold = xlwt.easyxf(\"font: bold on, height 200; pattern: pattern solid, fore_colour gray25;alignment: horizontal center ,vertical center\")\n header_bold.borders=header_border\n body_style = xlwt.easyxf(\"font: height 200; alignment: horizontal left\")\n body_style.borders=borders\n \n ## style for different colors in columns\n xlwt.add_palette_colour(\"light_blue_21\", 0x21)\n workbook.set_colour_RGB(0x21, 153, 255, 255) \n qty_cell_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour light_blue_21; borders: top thin,right thin,bottom thin,left thin\")\n \n xlwt.add_palette_colour(\"custom_orange\", 0x22)\n workbook.set_colour_RGB(0x22, 255, 204, 153)\n 
value_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour custom_orange; borders: top thin,right thin,bottom thin,left thin\")\n \n xlwt.add_palette_colour(\"custom_mandys_pink\", 0x20)\n workbook.set_colour_RGB(0x20, 246, 228, 204)\n value_style2 = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz right, vert center; pattern: pattern solid, fore_colour custom_mandys_pink; borders: top thin,right thin,bottom thin,left thin\")\n \n \n xlwt.add_palette_colour(\"custom_yellow\", 0x25)\n workbook.set_colour_RGB(0x25, 255, 255, 179)\n blank_cell_style = xlwt.easyxf(\"font: height 200,bold on, name Arial; align: horiz center, vert center; pattern: pattern solid, fore_colour custom_yellow; borders: top thin,right thin,bottom thin,left thin\")\n return workbook,header_bold,body_style,qty_cell_style,value_style,blank_cell_style,value_style2", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def printFrame(self,outfile=None,sheet=None,filename=None,rowoffset=0,empty=\"\",delimiter=','):\n# \t\tprint outfile,filename,sheet\n# \t\tsys.exit()\n\n\t\tstyle=xlwt.XFStyle()\t\n\t\tstyle.num_format_str = '#0.00'\n\n\t\trows = [self.header]\n\t\tfor d in self.data:\n\t\t\trow = []\n\t\t\tfor h in self.header:\n\t\t\t\ttry:\n\t\t\t\t\trow.append(d[h])\n\t\t\t\texcept KeyError:\n\t\t\t\t\trow.append(empty)\n\t\t\trows.append(row)\n\n\n\t\tif outfile == None:\n\t\t\treturn rows\n\t\telif isinstance(outfile,xlwt.Workbook) and sheet and filename:\n\t\t\tif isinstance(sheet,xlwt.Worksheet):\n\t\t\t\tws=sheet\n\t\t\telse:\n\t\t\t\tws = outfile.add_sheet(sheetname)\n\t\t\tfor ri, row in enumerate(rows):\n\t\t\t\tfor ci, val in enumerate(row):\n# \t\t\t\t\tval = unicode(val).encode(\"utf8\")\n\t\t\t\t\t#ws.write(ri+rowoffset,ci,val,xlwt.easyxf(num_format_str='#0.00'))\n\t\t\t\t\tif ci >= 1 and ri >= 1:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tval = float(val)\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\tws.write(ri+rowoffset,ci,val,style)\n\t\t\toutfile.save(filename[:-3]+'xls')\n\t\telse:\n\t\t\twr = csv.writer(open(outfile,'w'),delimiter=delimiter)\n\t\t\twr.writerows(rows)", "def create_template(path_string) :\r\n today = datetime.now()\r\n today = today.strftime('%y%y%m%d%H%M%S')\r\n # print(today)\r\n temp_path = os.path.join(path_string, today)\r\n # temp_path = today\r\n # Create a workbook and add a worksheet.\r\n workbook = xlsxwriter.Workbook(f'{temp_path}.xlsx')\r\n worksheet0 = workbook.add_worksheet('ATR') # Defaults to Sheet1.\r\n worksheet1 = workbook.add_worksheet('ESS') # Data.\r\n worksheet2 = workbook.add_worksheet('Statistics') # Defaults to Sheet\r\n\r\n # Some data we want to write to the worksheet.\r\n Tests_List = ['Temp', 'SN', 'Output Power @ P1dBCP', 
'Output Power Control Range/Resolution, FWD PWR Ind',\r\n 'Output IP3', 'LO Carrier Leakage', 'Sideband Suppression',\r\n 'Frequency Accuracy and Stability', 'A1 - Noise Figure vs. Gain', 'A1 - Gain variability',\r\n 'A1 - Image Suppression vs. Gain', 'Spurious',\r\n 'A2 - Noise Figure vs. Gain', 'A2 - Gain variability', 'A2 - Image Suppression vs. Gain',\r\n 'Average Power Consumption', 'Input Voltage', 'Digital Tests'\r\n ]\r\n\r\n # Start from the first cell. Rows and columns are zero indexed.\r\n row = 0\r\n # col = 0\r\n\r\n # Iterate over the data and write it out row by row.\r\n for index in range(3) :\r\n for i in range(len(Tests_List)) :\r\n worksheet0.write(row, i, Tests_List[i])\r\n worksheet1.write(row, i, Tests_List[i])\r\n worksheet2.write(row, i, Tests_List[i])\r\n # col += 1\r\n\r\n workbook.close()\r\n\r\n return today, temp_path", "def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_catalogue').report_action(self)", "def print_stock_rotation_report(self):\n warehouses = False\n locations = False\n from_date = False\n to_date = False\n active_id = self.ids[0]\n today=datetime.now().strftime(\"%Y-%m-%d\")\n f_name = 'Stock Rotation Report' + ' ' + today\n stock_warehouse_obj = self.env['stock.warehouse']\n stock_locations_obj = self.env['stock.location']\n product_obj = self.env['product.product']\n \n if self.filtaration == 'warehouse':\n if not self.include_all_warehouse:\n if not self.warehouse_ids:\n raise ValidationError(\"please select the Warehouse.\")\n warehouses = self.warehouse_ids\n else:\n warehouses = stock_warehouse_obj.search([])\n else:\n if not self.include_all_location:\n if not self.location_ids:\n raise ValidationError(\"please select the Locations.\")\n locations = self.location_ids\n else:\n locations = stock_locations_obj.search([('usage','=','internal')])\n\n\n if not self.from_date:\n raise ValidationError(\"please select the From Date.\")\n \n if not self.to_date:\n raise ValidationError(\"please select the To Date.\")\n\n all_products = product_obj.with_context(active_test=True).search([('type','=','product')])\n from_date = self.from_date\n to_date = self.to_date\n \n date_1 = time.strptime(from_date, \"%Y-%m-%d\")\n date_2 = time.strptime(to_date, \"%Y-%m-%d\")\n if not (date_1 <= date_2):\n raise ValidationError(\"Fromdate is not previous then Todate\")\n self.get_stock_rotation_report(from_date,to_date,warehouses,locations,all_products)\n if self.datas:\n return {\n 'type' : 'ir.actions.act_url',\n 'url':'web/content/?model=stock.rotation.report&download=true&field=datas&id=%s&filename=%s.xls'%(active_id,f_name),\n 'target': 'new',\n }", "def create_xlsx(request):\n\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output, worksheet, workbook, formats_dict = creating_empty_xlsx_file()\n\n if income_history:\n head_row, head_col = 1, 1\n row, col = 2, 1\n for i in income_history[0]:\n if i != 'income_history_id':\n worksheet.write(head_row, head_col, i, formats_dict['head_format'])\n head_col += 1\n\n for history_dict in income_history:\n worksheet.write(row, col, history_dict['income'], formats_dict['cell_format'])\n worksheet.write(row, col + 1, history_dict['fund'], formats_dict['cell_format'])\n date = datetime.datetime.strptime(history_dict['date'], \"%Y-%m-%d\")\n 
worksheet.write_datetime(row, col + 2, date, formats_dict['date_format'])\n worksheet.write_number(row, col + 3, history_dict['amount'],\n formats_dict['value_format'])\n worksheet.write(row, col + 4, history_dict['comment'], formats_dict['cell_format'])\n col, row = 1, row + 1\n\n workbook.close()\n\n response = file_streaming_response \\\n ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'income_history.xlsx', output)\n return response", "def generate_excel(structure:dict, output:str):\t\n\n\tstructure_columns = identify_columns(structure)\n\n\tworkbook = xlsxwriter.Workbook(output)\n\tworksheet = workbook.add_worksheet()\n\n\tcol = 0\n\tfor column in structure_columns:\n\t\tworksheet.write(0, col, column)\n\t\tcol += 1\n\n\trow = 1\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif isinstance(day[key], list):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), ', '.join(day[key]))\n\t\t\telif isinstance(day[key], dict):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), str(day[key]))\n\t\t\telse:\n\t\t\t\tworksheet.write(row, structure_columns.index(key), day[key])\n\t\trow += 1\n\t\n\tworksheet.freeze_panes(1, 1)\n\tworkbook.close()", "def write_xlsx(data):\n workbook = xlsxwriter.Workbook('MyWorkbook.xlsx')\n main_sheet = workbook.add_worksheet('MySheet')\n\n date_format = workbook.add_format(\n {'num_format': 'mm/dd/yy hh:mm:ss AM/PM'})\n length = str(len(data) + 1)\n \n main_sheet.add_table(('A1:D' + length), \n {'data': data,\n 'columns': [{'header': 'Department'}, {'header': 'Students'},\n {'header': 'Cumulative GPA'},\n {'header': 'Final Date',\n 'format': date_format}]})\n\n department_grades = workbook.add_chart({'type':'column'})\n department_grades.set_title(\n {'name':'Department and Grade distribution'})\n department_grades.add_series(\n {'categories':'=MySheet!$A$2:$A$5',\n 'values':'=MySheet!$C$2:$C$5'})\n main_sheet.insert_chart('A8', department_grades)\n workbook.close()", "def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_sold').report_action(self)", "def excel_print(data1, data2, data3, data4, data5, data6):\r\n\r\n list_data = [data1, data2, data3, data4, data5, data6]\r\n name_list = ['Old elec', 'New elec', 'Old elec dup', 'New elec dup',\r\n 'Diff After Strip', 'New Elec Before Strip']\r\n zipped = zip(list_data, name_list)\r\n excel_writer = pd.ExcelWriter('elec_delta2.xlsx', engine='xlsxwriter')\r\n for data, name in zipped:\r\n data.to_excel(excel_writer, sheet_name=name,\r\n index=False, freeze_panes=(1, 0))\r\n num_cols = len(list(data))\r\n worksheet = excel_writer.sheets[name]\r\n worksheet.autofilter(0, 0, 0, num_cols-1)\r\n worksheet.set_column(0, 0, 23.56)\r\n worksheet.set_column(1, 1, 34.89)\r\n excel_writer.save()", "def report_table(self, filename='ODH_report'):\n table = []\n header = ['Source', 'Failure', 'Event failure rate, 1/hr', '# of',\n 'Total failure rate, 1/hr', 'Leak rate, SCFM',\n '# fans working', 'Fan rate, SCFM', 'Event duration, min',\n 'Oxygen concentration', 'Fatality prob', 'Case prob',\n 'Fatality rate, 1/hr']\n # 'Total failure rate', 'ODH protection PFD', 'Building is powered'\n table.append(header)\n self.fail_modes.sort(key=lambda x: x.source.name)\n for f_mode in self.fail_modes:\n table.append([\n f_mode.source.name,\n f_mode.name,\n (f_mode.leak_fr/f_mode.N).m_as(1/ureg.hr),\n f_mode.N,\n f_mode.leak_fr.m_as(1/ureg.hr),\n f_mode.q_leak.m_as(ureg.ft**3/ureg.min),\n f_mode.N_fan,\n 
f_mode.Q_fan.m_as(ureg.ft**3/ureg.min),\n f_mode.tau.m_as(ureg.min),\n f_mode.O2_conc,\n f_mode.F_i,\n f_mode.P_i/f_mode.leak_fr,\n f_mode.phi.m_as(1/ureg.hr)])\n filename += '.xlsx'\n with xlsxwriter.Workbook(filename) as workbook:\n header_format = workbook.add_format({'bold': True,\n 'font_size': 12,\n 'bottom': 3})\n worksheet = workbook.add_worksheet()\n col_width = [len(x) for x in table[0]]\n for row_n, row in enumerate(table):\n for col_n, data in enumerate(row):\n worksheet.write(row_n, col_n, data)\n if col_n in (0, 1, 10):\n # For source names, failure names\n # and 'Total failure rate'\n col_width[col_n] = max(col_width[col_n], len(str(data)))\n sci_format = workbook.add_format({'num_format': '0.00E+00'},)\n flow_format = workbook.add_format({'num_format': '#'},)\n percent_format = workbook.add_format({'num_format': '0%'},)\n number_format = workbook.add_format({'num_format': '0'},)\n worksheet.set_row(0, None, header_format)\n worksheet.set_column(2, 2, None, sci_format)\n worksheet.set_column(4, 4, None, sci_format)\n worksheet.set_column(5, 5, None, flow_format)\n worksheet.set_column(8, 8, None, sci_format)\n worksheet.set_column(9, 9, None, percent_format)\n worksheet.set_column(10, 12, None, sci_format)\n # Writing total/summary\n N_rows = len(table)\n N_cols = len(table[0])\n worksheet.write(N_rows+1, N_cols-2, 'Total fatality rate, 1/hr')\n worksheet.write(N_rows+1, N_cols-1,\n self.phi.m_as(1/ureg.hr))\n worksheet.write(N_rows+2, N_cols-2, 'ODH class')\n worksheet.write(N_rows+2, N_cols-1, self.odh_class(),\n number_format)\n # Autofit column width\n for col_n, width in enumerate(col_width):\n adj_width = width - 0.005 * width**2\n worksheet.set_column(col_n, col_n, adj_width)\n # Adding usability\n worksheet.conditional_format(\n 1, N_cols-1, N_rows-1, N_cols-1,\n {'type': '3_color_scale', 'min_color': '#008000',\n 'max_color': '#FF0000'})\n worksheet.freeze_panes(1, 0)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response" ]
[ "0.7330527", "0.6696778", "0.64272696", "0.6423999", "0.640377", "0.63337004", "0.625461", "0.621144", "0.61932975", "0.61916244", "0.6103704", "0.59963775", "0.5941088", "0.5938474", "0.5917404", "0.58896947", "0.58573407", "0.5852752", "0.5852723", "0.58489084", "0.5818077", "0.5803708", "0.57907164", "0.5779262", "0.57602906", "0.57589215", "0.57451236", "0.5731941", "0.57286435", "0.5727536" ]
0.69838953
1
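For readers unfamiliar with the xlsxwriter calls that the report-generation sample above relies on, the following is a minimal, self-contained sketch of the same pattern (create a workbook, add a worksheet, define cell formats, write a header row and data rows, then close). It is only an illustration: the function name, file name, column headers, and rows are placeholders and are not taken from the sample itself.

# Minimal standalone sketch of the xlsxwriter pattern used in the sample above.
# All names (file name, headers, rows) are illustrative placeholders.
import xlsxwriter

def write_parts_report(path, rows):
    """Write a small parts-issuance-style report to an .xlsx file."""
    workbook = xlsxwriter.Workbook(path)
    worksheet = workbook.add_worksheet("report")

    # Formats are created once on the workbook and reused per cell.
    header_fmt = workbook.add_format({"bold": True, "border": 2})
    cell_fmt = workbook.add_format({"border": 1})

    headers = ["NO.", "WO NO.", "PART NAME", "QTY"]
    for col, title in enumerate(headers):
        worksheet.write(0, col, title, header_fmt)
        worksheet.set_column(col, col, 15)  # widen each column

    # Data rows start on the row after the header.
    for row_idx, row in enumerate(rows, start=1):
        for col_idx, value in enumerate(row):
            worksheet.write(row_idx, col_idx, value, cell_fmt)

    workbook.close()  # flush and save the file

# Example usage:
# write_parts_report("report.xlsx", [(1, "WO-001", "Oil filter", 2)])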
Here we define the configuration settings needed for all ingestion plugins with reasonable defaults.
def vdk_configure(self, config_builder: ConfigurationBuilder) -> None: # Plugin-related configurations config_builder.add( key="INGEST_METHOD_DEFAULT", default_value=None, description="Default Ingestion method to be used.", ) config_builder.add( key="INGEST_TARGET_DEFAULT", default_value=None, description="Default Ingestion target to be used.", ) # Configure ingestion specific environment variables ingester_configuration.add_definitions(config_builder=config_builder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configuration():", "def init_config(self):\n pass", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):\n\n pass", "def get_default_config(self):\n config = super(SignalfxHandler, self).get_default_config()\n\n config.update({\n 'url': 'https://ingest.signalfx.com/v2/datapoint',\n 'batch': 300,\n # Don't wait more than 10 sec between pushes\n 'batch_max_interval': 10,\n 'auth_token': '',\n })\n\n return config", "def configure(self):\n pass", "def configure(self):\n pass", "def configure(self):\r\n pass", "def default_configs(cls):\n config: dict = super().default_configs()\n\n config.update({\n \"file_ext\": '.txt',\n \"num_sent_per_doc\": -1,\n \"doc_break_str\": None,\n \"column_format\": cls._DEFAULT_FORMAT,\n \"entity_mention_class\": None\n })\n return config", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def configure(self, options, conf):", "def _configure(self):\n pass", "def config(self):\n pass", "def config(self):\n pass", "def default_configs(cls):\n config = super().default_configs()\n config.update(\n {\n \"entry_type\": \"ft.onto.base_ontology.Document\",\n \"model_name\": \"ktrapeznikov/biobert_v1.1_pubmed_squad_v2\",\n \"question\": \"Where do I live\",\n \"max_answer_len\": 15,\n \"cuda_devices\": -1,\n \"handle_impossible_answer\": False,\n }\n )\n return config", "def __init__(self, cfg):\n super(DKInfluxDB, self).__init__(cfg, 'influxdb')", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def _config_options(self):\n self._config_sortable(self._sortable)\n self._config_drag_cols(self._drag_cols)", "def define_user_config(self) -> None:\n self.add_standard_metadata('infiles')\n\n self.add_custom_metadata(name='key_cols',\n short_name='k',\n required=True,\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='compare_cols',\n short_name='c',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='ignore_cols',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='col_names',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='variables',\n default=[],\n nargs='*',\n type=list)\n self.add_custom_metadata(name='already_sorted',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='already_uniq',\n action='store_const',\n const=True,\n default=False,\n type=bool)\n self.add_custom_metadata(name='temp_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='out_dir',\n default=None,\n type=str)\n self.add_custom_metadata(name='assignments',\n default=[],\n type=list)\n\n self.add_standard_metadata('verbosity')\n self.add_all_config_configs()\n self.add_all_csv_configs()\n self.add_all_help_configs()", "def apply_config_defaults():\n\n # don't worry about broken settings, validate_config() will take\n # care of them\n\n if 'pre_action_callbacks' not in nori.cfg:\n nori.cfg['pre_action_callbacks'] = [\n 
(pre_action_drupal_readonly, [], {})\n ]\n\n if 'post_action_callbacks' not in nori.cfg:\n nori.cfg['post_action_callbacks'] = [\n (post_action_drupal_readonly, [], {}, True)\n ]\n\n if 'source_type' not in nori.cfg:\n nori.cfg['source_type'] = 'generic'\n\n if 'source_query_func' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_func'] = generic_db_query\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_func'] = drupal_db_query\n\n if 'source_query_defaulter' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_defaulter'] = None\n\n if 'source_query_validator' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_query_validator'] = validate_generic_args\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_query_validator'] = validate_drupal_args\n\n if 'source_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_template_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'source_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['source_type'] == 'generic':\n nori.core.cfg['source_global_change_callbacks'] = []\n elif nori.core.cfg['source_type'] == 'drupal':\n nori.core.cfg['source_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'dest_type' not in nori.cfg:\n nori.cfg['dest_type'] = 'generic'\n\n if 'dest_query_func' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_func'] = generic_db_query\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_func'] = drupal_db_query\n\n if 'dest_query_defaulter' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_defaulter'] = (\n apply_generic_arg_defaults\n )\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_defaulter'] = None\n\n if 'dest_query_validator' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_query_validator'] = validate_generic_args\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_query_validator'] = validate_drupal_args\n\n if 'dest_template_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_template_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_template_change_callbacks'] = [\n (drupal_timestamp_callback, [], {})\n ]\n\n if 'dest_global_change_callbacks' not in nori.cfg:\n if nori.core.cfg['dest_type'] == 'generic':\n nori.core.cfg['dest_global_change_callbacks'] = []\n elif nori.core.cfg['dest_type'] == 'drupal':\n nori.core.cfg['dest_global_change_callbacks'] = [\n (drupal_cache_callback, [], {})\n ]\n\n if 'templates' not in nori.core.cfg:\n return\n if not isinstance(nori.core.cfg['templates'],\n nori.core.MAIN_SEQUENCE_TYPES):\n return\n\n for i, template in enumerate(nori.core.cfg['templates']):\n if not isinstance(nori.core.cfg['templates'][i],\n nori.core.MAPPING_TYPES):\n continue\n\n if T_MULTIPLE_KEY not in template:\n nori.core.cfg['templates'][i][T_MULTIPLE_KEY] = False\n\n if T_S_QUERY_ARGS_KEY in template:\n args_t = 
template[T_S_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['source_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_D_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_D_FUNC_KEY] = None\n\n if T_S_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_S_NO_REPL_KEY] = False\n\n if T_S_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_S_CHANGE_CB_KEY] = []\n\n if T_D_QUERY_ARGS_KEY in template:\n args_t = template[T_D_QUERY_ARGS_KEY]\n defaulter = nori.core.cfg['dest_query_defaulter']\n if (isinstance(args_t, tuple) and len(args_t) >= 2 and\n isinstance(args_t[0], nori.core.MAIN_SEQUENCE_TYPES) and\n isinstance(args_t[1], nori.core.MAPPING_TYPES) and\n defaulter and callable(defaulter)):\n defaulter(args_t[0], args_t[1])\n\n if T_TO_S_FUNC_KEY not in template:\n nori.core.cfg['templates'][i][T_TO_S_FUNC_KEY] = None\n\n if T_D_NO_REPL_KEY not in template:\n nori.core.cfg['templates'][i][T_D_NO_REPL_KEY] = False\n\n if T_D_CHANGE_CB_KEY not in template:\n nori.core.cfg['templates'][i][T_D_CHANGE_CB_KEY] = []\n\n if T_KEY_MODE_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_MODE_KEY] = 'all'\n\n if T_KEY_LIST_KEY not in template:\n nori.core.cfg['templates'][i][T_KEY_LIST_KEY] = []", "def setUpConfig(self):\n pass", "def configure(self) -> None:", "def configs(self):\n raise NotImplementedError()", "def config(\n data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n cache_responses=settings.cache_responses,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_idf_objects=settings.useful_idf_objects,\n default_weight_factor=\"area\",\n ep_version=settings.ep_version,\n debug=settings.debug,\n):\n # set each global variable to the passed-in parameter value\n settings.cache_responses = cache_responses\n settings.cache_folder = Path(cache_folder).expand().makedirs_p()\n settings.data_folder = Path(data_folder).expand().makedirs_p()\n settings.imgs_folder = Path(imgs_folder).expand().makedirs_p()\n settings.logs_folder = Path(logs_folder).expand().makedirs_p()\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_idf_objects = useful_idf_objects\n settings.zone_weight.set_weigth_attr(default_weight_factor)\n settings.ep_version = ep_version\n settings.debug = debug\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n get_logger(name=\"archetypal\")\n log(\"Configured archetypal\")", "def _initConfig(self):\n from tg import config as tg_config\n\n # Set config defaults\n config = DEFAULT_CONFIG.copy()\n temp_verbose = config[\"verbose\"]\n\n # Configuration file overrides defaults\n default_config_file = os.path.abspath(DEFAULT_CONFIG_FILE)\n config_file = tg_config.get('wsgidav.config_path', default_config_file)\n fileConf = self._readConfigFile(config_file, temp_verbose)\n config.update(fileConf)\n\n if not useLxml and config[\"verbose\"] >= 1:\n print(\n \"WARNING: Could not import lxml: using xml instead (slower). 
Consider installing lxml from http://codespeak.net/lxml/.\")\n from wsgidav.dir_browser import WsgiDavDirBrowser\n from tracim.lib.webdav.tracim_http_authenticator import TracimHTTPAuthenticator\n from wsgidav.error_printer import ErrorPrinter\n from tracim.lib.webdav.utils import TracimWsgiDavDebugFilter\n\n config['middleware_stack'] = [\n WsgiDavDirBrowser,\n TracimHTTPAuthenticator,\n ErrorPrinter,\n TracimWsgiDavDebugFilter,\n ]\n\n config['provider_mapping'] = {\n config['root_path']: Provider(\n # TODO: Test to Re enabme archived and deleted\n show_archived=False, # config['show_archived'],\n show_deleted=False, # config['show_deleted'],\n show_history=False, # config['show_history'],\n manage_locks=config['manager_locks']\n )\n }\n\n config['domaincontroller'] = TracimDomainController(presetdomain=None, presetserver=None)\n\n return config", "def config():\n config_django()\n config_svisor()", "def config( **kwargs ):" ]
[ "0.6318215", "0.6309264", "0.62523276", "0.62523276", "0.62523276", "0.62523276", "0.62494737", "0.62309724", "0.6225098", "0.6225098", "0.6183282", "0.6180916", "0.6179421", "0.6114876", "0.6098103", "0.60826004", "0.60826004", "0.6071773", "0.6046305", "0.60166687", "0.60119224", "0.599236", "0.59749216", "0.5965325", "0.5924659", "0.5921973", "0.59173036", "0.5916264", "0.58941644", "0.5879194" ]
0.6801265
0
Get a single TW task as an Albert Item.
def get_tw_item(task: taskw.task.Task) -> v0.Item: # type: ignore field = get_as_subtext_field task_id = tw_side.get_task_id(task) actions = [ FuncAction( "Complete task", lambda args_list=["done", task_id]: run_tw_action(args_list), ), FuncAction( "Delete task", lambda args_list=["delete", task_id]: run_tw_action(args_list), ), FuncAction( "Start task", lambda args_list=["start", task_id]: run_tw_action(args_list), ), FuncAction( "Stop task", lambda args_list=["stop", task_id]: run_tw_action(args_list), ), FuncAction( "Edit task interactively", lambda args_list=["edit", task_id]: run_tw_action(args_list, need_pty=True), ), FuncAction( "Fail task", lambda task_id=task_id: fail_task(task_id=task_id), ), ClipAction("Copy task UUID", f"{task_id}"), ] found_urls = url_re.findall(task["description"]) if "annotations" in task.keys(): found_urls.extend(url_re.findall(" ".join(task["annotations"]))) for url in found_urls[-1::-1]: actions.insert(0, UrlAction(f"Open {url}", url)) if reminders_tag_path.is_file(): global reminders_tag reminders_tag = load_data(reminders_tag_path) else: save_data("remindme", str(reminders_tag_path)) actions.append( FuncAction( f"Add to Reminders (+{reminders_tag})", lambda args_list=[ "modify", task_id, f"+{reminders_tag}", ]: run_tw_action(args_list), ) ) actions.append( FuncAction( "Work on next (+next)", lambda args_list=[ "modify", task_id, "+next", ]: run_tw_action(args_list), ) ) urgency_str, icon = urgency_to_visuals(task.get("urgency")) text = task["description"] due = None if "due" in task: due = task["due"].astimezone(dateutil.tz.tzlocal()).strftime("%Y-%m-%d %H:%M:%S") # type: ignore return get_as_item( text=text, subtext="{}{}{}{}{}".format( field(urgency_str), "ID: {}... | ".format(tw_side.get_task_id(task)[:8]), field(task["status"]), field(task.get("tags"), "tags"), field(due, "due"), )[:-2], icon=[str(icon)], completion=f'{curr_trigger}{task["description"]}', actions=actions, urgency=task.get("urgency"), )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)", "def get_task(task_id):\n return db.task.find_one({'_id': ObjectId(task_id)})", "def get_item(self):\n return self.item", "def get_item(self):\n return self.item", "def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item", "def get(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if not task == None:\n guid = \"%s\" % task.key().id_or_name()\n task_json = { \"id\": \"%s\" % guid, \"name\": task.name,\n \"priority\": task.priority, \"effort\": task.effort,\n \"projectId\": task.projectId,\n \"submitterId\": task.submitterId, \"assigneeId\": task.assigneeId,\n \"type\": task.type, \"developmentStatus\": task.developmentStatus,\n \"validation\": task.validation, \"description\": task.description,\n \"createdAt\": task.createdAt,\n \"updatedAt\": task.updatedAt }\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(404, \"Task not found\")", "def getitem(itemID):\n\n return harvest(GET_ITEM_URL, itemID)", "def get_item(self, id: str, user: User) -> Optional[T]:", "def GetItem(self):\r\n \r\n return self._item", "def get_task(self, id):\n\n collection = self._get_collection()\n\n item = collection.find_one({\"_id\": ObjectId(id)})\n\n if item:\n return _mongo_item_to_task(item)\n else:\n return None", "def get(self, guid):\n results = j.sal.fs.find(self._root, '*_%s' % guid)\n if len(results) <= 0:\n raise TaskNotFoundError(\"task %s not found\" % guid)\n if len(results) > 1:\n raise RuntimeError(\"found 2 tasks with same guid, this should not happen\")\n return self._deserialize_task(j.sal.fs.readFile(results[0]))", "def get(self, task_id):\n try:\n return self.dal.task.get_by_id(task_id)\n except EntityNotFound:\n raise DoesNotExist()", "def get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)", "def get_task(self, task_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE id=?\", (task_id,))\n return res.fetchone()", "def get_task_by_tid(self, tid):\n return self.task_controller.get_task(tid)", "def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()", "def get_item(self, item_id): # pragma: no cover\n raise NotImplementedError", "def get_item(self):\n raise NotImplementedError", "def get_item(self, name: str) -> Optional[Item]:\n item = self.filter_items(name, limit=1)\n return item[0] if item else None", "def get_by_name(task_name):\n return tasks.find_one({'name': task_name})", "def getItem(self, id):\n path = 'item/' + id\n return self.sendRestRequest('GET', path)", "def get_task(task_id):\n try:\n return Task.objects.get(id=task_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no task with id={}.'.format(task_id))", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. 
\" % task)", "def taskdetail_get(td_id):\n return IMPL.taskdetail_get(td_id)", "def get_item(self, call_number):\n return self.item_list.get(call_number)", "def get_task(self, name):\n res = Task()\n self.GetTask(name, res)\n return res", "def getItem(self):\n return self.getItem(0)", "def __getitem__(self, txid: int) -> asyncio.Task:\n return self._tasks[txid]", "def get(self, name, task):\n assert name, \"Must input a valid dataset name.\"\n assert task, \"Must input a valid task name.\"\n self._assert_dataset_exists_in_cache(name)\n self._assert_task_exists_in_dataset_in_cache(name, task)\n return self.manager.data[\"dataset\"][name][\"tasks\"][task]", "def getItem(self) -> Optional[items.Item]:\n return None if self.__itemRef is None else self.__itemRef()" ]
[ "0.6219642", "0.6196074", "0.6165974", "0.6165974", "0.61451584", "0.61449784", "0.61061776", "0.6070151", "0.6032105", "0.6015247", "0.6007213", "0.59836924", "0.59652644", "0.59573513", "0.59348106", "0.5902991", "0.5900254", "0.58807874", "0.58392173", "0.58383256", "0.5837305", "0.580728", "0.57932675", "0.5767161", "0.5763113", "0.5761524", "0.5744938", "0.5731988", "0.5706495", "0.56801385" ]
0.676394
0
Determine whether the current query is of a subcommand. If so, return the corresponding SubcommandQuery object.
def get_subcommand_query(query_str: str) -> Optional[SubcommandQuery]: if not query_str: return None # spilt: # "subcommand_name rest of query" -> ["subcommand_name", "rest of query""] query_parts = query_str.strip().split(None, maxsplit=1) if len(query_parts) < 2: query_str = "" else: query_str = query_parts[1] subcommand = get_subcommand_for_name(query_parts[0]) if subcommand: return SubcommandQuery(subcommand=subcommand, query=query_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_sub_commands(self) -> bool:\n if self.__dict__.get(\"sub_commands\"):\n return True\n\n return False", "def _subcommand_for_name(self, name):\n for subcommand in self.subcommands:\n if name == subcommand.name or \\\n name in subcommand.aliases:\n return subcommand\n return None", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def subcmd(self) -> Optional[str]:\n return self._subcmd", "def get_subcommand_for_name(name: str) -> Optional[Subcommand]:\n matching = [s for s in subcommands if s.name.lower() == name.lower()]\n if matching:\n return matching[0]", "def is_command(schema_obj):\n\n return isinstance(schema_obj, schema.Command)", "def fetch_subcommand(self, name):\n try:\n subcommand_class = self.subcommands[name]\n except KeyError:\n self.print_command_unkown_error(name)\n sys.exit(1)\n return subcommand_class(self.prog, name, self.argv[2:], self.stdout)", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def get_subcmd(self, name: str) -> \"CommandHelp\":\n try:\n return self.subcmds[name]\n except KeyError:\n # Try looking up by alias\n for sub_name, sub_help in self.subcmds.items():\n for alias in sub_help.aliases:\n if name == alias:\n return self.subcmds[sub_name]\n raise", "def _is_command(obj, cli):\n if not inspect.isfunction(obj) or obj.__name__.startswith(\"_\"):\n return False\n return hasattr(obj, \"__module__\") and obj.__module__ == cli.__name__", "def __Ancestor(self, flag):\n command = self._parent\n while command:\n if flag in command.flags:\n return True\n command = command._parent # pylint: disable=protected-access\n return False", "def _is_command(self, ext):\n try:\n return issubclass(ext, CommandExtension)\n except TypeError:\n return False", "def test_subCommandInTwoPlaces(self):\n class SubOpt(usage.Options):\n pass\n class OptFoo(usage.Options):\n subCommands = [\n ('foo', 'f', SubOpt, 'quux'),\n ]\n class OptBar(usage.Options):\n subCommands = [\n ('bar', 'b', SubOpt, 'quux'),\n ]\n oFoo = OptFoo()\n oFoo.parseOptions(['foo'])\n oBar=OptBar()\n oBar.parseOptions(['bar'])\n self.failUnless(hasattr(oFoo.subOptions, 'parent'))\n self.failUnless(hasattr(oBar.subOptions, 'parent'))\n self.failUnlessIdentical(oFoo.subOptions.parent, oFoo)\n self.failUnlessIdentical(oBar.subOptions.parent, oBar)", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def fetch_command(self, subcommand):\n # Get commands outside of try block to prevent swallowing exceptions\n commands = get_commands()\n try:\n app_name = commands[subcommand]\n except KeyError:\n possible_matches = get_close_matches(subcommand, commands)\n sys.stderr.write(\"Unknown command: %r\" % subcommand)\n if possible_matches:\n sys.stderr.write(\". 
Did you mean %s?\" % possible_matches[0])\n sys.stderr.write(\"\\nType '%s help' for usage.\\n\" % self.prog_name)\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def is_subcall(self):\n return False", "def is_command_response(schema_obj):\n\n return isinstance(schema_obj, schema.CommandResponse)", "def subectIsSelf():", "def is_Q(self):\n return isinstance(self,Q)", "def _dispatching(self):\n return bool(self.generate_config or self.subapp or self.subcommand)", "def fetch_command(self, subcommand):\n try:\n app_name = get_commands()[subcommand]\n except KeyError:\n sys.stderr.write(\"Unknown command: %r\\nType '%s help'\"\n \" for usage.\\n\" % \\\n (subcommand, self.prog_name))\n sys.exit(1)\n if isinstance(app_name, BaseCommand):\n # If the command is already loaded, use it directly.\n klass = app_name\n else:\n klass = load_command_class(app_name, subcommand)\n return klass", "def is_cmd(self, name):\n \n return name in self.cmds", "def get_command(self):\n if self.command is not None:\n return self.command\n elif self.parent is not None:\n return self.parent.get_command()\n else:\n return None", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and self.command is not None", "def nested_subcmd(self, depth: int = 2) -> Optional[str]:\n # pylint: disable=protected-access\n current = 0\n subparser = self.parser\n try:\n while current < depth:\n action = subparser._actions[0]\n if isinstance(action, _SubParsersAction):\n subparser = action.choices[self.args[action.dest]]\n current += 1\n else:\n return None\n return subparser.name.split()[-1]\n except (IndexError, KeyError, TypeError):\n return None", "def __init__(self, subcommand: Subcommand, query: str):\n\n self.command = subcommand\n self.query = query", "def is_valid_command(args):\n if args.command is not None:\n return True\n return False", "def is_command_ancillary(args):\n # pylint: disable=bad-continuation\n if (\n # skip the parent check and only\n # determine if the parameter is present\n is_valid_executes(args, skip=True)\n ):\n return True\n return False", "def is_command(self, text):\n return text.split(' ', 1)[0].startswith(\"!\")", "def _iscommand(self, key):\r\n\t\tyes = False\r\n\t\tfor i in COMMAND_NAME.keys():\r\n\t\t\tif key == i: \r\n\t\t\t\tyes = True; break\r\n\t\treturn yes" ]
[ "0.65202355", "0.6357731", "0.63516015", "0.6259924", "0.60126", "0.59781826", "0.5967946", "0.5843146", "0.5827129", "0.5752442", "0.5749248", "0.57485867", "0.5696898", "0.5505429", "0.5458622", "0.53960603", "0.5385143", "0.53838754", "0.5379334", "0.53359336", "0.5302235", "0.5151404", "0.514488", "0.5140806", "0.5090426", "0.50821155", "0.50634664", "0.5018499", "0.5016916", "0.49812126" ]
0.6729017
0
Opens and reads the parameters in the [SUBCATCHMENT] and [SUBAREA] headers within the SWMM input file. Adds these parameters (as strings) to a numpy array
def read_initial_parameters(inputfilename): subc_params = [] subarea_params = [] global subc_names subc_names = [] subcatchment_parameters = [] inputfile = open(inputfilename, 'r') for line in inputfile: if(line.find("[SUBCATCHMENTS]") != -1): line = inputfile.readline() for i in range(count): templine = list(line) if templine[0] == ";" or templine[0] == " " or len(templine) < 10: line = inputfile.readline() continue elif (line.find("[") != -1): break else: linesplit = line.split() subc_params.append(linesplit[4:7]) subc_names.append(linesplit[0]) line = inputfile.readline() if (line.find("[SUBAREAS]") != -1): line = inputfile.readline() for i in range(count): templine = list(line) if templine[0] == ";" or templine[0] == " " or len(templine) < 10: line = inputfile.readline() continue elif (line.find("[") != -1): break else: linesplit = line.split() subarea_params.append(linesplit[1:6]) line = inputfile.readline() inputfile.close() #Part of the function that experiments with np array. Potentially removes the need for the list transformation # functions that chew up a lot of time. Each subcatchment has a row, each parameter type has a column. global subcatchment_parameters_np subcatchment_parameters_np = np.empty((len(subc_params[0]) + len(subarea_params[0]), len(subc_params)), dtype=float) for row in range(len(subc_params)): for col in range(len(subc_params[0])): subcatchment_parameters_np[row, col] = float(subc_params[row][col]) for row in range(len(subarea_params)): for col in range(len(subarea_params[0])): subcatchment_parameters_np[row, col + len(subc_params[0])] = float(subarea_params[row][col]) #Old string code # for i in range(len(subc_params)): # for j in range(len(subarea_params[i])): # subc_params[i].append(subarea_params[i][j]) # subcatchment_parameters.append(subc_params[i]) return(np_subcatchment_parameters)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subcatch(ini_file='subcatch.ini'):\n config.read(ini_file)\n print 'Read the file ', ini_file\n\n file_in = config.get('file_in', 'file_in')\n\n file_out = config.get('file_out', 'file_out')\n\n picture_out = config.get('picture_out', 'picture_out')\n\n Xoutlet = config.getfloat('coord_outlet', 'Xoutlet')\n Youtlet = config.getfloat('coord_outlet', 'Youtlet')\n\n nb_param = config.getfloat('flags', 'nb_param')\n X = config.getfloat('flags', 'X')\n\n #Reading of parameter file\n print 'Reading parameter file'\n ar_cell_label, ar_coorx, ar_coory, ar_lambda, ar_Xc, ar_dam, ar_tan_beta, \\\n ar_tan_beta_channel, ar_L, ar_Ks, ar_theta_r, ar_theta_s, ar_n_o, ar_n_c, \\\n ar_cell_down, ar_pVs_t0, ar_Vo_t0, ar_Qc_t0, ar_kc \\\n = pm.read_cell_parameters(file_in)\n\n #Search for the cell close to the coordinates\n print 'Search for the outlet cell'\n cell_outlet = find_cell_coordinates(ar_cell_label, Xoutlet,\n Youtlet, ar_coorx, ar_coory, ar_lambda)\n\n #Search for the catchment cells\n print 'Search for the catchment cells'\n subcatch_label = all_up_cell(cell_outlet, ar_cell_down, ar_cell_label)\n\n #Select the subcatchmnent parameters\n print 'Select the subcatchmnent parameters'\n tab_param = np.zeros((len(subcatch_label),nb_param))\n new_label = np.arange(len(subcatch_label))\n\n tab_param[:,0] = new_label#ar_cell_label[subcatch_label]\n tab_param[:,1] = ar_coorx[subcatch_label]\n tab_param[:,2] = ar_coory[subcatch_label]\n tab_param[:,3] = ar_lambda[subcatch_label]\n tab_param[:,4] = ar_Xc[subcatch_label]\n tab_param[:,5] = ar_dam[subcatch_label]\n tab_param[:,6] = ar_tan_beta[subcatch_label]\n tab_param[:,7] = ar_tan_beta_channel[subcatch_label]\n tab_param[:,8] = ar_L[subcatch_label]\n tab_param[:,9] = ar_Ks[subcatch_label]\n tab_param[:,10] = ar_theta_r[subcatch_label]\n tab_param[:,11] = ar_theta_s[subcatch_label]\n tab_param[:,12] = ar_n_o[subcatch_label]\n tab_param[:,13] = ar_n_c[subcatch_label]\n for i in range(len(subcatch_label)):\n if i == 0:\n tab_param[i,14] = -9999.0\n else:\n ind = np.where(ar_cell_label[subcatch_label]\n == ar_cell_down[subcatch_label][i])\n\n tab_param[i,14] = new_label[ind]\n\n tab_param[:,15]=ar_pVs_t0[subcatch_label]\n tab_param[:,16]=ar_Vo_t0[subcatch_label]\n tab_param[:,17]=ar_Qc_t0[subcatch_label]\n tab_param[:,18]=ar_kc[subcatch_label]\n\n #~~~~~~Write parameter file~~~~~~#\n np.savetxt(file_out, tab_param)\n\n ar_image=ar_cell_label*0.\n ar_image[subcatch_label]=1.\n ar_image[ar_lambda==1.]=10.\n ar_image[cell_outlet]=5.\n field_map(ar_image, ar_coorx, ar_coory, X, picture_out, 'Subcatchment')", "def Read_RMCA_basic(Complete_Path):\n fid = open(Complete_Path,'r')\n S = []\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n S.append(float(line))\n #R.append(float(line[27:-2]))\n return np.array(S)", "def readParams(file_name):\n try:\n info = np.load(file_name,allow_pickle=True)[()]\n except FileNotFoundError:\n if file_name.split('/')[-2] == 'checkpoint':\n lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'\n file_name = lfc_id_dir + os.path.basename(file_name)\n info = np.load(file_name,allow_pickle=True)[()]\n else:\n raise FileNotFoundError\n # Assemble information into \"fit-able\" form\n num_orders = len(info['params'])\n lines = [p[:,1] for p in info['params'] if p is not None]\n errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]\n ordrs = [o for o in np.arange(86) if info['params'][o] is not None]\n waves = [w for w in info['wvln'] if w is not None]\n # I believe, but am not sure, that the 
wavelengths are multiplied by order\n # to separate them from when orders overlap at the edges\n waves = [wvln for order, wvln in zip(ordrs,waves)]\n ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]\n\n x = np.concatenate(lines)\n y = np.concatenate(ordrs)\n e = np.concatenate(errs)\n w = np.concatenate(waves)\n # Note: default of pipeline includes ThAr lines, which we're not including here\n \n return (x,y,w,e)", "def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, 
:, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array", "def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set", "def Read_RMCA_out(Complete_Path):\n fid = 
open(Complete_Path,'r')\n L,R = [],[]\n while 1: \n line = fid.readline()\n if line =='': \n break \n else :\n L.append(float(line[:25]))\n R.append(float(line[27:-2]))\n return np.array(L),np.array(R)", "def read_from_file(self,grd_fn):\n self.grd_fn = grd_fn\n self.fp = open(self.grd_fn,'rt')\n hdr = self.fp.readline().strip() #header &GRD_2008 or &LISTGRD\n\n if hdr == self.hdr_08:\n print( \"Will read 2008 format for grid\" )\n n_parms = 11\n elif hdr == self.hdr_old:\n print( \"Will read old UnTRIM grid format\" )\n n_parms = 10\n\n for i in range(n_parms): # ignore TNE and TNS in new format files\n l = self.fp.readline()\n lhs,rhs = l.split('=')\n val = rhs.strip().strip(',')\n varname = lhs.strip()\n print( \"%s=%s\"%(varname,val) )\n\n if varname=='NV':\n Nvertices = int(val)\n elif varname=='NE':\n Npolys = int(val)\n elif varname=='NS':\n Nsides = int(val)\n elif varname=='NBC':\n Nboundary_poly = int(val)\n elif varname=='NSI':\n Ninternal_sides = int(val)\n elif varname=='NSF':\n Nflow_sides = int(val)\n elif varname=='NBC':\n Nbc = int(val)\n elif varname=='ANGLE':\n self.angle = float(val)\n elif varname=='LOCATION':\n self.location = val\n elif varname=='NR': ## these are read, but not used\n Nred = int(val)\n elif varname=='TNE':\n TNE=int(val)\n elif varname=='TNS':\n TNS=int(val)\n # others: HLAND for older fmt.\n \n while 1:\n s = self.fp.readline().strip() # header: /\n if s == '/':\n break\n\n # We know the size of everything, and can ask UnstructuredGrid to allocate\n # arrays now, with the 'special' meaning that passing an integer means allocate\n # the array of that size, full of zeros.\n # this allocates\n # self.nodes, self.edges, self.cells\n self.from_simple_data(points = Nvertices,edges = Nsides, cells = Npolys)\n\n for v in range(Nvertices):\n Cv = self.fp.readline().split()\n if hdr == self.hdr_08:\n vertex_num = int(Cv.pop(0))\n if vertex_num != v+1:\n print( \"Mismatched vertex numbering: %d != %d\"%(vertex_num,v+1) )\n self.nodes['x'][v,0] = float(Cv[0])\n self.nodes['x'][v,1] = float(Cv[1])\n \n print( \"Npolys\",Npolys )\n self.cells['edges'] = self.UNKNOWN # initialize all\n self.cells['nodes'] = self.UNKNOWN\n \n for c in range(Npolys):\n l = self.fp.readline()\n Cp = l.split()\n if hdr == self.hdr_08:\n poly_num = int(Cp.pop(0))\n if poly_num-1 != c:\n print( \"Mismatched polygon id: %fd != %d\"%(poly_num,c+1) )\n \n numsides = int(Cp[0])\n\n self.cells['_center'][c,0] = float(Cp[1])\n self.cells['_center'][c,1] = float(Cp[2])\n\n if hdr == self.hdr_old:\n # vertex index is Cp[3,5,7,9]\n # the others, 4,6,8,10, are edges, right?\n # convert to 0 based indices here\n\n # This is probably wrong! 
I think it's actually reading the\n # sides\n self.cells['edges'][c,0] = int(Cp[4]) - 1\n self.cells['edges'][c,1] = int(Cp[6]) - 1 \n self.cells['edges'][c,2] = int(Cp[8]) - 1\n if numsides == 4:\n self.cells['edges'][c,3] = int(Cp[10]) - 1 \n else:\n self.cells['edges'][c,3]=self.UNDEFINED\n #HERE - need to copy that to self.cells['nodes']\n else:\n for ei in range(numsides):\n self.cells['nodes'][c,ei] = int(Cp[3+ei]) - 1\n self.cells['edges'][c,ei] = int(Cp[3+numsides+ei]) - 1\n self.cells['nodes'][c,numsides:]=self.UNDEFINED\n self.cells['edges'][c,numsides:]=self.UNDEFINED\n \n # choose some large, above-sea-level depth\n self.cells['depth_mean'] = -1000 # not sure this is doing anything...\n\n for e in range(Nsides):\n Cs = self.fp.readline().split()\n if hdr == self.hdr_08:\n # side num = int(Cs.pop(0))\n Cs.pop(0)\n elif hdr == self.hdr_old:\n # side depth?\n edge_depth = self.edges['depth_mean'][e] = float(Cs.pop(0))\n \n self.edges['nodes'][e,0] = int(Cs[0])-1 # vertex indices\n self.edges['nodes'][e,1] = int(Cs[1])-1\n \n self.edges['cells'][e,0] = int(Cs[2])-1 # cell neighbors\n self.edges['cells'][e,1] = int(Cs[3])-1\n\n if hdr == self.hdr_old:\n for nc in self.edges['cells'][e]:\n if nc >= 0 and edge_depth > self.cells['depth_mean'][nc]:\n self.cells['depth_mean'][nc] = edge_depth\n\n if hdr==self.hdr_old:\n # old format - have to infer cell nodes from edges\n self.make_cell_nodes_from_edge_nodes()\n\n # Try to make sense of the marks and red/black:\n self.cells['red'][:Nred] = True\n self.cells['mark'][:Nboundary_poly] = self.BOUNDARY\n self.edges['mark'][:Ninternal_sides] = 0\n self.edges['mark'][Ninternal_sides:Nflow_sides] = self.FLOW\n self.edges['mark'][Nflow_sides:] = self.LAND\n\n # Bathymetry:\n if hdr == self.hdr_08:\n # make a cheap tokenizer to read floats across lines\n # note that it's up to the user to know that all values from\n # the line are read, and not to get the iterator until you're\n # ready for some values to be read\n def tokenizer():\n while True:\n for item in self.fp.readline().split():\n yield item\n for c in range(Npolys):\n check_c,nis = [int(s) for s in self.fp.readline().split()]\n if check_c != c+1:\n print(\"ERROR: while reading cell subgrid, cell index mismatch: %s vs. %d\"%(c+1,check_c))\n \n next_token = tokenizer().next\n areas = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n \n self.cells['depth_mean'][c] = np.sum(areas*depths) / np.sum(areas)\n self.cells['_area'][c] = np.sum(areas)\n self.cells['depth_max'][c] = depths.max()\n self.cells['subgrid'][c] = (areas,depths)\n for e in range(Nflow_sides):\n l = self.fp.readline()\n # print \"%d/%d - Read line: %s\"%(e,self.Nsides,l)\n check_e,nis = [int(s) for s in l.split()]\n if check_e != e+1:\n print( \"ERROR: While reading edge subgrid, edge index mismatch: %s vs. 
%s\"%(e+1,check_e) )\n next_token = tokenizer().next\n lengths = np.array( [float(next_token()) for sg in range(nis)] )\n depths = np.array( [float(next_token()) for sg in range(nis)] )\n if sum(lengths)<=0:\n print( \"edge %d has bad lengths\"%e )\n self.edges['depth_mean'][e] = np.sum(lengths*depths) / sum(lengths)\n self.edges['depth_max'][e] = depths.max()\n self.edges['subgrid'][e] = (lengths,depths)\n # and land boundaries get zeros.\n for e in range(Nflow_sides,Nsides):\n self.edges['depth_mean'][e] = 0.0\n self.edges['depth_max'][e] = 0.0\n self.edges['subgrid'][e] = ([],[])", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n #print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). 
\n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def parseSpineXout(ofname):\n# 0 1 2 3 4 5 6 7 8 9 10 11 12\n# # index AA SS phi1 psi1 P_E P_C P_H phi0 psi0 ASA S_pk S_SS pk_phi pk_psi pkc_phi pkc_ps\n# 1 E C -85.6 141.3 0.0527 0.8784 0.0689 -87.5 143.0 130.5 0.6941 0.4126 -5.0000 5.0000 0.9924 0.2499\n ss=[]\n phi=[]\n psi=[]\n asa=[]\n rasa=[]\n MAX_ACC=getMAXASA('single')\n for f in open(ofname,'r'):\n f=f.split()\n if f[0]=='#':\n continue\n #ss.append(f[2])\n phi.append([float(f[8]),float(f[3])])\n psi.append([float(f[9]),float(f[4])])\n ss.append([float(i) for i in f[5:8]])\n asa.append(float(f[10]))\n try:\n m=MAX_ACC[f[1]] #if key not found then produce nan\n except KeyError as e:\n print e\n m=np.nan\n continue\n rasa.append(float(f[10])/m)\n return (np.array(asa),np.array(rasa),np.array(ss),np.array(phi),np.array(psi))", "def readDriverFile(self, input_file):\n\n\n fid = open(self.basePath + input_file,'r')\n\n # Line 1\n line = fid.readline()\n l_input = line.split('!')\n mshfile = l_input[0].rstrip()\n\n # Line 2\n line = fid.readline()\n l_input = line.split('!')\n obsfile = l_input[0].rstrip()\n\n # Line 3\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='null':\n topofile = []\n\n else:\n topofile = l_input[0].rstrip()\n\n\n # Line 4\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mstart = float(l_input[1])\n\n else:\n mstart = l_input[0].rstrip()\n\n # Line 5\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n mref = float(l_input[1])\n\n else:\n mref = l_input[0].rstrip()\n\n # Line 6\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n staticInput = float(l_input[1])\n\n elif l_input[0]=='DEFAULT':\n staticInput = None\n\n else:\n staticInput = l_input[0].rstrip()\n\n\n # Line 7\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n magfile = []\n\n else:\n magfile = l_input[0].rstrip()\n\n # Line 8\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input=='DEFAULT':\n wgtfile = []\n\n else:\n wgtfile = l_input[0].rstrip()\n\n # Line 9\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n chi = float(l_input[0])\n\n # Line 10\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n val = np.array(l_input[0:4])\n alphas = val.astype(np.float)\n\n # Line 11\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n bounds = val.astype(np.float)\n\n else:\n bounds = l_input[0].rstrip()\n\n # Line 12\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:6])\n lpnorms = val.astype(np.float)\n\n else:\n lpnorms = l_input[0].rstrip()\n\n # Line 13\n line = fid.readline()\n l_input = re.split('[!\\s]',line)\n if l_input[0]=='VALUE':\n val = np.array(l_input[1:3])\n eps = val.astype(np.float)\n\n else:\n eps = [None,None]\n\n self.mshfile = mshfile\n self.obsfile = obsfile\n self.topofile = topofile\n self.mstart = mstart\n self._mrefInput = mref\n self._staticInput = staticInput\n self.magfile = magfile\n self.wgtfile = wgtfile\n self.chi = chi\n self.alphas = alphas\n self.bounds = bounds\n self.lpnorms = lpnorms\n self.eps = eps", "def 
read(self, run):\n # read the file\n self['run'] = run[0:run.rfind('.xml')]\n f = open(run)\n for line in f:\n \n if line.find('SDSU Exec') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n self['application'] = line[n1:n2]\n\n elif line.find('<detector_status') >= 0:\n n1 = line.index('name=') + 6\n n2 = line.index('\"', n1)\n if line[n1:n2] != 'Ultraspec':\n raise Exception, 'Run ' + run + ' is not an Ultraspec file.'\n \n elif line.find('SPEED') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['speed'] = line[n1:n2]\n \n elif line.find('X_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x_bin'] = line[n1:n2]\n \n elif line.find('Y_BIN') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y_bin'] = line[n1:n2]\n \n # first window \n \n elif line.find('X1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_start'] = line[n1:n2]\n \n elif line.find('X1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x1_size'] = line[n1:n2]\n \n elif line.find('Y1_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_start'] = line[n1:n2]\n \n elif line.find('Y1_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y1_size'] = line[n1:n2]\n \n # second window\n \n elif line.find('X2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_start'] = line[n1:n2]\n \n elif line.find('X2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['x2_size'] = line[n1:n2]\n \n elif line.find('Y2_START') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_start'] = line[n1:n2]\n \n elif line.find('Y2_SIZE') >= 0:\n n1 = line.index('value=') + 7\n n2 = line.index('\"', n1)\n self['y2_size'] = line[n1:n2]\n \n elif line.find('<target>') >= 0:\n n1 = line.index('target') + 7\n n2 = line.index('<', n1)\n self['target'] = line[n1:n2]\n\n elif line.find('<grating>') >= 0:\n n1 = line.index('grating') + 8\n n2 = line.index('<', n1)\n self['grating'] = line[n1:n2]\n\n elif line.find('<slit_width>') >= 0:\n n1 = line.index('slit_width') + 11\n n2 = line.index('<', n1)\n self['slit_width'] = line[n1:n2]\n\n elif line.find('<slit_angle>') >= 0:\n n1 = line.index('slit_angle') + 11\n n2 = line.index('<', n1)\n self['slit_angle'] = line[n1:n2]\n \n elif line.find('<filters>') >= 0:\n n1 = line.index('filters') + 8\n n2 = line.index('<', n1)\n self['filters'] = line[n1:n2]\n\n elif line.find('<ID>') >= 0:\n n1 = line.index('ID') + 3\n n2 = line.index('<', n1)\n self['ID'] = line[n1:n2]\n\n elif line.find('<PI>') >= 0:\n n1 = line.index('PI') + 3\n n2 = line.index('<', n1)\n self['PI'] = line[n1:n2]\n\n elif line.find('<comment>') >= 0:\n n1 = line.index('comment') + 8\n n2 = line.index('<', n1)\n self['comment'] = line[n1:n2]\n \n\n # check that we have found what we expected to find\n if 'application' not in self:\n raise Exception, 'Failed to find application name in ' + run\n\n if self.is_not_power_onoff():\n\n if 'x_bin' not in self:\n raise Exception, 'Failed to find X_BIN in ' + run\n\n if 'y_bin' not in self:\n raise Exception, 'Failed to find Y_BIN in ' + run\n\n if 'x1_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x1_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y1_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y1_size' not 
in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'x2_start' not in self:\n raise Exception, 'Failed to find X2_START in ' + run\n \n if 'x2_size' not in self:\n raise Exception, 'Failed to find X2_SIZE in ' + run\n \n if 'y2_start' not in self:\n raise Exception, 'Failed to find Y2_START in ' + run\n \n if 'y2_size' not in self:\n raise Exception, 'Failed to find Y2_SIZE in ' + run\n \n if 'target' not in self:\n self['target'] = 'UNKNOWN'\n\n if 'filters' not in self:\n self['filters'] = '---'\n\n if 'grating' not in self:\n self['grating'] = '---'\n\n if 'slit_width' not in self:\n self['slit_width'] = '---'\n\n if 'slit_angle' not in self:\n self['slit_angle'] = '---'\n\n if 'ID' not in self:\n self['ID'] = 'UNKNOWN'\n\n if 'PI' not in self:\n self['PI'] = 'UNKNOWN'", "def read(self, timestamp=None):\n grbs = pygrib.open(self.filename)\n\n grid = self.subgrid\n\n return_img = {}\n return_metadata = {}\n\n var_msg_lut = {p: None for p in self.parameter}\n sea_mask = None\n for N in range(grbs.messages):\n n = N + 1\n message = grbs.message(n)\n param_name = str(message.cfVarNameECMF)\n\n if param_name == \"lsm\":\n if self.mask_seapoints and sea_mask is None:\n sea_mask = message.values.flatten()\n\n if param_name not in self.parameter:\n continue\n else:\n var_msg_lut[param_name] = n\n\n # available variables\n shape = None\n for param_name, n in var_msg_lut.items():\n if n is None:\n continue\n\n return_metadata[param_name] = {}\n\n message = grbs.message(n)\n\n param_data = message.values.flatten()\n if not shape:\n shape = param_data.shape\n return_img[param_name] = param_data\n\n if grid is None:\n lats, lons = message.latlons()\n try:\n res_lat, res_lon = get_grid_resolution(lats, lons)\n grid = ERA_RegularImgGrid(res_lat, res_lon)\n except ValueError: # when grid not regular\n lons_gt_180 = np.where(lons > 180.0)\n lons[lons_gt_180] = lons[lons_gt_180] - 360\n grid = ERA_IrregularImgGrid(lons, lats)\n\n return_metadata[param_name][\"units\"] = message[\"units\"]\n return_metadata[param_name][\"long_name\"] = \\\n message[\"parameterName\"]\n\n if \"levels\" in message.keys():\n return_metadata[param_name][\"depth\"] = \"{:} cm\".format(\n message[\"levels\"])\n\n if self.mask_seapoints:\n if sea_mask is None:\n raise IOError(\n \"No land sea mask parameter (lsm) in passed image\"\n \" for masking.\")\n else:\n # mask the loaded data\n for name in return_img.keys():\n param_data = return_img[name]\n param_data = np.ma.array(\n param_data,\n mask=np.logical_not(sea_mask),\n fill_value=np.nan,\n )\n param_data = param_data.filled()\n return_img[name] = param_data\n\n grbs.close()\n\n # missing variables\n for param_name, n in var_msg_lut.items():\n if n is not None:\n continue\n param_data = np.full(shape, np.nan)\n warnings.warn(\"Cannot load variable {var} from file {thefile}. 
\"\n \"Filling image with NaNs.\".format(\n var=param_name, thefile=self.filename))\n return_img[param_name] = param_data\n return_metadata[param_name] = {}\n return_metadata[param_name][\"long_name\"] = lookup(\n self.product, [param_name]).iloc[0][\"long_name\"]\n\n if self.array_1D:\n return Image(\n grid.activearrlon,\n grid.activearrlat,\n return_img,\n return_metadata,\n timestamp,\n )\n else:\n nlat = np.unique(grid.activearrlat).size\n nlon = np.unique(grid.activearrlon).size\n\n for key in return_img:\n return_img[key] = return_img[key].reshape((nlat, nlon))\n\n return Image(\n grid.activearrlon.reshape(nlat, nlon),\n grid.activearrlat.reshape(nlat, nlon),\n return_img,\n return_metadata,\n timestamp,\n )", "def __load_data(self) -> np.array:\n with open('.'+os.sep+'ressources'+os.sep+self.path+os.sep+'capacity.txt','r') as fp:\n capacity = self.__parse_line(fp.readline())\n with open('.' + os.sep + 'ressources' + os.sep + self.path + os.sep + 'demand.txt', 'r') as fp:\n demand = self.__parse_line(fp.readline())\n with open('.' + os.sep + 'ressources' + os.sep + self.path + os.sep + 'distance.txt', 'r') as fp:\n distance_matrix = []\n for line in fp:\n row = self.__parse_line(line)\n distance_matrix.append(row)\n with open('.' + os.sep + 'ressources' + os.sep + self.path + os.sep + 'transportation_cost.txt', 'r') as fp:\n transportation_cost = self.__parse_line(fp.readline())\n return np.asarray(capacity), np.asarray(demand), np.asarray(distance_matrix), np.asarray(transportation_cost)", "def structure(self, ism_input):\n f = open(ism_input, 'r')\n data = []\n for line in f:\n line = line.replace('\\\"', '')\n line = line.replace('],[', '];[')\n line = line.strip()\n line = line.replace(']', '')\n line = line.replace('[', '')\n line = line.split(';')\n line[0] = line[0].split('|')\n ls = list(map(lambda x: x.split(','), line[1:]))\n ls = list(map(lambda x: list(map(lambda y: y.split('|'), x)), ls))\n line[1:] = ls\n data.append(line)\n data = np.array(data[1:]) \n \n return data", "def readCrystParam(crystfile):\n \n # Default values\n ccell1 = np.eye(3)\n ccell2 = np.eye(3)\n planehkl = [1,0,0]\n diruvw = [0,1,0]\n \n try:\n with open(crystfile,\"r\") as f:\n content = f.readlines()\n except FileNotFoundError:\n content = []\n\n for l in content:\n if l[0].rstrip() == \"#\":\n continue\n line = l.split('=')\n if len(line) == 2:\n if line[0].rstrip()==\"ccell1\":\n ccell1 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"ccell2\":\n ccell2 = eval(line[1].rstrip())\n elif line[0].rstrip()==\"planehkl\":\n planehkl = eval(line[1].rstrip())\n elif line[0].rstrip()==\"diruvw\":\n diruvw = eval(line[1].rstrip())\n else:\n print(\"WARNING: %s is not a supported input\"%(line[0].rstrip()))\n elif len(line) > 2:\n raise SyntaxError(l)\n\n return ccell1, ccell2, planehkl, diruvw", "def rdspecdat(self):\n # TODO : ugh. this is crude. 
Should have some checks for file format\n # and probably better to use the astropy.io functions now.\n try:\n w, f, e = np.loadtxt(self.filename, unpack=True)\n except:\n w, f = np.loadtxt(self.filename, unpack=True)\n e = []", "def open_gains(fname, snver=1):\n\n hdu = get_hdu(fname, extname='AIPS SN', ver=snver)\n\n nif = hdu.header['NO_IF']\n npol = hdu.header['NO_POL']\n nant = hdu.header['NO_ANT']\n # set ``nif'' from dtype of hdu.data\n _data = np.zeros(hdu.header['NAXIS2'], dtype=[('start', '<f8'),\n ('stop', '<f8'),\n ('antenna', 'int'),\n ('gains', 'complex',\n (nif, npol,)),\n ('weights', '<f8',\n (nif, npol,))])\n\n time = hdu.data['TIME']\n dtime = hdu.data['TIME INTERVAL']\n antenna = hdu.data['ANTENNA NO.']\n\n # Constructing `gains` field\n rgains = hdu.data['REAL1'] + 1j * hdu.data['IMAG1']\n # => (466, 8)\n lgains = hdu.data['REAL2'] + 1j * hdu.data['IMAG2']\n rgains = np.expand_dims(rgains, axis=2)\n # => (466, 8, 1)\n lgains = np.expand_dims(lgains, axis=2)\n gains = np.dstack((rgains, lgains))\n # => (466, 8, 2)\n\n # Constructing `weights` field\n rweights = hdu.data['WEIGHT 1']\n # => (466, 8)\n lweights = hdu.data['WEIGHT 2']\n rweights = np.expand_dims(rweights, axis=2)\n # => (466, 8, 1)\n lweights = np.expand_dims(lweights, axis=2)\n weights = np.dstack((rweights, lweights))\n # => (466, 8, 2)\n\n # Filling structured array by fields\n _data['start'] = time - 0.5 * dtime\n _data['stop'] = time + 0.5 * dtime\n _data['antenna'] = antenna\n _data['gains'] = gains\n _data['weights'] = weights\n\n gains = list()\n for ant in set(_data['antenna']):\n idx = _data['antenna'] == ant\n gains.append(GainCurve(ant, nif, npol, _data[idx][['start', 'stop',\n 'gains',\n 'weights']]))\n return gains", "def readFile(file_name):\n if file_name.split('.')[-1] == 'thid':\n x,m,w = readThid(file_name)\n e = np.empty_like(x)\n e[:] = np.nan\n return x,m,w,e\n else:\n return readParams(file_name)", "def ReadBasicInfo():\r\n\r\n EquilibriumStep, ProductionStep,HEPCP,HEPCE,Multiple=10000000,10000000,100,100,2\r\n InputPath,OutputPath,AtomParameterPath,TaskSuffix,MaterialInputFormat='..','..','..','','mol'\r\n GasType,GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,\\\r\n TorqueSetting,MuSiCSetting,Nodes=[],[],[],[],[],[],[],[],['1:ppn=1']\r\n CutOff,GridSpacingP,GridSpacingE=12.8,2.0,2.0\r\n MakeGCMC,UsePmap,UseEmap,UsePost,MakePmap,MakeEmap,MakeTorque,KeyOne,KeyTwo,\\\r\n PDBCharges = False,False,False,False,False,False,False,False,False,False\r\n\r\n with open('GlueParameters', 'r') as File:\r\n for Line in File.readlines():\r\n if Line.strip():\r\n WordList = Line.strip().split()\r\n if len(WordList)>1 or KeyOne==True or KeyTwo==True:\r\n if WordList[0]=='#':\r\n continue\r\n\r\n # Controlled part\r\n elif WordList[0] == 'MakeGCMC:' and WordList[1] == 'open':\r\n MakeGCMC = True\r\n elif WordList[0] == 'UsePmap:' and WordList[1] == 'yes':\r\n UsePmap = True\r\n elif WordList[0] == 'UseEmap:' and WordList[1] == 'yes':\r\n UseEmap = True\r\n elif WordList[0] == 'UsePost:' and WordList[1] == 'yes':\r\n UsePost = True\r\n elif WordList[0] == 'MakePmap:' and WordList[1] == 'open':\r\n MakePmap = True\r\n elif WordList[0] == 'MakeEmap:' and WordList[1] == 'open':\r\n MakeEmap = True\r\n elif WordList[0] == 'MakeTorque:' and WordList[1] == 'open':\r\n MakeTorque = True\r\n elif WordList[0] == 'UseChargesFromPDBFile:' and WordList[1] == 'yes':\r\n PDBCharges = True\r\n\r\n # Basic part\r\n elif WordList[0]=='InputPath:':\r\n InputPath=WordList[1]\r\n elif 
WordList[0]=='MaterialInputFormat:':\r\n MaterialInputFormat=WordList[1]\r\n elif WordList[0]=='OutputPath:':\r\n OutputPath=WordList[1]\r\n elif WordList[0]=='AtomParameterPath:':\r\n AtomParameterPath=WordList[1]\r\n elif WordList[0] == 'GasType:':\r\n GasType = list(WordList[1:])\r\n elif WordList[0] == 'GasAtomTypeNum:':\r\n\r\n for i in WordList[1:]:\r\n GasAtomTypeNum.append(int(i))\r\n\r\n elif WordList[0] == 'GasAtomType:':\r\n GasAtomType = list(WordList[1:])\r\n elif WordList[0] == 'Multiple:':\r\n Multiple = int(WordList[1])\r\n elif WordList[0] == 'CutOff:':\r\n CutOff = float(WordList[1])\r\n\r\n # GCMC part\r\n\r\n elif WordList[0] == 'GasPartialPressure:':\r\n\r\n for j in WordList[1:]:\r\n GasPartialPressure.append(str(j))\r\n\r\n elif WordList[0] == 'TemperatureList(K):':\r\n\r\n for l in WordList[1:]:\r\n TemperatureList.append(float(l))\r\n\r\n elif WordList[0] == 'PressureList(kPa):':\r\n\r\n for k in WordList[1:]:\r\n PressureList.append(float(k))\r\n\r\n elif WordList[0] == 'EquilibriumStep:':\r\n EquilibriumStep = int(WordList[1])\r\n elif WordList[0] == 'ProductionStep:':\r\n ProductionStep = int(WordList[1])\r\n\r\n # Pmap part\r\n elif WordList[0] == 'GridSpacingP(Ang):':\r\n GridSpacingP = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffP(kJ/mol):':\r\n HEPCP = int(WordList[1])\r\n\r\n # Emap part\r\n elif WordList[0] == 'GridSpacingE(Ang):':\r\n GridSpacingE = float(WordList[1])\r\n elif WordList[0] == 'HighEndPotentialCutoffE(kJ/mol):':\r\n HEPCE = int(WordList[1])\r\n\r\n # Torque part\r\n elif WordList[0] == 'Nodes:':\r\n Nodes = WordList[1:]\r\n elif WordList[0] == 'TaskSuffix:':\r\n TaskSuffix = WordList[1]\r\n elif WordList[0] == 'TorqueSetting:':\r\n KeyOne = True\r\n elif WordList[0] == 'MuSiCSetting:':\r\n KeyOne = False\r\n KeyTwo = True\r\n elif WordList[0] == 'END':\r\n KeyTwo = False\r\n elif KeyOne == True:\r\n TorqueSetting.append(Line)\r\n elif KeyTwo == True:\r\n MuSiCSetting.append(Line)\r\n\r\n return (InputPath,OutputPath,AtomParameterPath,MakeTorque,GasType,\r\n GasAtomTypeNum,GasAtomType,GasPartialPressure,TemperatureList,PressureList,CutOff,MakeGCMC,UsePmap,\r\n UseEmap,UsePost,MakePmap,MakeEmap,EquilibriumStep,ProductionStep,GridSpacingP,HEPCP,GridSpacingE,HEPCE,\r\n Multiple,TorqueSetting,MuSiCSetting,Nodes,TaskSuffix,PDBCharges,MaterialInputFormat)", "def getSubParamLine(self,subname, numNodesSub, subParamInfo,dir_name):\n #nodeSubInterface = []\n subOptionInfo_p = []\n subSchemInfo_p = []\n filename_t = subname + '.sub'\n filename_t = os.path.join(dir_name, filename_t)\n data_p = self.readNetlist(filename_t)\n subOptionInfo_p, subSchemInfo_p = self.separateNetlistInfo(data_p)\n \n if len(subOptionInfo_p) > 0:\n newline = subOptionInfo_p[0]\n newline = newline.split('.subckt '+ subname) \n intLine = newline[1].split()\n print \"numNodesSub Index---------->\",numNodesSub\n newindex = numNodesSub[subname]\n appen_line = intLine[newindex:len(intLine)]\n appen_param = ','.join(appen_line)\n paramLine = 'parameter Real ' + appen_param + ';'\n paramLine = paramLine.translate(maketrans('{}', ' '))\n subParamInfo.append(paramLine)\n return subParamInfo", "def readBeaches(filein):\n\n rdarray = np.genfromtxt(filein, skip_header=1, delimiter=',', usecols=[5,6,8,9,10,11,12])\n beaches = np.genfromtxt(filein, skip_header=1, delimiter=',', usecols=[4], dtype=str)\n sspaid = np.genfromtxt(filein, skip_header=1, delimiter=',', usecols=[0], dtype=str)\n\n beachlats = rdarray[:,0]\n beachlons = rdarray[:,1]\n beachnorm = rdarray[:,2]\n 
maxang = rdarray[:,3]\n beachslope = rdarray[:,4]\n approslope = rdarray[:,5]\n beachtype = np.array(rdarray[:,6], dtype=int)\n\n return sspaid, beaches, beachlats, beachlons, beachnorm, maxang, approslope, beachslope, beachtype", "def spinex_sec(infile, sequence):\n return np.loadtxt(infile, usecols=[7, 5, 6], skiprows=1).reshape((1, -1, 3))", "def legacy_load(self,filepath = '', amplifier = 'Amplifier'):\n if filepath == '':\n filepath = filedialog.askopenfilename()\n file1 = open(filepath)\n ctxt = file1.readline().rstrip()\n\n header = ''\n rowskip = 0\n while ctxt[0] == '#':\n header = header + ctxt[1:]\n ctxt = file1.readline().rstrip()\n rowskip += 1\n\n voltstr = header[2:-1]\n\n if voltstr.find(',') > 0:\n volts = np.fromstring(voltstr, sep=',')\n else:\n volts = np.fromstring(voltstr, sep='\\t')\n\n file1.close()\n data1 = np.loadtxt(filepath, skiprows=rowskip)\n self.hydoutput = data1\n self.cfreq = volts\n self.amplifier = amplifier", "def loadFromINIFile(self): \r\n if (self.verbose):\r\n print(\"Reading SA300.ini\")\r\n iniFile = open('SA300.ini','r')\r\n for i in range(8):\r\n aLine = iniFile.readline().rstrip(\"\\n\") # read line \r\n tokens = aLine.split()\r\n if (self.verbose):\r\n print(tokens)\r\n self.IDStrList[i].set(tokens[0])\r\n self.WeightList[i].set(tokens[1])\r\n self.schedList[i].set(self.sched[int(tokens[2])])\r\n self.paramNumList[i].set(tokens[3])\r\n self.SessionLengthList[i].set(tokens[4])\r\n self.IBILengthList[i].set(tokens[5])\r\n self.PumpTimeList[i].set(tokens[6])\r\n self.calcPumpTimeList[i].set(tokens[7])\r\n aString = iniFile.readline().rstrip(\"\\n\") # COM number (done differently on a Mac)\r\n self.portString.set(aString)\r\n # print(\"portString = \"+aString)\r\n aString = iniFile.readline().rstrip(\"\\n\") # read next line\r\n tokens = aString.split()\r\n self.varCode = int(tokens[0])\r\n # print(\"self.varCode =\",self.varCode,format(self.varCode,'08b')) \r\n for bit in range(8):\r\n mask = (2**bit) # mask (eg. 
00001000)\r\n # Uses AND and mask to determine whether to set bit\r\n if (self.varCode & mask > 0): self.sysVarList[bit].set(True)\r\n else: self.sysVarList[bit].set(False)\r\n iniFile.close()", "def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)", "def read_inversion_info(file_dic):\n #print_file_test = open('file_test.txt','w')\n\n if not ( check_inversion_files(file_dic) ):\n print 'error(read_inversion_info): problem with lenstool file names'\n return 0\n \n file_generate_arcs = file_dic['file_generate_arcs']\n info_input_lens = fc.extract_second_identifiers( file_generate_arcs, \\\n 'potential' )\n#-------------------------------------------------------------------------------\n\n file_source = file_dic['file_source']\n info_src = np.loadtxt(file_source, unpack=False)\n if len(info_src) == 8 and np.isscalar(info_src[0]):\n #FIXME - check if the second condition is all we need\n info_src = [info_src]\n#-------------------------------------------------------------------------------\n\n file_make_inversion = file_dic['file_make_inversion']\n info_fited_param = fc.extract_second_identifiers( file_make_inversion, \\\n 'limit' )\n info_forme = fc.extract_parameter(file_make_inversion, 'forme')[0][0]\n\n#-------------------------------------------------------------------------------\n\n file_best_fit = file_dic['file_best_fit']\n info_best_fit = fc.extract_second_identifiers( file_best_fit, \\\n 'potentiel' )\n\n info_xi2 = fc.extract_parameter(file_best_fit, '#Chi2pos:')\n\n#-------------------------------------------------------------------------------\n file_chires = file_dic['file_chires']\n\n info_chires = extract_parameter(file_chires, '0')\n rmss_mean = [0.0, 0.0]\n rmsi_mean = [0.0, 0.0]\n for i in info_chires:\n if i[0] != 'A':\n rmss_mean[0] = rmss_mean[0] + float(i[7])\n rmss_mean[1] = rmss_mean[1] + 1.0\n \n rmsi_mean[0] = rmsi_mean[0] + float(i[8])\n rmsi_mean[1] = rmsi_mean[1] + 1.0\n\n rmss_mean = rmss_mean[0]/rmss_mean[1]\n rmsi_mean = rmsi_mean[0]/rmsi_mean[1]\n#-------------------------------------------------------------------------------\n out_dict = { 'xi2' : float(info_xi2[0][0]), \\\n 'best_fit_lens' : info_best_fit, \\\n 'rmsi_mean' : rmsi_mean, \\\n 'rmss_mean' : rmss_mean, \\\n 'fited_parameters' : info_fited_param[0].keys(), \\\n 'input_lens' : info_input_lens[len(info_input_lens) - 1], \\\n 'forme' : info_forme \\\n }\n #for i in out_dict.keys():\n # print i, out_dict[i]\n return out_dict", "def init_data_array(self, mess = None): \n if self.verbose > 1:\n print(\"MultiLinearSpectra.init_data_array()\") \n \n if mess is None:\n if self.mess is None:\n warnings.warn(\"MultiLinearSpectra.init_data_array(): no data to initialize\")\n return None\n else:\n self.mess = mess\n \n\n \n \n for m in range(len(self.mess)):\n \n self.mess[m][\"index\"] = m\n \n kwargs = {}\n for k, v in self.mess[m].items():\n kwargs[k] = v\n \n if 
self.mess[m][\"class\"] == \"PASGas\" and flag_ST:\n self.mess[m][\"object\"] = PASG.PASGas(verbose = self.verbose, **kwargs)\n\n elif self.mess[m][\"class\"] == \"PASLiquid\" and flag_ST:\n self.mess[m][\"object\"] = PASL.PASLiquid(verbose = self.verbose, **kwargs)\n\n\n # x_unit = self.mess[0].x_unit\n # y_unit = self.mess[0].y_unit\n\n # for m in range(1, len(self.mess)):\n # if x_unit != self.mess[m].x_unit:\n # self.mess.x_unit", "def parse_parameters(filePath):\r\n numThreads, queue, affinity = 0,\"\",\"\"\r\n \r\n for line in open(filePath):\r\n if \"spec.omp2001.size:\" in line:\r\n if get_last_column_number(line)==\"test\":\r\n print(\"IS TEST SIZE!!1 : \" + filePath)\r\n \r\n if \"spec.omp2001.sw_threads:\" in line:\r\n numThreads = int(get_last_column_number(line))\r\n \r\n if \"spec.omp2001.mach:\" in line:\r\n machine = line.split(\" \")[-1]\r\n columns = machine.split(\".\")\r\n \r\n queue = columns[0]\r\n affinity = columns[1]\r\n \r\n return numThreads, queue, affinity", "def read_params(fname):\n f = open(fname, 'r')\n par = {} #output\n for i in range(10): # esta dentro de las primeras 10 lineas\n l = f.readline().split()\n #print \" ---> \", l\n number = u'%s' % l[-1] # presumably a number\n if not number.replace('.','').replace('-','').isnumeric():\n if l[0]=='#####':\n break\n else:\n continue # we proceed ONLY IF this is numeric string\n #print ' FIRST: ', l[0]\n if l[0]=='#####':\n #print \"IM I HERE????\"\n break # end of header\n\n name = l[1][:-1] # l[0] es '#', y -1 para comernos el \":\"\n value = np.float(l[2]) # l[2] es el valor\n par[name] = value\n\n return par", "def read_file(self, fullname):\n\n data = np.genfromtxt(fullname, dtype=None, names=True, skip_header=0)\n return data" ]
[ "0.58298117", "0.5741129", "0.5640352", "0.54980344", "0.5496068", "0.5432148", "0.5394989", "0.53380096", "0.53087157", "0.5273573", "0.52647877", "0.5246655", "0.5224779", "0.5212376", "0.52076614", "0.5198927", "0.5182285", "0.5177914", "0.5172997", "0.5172781", "0.516248", "0.51255375", "0.51171046", "0.5105941", "0.5099218", "0.5091882", "0.50832456", "0.5080764", "0.50803757", "0.50546193" ]
0.7251039
0
Sets parameters for rigs
rig_ids_str   Comma-separated string with rig ids, e.g. "1,2,3,4"
miner         Miner to set. Leave it null if you do not want to change. "claymore", "claymorez", "ewbf", ...
miner2        Second miner to set. Leave it null if you do not want to change. "0" if you want to unset it.
id_wal        ID of wallet. Leave it null if you do not want to change.
id_oc         ID of OC profile. Leave it null if you do not want to change.
Returns: bool|mixed
def multiRocket(self, rig_ids_str, miner, miner2, id_wal, id_oc):
    if rig_ids_str is None:
        self.log("Rigs ids required")
        exit()
    params = {
        'method': 'multiRocket',
        'rig_ids_str': rig_ids_str,
        'miner': miner,
        'miner2': miner2,
        'id_wal': id_wal,
        'id_oc': id_oc
    }
    result = self.request(params)
    if 'error' in result:
        return False
    return result
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def set_sids(self, sids):\n self._sids = sids\n # encode sids in RGB\n r = sids // 256**2\n rem = sids % 256**2 # remainder\n g = rem // 256\n b = rem % 256\n self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)\n self.rgbsids[:, 0] = r\n self.rgbsids[:, 1] = g\n self.rgbsids[:, 2] = b", "def set_sids(self, sids):\n self._sids = sids\n # encode sids in RGB\n r = sids // 256**2\n rem = sids % 256**2 # remainder\n g = rem // 256\n b = rem % 256\n self.rgbsids = np.zeros((self.npoints, 3), dtype=np.uint8)\n self.rgbsids[:, 0] = r\n self.rgbsids[:, 1] = g\n self.rgbsids[:, 2] = b", "def set_reads(self,bkid,pgs=0,vgs=None):\n# logging.debug('models.set_reads(%s,%s)'%(bkid,pg))\n myreads = self.get_reads()\n try:\n pg = int(pgs)\n except:\n pg = 0\n if bkid not in myreads:\n myreads[bkid] = [datetime.strftime(datetime.utcnow(),'%Y-%m-%d %H:%M:%S'),pg,vgs or '']\n else:\n if myreads[bkid][1] <= pg:\n myreads[bkid][1] = pg\n if vgs:\n if isinstance(vgs,list):\n myreads[bkid][2] = ','.join(filter(None,vgs))\n elif myreads[bkid][2]:\n mrs = myreads[bkid][2].split(',')\n if vgs in mrs: mrs.remove(vgs)\n mrs.append(vgs)\n myreads[bkid][2] = ','.join(filter(None,mrs))\n else:\n myreads[bkid][2] = str(vgs)\n self.put_reads(myreads)\n return True", "def set_params(self, params):\n for item in params:\n if len(item.split(\"-\")) == 5:\n self.params[item.split(\"-\")[-1]] = params[item]\n elif item.split(\"-\")[4] == \"BECKE88\":\n self.becke88.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE88_LR\":\n self.becke88_lr.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE88_LR_ADIABATIC\":\n self.becke88_lr_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE97\":\n self.becke97.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BECKE_ROUSSEL\":\n self.becke_roussel.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"BEEF\":\n self.beef.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"CS1\":\n self.cs1.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"GV09\":\n self.gv09.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"HCTH\":\n self.hcth.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"KE_GGA\":\n self.ke_gga.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"KE_LIBXC\":\n self.ke_libxc.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LDA_HOLE_T_C_LR\":\n self.lda_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LIBXC\":\n self.libxc.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LYP\":\n self.lyp.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"LYP_ADIABATIC\":\n self.lyp_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"OPTX\":\n self.optx.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"P86C\":\n self.p86c.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PADE\":\n self.pade.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PBE\":\n self.pbe.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PBE_HOLE_T_C_LR\":\n self.pbe_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PW92\":\n self.pw92.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"PZ81\":\n self.pz81.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"TF\":\n self.tf.set_params({item: params[item]})\n elif item.split(\"-\")[4] == 
\"TFW\":\n self.tfw.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"TPSS\":\n self.tpss.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"VWN\":\n self.vwn.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"XALPHA\":\n self.xalpha.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"XGGA\":\n self.xgga.set_params({item: params[item]})\n elif item.split(\"-\")[4] == \"XWPBE\":\n self.xwpbe.set_params({item: params[item]})\n else:\n pass", "def set_params(self, params):\n for item in params:\n if len(item.split(\"-\")) == 6:\n self.params[item.split(\"-\")[-1]] = params[item]\n elif item.split(\"-\")[5] == \"BECKE88\":\n self.becke88.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE88_LR\":\n self.becke88_lr.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE88_LR_ADIABATIC\":\n self.becke88_lr_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE97\":\n self.becke97.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BECKE_ROUSSEL\":\n self.becke_roussel.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"BEEF\":\n self.beef.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"CS1\":\n self.cs1.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"GV09\":\n self.gv09.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"HCTH\":\n self.hcth.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"KE_GGA\":\n self.ke_gga.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"KE_LIBXC\":\n self.ke_libxc.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LDA_HOLE_T_C_LR\":\n self.lda_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LIBXC\":\n self.libxc.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LYP\":\n self.lyp.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"LYP_ADIABATIC\":\n self.lyp_adiabatic.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"OPTX\":\n self.optx.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"P86C\":\n self.p86c.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PADE\":\n self.pade.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PBE\":\n self.pbe.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PBE_HOLE_T_C_LR\":\n self.pbe_hole_t_c_lr.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PW92\":\n self.pw92.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"PZ81\":\n self.pz81.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"TF\":\n self.tf.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"TFW\":\n self.tfw.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"TPSS\":\n self.tpss.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"VWN\":\n self.vwn.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"XALPHA\":\n self.xalpha.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"XGGA\":\n self.xgga.set_params({item: params[item]})\n elif item.split(\"-\")[5] == \"XWPBE\":\n self.xwpbe.set_params({item: params[item]})\n else:\n pass", "async def set_roster(\n self, jid: typing.Union[JID, str], roster_items: dict, **send_kwargs\n ) -> Iq:\n if self.granted_privileges[\"roster\"] not in (\"set\", \"both\"):\n log.error(\"The server did not grant us privileges to set rosters\")\n raise 
ValueError\n else:\n return await self._make_set_roster(jid, roster_items).send(**send_kwargs)", "def __set_receivers_id(self, receivers_id):\n if not isinstance(receivers_id, list):\n raise TypeError('Receivers id should be a list')\n if not all(isinstance(receiver_id, int) for receiver_id in receivers_id): # Check if all elements are int\n raise TypeError('All elements in the receivers id list should be integer')\n if any(receiver_id < 0 for receiver_id in receivers_id): # If any elements is negative\n raise ValueError('An element is negative, there can not be negative ids')\n self.__receivers_id = receivers_id", "def init_armor_set(self, armor_set):\n \n if armor_set:\n for armor_build in armor_set:\n armor = armor_build(self)\n self.armor_set.append(armor)\n armor.activate()", "def set_ships(self, dictionary):\n for key, value in dictionary.items():\n if value < 0:\n raise SettingsError(\"No negative ships\")\n self._parser.set(\"settings\", \"carriers\", str(dictionary[CARRIER]))\n self._parser.set(\"settings\", \"battleships\", str(dictionary[BATTLESHIP]))\n self._parser.set(\"settings\", \"cruisers\", str(dictionary[CRUISER]))\n self._parser.set(\"settings\", \"destroyers\", str(dictionary[DESTROYER]))\n self._save()", "def addtagDic(dic_i,tag,tag_str,setint=False):\n if( len( tag_str ) ):\n dic_i[tag] = []\n for id_s in tag_str.split():\n if( setint ):\n dic_i[tag].append(int(id_s))\n else:\n dic_i[tag].append(id_s)\n \n return dic_i", "def addtagDic(dic_i,tag,tag_str,setint=False):\n if( len( tag_str ) ):\n dic_i[tag] = []\n for id_s in tag_str.split():\n if( setint ):\n dic_i[tag].append(int(id_s))\n else:\n dic_i[tag].append(id_s)\n \n return dic_i", "def updateDict(self,strSet):\n\tself.createAdjList(strSet,\"remove\")", "def reviewer_id(self, reviewer_id: int):\n\n self._reviewer_id = reviewer_id", "def set_srid(self, srid: ir.IntegerValue) -> GeoSpatialValue:\n return ops.GeoSetSRID(self, srid=srid).to_expr()", "def setModemInitString(self, initString, unitCode=0):\n resp = self.XAPCommand('MINIT', initString, unitCode=unitCode)\n return resp", "def krsedg(self, krsedg):\n if (self.local_vars_configuration.client_side_validation and\n krsedg is not None and len(krsedg) > 32):\n raise ValueError(\"Invalid value for `krsedg`, length must be less than or equal to `32`\") # noqa: E501\n\n self._krsedg = krsedg", "def replace_ids_submission(ids):\n \n item = np.zeros((len(ids), ), dtype = 'int')\n user = np.zeros((len(ids), ), dtype = 'int')\n for i in range(len(ids)):\n row, col = ids[i].split(\"_\")\n item[i] = int(row.replace(\"r\", \"\"))\n user[i] = int(col.replace(\"c\", \"\"))\n \n return item, user", "async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],\n sticker_set_name: base.String) -> base.Boolean:\n payload = generate_payload(**locals())\n result = await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)\n\n return result", "def make_chromarms(\n chromsizes,\n midpoints,\n cols_chroms=(\"chrom\", \"length\"),\n cols_mids=(\"chrom\", \"mid\"),\n suffixes=(\"_p\", \"_q\"),\n):\n columns_to_drop = [\"index\", \"sub_index_\"]\n if len(cols_chroms) == 2:\n ck1, sk1 = cols_chroms\n elif len(cols_chroms) == 3:\n ck1, sk1, ek1 = cols_chroms\n\n if isinstance(chromsizes, pd.Series):\n df_chroms = (\n pd.DataFrame(chromsizes).reset_index().rename(columns={\"index\": ck1})\n )\n elif isinstance(chromsizes, pd.DataFrame):\n df_chroms = chromsizes.copy()\n else:\n raise ValueError(\"unknown input type for chromsizes\")\n\n if len(cols_chroms) 
== 2:\n _verify_columns(df_chroms, [ck1, sk1])\n columns_to_drop += [sk1]\n df_chroms[\"end\"] = df_chroms[sk1].values\n df_chroms[\"start\"] = 0\n sk1, ek1 = \"start\", \"end\"\n elif len(cols_chroms) == 3:\n ck1, sk1, ek1 = cols_chroms\n _verify_columns(df_chroms, [ck1, sk1, ek1], unique_cols=True)\n if any(df_chroms[sk1].values != 0):\n raise ValueError(\"all values in starts column must be zero\")\n else:\n raise ValueError(\"invalid number of cols_chroms\")\n\n ck2, sk2 = cols_mids\n if isinstance(midpoints, dict):\n df_mids = pd.DataFrame.from_dict(midpoints, orient=\"index\", columns=[sk2])\n df_mids.reset_index(inplace=True)\n df_mids.rename(columns={\"index\": ck2}, inplace=True)\n elif isinstance(midpoints, pd.DataFrame):\n df_mids = midpoints.copy()\n else:\n raise ValueError(\"unknown input type for midpoints\")\n _verify_columns(df_mids, [ck2, sk2])\n df_mids[\"start\"] = df_mids[sk2]\n df_mids[\"end\"] = df_mids[sk2]\n\n df_chromarms = ops.subtract(\n df_chroms,\n df_mids,\n cols1=(ck1, sk1, ek1),\n cols2=(ck2, \"start\", \"end\"),\n return_index=True,\n )\n if df_chromarms[\"sub_index_\"].max() > 1:\n raise ValueError(\n \"chromosome split into more than two arms, double-check midpoints\"\n )\n df_chromarms[\"name\"] = df_chromarms[ck1] + [\n suffixes[i] for i in df_chromarms[\"sub_index_\"].values\n ]\n # df_chromarms.drop(columns=columns_to_drop, inplace=True)\n return df_chromarms[[ck1, sk1, ek1, \"name\"]]", "def set_positions(self, x, y, station_diameter=40,\n hpol_phased_antennas=10, vpol_phased_antennas=10,\n hpol_phased_separation=1, vpol_phased_separation=1,\n hpol_phased_lowest=-49, vpol_phased_lowest=-69,\n outrigger_strings_per_station=3,\n outrigger_string_type=ARAString,\n **outrigger_string_kwargs):\n # Change defaults for outrigger strings\n if \"antennas_per_string\" not in outrigger_string_kwargs:\n outrigger_string_kwargs[\"antennas_per_string\"] = 8\n if \"antenna_separation\" not in outrigger_string_kwargs:\n n = outrigger_string_kwargs[\"antennas_per_string\"]\n sep = [1, 29] * int(n/2)\n outrigger_string_kwargs[\"antenna_separation\"] = sep[:n-1]\n if \"lowest_antenna\" not in outrigger_string_kwargs:\n outrigger_string_kwargs[\"lowest_antenna\"] = -100\n\n self.subsets.append(\n PhasedArrayString(x, y, antennas_per_string=hpol_phased_antennas,\n antenna_separation=hpol_phased_separation,\n lowest_antenna=hpol_phased_lowest,\n antenna_type=HpolAntenna)\n )\n self.subsets.append(\n PhasedArrayString(x, y, antennas_per_string=vpol_phased_antennas,\n antenna_separation=vpol_phased_separation,\n lowest_antenna=vpol_phased_lowest,\n antenna_type=VpolAntenna)\n )\n\n r = station_diameter/2\n for i in range(outrigger_strings_per_station):\n angle = 2*np.pi * i/outrigger_strings_per_station\n x_str = x + r*np.cos(angle)\n y_str = y + r*np.sin(angle)\n self.subsets.append(\n outrigger_string_type(x_str, y_str, **outrigger_string_kwargs)\n )", "def parse_run_range(self, run_range_str):\r\n\r\n assert isinstance(run_range_str, str)\r\n if not \"-\" in run_range_str:\r\n return None\r\n\r\n # split <>-<>\r\n (str_min, str_max) = run_range_str.split(\"-\")\r\n run_min_set = False\r\n run_max_set = False\r\n\r\n # parse run min\r\n try:\r\n run_min = int(str_min)\r\n run_min_set = True\r\n except ValueError:\r\n run_min = 0\r\n\r\n # parse run max\r\n try:\r\n run_max = int(str_max)\r\n run_max_set = True\r\n except ValueError:\r\n run_max = INFINITE_RUN\r\n\r\n return run_min, run_max, run_min_set, run_max_set", "def geneset(self, value: Union[str, int, Geneset, 
List[str]]):\n # Geneset can be set only once, prevent modifications\n if self._geneset is not None:\n raise ValueError(\"It is not allowed to change geneset value.\")\n\n if value is None:\n return\n\n # If id / slug of a geneset is given, get it from the Resolwe server\n if isinstance(value, (int, str)):\n gs = self.resolwe.geneset.get(value)\n value = gs.genes\n elif isinstance(value, Geneset):\n value = value.genes\n\n if isinstance(value, (list, set, tuple, pd.Series)):\n self._geneset = set(value)\n else:\n raise ValueError(f'Unsupported type of \"geneset\" input: {value}.')", "def set_RQ(self): \n # Convenience abbreviations.\n ion_atms = self.ion_atms # Atoms to average dictionary\n ion_res = self.ion_res # Ionizable residues\n RQ = self.RQ # list of q_i coordinates\n for res_id in self.res_ids:\n if res_id[0] in ion_res:\n # Atoms to average. Omitting the residue id at the 0 position in the\n # 'res'-list, therefore 'res_id[3:]'.\n # 'atm.split()[0]' returns the atom type.\n av_atms = []\n for atm in res_id[3:]:\n if atm.split()[0] in ion_atms[res_id[0]]:\n av_atms.append(\" \".join(res_id[:3]) + \" \" + atm.strip())\n RQ.append(av_atms) \n self.RQ = RQ", "def srs_id(self, srs_id):\n self.logger.debug(\"In 'srs_id' setter.\")\n\n if len(srs_id) < 3:\n raise Exception(\"SRS ID is too short, must be more than 3 characters.\")\n\n self._srs_id = srs_id", "def assign_gids(self, int[::1] gids):\n self.mdb.get().assign_gids(<int> gids.size, <const int *> &gids[0])", "def setReactionId(self, *args):\n return _libsbml.ReactionGlyph_setReactionId(self, *args)", "async def setReaders(self, eventID: str, readers: Iterable[str]) -> None:", "def _format_set_iss(self, format_set_iss=None):\n ## Format iss\n if format_set_iss is None or format_set_iss == 'general':\n self._set_iss = self._general_set_iss\n elif format_set_iss == 'null':\n self._set_iss = self._null_set_iss\n elif format_set_iss == 'int':\n self._set_iss = self._int_set_iss\n elif format_set_iss == 'list':\n self._set_iss = self._list_set_iss", "def _params(self, qs):\n return [str_id for str_id in qs.split(',')]", "def get_camp_ids_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [x.get(\"id\") for x in all_campaigns if string in x[\"name\"]]" ]
[ "0.50190747", "0.50190747", "0.4670787", "0.45393595", "0.45045197", "0.44899312", "0.44867754", "0.4224754", "0.42206508", "0.4202021", "0.4202021", "0.41952497", "0.41925597", "0.41226864", "0.41184327", "0.40634874", "0.40553337", "0.40470064", "0.40200716", "0.40155357", "0.4013787", "0.3996931", "0.39936945", "0.3992161", "0.39744848", "0.39669463", "0.39614373", "0.3939572", "0.39366975", "0.3930893" ]
0.6751865
0
Dump utils image template.py as a Dict. The key is like "simnet/lndbtc"
def _dump_template(self, utils_image) -> Dict[str, str]:
    cmd = f"docker run -i --rm --entrypoint python {utils_image}"
    p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
    out, _ = p.communicate(input=SCRIPT.encode())
    output = out.decode()
    if p.returncode != 0:
        self._logger.error("Failed to dump %s template.py\n%s", utils_image, output)
        raise RuntimeError("Failed to dump %s template.py" % utils_image)
    lines = output.splitlines()
    result = {}
    for line in lines:
        key, value = line.split()
        result[key] = value
    return result
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def _driver_template_data(self):\n return {\n 'driver_module': self.driver_modulename(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'driver_path': self.metadata.driver_path,\n 'release_notes': self.metadata.notes,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def create_dump(self) -> Dict[str, str]:\n return self.http.post(self.config.paths.dumps)", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF", "def _get_image_type_templates():\n yaml_file = os.path.join(ROOT_DIR, 'docker', 'image_types.yaml')\n all_templates = yaml_utils.read(yaml_file)\n return all_templates", "def genConvOnboardingInfoJsonFile( sztpOnboardingInfo, onboardingFileJson ):\n template = {\n \"boot-image\": {\n \"os-name\": str,\n \"os-version\": str,\n \"download-uri\": list, # of uri strings\n \"image-verification\": [ {\n \"hash-algorithm\": str,\n \"hash-value\": str } ],\n },\n \"configuration-handling\": str,\n \"pre-configuration-script\": str,\n \"configuration\": str,\n \"post-configuration-script\": str\n }\n\n def verifyBootImage( template, sztpBootImage ):\n \"\"\"Verify boot image is correct\"\"\"\n def verifyImageVerification( imageVerification ):\n \"\"\"Verify instance of image-verification is correct\"\"\"\n if \"hash-algorithm\" in imageVerification:\n assert imageVerification[ \"hash-algorithm\" ] == \\\n \"ietf-sztp-conveyed-info:sha-256\",\\\n \"Unsupported hash-algorithm\"\n assert \"hash-value\" in imageVerification, \\\n \"Expected hash-value not present\"\n hashValue = imageVerification[ \"hash-value\" ]\n # Verify hashValue appears to be a yang:hex-string\n assert len( hashValue ) == 32 * 3 - 1 and \\\n all( c == ':' or c in string.hexdigits for c in hashValue ), \\\n \"hash-value invalid\"\n\n def verifyImageVerificationList( template, sztpImageVerification ):\n \"\"\"Verify image-verification list is correct\"\"\"\n assert isinstance( sztpImageVerification, list ), \\\n \"Expected list\"\n for imageVer in sztpImageVerification:\n assert verifyDictTypes( template, imageVer ), \"Unexpected value types\"\n assert set( imageVer.keys() ).issubset( set( template.keys() ) ), \\\n \"Unexpected keys in dict\"\n verifyImageVerification( imageVer )\n\n mandatory = [ \"download-uri\" ]\n assert isinstance( sztpBootImage, dict ), \"Expected dict\"\n assert set( sztpBootImage.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpBootImage ), \\\n \"Unexpected value types\"\n assert set( mandatory ).issubset( 
sztpBootImage ), \\\n \"Mandatory keys not present\"\n if \"image-verification\" in sztpBootImage:\n verifyImageVerificationList( template[ \"image-verification\" ][ 0 ],\n sztpBootImage[ \"image-verification\" ] )\n\n # verify onboarding-info dict is correctly constructed\n assert isinstance( sztpOnboardingInfo, dict ), \"Expected dict\"\n assert set( sztpOnboardingInfo.keys() ).issubset( template.keys() ), \\\n \"Unexpected keys in dict\"\n assert verifyDictTypes( template, sztpOnboardingInfo ), \\\n \"Unexpected values types\"\n assert sztpOnboardingInfo[ \"configuration-handling\" ] == \"replace\", \\\n \"Unsupported configuration-handling value\"\n if \"boot-image\" in sztpOnboardingInfo:\n verifyBootImage( template[ \"boot-image\" ],\n sztpOnboardingInfo[ \"boot-image\" ] )\n\n # construct outer dictionary and convert to json\n ietfOnboardingInfo = { \"ietf-sztp-conveyed-info:onboarding-information\":\n sztpOnboardingInfo }\n jsonIetfOnboardingInfo = json.dumps( ietfOnboardingInfo, indent=4 )\n\n # save to file\n with open( onboardingFileJson, \"w\" ) as tmpFile:\n tmpFile.write( jsonIetfOnboardingInfo )", "def create_json(self):\n data = {\"image_id\": self.ids, \"img_path\": self.img_paths, \"bg\": self.bgs}\n if hasattr(self, \"bbox\"):\n data[\"bbox\"] = self.bbox\n if hasattr(self, \"masks\"):\n data[\"masks\"] = self.masks\n with open(f\"{self.save_path}{self.name}/json/images_info.json\", \"w\") as f:\n json.dump(data, f)", "def gen_obj(image_name):\n\n recon_options['dataroot'] = f'{dir_path}/static'\n recon_options['out_path'] = recon_options['dataroot']\n recon_options['results_path'] = recon_options['dataroot']\n recon_options['ckpt_path'] = f'{dir_path}/checkpoints/pifuhd.pt'\n recon_options['load_netMR_checkpoint_path'] = recon_options['ckpt_path']\n recon_options['checkpoints_path'] = f'{dir_path}/checkpoints'\n recon_options['loadSize'] = 1024\n recon_options['resolution'] = 512\n\n path = reconWrapper(DotDict(recon_options), True, image_name)\n\n return path", "def _test_template_data(self):\n chars=string.ascii_uppercase + string.digits\n id = ''.join(random.choice(chars) for x in range(6))\n\n return {\n 'test_module': self.test_modulename(),\n 'driver_module': self.driver_modulename(),\n 'driver_dir': self.driver_dir(),\n 'file': self.driver_relative_path(),\n 'author': self.metadata.author,\n 'driver_name': self.metadata.driver_name,\n 'constructor': self.metadata.constructor,\n 'full_instrument_lower': self.metadata.driver_name.lower(),\n 'full_instrument_camelcase': self.driver_name_camelcase(),\n }", "def _get_template_data(snapshot_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n snapshot_id = snapshot_data['id']\n Linux.logger.debug(f'Compiling template data for Snapshot #{snapshot_id}')\n data: Dict[str, Any] = {key: None for key in Linux.template_keys}\n\n data['host_sudo_passwd'] = settings.NETWORK_PASSWORD\n data['snapshot_identifier'] = f'{snapshot_data[\"vm\"][\"id\"]}_{snapshot_data[\"id\"]}'\n data['vm_identifier'] = f'{snapshot_data[\"vm\"][\"project\"][\"id\"]}_{snapshot_data[\"vm\"][\"id\"]}'\n\n # Get the ip address of the host\n host_ip = None\n for interface in snapshot_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_ip = interface['ip_address']\n break\n if host_ip is None:\n error = f'Host ip address not found for the server # {snapshot_data[\"vm\"][\"server_id\"]}'\n Linux.logger.error(error)\n 
snapshot_data['errors'].append(error)\n return None\n data['host_ip'] = host_ip\n return data", "def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in 
vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data", "def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) (ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map", "def __make_processing(self, img_name, abspath_dir_img, id_foot):\n data = {}\n data['data'] = ImageInfo.get_date(abspath_dir_img)\n data['total_part'] = TOTAL_PART\n data['nuvens'] = ImageInfo.get_cloud(abspath_dir_img)\n self.__make_tms(abspath_dir_img)\n data['geom'] = self.__make_footprint(abspath_dir_img, shp_out=id_foot)\n abspath_rgb, img_name_rgb = ImageInfo.get_image_rgb(\n abspath_dir_img, img_name\n )\n data['tms'] = ImageInfo.get_xml_tms(img_name_rgb)\n data['image'] = img_name_rgb\n data['quicklook'] = self.__make_png(abspath_rgb)\n data['path'] = ImageInfo.get_path(img_name)\n return data", "def detailed_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name,\n \"minRam\": self.minRam,\n \"minDisk\": self.minDisk,\n \"OS-EXT-IMG-SIZE:size\": self.image_size,\n \"com.rackspace__1__ui_default_show\": self.is_default,\n \"created\": \"1972-01-01_15-59-11\",\n \"updated\": \"1972-01-01_15-59-11\",\n \"status\": \"ACTIVE\",\n \"progress\": 100,\n \"metadata\": self.metadata_json()\n })\n return template", "def detailed_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name,\n \"minRam\": self.minRam,\n \"minDisk\": self.minDisk,\n \"OS-EXT-IMG-SIZE:size\": self.image_size,\n \"com.rackspace__1__ui_default_show\": self.is_default,\n \"created\": \"1972-01-01_15-59-11\",\n \"updated\": \"1972-01-01_15-59-11\",\n \"progress\": 100,\n \"status\": \"ACTIVE\",\n \"metadata\": self.metadata_json()\n })\n return template", "def export_project_dump(self, key):", "def dict(self):\n d = {}\n d['template_id'] = self.id\n d['name'] = self.name\n d['cpu'] = self.cpu\n d['memory'] = self.memory\n d['points'] = self.points\n d['description'] = self.description\n d['ec2name'] = self.ec2name\n # state is not put in 
dictionary\n return d", "def serialize(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'body': self.body,\n 'images': {\n 'img_path_xs': self.img_path_xs,\n 'img_path_sm': self.img_path_sm,\n 'img_path_md': self.img_path_md,\n 'img_path_lg': self.img_path_lg\n },\n 'is_active': self.is_active,\n }", "def create_base_image(self, builder, template, parameters):", "def return_template_output(base_dir,filename,data_dict):\n templateLoader = jinja2.FileSystemLoader( searchpath=base_dir)\n templateEnv = jinja2.Environment( loader=templateLoader )\n template = templateEnv.get_template(filename)\n output = template.render(data_dict)\n return output", "def raw_image(self):\n\t\treturn FstabEntry([f\"{self.mount_point}_image\", \"emmc\", self.device])", "def dump(self) -> dict[Any, str]:\r\n ...", "def make_image(self):\n\n if self.type == 'passthrough':\n return\n render_template(\n os.path.dirname(self.main_module),\n os.path.basename(self.main_module_path),\n self.language,\n self.requirements,\n self.whitelist,\n self.type,\n into=self.code_dir)\n self.build()", "def _write_packet_dict(ctx, package_dict):\n p4gf_util.write_dict_to_file(package_dict, _packet_filename(ctx.config.repo_name))", "def outputs(self):\n return {\"path_to_dtb_json_file\": File_IO(\n self.node.outputs[0])}", "def get_heat_json_from_topology_config(config, project_name='admin'):\n\n template = dict()\n template[\"heat_template_version\"] = \"2013-05-23\"\n template[\"resources\"] = dict()\n\n for network in config[\"networks\"]:\n nr = dict()\n nr[\"type\"] = \"OS::Neutron::Net\"\n\n nrp = dict()\n nrp[\"shared\"] = False\n nrp[\"name\"] = network[\"name\"]\n nrp[\"admin_state_up\"] = True\n\n nr[\"properties\"] = nrp\n\n nrs = dict()\n nrs[\"type\"] = \"OS::Neutron::Subnet\"\n #\n p = dict()\n p[\"cidr\"] = \"1.1.1.0/24\"\n p[\"enable_dhcp\"] = False\n p[\"gateway_ip\"] = \"\"\n p[\"name\"] = network[\"name\"] + \"_subnet\"\n if network[\"name\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n elif network[\"name\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": network[\"name\"]}\n\n nrs[\"properties\"] = p\n\n template[\"resources\"][network[\"name\"]] = nr\n template[\"resources\"][network[\"name\"] + \"_subnet\"] = nrs\n\n # cache the image_details here to avoid multiple REST calls for details about an image type\n # as many topologies have lots of the same types of images around\n image_details_dict = dict()\n\n for device in config[\"devices\"]:\n\n if device[\"imageId\"] in image_details_dict:\n image_details = image_details_dict[device[\"imageId\"]]\n else:\n image_details = imageUtils.get_image_detail(device[\"imageId\"])\n image_details_dict[device[\"imageId\"]] = image_details\n\n image_name = image_details[\"name\"]\n\n image_disk_size = 20\n\n # set the size in GB, rounding up to the nearest int\n if 'size' in image_details:\n current_size = float(image_details['size'])\n image_disk_size = int(math.ceil(current_size / 1000000000))\n\n # if the glance image asks for a minimum disk size, let's see if it's larger that what we have\n if \"min_disk\" in image_details and image_details['min_disk'] > image_disk_size:\n image_disk_size = image_details[\"min_disk\"]\n\n # if the user has specified a desired disk size, grab it here so we get the correct flavor\n if type(image_disk_size) is int and device[\"resizeImage\"] > image_disk_size:\n image_disk_size = 
device[\"resizeImage\"]\n\n # determine openstack flavor here\n device_ram = int(device[\"ram\"])\n device_cpu = int(device[\"cpu\"])\n\n flavor_detail = openstackUtils.get_minimum_flavor_for_specs(configuration.openstack_project,\n device_cpu,\n device_ram,\n image_disk_size\n )\n\n flavor = flavor_detail[\"name\"]\n\n dr = dict()\n dr[\"type\"] = \"OS::Nova::Server\"\n dr[\"properties\"] = dict()\n dr[\"properties\"][\"flavor\"] = flavor\n dr[\"properties\"][\"networks\"] = []\n index = 0\n for p in device[\"interfaces\"]:\n port = dict()\n port[\"port\"] = dict()\n port[\"port\"][\"get_resource\"] = device[\"name\"] + \"_port\" + str(index)\n index += 1\n dr[\"properties\"][\"networks\"].append(port)\n\n dr[\"properties\"][\"image\"] = image_name\n dr[\"properties\"][\"name\"] = device[\"name\"]\n\n if device[\"configDriveSupport\"]:\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n metadata[\"console\"] = \"vidconsole\"\n dr[\"properties\"][\"metadata\"] = metadata\n\n # let's check all the configDriveParams and look for a junos config\n # FIXME - this may need tweaked if we need to include config drive cloud-init support for other platforms\n # right now we just need to ignore /boot/loader.conf\n for cfp in device[\"configDriveParams\"]:\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/boot/loader.conf\":\n logger.debug(\"Creating loader.conf config-drive entry\")\n template_name = cfp[\"template\"]\n loader_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n logger.debug('----------')\n logger.debug(loader_string)\n logger.debug('----------')\n for l in loader_string.split('\\n'):\n if '=' in l:\n left, right = l.split('=')\n if left not in metadata and left != '':\n metadata[left] = right.replace('\"', '')\n\n if \"destination\" in cfp and cfp[\"destination\"] == \"/juniper.conf\":\n logger.debug(\"Creating juniper.conf config-drive entry\")\n template_name = cfp[\"template\"]\n personality_string = osUtils.compile_config_drive_params_template(template_name,\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n dr[\"properties\"][\"personality\"] = dict()\n dr[\"properties\"][\"personality\"] = {\"/config/juniper.conf\": personality_string}\n else:\n logger.debug('No juniper.conf found here ')\n\n if device['cloudInitSupport']:\n logger.debug('creating cloud-init script')\n dr[\"properties\"][\"config_drive\"] = True\n dr[\"properties\"][\"user_data_format\"] = \"RAW\"\n metadata = dict()\n metadata[\"hostname\"] = device[\"name\"]\n dr[\"properties\"][\"metadata\"] = metadata\n # grab the prefix len from the management subnet which is in the form 192.168.122.0/24\n if '/' in configuration.management_subnet:\n management_prefix_len = configuration.management_subnet.split('/')[1]\n else:\n management_prefix_len = '24'\n\n management_ip = device['ip'] + '/' + management_prefix_len\n\n device_config = osUtils.get_cloud_init_config(device['name'],\n device['label'],\n management_ip,\n device['managementInterface'],\n device['password'])\n\n script_string = \"\"\n if \"configScriptId\" in device and device[\"configScriptId\"] != 0:\n logger.debug(\"Passing script data!\")\n try:\n script = Script.objects.get(pk=int(device[\"configScriptId\"]))\n script_string 
= script.script\n device_config[\"script_param\"] = device.get(\"configScriptParam\", '')\n logger.debug(script_string)\n except ObjectDoesNotExist:\n logger.info('config script was specified but was not found!')\n\n user_data_string = osUtils.render_cloud_init_user_data(device_config, script_string)\n dr[\"properties\"][\"user_data\"] = user_data_string\n\n template[\"resources\"][device[\"name\"]] = dr\n\n for device in config[\"devices\"]:\n index = 0\n for port in device[\"interfaces\"]:\n pr = dict()\n pr[\"type\"] = \"OS::Neutron::Port\"\n p = dict()\n\n if port[\"bridge\"] == \"virbr0\":\n p[\"network_id\"] = configuration.openstack_mgmt_network\n\n # specify our desired IP address on the management interface\n p['fixed_ips'] = list()\n fip = dict()\n fip['ip_address'] = device['ip']\n p['fixed_ips'].append(fip)\n\n elif port[\"bridge\"] == configuration.openstack_external_network:\n p[\"network_id\"] = configuration.openstack_external_network\n else:\n p[\"network_id\"] = {\"get_resource\": port[\"bridge\"]}\n # disable port security on all other ports (in case this isn't set globally)\n p['port_security_enabled'] = False\n\n pr[\"properties\"] = p\n template[\"resources\"][device[\"name\"] + \"_port\" + str(index)] = pr\n index += 1\n\n return json.dumps(template)", "def generate_utils(self):\n # type: (Generator) -> str\n return render_to_string(\n self.backend,\n \"utils.py\",\n {\n \"security_defs\": self.security_defs\n },\n )", "def get_kml_dict(self, tx, ty_tms, tz, image_format, draworder = 0):\n d = {}\n\n d[\"south\"], d[\"west\"], d[\"north\"], d[\"east\"] = self.tileswne(tx, ty_tms, tz)\n\n image_filename = get_tile_filename(tx, ty_tms, tz, format_extension[image_format],False)\n d[\"image_filename\"] = image_filename\n d[\"image_filename\"] = d[\"image_filename\"].replace(\"\\\\\",\"/\")\n\n if self.options.url is None:\n d[\"image_url\"] = \"../../%s\" % image_filename\n else:\n d[\"image_url\"] = \"%s%s\" % (self.options.url, image_filename)\n d[\"image_url\"] = d[\"image_url\"].replace(\"\\\\\",\"/\")\n\n url = self.options.url\n if url is None:\n # Top level KML is linked from `doc.kml' and it needs different path.\n if tz == self.tminz:\n url = \"\"\n else:\n url = \"../../\"\n\n if self.options.kmz:\n extension = \"kmz\"\n else:\n extension = \"kml\"\n\n d[\"link_url\"] = \"%s%s\" % (url, get_tile_filename(tx, ty_tms, tz, extension,False))\n d[\"link_url\"] = d[\"link_url\"].replace(\"\\\\\",\"/\")\n\n d[\"minlodpixels\"] = int(self.tilesize / 2)\n d[\"maxlodpixels\"] = -1 # int(self.tilesize * 8)\n\n if tx == 0:\n d[\"draw_order\"] = draworder + 2 * tz + 1\n else:\n d[\"draw_order\"] = draworder + 2 * tz\n\n return d", "def get_mapdata():\n return render_template(\"l_heatmap.html\")", "def get_config_template() -> dict:\n return {\n VENE_PAYMENTS_BAMBORA_API_URL: (str, \"https://payform.bambora.com/pbwapi\"),\n VENE_PAYMENTS_BAMBORA_API_KEY: str,\n VENE_PAYMENTS_BAMBORA_API_SECRET: str,\n VENE_PAYMENTS_BAMBORA_PAYMENT_METHODS: list,\n }" ]
[ "0.5737873", "0.55475605", "0.5534782", "0.5529993", "0.55225337", "0.55086523", "0.5487717", "0.54603654", "0.545359", "0.54374003", "0.53441983", "0.53371257", "0.5325836", "0.5310178", "0.530841", "0.5289318", "0.5286247", "0.52432984", "0.5220684", "0.5216042", "0.5184362", "0.5157884", "0.5150819", "0.5148567", "0.514077", "0.5132544", "0.51256675", "0.5119062", "0.51017666", "0.5085865" ]
0.7714567
0
Send detection data and return status
def send_detection_data(self, image_width, image_height,
                        image, detection_result):
    if self._send_buffer.full() is True:
        log_error("Send detection data failed for buffer is full")
        return False

    image_data = None
    if isinstance(image, AclImage):
        image_data = DataBuf(image.data(), image.size).copy_to_local()
    elif isinstance(image, np.ndarray):
        image_data = image
    else:
        log_error("Invalid data to send")
        return False

    request_msg = pm.image_frame_request(image_width, image_height,
                                         image_data.tobytes(),
                                         detection_result)
    self.send_message(request_msg)
    self._send_buffer.put(image_data)
    self._release_send_success_data()

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_image(self, image_width, image_height, image):\n detection_result = []\n return self.send_detection_data(image_width, image_height, image, detection_result)", "def test_http_classifier(self):\n \n msg = \"\"\n \n files = 0\n tp = 0\n fp = 0\n tn = 0\n fn = 0\n\n self.addr = \"http://\" + self.Helpers.confs[\"cnn\"][\"api\"][\"server\"] + \\\n ':'+str(self.Helpers.confs[\"cnn\"][\"api\"][\"port\"]) + '/Inference'\n self.headers = {'content-type': 'image/jpeg'}\n\n for data in os.listdir(self.testing_dir):\n if os.path.splitext(data)[1] in self.valid:\n \n response = self.send_request(self.testing_dir + \"/\" + data)\n\n msg = \"\"\n if response[\"Diagnosis\"] == \"Positive\" and \"_1.\" in data:\n tp += 1\n msg = \"Acute Lymphoblastic Leukemia correctly detected (True Positive)\"\n elif response[\"Diagnosis\"] == \"Positive\" and \"_0.\" in data:\n fp += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly detected (False Positive)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_0.\" in data:\n tn += 1\n msg = \"Acute Lymphoblastic Leukemia correctly not detected (True Negative)\"\n elif response[\"Diagnosis\"] == \"Negative\" and \"_1.\" in data:\n fn += 1\n msg = \"Acute Lymphoblastic Leukemia incorrectly not detected (False Negative)\" \n \n files += 1\n \n self.Helpers.logger.info(msg)\n print()\n time.sleep(7)\n \n self.Helpers.logger.info(\"Images Classifier: \" + str(files))\n self.Helpers.logger.info(\"True Positives: \" + str(tp))\n self.Helpers.logger.info(\"False Positives: \" + str(fp))\n self.Helpers.logger.info(\"True Negatives: \" + str(tn))\n self.Helpers.logger.info(\"False Negatives: \" + str(fn))", "def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self.Config[\"MACHINE_NAME\"], \n idData=idData, classes=classes, aux=aux)", "def run(self):\n if self.stream:\n while True:\n try:\n ret, frame = self.stream.read()\n if ret is True:\n # TODO: replace by a real function that send frame to detection model\n self.detection_model.send_image(image=frame)\n if self.show_in_window:\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n except KeyboardInterrupt:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.close()\n return None\n except Exception as e:\n self.stream.release()\n cv2.destroyAllWindows()\n self.log.write('Error:Unexpected Error happened:\\n {}'.format(e))\n self.log.close()\n return None\n else:\n self.log.write(\"Error initializing stream....\\n\")\n self.log.close()\n return None", "def status_check_callback(self, req, res):\n try:\n res.single_camera_status = 1\n res.stereo_camera_status = 1\n res.lidar_status = 1\n if self.camera_buffer.read_buffer is not None \\\n and isinstance(self.camera_buffer.read_buffer, list):\n if len(self.camera_buffer.read_buffer) == 2:\n res.stereo_camera_status = 0\n elif len(self.camera_buffer.read_buffer) == 1:\n res.single_camera_status = 0\n if self.lidar_buffer.read_buffer is not None:\n res.lidar_status = 0\n return res\n except Exception as ex:\n self.get_logger().error(f\"Failed to get sensor data status: {ex}\")", "def image_test_case(img, expected_results, info_string):\n global passed_count, failed_count\n\n path = TEST_IMGS + img\n\n print(\"\\n\\nTEST: {}\".format(info_string))\n print(\"\\nTesting image handling of {}\".format(path))\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.connect((HOST, PORT))\n\n with open(path, 
'rb') as f:\n img_bytes = f.read()\n\n sock.send(START)\n sock.send(GPS)\n sock.send(b'51.5138')\n sock.send(LONG)\n sock.send(b'-0.09847899999999754')\n sock.send(SOF)\n sock.send(img_bytes)\n sock.send(END_MESSAGE)\n\n response_1 = sock.recv(4)\n response_2 = sock.recv(4)\n responses = [response_1, response_2]\n\n for expected in expected_results:\n if expected not in responses:\n print(\"\\n\\tResult: FAILED.\")\n print(\"Expected server response {}. Received {}.\".format(\n expected_results, responses))\n failed_count += 1\n return\n\n print(\"\\n\\tResult: PASSED.\\n\")\n passed_count += 1", "def getStatus():\n return json.dumps({'camera': Camera.status(), 'rover': rover.status()}), 200", "def run(self):\n # Wait for the 'shot' message ready\n self.wait_for_messages()\n # Send the initial states to the server\n self.send_shape_and_states()\n # Wait for the 'method' message ready\n self.wait_for_messages()\n\n # Send the measurement angles to the server\n for y in range(self.__depth):\n self.send_angle_bulks(y)\n\n # Obtain the measurement outcomes\n result = self.get_classical_output()[::-1]\n self.send_back(\n 'local',\n self.__wrap_shot_message(\n 'setResult',\n {'result': result, 'shot': self.__shots},\n )\n )", "async def detect(self, request: Request) -> Response:\n raw_data = await request.body()\n as_str = raw_data.decode(\"utf-8\")\n\n try:\n body = orjson.loads(as_str)\n except orjson.JSONDecodeError as e:\n raise InferenceError(\"Unrecognized request format: %s\" % e)\n\n request_handler = get_request_handler(\n Protocol(self.alibi_detect_settings.protocol), body\n )\n request_handler.validate()\n input_data = request_handler.extract_request()\n\n y = await self.predict_fn(input_data)\n output_data = orjson.dumps(y, option=orjson.OPT_SERIALIZE_NUMPY)\n\n return Response(content=output_data, media_type=\"application/json\")", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def video_test():\n r = request\n # convert string of image data to uint8\n nparr = np.fromstring(r.data, np.uint8)\n # decode image\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # do some fancy processing here....\n\n # build a response dict to send back to client\n response = {'message': 'image received. 
size={}x{}'.format(img.shape[1], img.shape[0])\n }\n print(response)\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n cv2.imwrite(\"1.jpg\", img)\n print(\"done\")\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def process_image(self, data):\n try:\n\n # Convert the image from ROS format to OpenCV format\n # 'bgr8' means it will encode as 8-bit values in BGR channels\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n # Apply a threshold to your image\n cv_image = self.bound_green_object(cv_image)\n # Display the modified image\n cv2.imshow('picture', cv_image)\n cv2.waitKey(3)\n except CvBridgeError, e:\n rospy.loginfo(e)", "def serve_inference_requests():\n global image_queue\n\n with tf.Session() as sess:\n while True:\n image_data = image_queue.get()\n\n tensor = sess.graph.get_tensor_by_name('final_result:0')\n predictions = sess.run(tensor, {'DecodeJpeg/contents:0': image_data})\n predictions = np.squeeze(predictions)\n\n top_k = predictions.argsort()[-NUM_PREDICTIONS:][::-1]\n\n human_string = labels[top_k[0]]\n score = predictions[top_k[0]]\n logging.info('%s classified with score %.5f', human_string, score)\n\n emit_image = False\n if human_string != 'nothing':\n emit_image = True\n logging.debug('emitting image cause %s was detected', human_string)\n elif score <= config['inference']['threshold']:\n emit_image = True\n logging.debug('emitting image cause score %.5f is below threshold of %s',\n score, config['inference']['threshold'])\n else:\n logging.debug('image not emitted, cause nothing was detected with a probability of %.5f',\n score)\n\n if emit_image:\n mqtt_publish(image_data)\n else:\n save_image(image_data)", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n rospy.loginfo(\"image info: {}\".format(image.numpy().shape))\n\n # Run pose estimation\n boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False)\n\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n\n # Convert detected boxes to ROS type and publish\n ros_boxes = self.bridge.to_ros_boxes(boxes)\n if self.bbox_publisher is not None:\n self.bbox_publisher.publish(ros_boxes)\n rospy.loginfo(\"Published face boxes\")\n\n # Annotate image and publish result\n # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,\n # only used to test the corresponding bridge methods\n odr_boxes = self.bridge.from_ros_boxes(ros_boxes)\n image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)\n if self.image_publisher is not None:\n message = self.bridge.to_ros_image(np.uint8(image))\n self.image_publisher.publish(message)\n rospy.loginfo(\"Published annotated image\")", "def send(self, data, status=\"CON\"):\n return self.c.sendall(pack(data, status=status))", "def _send(self):\n executor_id = self.status['executor_id']\n job_id = self.status['job_id']\n call_id = self.status['call_id']\n act_id = self.status['activation_id']\n\n if self.status['type'] == '__init__':\n init_key = create_init_key(executor_id, job_id, call_id, act_id)\n self.internal_storage.put_data(init_key, '')\n\n elif self.status['type'] == '__end__':\n status_key = create_status_key(executor_id, job_id, call_id)\n dmpd_response_status = json.dumps(self.status)\n drs = sizeof_fmt(len(dmpd_response_status))\n logger.info(\"Storing execution stats - Size: {}\".format(drs))\n self.internal_storage.put_data(status_key, 
dmpd_response_status)", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))", "def ping():\n health = AutoGluonClassifierService.load_model() is not None # You can insert a health check here\n\n status = 200 if health else 404\n return flask.Response(response='\\n', status=status, mimetype='application/json')", "def remote_status():", "def detect(self):\n # process the input video and get the attributes:\n self.process_video()\n\n # build a rcnn/ yolov5 predictor:\n self.build_predictor()\n\n \n # assert not os.path.isfile(args.output_file), \"File with the name %s already exists\"%args.output_file\n # build the writer with same attributes:\n self.vid_writer = cv2.VideoWriter(self.output, self.fourcc, self.fps, (self.w, self.h))\n\n # inference time:\n start = time.time()\n print(\"Started inference\\n\")\n \n # progress bar using tqdm:\n pbar = tqdm(total=self.nframes)\n\n while(self.cap.isOpened()):\n ret, frame = self.cap.read()\n if ret == False:\n break # when the last frame is read \n\n # different formats of results:\n if self.library == \"yolov5\":\n # predict and bring the outputs to cpu:\n results = self.predictor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) # convert to RGB\n predictions = results.xyxy[0].cpu()\n # find the instance indices with person:\n person_idx = predictions[:,5] == self.label_dict[\"person\"]\n # extract the corresponding boxes and scores:\n boxes = predictions[person_idx,:4].numpy()\n probs = predictions[person_idx,4].numpy()\n\n if self.library == \"detectron2\":\n # predict and bring the outputs to cpu:\n results = self.predictor(frame) # RGB conversion done automatically in detectron\n predictions = results[\"instances\"].to(\"cpu\")\n # find the instance indices with person:\n person_idx = [predictions.pred_classes == self.label_dict[\"person\"]]\n # extract the corresponding boxes and scores:\n boxes = predictions.pred_boxes[person_idx].tensor.numpy()\n probs = predictions.scores[person_idx].numpy()\n\n # draw boxes and write the frame to the video:\n if len(boxes): # check whether there are predictions\n box_frame = self.draw_person_boxes(frame, boxes, probs)\n else:\n box_frame = frame\n self.vid_writer.write(box_frame)\n\n pbar.update(1)\n pbar.close()\n\n # release the video capture object and write object:\n self.cap.release()\n self.vid_writer.release()\n\n print(\"Inferene on the video file took %0.3f seconds\"%(time.time()-start))", "def main_recognition():\n if request.method == 'POST':\n # print(request.url)\n # 
stream = BytesIO(request.data)\n # image = Image.open(stream).convert(\"RGBA\")\n # path = 'C:/Users/13/Documents/FRS_v1/path.png'\n # image = image.save(path)\n # stream.close()\n #df = faces_info_export(path)\n print(request.url)\n stream = BytesIO(request.data)\n img_pil=Image.open(stream).convert(\"RGB\")\n stream.close()\n img_cv=np.array(img_pil)\n try:\n df = faces_info_export(img_cv)\n return df.to_json(orient='index')\n except SystemError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n except AttributeError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n if request.method == 'GET':\n # ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n df = faces_info_export(\"C:/Users/13/Documents/FRS_v1/test_image.jpg\")\n return df.to_json(orient='index')", "def detect_image_client(img):\n rospy.wait_for_service('detect_service') # attendende che il servizio sia pronto\n rospy.loginfo(\"Detection service invoked\")\n try:\n detect_service = rospy.ServiceProxy('detect_service', Detect) #istanzia il proxy al servizio detect_service\n msg = detect_service(img) # invoca il servizio con un'istanza di Image per ottenere un'istanza di DetectResponse\n return msg.det # restituisce l'istanza di tipo Detection2DArray prelevandola dall'oggetto DetectResponse\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def testSendState(self):\n self.mgr.enabled = 1\n self.mgr.model = MODEL_HERO3PLUS_BLACK\n self.mgr.status = STATUS_GOPRO_CONNECTED\n self.mgr.isRecording = False\n self.mgr.captureMode = CAPTURE_MODE_VIDEO\n self.mgr.videoFormat = VIDEO_FORMAT_NTSC\n self.mgr.videoResolution = 3\n self.mgr.videoFrameRate = 1\n self.mgr.videoFieldOfView = 2\n self.mgr.videoLowLight = True\n self.mgr.photoResolution = 1\n self.mgr.photoBurstRate = 2\n self.mgr.videoProtune = True\n self.mgr.videoProtuneWhiteBalance = 2\n self.mgr.videoProtuneColor = 1\n self.mgr.videoProtuneGain = 3\n self.mgr.videoProtuneSharpness = 2\n self.mgr.videoProtuneExposure = 1\n\n # Send old spec version\n # 2 unsigned shorts for a header, 26 unsigned bytes, then 5 unsigned shorts\n pkt1 = struct.pack('<IIBBBBBBBBBBBBBBBBBBBBBBBBBBHHHHH', app_packet.GOPRO_V1_STATE, 36, \\\n GOPRO_V1_SPEC_VERSION,\n self.mgr.model,\n self.mgr.status,\n self.mgr.isRecording,\n self.mgr.captureMode,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0\n )\n\n # send new spec version\n # 2 unsigned shorts for a header, 26 unsigned bytes, then 5 unsigned shorts\n pkt2 = struct.pack('<IIBBBBBBBBBBBBBBBBBBBBBBBBBBHHHHH', app_packet.GOPRO_V2_STATE, 36, \\\n GOPRO_V2_SPEC_VERSION,\n self.mgr.model,\n self.mgr.status,\n self.mgr.isRecording,\n self.mgr.captureMode,\n self.mgr.videoFormat,\n self.mgr.videoResolution,\n self.mgr.videoFrameRate,\n self.mgr.videoFieldOfView,\n self.mgr.videoLowLight,\n self.mgr.photoResolution,\n self.mgr.photoBurstRate,\n self.mgr.videoProtune,\n self.mgr.videoProtuneWhiteBalance,\n self.mgr.videoProtuneColor,\n self.mgr.videoProtuneGain,\n self.mgr.videoProtuneSharpness,\n self.mgr.videoProtuneExposure,\n self.mgr.enabled,\n 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0\n )\n\n self.mgr.sendState()\n call1 = call(pkt1)\n call2 = call(pkt2)\n self.mgr.shotMgr.appMgr.sendPacket.assert_has_calls([call1, call2])", "def detect():\n pass", "def start(self):\n self.activateLog()\n self.preLoad()\n\n self.running = True\n\n logging.debug('Start detection of {} in {}.'.format(self.Config[\"CLASSES\"], \n self.__class__.__name__))\n failedSend = 0\n while self.running:\n gdList = []\n _classes = None\n _idData = 0\n\n try:\n if not self.Standalone:\n gdList = 
self.bring(controller=self.Controller, device=self.Device, \n limit=self.Limit, lastTime=self.lastTime)\n self.lastTime = gdList[0]['timeQuery']\n else:\n gdList = self.testData()\n failedSend = 0 \n except:\n failedSend += 1\n logging.exception(\n 'Unexpected error getting data from pool: {}. Controller: {}, Device: {}, Limit: {}.'.format(\n self.URL, self.Controller, self.Device, self.Limit))\n if failedSend > 2 and not self.dp.isLive():\n logging.error('Pool no found {} will shutdown.'.format(self.__class__.__name__))\n self.stop()\n break\n continue\n\n for gd in gdList[1:]:\n _classes = []\n try:\n _classes, _aux = self.predict(gd)\n _idData = gd['id']\n except:\n logging.exception(\n 'Unexpected error in prediction from classifier: {} ({}).'.format(\n self.__class__.__name__, self.Config[\"MACHINE_NAME\"]))\n \n try:\n if not self.Standalone and len(_classes) > 0:\n self.sendDetection(_idData, _classes, _aux)\n else:\n self.showData(gd, _classes, _aux)\n failedSend = 0 \n except:\n failedSend += 1\n logging.exception(\n 'Unexpected error sending data from classifier: {} ({}).'.format(\n self.__class__.__name__, self.Config[\"MACHINE_NAME\"]))\n\n if failedSend > 2 and not self.dp.isLive():\n logging.error('Pool no found {} will shutdown.'.format(self.__class__.__name__))\n self.stop()\n break", "def camera_status():\n # Do command\n consoleOutput = exec_console_command(constants.cameraCheck)\n\n # Parse output for results\n status = False\n feedbackOutput = constants.cameraCheckOff\n\n if \"Nikon Corp.\" in consoleOutput:\n status = True\n feedbackOutput = constants.cameraCheckOn\n\n # Encode to JSON\n return feedbackOutput, status", "def detect_object():\n response = None\n try:\n # logger.info(request.Form)\n if request.files['base_image'] is not None:\n base_img = cv2.imdecode(np.fromstring(request.files['base_image'].read(), np.uint8), cv2.IMREAD_UNCHANGED)\n\n if base_img is not None:\n response = predictionService.verify(base_img=base_img)\n else:\n response = BaseResponse(code=400, reason='base_image cannot be null')\n except Exception as e:\n logger.error(e)\n response = BaseResponse(code=500, reason=\"Internal server error occurred. refer to logs\")\n\n return response.toJSON()", "def image_cb(self, msg):\n if self.waypoints is None:\n return\n if self.state_count >= self.state_count_threshold and time.time() - self.last_detection_time < self.traffic_light_detection_interval:\n return\n if time.time() - self.last_tl_off_time < self.traffic_light_off_idle_interval:\n if self.loglevel >= 5:\n rospy.logdebug(\"No detection %f %f %f\", time.time(), self.last_tl_off_time, self.traffic_light_off_idle_interval)\n return\n\n self.last_detection_time = time.time()\n self.has_image = True\n self.camera_image = msg\n light_wp, state = self.process_traffic_lights()\n\n '''\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n '''\n if self.state != state:\n self.state_count = 1\n self.state = state\n else:\n self.state_count += 1\n if self.state_count >= self.state_count_threshold:\n if state == TrafficLight.GREEN and self.last_state in (TrafficLight.RED, TrafficLight.YELLOW):\n self.last_tl_off_time = time.time()\n self.last_state = self.state\n self.last_wp = light_wp\n self.last_msg = state_msg = TrafficLightStatus()\n state_msg.tlwpidx = light_wp\n state_msg.state = state\n self.upcoming_red_light_pub.publish(state_msg)\n elif self.last_msg: # have not reached the threshold\n if self.car_wpidx < self.last_msg.tlwpidx + self.traffic_light_over_waypoints: \n # keep sending previous message when we are still close to the current traffic light\n self.upcoming_red_light_pub.publish(self.last_msg)\n else: # for other locations, clear traffic light status\n self.last_msg.tlwpidx = -1\n self.last_msg.state = TrafficLight.UNKNOWN\n self.upcoming_red_light_pub.publish(self.last_msg)\n self.last_msg = None\n if self.loglevel >= 4:\n rospy.loginfo(\"Curr Light_wp: %d, state: %d, global state: %d, last Light_wp: %d, state count: %d\", light_wp, state, self.state, self.last_wp, self.state_count)", "def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n self.image_sub.unregister()\n\n except CvBridgeError as e:\n rospy.logerr(e)\n (rows, cols, channels) = cv_image.shape\n #result = cv2.fastNlMeansDenoisingColored(cv_image, None, 20, 10, 7, 21)\n image = cv_image\n # Resize a 720x1280 image to 360x640 to fit it on the screen\n \"\"\"resized_image = cv2.resize(image, (720 / 2, 1280 / 2))\n cv2.imshow(\"/eyrc/vb/camera_1/image_raw\", resized_image)\n rospy.loginfo(self.get_qr_data(image))\"\"\"\n _,threshold = cv2.threshold(image, 70, 255, cv2.THRESH_TRUNC)\n self.get_qr_data(threshold)\n cv2.waitKey(3)" ]
[ "0.6287213", "0.6152359", "0.60468817", "0.57658076", "0.5763883", "0.56264436", "0.5618451", "0.56042737", "0.5574418", "0.5567113", "0.5565702", "0.55625457", "0.55605626", "0.553324", "0.5526417", "0.5485286", "0.5476039", "0.54632705", "0.5459137", "0.54455686", "0.5437376", "0.54235965", "0.5406025", "0.5385306", "0.5377799", "0.53760463", "0.5366128", "0.53534037", "0.53421426", "0.5335312" ]
0.6724717
0
send detection image data
def send_image(self, image_width, image_height, image):
    detection_result = []
    return self.send_detection_data(image_width, image_height, image, detection_result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_detection_data(self, image_width, image_height,\n image, detection_result):\n if self._send_buffer.full() is True:\n log_error(\"Send detection data failed for buffer is full\")\n return False\n\n image_data = None\n if isinstance(image, AclImage):\n image_data = DataBuf(image.data(), image.size).copy_to_local()\n elif isinstance(image, np.ndarray):\n image_data = image \n else:\n log_error(\"Invalid data to send\") \n return False \n\n request_msg = pm.image_frame_request(image_width, image_height,\n image_data.tobytes(),\n detection_result) \n self.send_message(request_msg) \n self._send_buffer.put(image_data) \n self._release_send_success_data()\n\n return True", "def process_image(self, data):\n try:\n\n # Convert the image from ROS format to OpenCV format\n # 'bgr8' means it will encode as 8-bit values in BGR channels\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n # Apply a threshold to your image\n cv_image = self.bound_green_object(cv_image)\n # Display the modified image\n cv2.imshow('picture', cv_image)\n cv2.waitKey(3)\n except CvBridgeError, e:\n rospy.loginfo(e)", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n self.ID = self.ID + 1\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n name = str(f\"{self.ID:02d}\"+\"_single.jpg\")\n cv2.imwrite(os.path.join(self.args.path_in, name), image)\n\n if (self.ID == 5):\n # Run SyntheticDataGeneration\n self.synthetic.eval()\n self.ID = 0\n # Annotate image and publish results\n current_directory_path = os.path.join(self.args.save_path, str(\"/Documents_orig/\"))\n for file in os.listdir(current_directory_path):\n name, ext = os.path.splitext(file)\n if ext == \".jpg\":\n image_file_savepath = os.path.join(current_directory_path, file)\n cv_image = cv2.imread(image_file_savepath)\n cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)\n if self.image_publisher is not None:\n image = Image(np.array(cv_image, dtype=np.uint8))\n message = self.bridge.to_ros_image(image, encoding=\"bgr8\")\n self.image_publisher.publish(message)\n for f in os.listdir(self.args.path_in):\n os.remove(os.path.join(self.args.path_in, f))", "def callback(self, data):\n\n # Convert sensor_msgs.msg.Image into OpenDR Image\n image = self.bridge.from_ros_image(data)\n rospy.loginfo(\"image info: {}\".format(image.numpy().shape))\n\n # Run pose estimation\n boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False)\n\n # Get an OpenCV image back\n image = np.float32(image.numpy())\n\n # Convert detected boxes to ROS type and publish\n ros_boxes = self.bridge.to_ros_boxes(boxes)\n if self.bbox_publisher is not None:\n self.bbox_publisher.publish(ros_boxes)\n rospy.loginfo(\"Published face boxes\")\n\n # Annotate image and publish result\n # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,\n # only used to test the corresponding bridge methods\n odr_boxes = self.bridge.from_ros_boxes(ros_boxes)\n image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)\n if self.image_publisher is not None:\n message = self.bridge.to_ros_image(np.uint8(image))\n self.image_publisher.publish(message)\n rospy.loginfo(\"Published annotated image\")", "def callback(self, data):\n try:\n cv_image = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n self.image_sub.unregister()\n\n except CvBridgeError as e:\n rospy.logerr(e)\n (rows, cols, channels) = cv_image.shape\n #result = cv2.fastNlMeansDenoisingColored(cv_image, None, 20, 10, 
7, 21)\n image = cv_image\n # Resize a 720x1280 image to 360x640 to fit it on the screen\n \"\"\"resized_image = cv2.resize(image, (720 / 2, 1280 / 2))\n cv2.imshow(\"/eyrc/vb/camera_1/image_raw\", resized_image)\n rospy.loginfo(self.get_qr_data(image))\"\"\"\n _,threshold = cv2.threshold(image, 70, 255, cv2.THRESH_TRUNC)\n self.get_qr_data(threshold)\n cv2.waitKey(3)", "def process_image(self):\n\n detect.main(self.nn_args)", "def process(self, image):", "def detect_image_client(img):\n rospy.wait_for_service('detect_service') # attendende che il servizio sia pronto\n rospy.loginfo(\"Detection service invoked\")\n try:\n detect_service = rospy.ServiceProxy('detect_service', Detect) #istanzia il proxy al servizio detect_service\n msg = detect_service(img) # invoca il servizio con un'istanza di Image per ottenere un'istanza di DetectResponse\n return msg.det # restituisce l'istanza di tipo Detection2DArray prelevandola dall'oggetto DetectResponse\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\"%e)", "def send_image(self, device_id, image):\n self.logger.debug(f\"{device_id}: sending processed image!\")\n base64_img = base64.b64encode(\n cv2.imencode('.jpg', image)[1].tostring())\n self.socketio.emit(\n \"image\", {\"message\": base64_img}, room=f\"device-{device_id}\")", "def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)", "def send_request(self, img_path):\n\n self.Helpers.logger.info(\"Sending request for: \" + img_path)\n \n _, img_encoded = cv2.imencode('.png', cv2.imread(img_path))\n response = requests.post(\n self.addr, data=img_encoded.tostring(), headers=self.headers)\n response = json.loads(response.text)\n \n return response", "def image_cb(self, msg):\n rospy.logdebug(\"TLDetector.image_cb\")\n self.__has_image = True\n self.__camera_image = msg\n\n cv_image = self.__bridge.imgmsg_to_cv2(msg, \"bgr8\")\n light_wp, state = self.__process_traffic_lights()\n if self.__mode == LABEL_MODE and not self.__classification_done and state != 4:\n self.__classification_done = self.__light_classifier.save_image(\n cv_image, state\n )\n if self.__classification_done:\n rospy.loginfo(\"TLDetector.image_cb: Done generating labels.\")\n\n \"\"\"\n Publish upcoming red lights at camera frequency.\n Each predicted state has to occur `STATE_COUNT_THRESHOLD` number\n of times till we start using it. 
Otherwise the previous stable state is\n used.\n \"\"\"\n self.__publish_traffic_light_state(light_wp, state)", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def receive_image(self):\n code = self.socket.recv(1)\n self.verify_img_code(code)\n if code[0] == codes['timeout']:\n print(\"Ocurrió un timeout en la conexión\")\n self.close_connection()\n idpokemon = bytes_to_int(self.socket.recv(1))\n self.verify_pokemon(idpokemon)\n tam_image = bytes_to_int(self.socket.recv(4))\n f = open(\"../..\" + str(idpokemon) + \".png\", 'wb')\n l = 1\n while(l):\n l = self.socket.recv(1024)\n f.write(l)\n print(\"Se guardó una imagen del pokémon capturado en el archivo \" +\n str(idpokemon) + \".png.\")\n f.close()\n\n print(\"Sesión terminada.\")\n reply = self.socket.recv(1)\n self.close_connection()", "def send_image(self, path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n # removed by alice\n #rospy.sleep(1)", "def video_test():\n r = request\n # convert string of image data to uint8\n nparr = np.fromstring(r.data, np.uint8)\n # decode image\n img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # do some fancy processing here....\n\n # build a response dict to send back to client\n response = {'message': 'image received. 
size={}x{}'.format(img.shape[1], img.shape[0])\n }\n print(response)\n # encode response using jsonpickle\n response_pickled = jsonpickle.encode(response)\n cv2.imwrite(\"1.jpg\", img)\n print(\"done\")\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")", "def get_data(self):\n global CAM\n count = 0\n while CAM.isOpened():\n count += 1\n print('COUNT' + str(count))\n _, frame = CAM.read()\n\n # cropped face\n cropped_face, bbox_coordinate, anchor_coordinate = detect_faces(frame)\n if cropped_face is None:\n print(\"NONE FACE DETECTED\")\n sleep(1)\n continue\n\n # get fake face\n fake_face, profile_feature_vector = generate_frontal_face(cropped_face)\n\n cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)\n fake_face = cv2.cvtColor(fake_face, cv2.COLOR_BGR2RGB)\n\n # face matching\n face_matcher = FaceMatcher()\n matched_face, matched_name, matched_front_fake_face, matched_diff = \\\n face_matcher.match(cropped_face, fake_face, profile_feature_vector)\n\n matched_face = cv2.cvtColor(matched_face, cv2.COLOR_BGR2RGB)\n matched_front_fake_face = cv2.cvtColor(matched_front_fake_face, cv2.COLOR_BGR2RGB)\n\n _, cropped_face_jpeg = cv2.imencode('.jpg', cropped_face)\n _, fake_face_jpeg = cv2.imencode('.jpg', fake_face)\n _, matched_face_jpeg = cv2.imencode('.jpg', matched_face)\n _, matched_front_fake_face_jpeg = cv2.imencode('.jpg', matched_front_fake_face)\n\n encoded_cropped_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(cropped_face_jpeg.tobytes()).decode())\n encoded_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(fake_face_jpeg.tobytes()).decode())\n\n encoded_matched_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_face_jpeg.tobytes()).decode())\n encoded_matched_front_fake_face = \"data:image/jpg;base64,\" + str(\n base64.b64encode(matched_front_fake_face_jpeg.tobytes()).decode())\n\n # get detection model return here and send to face frontalization model\n SIO.emit('detection', {'cropped_face': encoded_cropped_face,\n 'fake_face': encoded_fake_face,\n 'matched_face': encoded_matched_face,\n 'matched_name': matched_name,\n 'matched_front_fake_face': encoded_matched_front_fake_face,\n 'id': uuid.uuid4().hex},\n namespace='/detections')\n sleep(self.delay)", "def main_recognition():\n if request.method == 'POST':\n # print(request.url)\n # stream = BytesIO(request.data)\n # image = Image.open(stream).convert(\"RGBA\")\n # path = 'C:/Users/13/Documents/FRS_v1/path.png'\n # image = image.save(path)\n # stream.close()\n #df = faces_info_export(path)\n print(request.url)\n stream = BytesIO(request.data)\n img_pil=Image.open(stream).convert(\"RGB\")\n stream.close()\n img_cv=np.array(img_pil)\n try:\n df = faces_info_export(img_cv)\n return df.to_json(orient='index')\n except SystemError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n except AttributeError as er:\n \tprint(er)\n \treturn json.dumps({'msg':'error'})\n if request.method == 'GET':\n # ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n df = faces_info_export(\"C:/Users/13/Documents/FRS_v1/test_image.jpg\")\n return df.to_json(orient='index')", "def handle_image_data(data):\n \n #Get the incoming RGB image from the Kinect\n D.image = D.bridge.imgmsg_to_cv(data, \"bgr8\")\n\n if D.created_images == False:\n #Initialize the additional images we need for processing\n ImageProcessing.initialize(D)\n D.created_images = True\n\n # Recalculate threshold image\n ImageProcessing.threshold_image(D)\n\n # Recalculate blob in main image\n 
ImageProcessing.find_biggest_region(D)\n\n # Check on the display of dragged section\n ImageProcessing.mouse_section(D)\n\n #Display target circle\n #ImageProcessing.target_coord(D)\n \n #Display info box on image\n ImageProcessing.draw_on_image(D)\n \n #Handle incoming key presses\n key_press = cv.WaitKey(5) & 255\n if key_press != 255:\t\t\t#Handle only if it's a real key\n check_key_press(D, key_press)\t\t#(255 = \"no key pressed\")\n\n #Update the displays:\n #Show main image in the image window\n #cv.ShowImage('Image', D.image)\n\n #Show threshold image in the threshold window 3currentThreshold = getattr(D, D.current_threshold)\n cv.ShowImage('Threshold', currentThreshold)", "def capture_image(self, data={}):\n # call self.increment_count() after each image saved\n pass", "def sendDetection(self, idData, classes, aux=None):\n self.dp.URL = self.URL\n self.dp.sendDetection(classifier=self.Config[\"MACHINE_NAME\"], \n idData=idData, classes=classes, aux=aux)", "def detect(model, dataset_dir, subset):\n print(\"Running on {}\".format(dataset_dir))\n\n # Create directory\n if not os.path.exists(RESULTS_DIR):\n os.makedirs(RESULTS_DIR)\n submit_dir = \"submit_{:%Y%m%dT%H%M%S}\".format(datetime.datetime.now())\n submit_dir = os.path.join(RESULTS_DIR, submit_dir)\n os.makedirs(submit_dir)\n\n # Read dataset\n dataset = TamperDataset()\n dataset.load_tamper(dataset_dir, subset)\n dataset.prepare()\n # Load over images\n submission = []\n f1 = 0\n print(len(dataset.image_ids))\n # for image_id in dataset.image_ids:\n # # Load image and run detection\n # image = dataset.load_image(image_id)\n # # Detect objects\n # r = model.detect([image], verbose=0)[0]\n\n # # Encode image to RLE. Returns a string of multiple lines\n # source_id = dataset.image_info[image_id][\"id\"]\n # rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n # submission.append(rle)\n # # Save image with masks\n\n # N = r[\"scores\"].shape[0]\n # if not N:\n # \tH, W, C = image.shape\n # \tmask = np.zeros((H,W))\n\n \t\n # else:\n\n # H, W, C = image.shape\n\n # idx = np.argsort(-r[\"scores\"])\n # mask = r[\"masks\"][:,:,idx[0]].astype(np.float32)\n\n # bbox = r[\"rois\"][idx[0], :4]\n\n # y1, x1, y2, x2 = bbox\n\n\n\n # mask = dense_crf(image, mask)\n\n # mask = np.where(mask >= 0.5, 255, 0)\n\n # H, W, C = image.shape\n\n # full_mask = np.zeros((H, W))\n # full_mask[y1:y2, x1:x2] = mask\n\n for image_id in dataset.image_ids:\n # Load image and run detection\n image = dataset.load_image(image_id)\n # ela=dataset.load_ela(image_id)\n # Detect objects\n # r = model.detect([image],[ela], verbose=0)[0]\n r = model.detect([image],verbose=0)[0]\n\n # Encode image to RLE. 
Returns a string of multiple lines\n source_id = dataset.image_info[image_id][\"id\"]\n rle = mask_to_rle(source_id, r[\"masks\"], r[\"scores\"])\n submission.append(rle)\n # Save image with masks\n\n N = r[\"scores\"].shape[0]\n if not N:\n H, W, C = image.shape\n mask = np.zeros((H,W))\n\n \n else:\n idx = np.argsort(-r[\"scores\"])\n mask = r[\"masks\"][:,:,idx[0]].astype(np.uint8)\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n\n annotation = dataset.load_annaation(image_id)\n annotation = np.where(annotation >= 0.5, 1, 0) \n f = get_FM(mask, annotation)\n f1 += f\n\n print(f1/len(dataset.image_ids))\n\n\n\n\n # save_image(mask, submit_dir, name=dataset.image_info[image_id][\"id\"]) \n\n # visualize.display_instances(\n # image, r['rois'], r['masks'], r['class_ids'],\n # dataset.class_names, r['scores'],\n # show_bbox=False, show_mask=False,\n # title=\"Predictions\")\n # plt.savefig(\"{}/{}.png\".format(submit_dir, dataset.image_info[image_id][\"id\"]))\n\n # Save to csv file\n # submission = \"ImageId,EncodedPixels\\n\" + \"\\n\".join(submission)\n # file_path = os.path.join(submit_dir, \"submit.csv\")\n # with open(file_path, \"w\") as f:\n # f.write(submission)\n print(\"Saved to \", submit_dir)", "def main():\n # Set up socket\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('localhost', 12345))\n dat = b''\n dataSegement = [0] * 5\n\n while True:\n seg, addr = s.recvfrom(MAX_DGRAM)\n print(\"type: \", type(seg))\n chunk_number = struct.unpack(\"B\", seg[0:1])[0]\n if chunk_number > 1:\n print(\"chunk_number: \", chunk_number)\n dat += seg[1:]\n else:\n dat += seg[1:]\n img = cv2.imdecode(np.frombuffer(dat, dtype=np.uint8), 1)\n cv2.imwrite(\"image/4k_image_sample_compressed.jpg\", img)\n if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n break\n dat = b\"\"", "def image_cb(self, msg): # incoming image\n self.has_image = True\n self.camera_image = msg", "def raw_image_callback(self, msg):\n if self.pictures_to_take and not self.detection_to_receive:\n self.pictures_to_take -= 1\n # so let's analyse it here and then delete the subscription\n rows = msg.height\n step = msg.step\n cols = msg.width\n dim = int(step / cols)\n pixels = msg.data # of size (steps, nrows)\n # save the image (later we will need to analyse it)\n vision_utils.save_picture(pixels, rows, cols, dim, self.name, FOLDER)", "def _handle_image(self, image_msg):\n # converting the ROS image message to CV2-image\n image = self._cv_bridge.imgmsg_to_cv2(image_msg, 'bgr8')\n\n # Skip if image is None\n if image is None:\n rospy.logdebug(\"Image content is None :(\", logger_name=\"vision\")\n return\n\n # Check if its the first image callback\n if self._first_image_callback:\n # Check if a cap may be on the camera\n self._handle_forgotten_camera_cap(image)\n\n # Instances that should be notified with the new image\n internal_image_subscribers =[\n self._field_color_detector,\n self._white_color_detector,\n self._red_color_detector,\n self._blue_color_detector,\n self._unknown_obstacle_detector,\n self._field_boundary_detector,\n self._obstacle_detector,\n self._red_obstacle_detector,\n self._blue_obstacle_detector,\n self._goalpost_detector,\n self._line_detector,\n self._ball_detector,\n self._debug_image_creator,\n ]\n\n # Distribute the image to the detectors\n # Iterate over subscribers\n for vision_object in internal_image_subscribers:\n # Send image\n vision_object.set_image(image)\n\n # Check if the vision should run the conventional and neural net part parallel\n if 
self._config['vision_parallelize']:\n # Create and start threads for conventional calculation and neural net\n #fcnn_thread = Thread(target=self._ball_detector.compute)\n\n conventional_thread = Thread(target=self._conventional_precalculation())\n\n conventional_thread.start()\n #fcnn_thread.start()\n\n # Wait for both threads\n conventional_thread.join()\n #fcnn_thread.join()\n else:\n # Calc conventional calculation and neural net\n self._ball_detector.compute()\n self._conventional_precalculation()\n\n ########\n # Ball #\n ########\n\n # Get a number of top balls under the field boundary, which have an high enough rating\n all_balls = self._ball_detector.get_top_candidates(count=self._max_balls)\n balls_under_field_boundary = \\\n self._field_boundary_detector.candidates_under_convex_field_boundary(\n all_balls,\n self._ball_candidate_y_offset)\n top_balls = candidate.Candidate.rating_threshold(\n balls_under_field_boundary,\n self._ball_candidate_threshold)\n # check whether there are ball candidates\n if top_balls:\n # Convert ball cancidate list to ball message list\n list_of_balls = map(ros_utils.build_ball_msg, top_balls)\n # Create balls msg with the list of balls\n balls_msg = ros_utils.build_balls_msg(image_msg.header, list_of_balls)\n # Publish balls\n self._pub_balls.publish(balls_msg)\n\n # Debug draw all ball candidates\n self._debug_image_creator.draw_ball_candidates(\n all_balls,\n (0, 0, 255))\n # Debug draw possible ball candidates under the field boundary\n self._debug_image_creator.draw_ball_candidates(\n balls_under_field_boundary,\n (0, 255, 255))\n # Debug draw top ball candidate\n self._debug_image_creator.draw_ball_candidates(\n top_balls,\n (0, 255, 0),\n thickness=2)\n\n #############\n # Obstacles #\n #############\n\n # Init list for obstacle msgs\n list_of_obstacle_msgs = []\n # Add red obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_MAGENTA,\n self._red_obstacle_detector.get_candidates()))\n # Add blue obstacles\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.ROBOT_CYAN,\n self._blue_obstacle_detector.get_candidates()))\n # Add UFO's (Undefined Found Obstacles)\n list_of_obstacle_msgs.extend(ros_utils.build_obstacle_msgs(ObstacleInImage.UNDEFINED,\n self._unknown_obstacle_detector.get_candidates()))\n # Build obstacles msgs containing all obstacles\n obstacles_msg = ros_utils.build_obstacle_array_msg(image_msg.header, list_of_obstacle_msgs)\n # Publish obstacles\n self._pub_obstacle.publish(obstacles_msg)\n\n # Debug draw unknown obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._unknown_obstacle_detector.get_candidates(),\n (0, 0, 0),\n thickness=3)\n # Debug draw red obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._red_obstacle_detector.get_candidates(),\n (0, 0, 255),\n thickness=3)\n # Debug draw blue obstacles\n self._debug_image_creator.draw_obstacle_candidates(\n self._blue_obstacle_detector.get_candidates(),\n (255, 0, 0),\n thickness=3)\n\n ########\n # Goal #\n ########\n\n # Get all goalposts under field boundary\n goal_posts = self._field_boundary_detector.candidates_under_convex_field_boundary(\n self._goalpost_detector.get_candidates(),\n self._goal_post_field_boundary_y_offset)\n\n # Get goalpost msgs and add them to the detected goal posts list\n goal_post_msgs = ros_utils.build_goal_post_msgs(goal_posts)\n # Create goalposts msg\n goal_posts_msg = ros_utils.build_goal_post_array_msg(image_msg.header, goal_post_msgs)\n # 
Check if there is a goal\n if goal_posts_msg:\n # If we have a goal, lets publish it\n self._pub_goal_posts.publish(goal_posts_msg)\n\n # Debug draw all goal posts\n self._debug_image_creator.draw_obstacle_candidates(\n self._goalpost_detector.get_candidates(),\n (180, 180, 180),\n thickness=3)\n # Debug draw goal posts which start in the field\n self._debug_image_creator.draw_obstacle_candidates(\n goal_posts,\n (255, 255, 255),\n thickness=3)\n\n #########\n # Lines #\n #########\n if self._use_line_points:\n # Build a LineSegmentInImage message for each linepoint\n line_points = self._line_detector.get_linepoints()\n # Create line segments\n line_segments = ros_utils.convert_line_points_to_line_segment_msgs(line_points)\n # Create line msg\n line_msg = ros_utils.build_line_information_in_image_msg(image_msg.header, line_segments)\n # Publish lines\n self._pub_lines.publish(line_msg)\n\n # Draw debug line points\n self._debug_image_creator.draw_points(\n line_points,\n (0, 0, 255))\n\n if self._use_line_mask:\n # Define detections (Balls, Goal Posts) that are excluded from the line mask\n excluded_objects = top_balls + goal_posts\n # Get line pixel mask\n line_mask = self._line_detector.get_line_mask_without_other_objects(excluded_objects)\n # Create line mask message\n line_mask_message = ros_utils.build_image_msg(image_msg.header, line_mask, '8UC1')\n # Publish line mask\n self._pub_line_mask.publish(line_mask_message)\n\n # Draw debug line mask\n self._debug_image_creator.draw_mask(\n line_mask,\n color=(255, 0, 0),\n opacity=0.8)\n\n ##################\n # Field boundary #\n ##################\n\n # Get field boundary msg\n convex_field_boundary = self._field_boundary_detector.get_convex_field_boundary_points()\n # Build ros message\n convex_field_boundary_msg = ros_utils.build_field_boundary_polygon_msg(image_msg.header, convex_field_boundary)\n # Publish field boundary\n self._pub_convex_field_boundary.publish(convex_field_boundary_msg)\n\n # Debug draw convex field boundary\n self._debug_image_creator.draw_field_boundary(\n convex_field_boundary,\n (0, 255, 255))\n # Debug draw field boundary\n self._debug_image_creator.draw_field_boundary(\n self._field_boundary_detector.get_field_boundary_points(),\n (0, 0, 255))\n\n #########\n # Debug #\n #########\n '''\n if self._config['neural_network_type'] == 'fcnn':\n # Publish fcnn output for the region of interest under the field boundary (for the world model)\n if self._ball_fcnn_publish_output:\n roi_msg = ros_utils.build_fcnn_region_of_interest(\n self._ball_detector.get_fcnn_output(),\n self._field_boundary_detector,\n image_msg.header,\n self._config['ball_fcnn_publish_field_boundary_offset'])\n self._pub_ball_fcnn.publish(roi_msg)\n\n # Publish whole fcnn output for debug purposes\n if self._publish_fcnn_debug_image:\n self._pub_debug_fcnn_image.publish(self._ball_detector.get_debug_image())\n '''\n # Check, if HSV mask images should be published\n if self._publish_HSV_mask_image:\n # Mask images\n white_mask = self._white_color_detector.get_mask_image()\n red_mask = self._red_color_detector.get_mask_image()\n blue_mask = self._blue_color_detector.get_mask_image()\n\n # Publish mask images\n self._pub_white_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, white_mask, '8UC1'))\n self._pub_red_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, red_mask, '8UC1'))\n self._pub_blue_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, blue_mask, '8UC1'))\n\n # Check, if field mask image 
should be published\n if self._publish_field_mask_image:\n if isinstance(self._field_color_detector, color.DynamicPixelListColorDetector):\n # Mask image\n dyn_field_mask = self._field_color_detector.get_mask_image()\n static_field_mask = self._field_color_detector.get_static_mask_image()\n # Publish mask image\n self._pub_dynamic_color_lookup_table_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, dyn_field_mask, '8UC1'))\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, static_field_mask, '8UC1'))\n else:\n # Mask image\n field_mask = self._field_color_detector.get_mask_image()\n # Publish mask image\n self._pub_field_mask_image.publish(\n ros_utils.build_image_msg(image_msg.header, field_mask, '8UC1'))\n\n # Check if we should draw debug image\n if self._debug_image_creator.active:\n # publish debug image\n self._pub_debug_image.publish(\n ros_utils.build_image_msg(\n image_msg.header,\n self._debug_image_creator.get_image(),\n 'bgr8'))", "def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def send_image(path):\n img = cv2.imread(path)\n msg = cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding=\"bgr8\")\n pub = rospy.Publisher('/robot/xdisplay', Image, latch=True, queue_size=1)\n pub.publish(msg)\n # Sleep to allow for image to be published.\n rospy.sleep(1)", "def callback(self,data):\n self.cvtImage(data)\n\n \"\"\" Do some image processing; flip, resize, and etc\"\"\"\n self.imgProcessing()\n\n \"\"\" displaying an OpenCV image \"\"\"\n cv2.imshow(self.cv_window_name, self.cv_image)\n cv2.waitKey(1)\n# ------------------------------------------------------------------------------\n\n try:\n \"\"\" coverting the uint8 OpenCV image to ROS image data \"\"\"\n \"\"\" Publisher.publish() -- explicit way \"\"\"\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(self.cv_image, \"bgr8\"))\n except CvBridgeError as e:\n print(e)", "def detect(self, input_image):\n self.t.start()\n frame = self.convert_image(input_image)\n frame = cv2.pyrDown(frame)\n\n img, confidence, x, y = self.detector.detect(frame)\n print('Detection:', confidence, x, y)\n det = Target_coordinates()\n det.confidence = confidence\n det.x = x\n det.y = y\n self.pub_detection.publish(det)\n self.pub_fpv.publish(self.bridge.cv2_to_imgmsg(img))\n cv2.imwrite('frames/frame%d.jpg' % self.frame_num, img)\n self.frame_num += 1\n self.t.end()\n # Display\n cv2.imshow(self.iw, img)\n key = cv2.waitKey(30) & 0xFF\n if key == 27:\n cv2.destroyAllWindows()\n sys.exit(27)" ]
[ "0.7168172", "0.6857813", "0.6741724", "0.65148985", "0.64819443", "0.64530796", "0.64069283", "0.6382538", "0.6379917", "0.633284", "0.6293329", "0.6292463", "0.6283481", "0.6194607", "0.6180911", "0.617084", "0.61642075", "0.61596763", "0.615218", "0.61351126", "0.6130977", "0.61192364", "0.6112583", "0.61004716", "0.60909986", "0.607433", "0.60375", "0.60375", "0.60308295", "0.60289204" ]
0.7531366
0
get channel presenter_server_ip, port, channel_name, content_type
def get_channel_config(config_file):
    config = configparser.ConfigParser()
    config.read(config_file)

    presenter_server_ip = config['baseconf']['presenter_server_ip']
    port = int(config['baseconf']['presenter_server_port'])
    channel_name = config['baseconf']['channel_name']
    content_type = int(config['baseconf']['content_type'])

    log_info("presenter server ip %s, port %d, channel name %s, "
             "type %d " % (presenter_server_ip, port, channel_name, content_type))

    return presenter_server_ip, port, channel_name, content_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getChannel(self):\r\n return self.channel", "def get_channel(channel_id):\r\n if channel_id[0] == 'C':\r\n type = \"channel\"\r\n elif channel_id[0] == 'G':\r\n type = \"group\"\r\n elif channel_id[0] == 'D':\r\n return False\r\n else:\r\n return False\r\n data = slack_client.api_call(type + \"s.info\", channel=channel_id)\r\n if not data[\"ok\"]:\r\n return False\r\n response = {}\r\n response[\"name\"] = data[type][\"name\"]\r\n response[\"members\"] = data[type][\"members\"]\r\n response[\"channel_id\"] = data[type][\"id\"]\r\n return response", "def getChannelResponse(self):\n \n \n return self.channel_response", "def get_channel(self, channel_id):\n uri = 'channels/' + channel_id\n return self.make_request(uri)", "def extract_medialive_channel_info(ml_client, ml_channel_id):\n mediapackage_channel_list = []\n channel_name = None\n try:\n response = ml_client.describe_channel(\n ChannelId=ml_channel_id\n )\n channel_name = str(response[\"Name\"])\n destinations = response[\"Destinations\"]\n for destination in destinations:\n for output in destination[\"Settings\"]:\n url = str(output[\"Url\"])\n if \"mediapackage\" in url:\n mediapackage_channel_list.append(url)\n except Exception, e:\n print \"Error:\", e.message\n return channel_name, mediapackage_channel_list", "def get_channel(self, channel_name):\n try:\n cm = self.__core.get_service(\"channel_manager\")\n cdb = cm.channel_database_get()\n channel = cdb.channel_get(channel_name)\n return channel.get()\n except Exception:\n traceback.print_exc()", "def parse_channel(self, channel):\n return channel.split(\":\")[1:]", "def extract_channel_views(show_views_channel):\n channel,views,=show_views_channel[1]\n return (channel, views)", "def channel(self):\n return self._channel", "def channel(self):\n return self._channel", "def channel(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"channel\")", "def channelinfo(self):\n\n return ChannelInfo(\n self._filetextbox.text(),\n self._idtextbox.text(),\n self._datafilebox.text()\n )", "def channel_info(channel_id):\n\n if not settings.SLACK_TOKEN:\n return None\n\n client = WebClient(token=settings.SLACK_TOKEN)\n\n try:\n response = client.conversations_info(channel=channel_id)\n assert response['ok'] is True\n return response['channel']\n except SlackApiError as e:\n assert e.response['ok'] is False\n return None", "def get_livechat_channel_info(self):\n self.ensure_one()\n if self.channel_id:\n return self.channel_id.sudo().get_livechat_info()\n return {}", "def channel(self):\n raise NotImplementedError", "def read_channel(self, channel: int, /) -> int:", "def get_channels(self):\n return self.channels", "def get_channel(self):\n if self.channel is None or not self.channel.is_open:\n if not self.connection.is_open:\n self.connection = CONNECTION_MANAGER.get_connection(self.connection_name)\n self.channel = self.connection.channel()\n return self.channel", "def get_channels():\n r = slack.channels.list().body\n return [ c for c in r['channels'] if c['is_member'] ]", "def channels(message):\n load_users(message._client.users)\n for x in message._client.channels:\n chan = message._client.channels[x]\n if 'is_member' in chan:\n if chan['is_member']:\n message.reply(\"{} ({})\".format(chan['name'], chan['id']))\n# message.reply(pretty_json(chan, True))\n elif 'is_im' in chan:\n print(chan)\n friendlyname = chan['user']\n try:\n friendlyname = chan['user'].name\n except KeyError:\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n chan['id']))", "def 
channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def channel(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"channel\")", "def channel_details(token, channel_id):\n authorised_u_id = get_id_from_token(token)\n channel = channels.get(channel_id)\n if channel is None:\n raise ValueError(\"channel_id does not exist.\")\n if authorised_u_id not in channel[\"all_members\"]:\n raise AccessError(\"The authorised user is not a member of the channel.\")\n name = channel[\"name\"]\n all_members = []\n owner_members = []\n for member_id in channel[\"all_members\"]:\n member = users.get(member_id)\n all_members.append(\n {\n \"u_id\": member[\"u_id\"],\n \"name_first\": member[\"first_name\"],\n \"name_last\": member[\"last_name\"],\n \"profile_img_url \": member[\"img_url\"],\n }\n )\n for owner_id in channel[\"owners\"]:\n owner = users.get(owner_id)\n owner_members.append(\n {\n \"u_id\": owner[\"u_id\"],\n \"name_first\": owner[\"first_name\"],\n \"name_last\": owner[\"last_name\"],\n \"profile_img_url \": owner[\"img_url\"],\n }\n )\n return {\"name\": name, \"all_members\": all_members, \"owner_members\": owner_members}", "def get_channels(cj): \n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))\n channels = opener.open(\"http://www.douban.com/j/app/radio/channels\")\n channel_list = json.loads(channels.read())\n return channel_list[\"channels\"]\n # print channel_list", "def channel(self) -> 'Channel': # stub\n return self._channel", "def get_channel_details(self, chan_ids_list, part='statistics'):\n\n chnl_details = {}\n key = self.keylist[self.keyindex]\n url_c = \"https://www.googleapis.com/youtube/v3/channels\"\n\n for ind, chan in enumerate(chan_ids_list):\n try:\n querystring = {\"id\": chan, \"part\": part,\n \"key\": key}\n response = request_handler(self, url_c, params=querystring, wait=100)\n #print(response)\n # Error-handling\n if response.get('error'):\n print(response.get('error'))\n while response['error']['errors'][0]:\n key = keychange(self)\n \n querystring = {\"id\": chan, \"part\": part,\n \"key\": key}\n response = request_handler(self, url_c, params=querystring, wait=100)\n\n if response.get('error'):\n #chnl_details.update({chan:[str(response), response.text]})\n #\n if response['error']['errors'][0]['reason'] == 'keyInvalid':\n return [{chan:[str(response), response.text]}]\n break\n\n if response.get('Interneterror'):\n chnl_details.update({chan: str(response)})\n continue\n\n chnl_details[chan] = response['items']\n\n except Exception as e:\n print(e, traceback.format_exc())\n\n if ind % 100 == 0:\n print(ind)\n \n return chnl_details", "def get(self, channel):\n try:\n return self[channel.lower()]\n except KeyError:\n return None", "def channels(message):\n for channel in message._client.channels:\n if 'is_member' in channel:\n message.reply(\"{} ({})\".format(channel['name'], channel['id']))\n elif 'is_im' in channel:\n #print(channel)\n friendlyname = channel['user']\n try:\n friendlyname = channel['user'][\"name\"]\n except (KeyError, AttributeError):\n pass\n message.reply(\"User channel: {} ({})\".format(friendlyname,\n channel['id']))", "def test_api_read_channel(api):\n response = api.read_channel()\n assert \"name='request().json()'\" in repr(response)\n req_call = requests.request\n assert req_call.call_count == 1\n req_args = req_call.call_args[0]\n req_kw = req_call.call_args[1]\n assert req_args[0] == 'GET'\n assert req_args[1] == 'https://news-api.apple.com/channels/FAKE_CHANNEL'\n 
assert 'Authorization' in req_kw['headers']\n assert 'HHMAC; key=FAKE_ID; signature=' in req_kw['headers']['Authorization']\n assert req_kw['data'] is None", "def get_channels():\n\tchannels = slack.get_channels()\n\treturn jsonify(channels=channels.body['channels'])" ]
[ "0.65134233", "0.643602", "0.6210229", "0.619519", "0.6009834", "0.5932588", "0.58862126", "0.58620787", "0.5847913", "0.5847913", "0.584146", "0.58371985", "0.58296996", "0.5799957", "0.57254803", "0.57109845", "0.57049286", "0.5669257", "0.5649315", "0.5622001", "0.5602927", "0.5602927", "0.56024", "0.560007", "0.55995256", "0.5590534", "0.55889165", "0.5565141", "0.55267626", "0.55061245" ]
0.74030155
0
Sets the caller of this Dial.
def caller(self, caller): self._caller = caller
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_scripts_caller(self, caller):\n self._scripts_caller = caller", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def sender(self, sender):\n\n self._sender = sender", "def caller_reference(self) -> str:\n return pulumi.get(self, \"caller_reference\")", "def dialstring(self, dialstring):\n\n self._dialstring = dialstring", "def setPeer (self, peer):\n\t\tself.peer = peer", "def call(self, callee: \"SIPPhoneTemplate\") -> None:", "def set_follower(self, follower):\n self.follower = follower", "def referred_by_name(self, referred_by_name: str):\n self._referred_by_name = referred_by_name", "def CALL_addr(self, addr):\n\t\tself.stack[self.SP] = self.IP\n\t\tself.SP += 1\n\t\tself.IP = addr", "def sender(self) -> Address:\n return self._sender", "def sender(self, sender: Address) -> None:\n enforce(\n isinstance(sender, str), f\"Sender must be string. Found '{type(sender)}'\"\n )\n self._sender = sender", "def ping(self, caller):\n if not hasattr(self, \"_ping_callers\"):\n self._ping_callers = []\n self._ping_callers.append(caller)\n super(ServerBot, self).msg(ping=\"\")", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def owner(self, owner):\n self._owner = owner", "def call_from_contact(self):\n\n log_test_case(self.name, 'call_from_contact')\n #lick_textview_by_text(SC.PRIVATE_CONTACT_NUMBER)\n click_textview_by_id('primary_action_view')\n sleep(1)\n goback()\n sleep(3)\n return", "def sender(self) -> str:\n return self._sender", "def set_owner(self, owner):\n self.__owner = owner", "def owner(self, owner: str):\n\n self._owner = owner", "def sender(self, sender: str):\n if sender is None:\n raise ValueError(\"Invalid value for `sender`, must not be `None`\") # noqa: E501\n\n self._sender = sender", "def salt_caller(self):\n if self._salt_caller is None:\n self._salt_caller = salt.client.Caller()\n return self._salt_caller", "def toggle_call(self) -> None:", "def handle_call(self):\n call_socket, address = self.call_socket.accept()\n print(\"connected call socket: {}\".format(call_socket))\n # gets name of user making the call:\n caller_name = self.receive_mes(call_socket)\n # gets from calling client user they want to call:\n receiver_name = self.receive_mes(call_socket)\n # gets receivers socket from dictionary\n if receiver_name not in self.client_dict:\n print(\"boi bye\")\n sys.exit(EXIT)\n receiver_sock = self.client_dict[receiver_name]\n mes = \"{} is calling you\".format(caller_name)\n self.send_mes(mes.encode(), receiver_sock)\n answer = self.receive_mes(receiver_sock)\n print(\"answer from {}: {}\".format(receiver_name, answer))\n if answer == \"Y\":\n self.send_mes(\"call\".encode(), call_socket)\n self.start_call()\n else:\n self.send_mes(\"no call\".encode(), call_socket)", "def owner(self, owner):\n\n self._owner = owner", "def owner(self, owner):\n\n self._owner = owner" ]
[ "0.5862553", "0.55642617", "0.55642617", "0.55642617", "0.55642617", "0.55642617", "0.55458647", "0.52504754", "0.52443874", "0.5207483", "0.5065583", "0.5034338", "0.50276506", "0.50047404", "0.5002639", "0.49828595", "0.49691632", "0.49691632", "0.49691632", "0.49691632", "0.4918082", "0.49063438", "0.48871636", "0.48751572", "0.48585597", "0.4850022", "0.48449415", "0.48439252", "0.48348072", "0.48348072" ]
0.74337715
0
Sets the dialstatus of this Dial.
def dialstatus(self, dialstatus): if dialstatus is None: raise ValueError("Invalid value for `dialstatus`, must not be `None`") # noqa: E501 self._dialstatus = dialstatus
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def setstatus(self, status):\n with self.lock:\n self.status = status", "def SetStatus(self, status):\r\n self.status = status", "def set_status(self, status: str) -> None:\n\n try:\n self.status = Buddy.status_map[status.lower()]\n except KeyError:\n self.status = status", "def set_vpn_state(self, status):\n if hasattr(self, status):\n self.change_to(getattr(self, status))", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def set_status(self, status):\n # TODO log to db\n self.status = status", "def setStatus(self, status):\n self.__status = status", "def status(self, status):\n self._set_property_(self.STATUS, str(status))", "def _set_status(self, status):\n with self.status_lock:\n if (status in _ENDING_STATUSES) or (not self.status in _ENDING_STATUSES):\n self.status = status", "async def set_status(self, ctx, *, status: str = \"online\"):\n\n try:\n status = discord.Status[status.lower()]\n except KeyError:\n await ctx.error(\"Invalid Status\", \"Only `online`, `idle` or `dnd` statuses are available.\")\n else:\n await self.bot.change_presence(status=status, activity=ctx.me.activity)\n await ctx.success(f\"Status changed to {status}.\")", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def status(self, status):\n self._status = status", "def set_activity(self, status):\n self._activity = status", "def status(self, status):\n allowed_values = [\"D\", \"P\", \"V\", \"S\", \"M\", \"I\", \"R\", \"C\"] # noqa: E501\n if status not in allowed_values:\n raise ValueError(\n \"Invalid value for `status` ({0}), must be one of {1}\" # noqa: E501\n .format(status, allowed_values)\n )\n\n self._status = status", "def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return", "def set_on_tunnel(self, status: bool):\n self._is_on_tunnel = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status", "def status(self, status):\n\n self._status = status" ]
[ "0.6324947", "0.6324947", "0.6324947", "0.62151617", "0.6139082", "0.6040312", "0.6012847", "0.60080004", "0.5966004", "0.59111637", "0.5868122", "0.5861913", "0.58358765", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58328503", "0.58151054", "0.5798218", "0.5796215", "0.5787214", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809", "0.5785809" ]
0.8169267
0
Sets the dialstring of this Dial.
def dialstring(self, dialstring): self._dialstring = dialstring
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string_value(self, string_value):\n\n self._string_value = string_value", "def setString(self, name: unicode, value: unicode) -> None:\n ...", "def setstring(self):\n self._str = 's '+' '.join([self.src, self.start, self.size,\n self.strand, self.srcSize, self.text])+'\\n'", "def set_text( self, a_string ):\n self.a_string_var.set( a_string )", "def set_text( self, a_string ):\n self.a_string_var.set( a_string )", "def set_gift_conversation(self, conversation_string):\r\n self.gift_conversation = conversation_string", "def setInputString(self, inputString):\n assert isinstance(inputString, basestring), \\\n \"Invalid template string!\"\n\n self.__inputString = inputString", "def add_string(self, str):\n self.__add_source_data(str)", "def measurement_unit_string(self, measurement_unit_string):\n\n self._measurement_unit_string = measurement_unit_string", "def query_str(self, new_query_str):\n self.query_buffer.text = new_query_str", "def dialstatus(self, dialstatus):\n if dialstatus is None:\n raise ValueError(\"Invalid value for `dialstatus`, must not be `None`\") # noqa: E501\n\n self._dialstatus = dialstatus", "def setHint( self, hint ):\n self._urlEdit.setHint(hint)", "def setiddname(self, iddname, testing=False):\n self.iddname = iddname\n self.idd_info = None\n self.block = None", "def custom_string(self, custom_string):\n\n self._custom_string = custom_string", "def __init__(self, string):\n self.string = string", "def set_raw_string(self, string, length):\n if len(string) != length:\n raise ValueError('Length of passed string does not match length')\n self.originstring = string\n self.stringlength = length", "def set_dispute_contact_state(self, state):\n if state == \"\":\n state = self.random_string_generator(6, string.ascii_uppercase)\n self.set_value_into_input_field(self.dispute_contact_state_textbox_locator, state)", "def 置项目文本(self, n, string): # real signature unknown; restored from __doc__\n self.SetString(n, string)", "def set_response(self, response_str):\r\n input_css = \"textarea.short-form-response\"\r\n self.q(css=input_css).fill(response_str)", "def __init__(self, number=None, **kwargs):\n super(Dial, self).__init__(**kwargs)\n if number:\n self.value = number", "def set_adapter_name(self, sAdapterName):\n\t\tcall_sdk_function('PrlVirtNet_SetAdapterName', self.handle, sAdapterName)", "def sendString(self,string):\n\t\tif self.outChannel==None:\n\t\t\traise Exception, \"before calling sendString() on this EpicsAsysnSerialInterface object first call configure() to open the epics channels\"\n\t\tself.outChannel.caput(string)", "def setSnr(tel, snr):\n simuConfig[\"SNRS\"] = snr", "def set_stock_sym_append_str(self, append_str):\n self.com_data_stock_portion_additional_url = append_str", "def dial(address: str, network: Optional[str]=None):\n return NotImplementedError()", "def messier_name(self, messier_name):\n\n self._messier_name = messier_name", "def setModemInitString(self, initString, unitCode=0):\n resp = self.XAPCommand('MINIT', initString, unitCode=unitCode)\n return resp", "def addstr(self,name,string):\n\t\tself.windows[name].addstr(string)", "def set_stock_sym_append_str(self, append_str):\n self.cur_quotes_stock_portion_additional_url = append_str", "def setName(self, name):\n self.name = str(name)" ]
[ "0.5384727", "0.5367918", "0.52513164", "0.5229012", "0.5229012", "0.51429296", "0.4930052", "0.49152938", "0.48876902", "0.48603123", "0.48325157", "0.47601306", "0.47166997", "0.46935034", "0.46845663", "0.46628696", "0.46405885", "0.4633442", "0.4625754", "0.46073928", "0.46067485", "0.45958725", "0.45811033", "0.4579079", "0.45759702", "0.45693684", "0.4563997", "0.45204884", "0.4519031", "0.45180133" ]
0.8753864
0
Sets the forward of this Dial.
def forward(self, forward): self._forward = forward
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self):\n self.cursor.forward()", "def forward(self):\n pass", "def forward(self):\n pass", "def go_forward(self):\n command = _build_robovac_command(RobovacModes.GO_FORWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)", "def forwarded(self, forwarded):\n\n self._forwarded = forwarded", "def forward(self):\n self.position += 1", "def set_forward_trig(self, trig):\n\t\tself.forward_trig = trig", "def forward(self, speed):\n self.controller.forward(speed)", "def forward(self) -> None:\n self.system.notify(\"Jarvis::Forward\")\n self.media.fast_forward()", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def drive_forward(self):\n print(f\"{self.make.title()} is now driving forward.\")", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)", "def start_forward(self, velocity=VELOCITY):\n action = StartForward(velocity=velocity)\n self._velocity_control_client(pickle.dumps(action))", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self, speed):\n self.pwm_backward.ChangeDutyCycle(0)\n self.pwm_forward.ChangeDutyCycle(speed)", "def forward(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, True)\n GPIO.output(11, False)\n GPIO.output(13, True)\n GPIO.output(15, False)\n # time.sleep(sec)\n motor_direction = 'Forward'\n return motor_direction", "def forward(self,distance):\n assert (type(distance) in [int, float]), \"parameter distance:%s is not a valid number\" % `distance`\n self._turtle.forward(distance)", "def forward(self, *args, **kwargs):\n pass", "def forward(self, x):\n pass", "def fastforward(self, /, noerror=False):\n\t\tif not self in _running:\n\t\t\tif noerror: return\n\t\t\traise RuntimeError('Not running')\n\t\t_running.remove(self)\n\t\t_anim_stopped(self)\n\t\tfor attr in self._end:\n\t\t\tsep = attr.split('__')\n\t\t\tsubtarget, subattr = eval('.'.join(['self.target']+sep[:-1])), sep[-1]\n\t\t\tsetattr(subtarget, subattr, self._end[attr])", "def move_forward(self, distance):\r\n return self.move('forward', distance)", "def forward(\n self\n ) -> None:\n if not self._forward_page_history_stack:\n # Do nothing if there is no forward page history.\n return\n\n self._back_page_history_stack.append(self._current_page)\n self._current_page = self._forward_page_history_stack.pop()", "def base_forward(self, x):\r\n pass", "def set_port_forward_list(self, nPortFwdType, hPortFwdList):\n\t\tcall_sdk_function('PrlVirtNet_SetPortForwardList', self.handle, nPortFwdType, conv_handle_arg(hPortFwdList))", "def left_forward(self):\n self.left_motor.run_forever(speed_sp=self.MAX_SPEED)" ]
[ "0.6834646", "0.67426616", "0.6636892", "0.64269316", "0.64269316", "0.6326287", "0.62784827", "0.62357396", "0.62272525", "0.62225056", "0.6159911", "0.6145522", "0.6091365", "0.6059019", "0.6059019", "0.6059019", "0.60449684", "0.60174173", "0.5994386", "0.5994386", "0.59784514", "0.59499764", "0.59146667", "0.58958226", "0.5845093", "0.581511", "0.5811764", "0.5791648", "0.57873344", "0.57249874" ]
0.7644515
0
Sets the forwarded of this Dial.
def forwarded(self, forwarded): self._forwarded = forwarded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forward(self, forward):\n\n self._forward = forward", "def forwarder(self, forwarder: ICNForwarder):\n self._forwarder = forwarder", "def forward(self):\n pass", "def forward(self):\n pass", "def forward( self ):\n self._has_change = True\n print( \"Forward\" )", "def forward(self, *args, **kwargs):\n pass", "def forward_pass(self):", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def forward(self):\n raise NotImplemented", "def fastforward(self):\n self.run_command('fastforward')", "def forward(self, *args, **kwargs) -> Dict[str, Any]:\n pass", "def forward_to(self):\n if \"forwardTo\" in self._prop_dict:\n if isinstance(self._prop_dict[\"forwardTo\"], OneDriveObjectBase):\n return self._prop_dict[\"forwardTo\"]\n else :\n self._prop_dict[\"forwardTo\"] = Recipient(self._prop_dict[\"forwardTo\"])\n return self._prop_dict[\"forwardTo\"]\n\n return None", "def set_port_forward_list(self, nPortFwdType, hPortFwdList):\n\t\tcall_sdk_function('PrlVirtNet_SetPortForwardList', self.handle, nPortFwdType, conv_handle_arg(hPortFwdList))", "def set_forward_trig(self, trig):\n\t\tself.forward_trig = trig", "def set_forwarded_remote_consul_once(self, set_to=True):\n self.FORWARDED_CONSUL_ONCE_ALREADY = set_to", "def forward(self, distance):\n self.logger.debug(\"forward \" + str(distance))", "def forwarder(self) -> ICNForwarder:\n return self._forwarder", "def forward(self, x):\n pass", "def forward(self, x, mask):\n \"Follow Figure 1 for connections.\"\n x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n return self.sublayer[1](x, self.feed_forward)", "def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')", "def base_forward(self, x):\r\n pass", "def forward(self, x, **kwargs):\n pass", "def forward(self):\n self.cursor.forward()", "def forward(self, request, forward, times=None):\n data = {\n 'httpRequest': request.dict(),\n 'httpForward': forward.dict(),\n 'times': {\n 'remainingTimes': 1,\n 'unlimited': True\n }\n }\n if times:\n data['times'] = vars(times)\n req = requests.put('{}/expectation'.format(self._get_url()),\n json.dumps(data))\n return req", "def turn_on(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 1)", "def forward(self, *args, **kwargs):\n\n raise NotImplementedError()", "async def skip_forward(self) -> None:\n return await self.relay(\"skip_forward\")()", "def forward(self, *args: Any, **kwargs: Any) -> None:\n self._check_for_increment(\"forward\")\n return self[-1](*args, **kwargs)", "def forward(self, *args, **kwargs):\n raise NotImplementedError" ]
[ "0.6887666", "0.6606515", "0.6383124", "0.6383124", "0.62122256", "0.60811234", "0.594021", "0.5932288", "0.5932288", "0.5932288", "0.581016", "0.5741199", "0.57171863", "0.56688905", "0.5637646", "0.5620836", "0.5603269", "0.5560376", "0.5535223", "0.5532661", "0.5506793", "0.55006456", "0.5477857", "0.5435532", "0.5351057", "0.5345362", "0.53316665", "0.5326462", "0.5325653", "0.5322424" ]
0.81721765
0