query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, 30 items) | negative_scores (sequence, 30 items) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
test that the generator yields the correct output when indexing from zero | def test_generation_index_zero(self):
generator = math_helpers.triangle_number_generator()
first_eleven_triangle_numbers = [next(generator) for _ in range(11)]
canonical_values = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55]
self.assertEqual(canonical_values, first_eleven_triangle_numbers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generation_index_one(self):\n generator = math_helpers.triangle_number_generator(1)\n first_ten_triangle_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 3, 6, 10, 15, 21, 28, 36, 45, 55]\n self.assertEqual(canonical_values, first_ten_triangle_numbers)",
"def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i",
"def generator(self):\n return [None, 1]",
"def test_generator_manual() -> None:\n reversed_int: List[int] = []\n\n generator = reverse([1, 2, 3])\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n\n with pytest.raises(StopIteration):\n next(generator)\n\n assert reversed_int == [3, 2, 1]",
"def test_func_generator_transplant():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i",
"def test_func_generator():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i",
"def test_in_range_0_1():\n g = RG.larger_random()\n assert 0 <= next(g) <= 1",
"def emptyGenerator():\n return\n yield",
"def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)",
"def test_uniform_basic():\r\n yield check_uniform_basic, False\r\n yield check_uniform_basic, False, True\r\n yield check_uniform_basic, True",
"def __emptygen():\n if False:\n yield",
"def testGetSequence():\r\n\t\r\n\t#a few of hand-tested genome positions\r\n\ttest_data = [\t('1',500,520,'GTCTGACCTGAGGAGAACTGT'),\r\n\t\t\t\t\t('2',500,520,'CCCGACCCCGACCCCGACCCA'),\r\n\t\t\t\t\t('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),\r\n\t\t\t\t\t('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),\r\n\t\t\t\t\t('5',100000,100020,'AATGTTCACCAGTATATTTTA'),\r\n\t\t\t\t\t('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),\r\n\t\t\t\t\t('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]\r\n\t\t\t\t\t\r\n\tfor this_check in test_data:\r\n\t\tyield CheckGetSequence, this_check",
"def test_random_generator(self):\n gen = random_data()\n data = [next(gen) for _ in range(100)]\n self.assertEqual(len(data), 100)",
"def test_generator_inline(self):\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i",
"def test_testGenerator():\n\n # check type\n assert isinstance(testset, list)\n\n # check the shape\n assert len(testset)==newObs.shape[0]",
"def iter_zeros(self):\n num = quotient = 0\n while num < self._len:\n chunk = self.data[quotient]\n if chunk & self.zero_mask:\n remainder = 0\n while remainder < self.width and num < self._len:\n item = (chunk >> remainder) & 3\n if item == PC_ZERO:\n yield num\n remainder += 2\n num += 1\n else:\n num += (self.width >> 1)\n quotient += 1",
"def test_generator_expressions() -> None:\n data = \"golf\"\n # This generator expression is essentially the same as the reverse() function above.\n reverse_generator: Generator[str, None, None] = (\n data[i] for i in range(len(data) - 1, -1, -1)\n )\n\n assert next(reverse_generator) == \"f\"\n assert next(reverse_generator) == \"l\"\n assert next(reverse_generator) == \"o\"\n assert next(reverse_generator) == \"g\"\n\n with pytest.raises(StopIteration):\n next(reverse_generator)",
"def very_simple():\n yield 1",
"def test_generator_send() -> None:\n\n def reverse_send() -> Generator[int, int, str]:\n \"\"\"Return a generator that produces `int`s in descending order.\"\"\"\n start = yield 0\n for index in range(start, -1, -1):\n yield index\n\n return \"done\"\n\n reversed_int: List[int] = []\n\n generator = reverse_send()\n # The first `send()` to start the generator must be a `None`.\n assert generator.send(None) == 0\n\n reversed_int.append(generator.send(3))\n for num in generator:\n reversed_int.append(num)\n\n assert reversed_int == [3, 2, 1, 0]",
"def test_generator_continuous():\n RANGE_MAX = 100\n prev_value = RANGE_MAX // 2\n for msg in it.islice(generate_msgs(0, RANGE_MAX), 0, 42):\n curr_value = Message.parse(msg).power\n assert curr_value - prev_value <= 1\n prev_value = curr_value",
"def test_normal_basic():\r\n yield check_normal_basic, False\r\n yield check_normal_basic, False, True\r\n yield check_normal_basic, True",
"def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2",
"def testExplicitGeneratorUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.g(8, 9)\n\t\tc.generator()\n\t\tc.setReturn(10)\n\t\tc.setReturn(11)\n\t\tc.replay()\n\t\tself.failUnless([k for k in x.g(8, 9)] == [10, 11])",
"def testExplicitGeneratorConvenienceFunctionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10, 11])\n\t\tc.replay()\n\t\tself.failUnless([k for k in x.g(8, 9)] == [10, 11])",
"def test_yield_in_const_conditional_true():\n if True:\n print((yield 1))",
"def test_generator_downward(narrow_power_range):\n with patch('random.randint', side_effect=lambda a,b: -1):\n range_min, range_max = narrow_power_range\n for msg in it.islice(generate_msgs(range_min, range_max), 0, 5):\n pass\n power = Message.parse(msg).power\n assert power == range_min",
"def Next():\n return CheckForError(lib.Generators_Get_Next())",
"def test_zero_different(self):\n cant_interations = int(random()*100)\n for i in range(cant_interations):\n number = self.sudoku.zero_different() # Generate number\n self.assertGreaterEqual(number, 1) # number >= 1\n self.assertLessEqual(number, 9) # number <= 9",
"def test_last_is_zero(self):\n countdown = [x for x in generators.countdown(10)]\n self.assertEqual(countdown[::-1][0], 0)",
"def sampler(self) -> Generator[int, None, None]:\n remaining = self.num_samples\n if self.shuffled:\n while remaining > 0:\n n = min(remaining, len(self.data_source))\n for idx in torch.randperm(len(self.data_source))[0:n]:\n yield int(idx)\n remaining -= n\n else:\n current_idx = None\n while remaining > 0:\n if current_idx is None or current_idx >= len(self.data_source):\n current_idx = 0\n yield current_idx\n current_idx += 1\n remaining -= 1"
] | [
"0.65620244",
"0.65434223",
"0.6286427",
"0.6189783",
"0.6189389",
"0.61437476",
"0.60891944",
"0.60622066",
"0.60527146",
"0.60308594",
"0.6026977",
"0.60156935",
"0.59985435",
"0.5989036",
"0.59800375",
"0.5966629",
"0.5911112",
"0.58879185",
"0.58549297",
"0.5843811",
"0.5839204",
"0.5801689",
"0.5801161",
"0.5771167",
"0.57708144",
"0.5763378",
"0.57214177",
"0.57213366",
"0.5693391",
"0.56861705"
] | 0.6773308 | 0 |
test that the generator yields the correct output when indexing from one | def test_generation_index_one(self):
generator = math_helpers.triangle_number_generator(1)
first_ten_triangle_numbers = [next(generator) for _ in range(10)]
canonical_values = [1, 3, 6, 10, 15, 21, 28, 36, 45, 55]
self.assertEqual(canonical_values, first_ten_triangle_numbers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_generator_method(self):\n for i in range(0, 4):\n yield self.try_odd, i",
"def test_testGenerator():\n\n # check type\n assert isinstance(testset, list)\n\n # check the shape\n assert len(testset)==newObs.shape[0]",
"def test_func_generator_transplant():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i",
"def test_func_generator():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i",
"def testGetSequence():\r\n\t\r\n\t#a few of hand-tested genome positions\r\n\ttest_data = [\t('1',500,520,'GTCTGACCTGAGGAGAACTGT'),\r\n\t\t\t\t\t('2',500,520,'CCCGACCCCGACCCCGACCCA'),\r\n\t\t\t\t\t('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),\r\n\t\t\t\t\t('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),\r\n\t\t\t\t\t('5',100000,100020,'AATGTTCACCAGTATATTTTA'),\r\n\t\t\t\t\t('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),\r\n\t\t\t\t\t('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]\r\n\t\t\t\t\t\r\n\tfor this_check in test_data:\r\n\t\tyield CheckGetSequence, this_check",
"def test_generator_inline(self):\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i",
"def test_generator_manual() -> None:\n reversed_int: List[int] = []\n\n generator = reverse([1, 2, 3])\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n\n with pytest.raises(StopIteration):\n next(generator)\n\n assert reversed_int == [3, 2, 1]",
"def test_yield_value(self):\n msg = 'Must be an iterable which yield sequences in order.'\n examples = (\n [\n 'Hello',\n 'World',\n 'Hello World',\n ],\n [\n 'Mario use Kimura Lock on Luigi, and Luigi tap out.',\n 'Mario use Superman Punch.',\n 'Luigi get TKO.',\n 'Toad and Toadette are fightting over mushroom (weed).',\n ],\n [''],\n [],\n )\n\n for batch_sequences in examples:\n dataset = LanguageModelDataset(batch_sequences=batch_sequences)\n self.assertIsInstance(dataset, Iterable, msg=msg)\n\n for ans_sequence, sequence in zip(batch_sequences, dataset):\n self.assertIsInstance(sequence, str, msg=msg)\n self.assertEqual(sequence, ans_sequence, msg=msg)",
"def test_assert_iterator(self):\n iterator = iter([1,2,3,4])\n # Should pass\n self.assert_iterator(iterator,\n count=4,\n assert_item_function=lambda i: i>0)",
"def testExplicitGeneratorUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tx.g(8, 9)\n\t\tc.generator()\n\t\tc.setReturn(10)\n\t\tc.setReturn(11)\n\t\tc.replay()\n\t\tself.failUnless([k for k in x.g(8, 9)] == [10, 11])",
"def test_iter(\n self, start: Result[int, int], exp: t.Tuple[int, ...]\n ) -> None:\n assert tuple(start.iter()) == exp",
"def test_normal_basic():\r\n yield check_normal_basic, False\r\n yield check_normal_basic, False, True\r\n yield check_normal_basic, True",
"def testExplicitGeneratorConvenienceFunctionUsage(self):\n\t\tc = Controller()\n\t\tx = c.mock()\n\t\tc.generator(x.g(8, 9), [10, 11])\n\t\tc.replay()\n\t\tself.failUnless([k for k in x.g(8, 9)] == [10, 11])",
"def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2",
"def test_generator1(self):\n xpb = XPathBuilder()\n xp = xpb.foo\n xp = xp.bar\n xp = xp.baz[xpb.attr('x') == 'y']\n xp = xp[1]\n exp = '/foo/bar/baz[@x = \"y\"][1]'\n self.assertEqual(xp.tostring(), exp)",
"def generator(self):\n return [None, 1]",
"def test_generator_expressions() -> None:\n data = \"golf\"\n # This generator expression is essentially the same as the reverse() function above.\n reverse_generator: Generator[str, None, None] = (\n data[i] for i in range(len(data) - 1, -1, -1)\n )\n\n assert next(reverse_generator) == \"f\"\n assert next(reverse_generator) == \"l\"\n assert next(reverse_generator) == \"o\"\n assert next(reverse_generator) == \"g\"\n\n with pytest.raises(StopIteration):\n next(reverse_generator)",
"def _test_generator(get_output, get_expected, input_filename, **options):\n def test(self):\n output_docs, output_errors = get_output(input_filename, **options)\n expect_docs, expect_errors = get_expected(input_filename, **options)\n\n self.assertEqual(expect_docs, output_docs)\n self.assertEqual(expect_errors, output_errors)\n\n return test",
"def test_generation_index_zero(self):\n generator = math_helpers.triangle_number_generator()\n first_eleven_triangle_numbers = [next(generator) for _ in range(11)]\n canonical_values = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55]\n self.assertEqual(canonical_values, first_eleven_triangle_numbers)",
"def Next():\n return CheckForError(lib.Generators_Get_Next())",
"def test_uniform_basic():\r\n yield check_uniform_basic, False\r\n yield check_uniform_basic, False, True\r\n yield check_uniform_basic, True",
"def test_nested_yield():\n yield (yield (yield 1))",
"def test_random_generator(self):\n gen = random_data()\n data = [next(gen) for _ in range(100)]\n self.assertEqual(len(data), 100)",
"def test_generator_method_name(self):\n for i in range(0, 4):\n yield 'try_odd', i",
"def very_simple():\n yield 1",
"def test_consecutive_queries_yield_different_individual_items(test_store):\n queried = next(test_store.get_by(name=\"Andy\"))\n other = next(test_store.get_by(name=\"Andy\"))\n\n assert queried is not other\n assert queried == other",
"def simple():\n yield 1\n yield 2\n yield 3",
"def test_yield_in_const_conditional_true():\n if True:\n print((yield 1))",
"def testGeneratorType(self):",
"def _test_pairs(self, idx0, idx1):\n pass"
] | [
"0.67612666",
"0.6540647",
"0.6347304",
"0.63265353",
"0.6272674",
"0.6268136",
"0.6202047",
"0.6105433",
"0.6067533",
"0.60582614",
"0.6054167",
"0.60507053",
"0.60323983",
"0.6031629",
"0.6027344",
"0.60162985",
"0.60071313",
"0.6004916",
"0.5967041",
"0.59528667",
"0.5938355",
"0.5933203",
"0.5922203",
"0.5910497",
"0.5898676",
"0.5860578",
"0.5854441",
"0.5852251",
"0.5829854",
"0.58125263"
] | 0.66759306 | 1 |
Get login manager names | def getTimekprLoginManagers():
global _loginManagers
return(_loginManagers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_managers():\n return {'managers': get_users('managers')}",
"def get_system_managers(only_name: bool = False) -> list[str]:\n\tHasRole = DocType(\"Has Role\")\n\tUser = DocType(\"User\")\n\n\tif only_name:\n\t\tfields = [User.name]\n\telse:\n\t\tfields = [User.full_name, User.name]\n\n\tsystem_managers = (\n\t\tfrappe.qb.from_(User)\n\t\t.join(HasRole)\n\t\t.on(HasRole.parent == User.name)\n\t\t.where(\n\t\t\t(HasRole.parenttype == \"User\")\n\t\t\t& (User.enabled == 1)\n\t\t\t& (HasRole.role == \"System Manager\")\n\t\t\t& (User.docstatus < 2)\n\t\t\t& (User.name.notin(frappe.STANDARD_USERS))\n\t\t)\n\t\t.select(*fields)\n\t\t.orderby(User.creation, order=Order.desc)\n\t\t.run(as_dict=True)\n\t)\n\n\tif only_name:\n\t\treturn [p.name for p in system_managers]\n\telse:\n\t\treturn [formataddr((p.full_name, p.name)) for p in system_managers]",
"def list():\n rino.login.list()",
"def get_managers_list(self):\n try:\n role_id = [x[0] for x in self.db_handler.get_roles_list() if x[1] == 'Менеджер'][0]\n staff_by_role = self.db_handler.get_all_staff_by_role_id(role_id)\n\n self.logger.write_to_log('managers list got', 'model')\n\n return staff_by_role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')",
"def nodeNames(self):\n if self.role == Roles.ACTIVE or self.role == Roles.PASSIVE:\n return Backend().configuration.getNodeNames()\n else:\n return [self.node, \"system-manager\"]",
"def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users",
"def login_registry(self):\n status = []\n for name, container in self.containers.items():\n result = container.daemon.login()\n status.append(result)\n return status",
"def manager_info(self, manager_cn):\n manager_login = manager_cn[4:manager_cn.find(\",\")]\n manager_info = self.locate_user(manager_login)\n try:\n return manager_info[0][1]['cn'][0] + \\\n \" (\" + manager_info[0][1]['uid'][0] + \")\"\n except IndexError:\n return \"(LDAP record removed) %s\" % manager_cn",
"def manager_agents(self):\n return self.get(\"manager_agents\")",
"def users(self):\n from sagas.ofbiz.entities import OfEntity as e, oc\n rs=e().allUserLogin()\n for r in rs:\n print(r['userLoginId'])",
"def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]",
"def getUsers(self):\n return [u[0] for u in pwd.getpwall()\n if (u[5].startswith('/home/') and u[6].endswith('sh'))]",
"def get_personnel():\r\n if len(man) == 0:\r\n print(\"There are no managers\")\r\n else:\r\n for i in man:\r\n print(str(i))",
"def manager_info(self, manager):\n _, body = self.request('/v1.1/managers/active/%s' % manager, 'GET')\n return body",
"def get_users_admins_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .all()\n )\n return users",
"def teammates_player_names(self):\n return [p.name for p in self.teammates]",
"def search_session_providers(name: str) -> List[str]:\n from renku.core.plugin.session import get_supported_session_providers\n\n name = name.lower()\n return [p.name for p in get_supported_session_providers() if p.name.lower().startswith(name)]",
"def get_admins(self):\n from Employee import Employee\n admins = list()\n cursorRoles = self.dbconnect.get_cursor()\n cursorRoles.execute('select * from employeeRoles where role=\\'admin\\'')\n for row in cursorRoles:\n admins.append(self.get_employee(row[0]))\n return admins",
"def namelist(self):\n return self._handle.namelist()",
"def namelist(self):\n return self._handle.namelist()",
"def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]",
"def name(self):\n names = []\n for logger in self._loggers:\n names.append(logger.name)\n return names",
"def getLoginDetails(self):\n loginDetails = [self._v2, self._v3]\n return loginDetails",
"def get_local_admins():\n admin_list = get_users_config()\n response = []\n\n if \"users\" not in admin_list[\"result\"]:\n return response\n\n if isinstance(admin_list[\"result\"][\"users\"][\"entry\"], list):\n for entry in admin_list[\"result\"][\"users\"][\"entry\"]:\n response.append(entry[\"name\"])\n else:\n response.append(admin_list[\"result\"][\"users\"][\"entry\"][\"name\"])\n\n return response",
"def get_manager_info(handle, timeout):\n mgr_info = dict()\n mgr_info['ls-modules'] = ceph_mon_command(handle, 'mgr module ls', timeout)\n mgr_info['dump'] = ceph_mon_command(handle, 'mgr dump' , timeout)\n mgr_info['metadata'] = ceph_mon_command(handle, 'mgr metadata' , timeout)\n return mgr_info",
"def get_admins(name):\n obj = DataService.objects(name=name).first()\n if obj is None:\n return []\n return list(obj.admins)",
"def manager_active_list(self):\n _, body = self.request('/v1.1/managers/active', 'GET')\n return body",
"def getLogFileNames():\r\n return [\"Server1.txt\", \"Server2.txt\", \"Client1.txt\", \"Client2.txt\"]",
"def log_in(codecool):\n\n login = school_view.get_login()\n password = school_view.get_password()\n\n password = utilities.hash_password(password)\n\n users = codecool.managers_list + codecool.administrators_list + codecool.mentors_list + codecool.students_list\n for user in users:\n if user.login == login and user.password == password:\n return user",
"def returnIdLogin(self):\r\n self.cursor.execute(\"SELECT USUARIO FROM LOGIN;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []"
] | [
"0.65457773",
"0.6402471",
"0.62186337",
"0.618175",
"0.6157624",
"0.5922514",
"0.58962244",
"0.5892551",
"0.5839041",
"0.58095676",
"0.5780388",
"0.5710504",
"0.5697233",
"0.5679872",
"0.5607318",
"0.551856",
"0.5480489",
"0.5459577",
"0.54572856",
"0.54572856",
"0.5455005",
"0.54395163",
"0.5436212",
"0.54352",
"0.54329175",
"0.5431046",
"0.5414461",
"0.5386835",
"0.53867525",
"0.53810763"
] | 0.7099838 | 0 |
Initialize all users present in the system as per particular config | def checkAndInitUsers(self):
# config
users = {}
# iterate through all usernames
for rUser in pwd.getpwall():
# check userid
if rUser.pw_uid is not None and rUser.pw_uid != "" and not ("/nologin" in rUser.pw_shell or "/false" in rUser.pw_shell):
# save our user, if it mactches
if verifyNormalUserID(rUser.pw_uid):
# get processed usernames
userFName = getNormalizedUserNames(pUser=rUser)[1]
# save ()
users[rUser.pw_name] = [rUser.pw_uid, userFName]
# get user config
timekprConfigManager = timekprConfig()
# load user config
timekprConfigManager.loadMainConfiguration()
# go through our users
for rUser in users:
# get path of file
file = os.path.join(timekprConfigManager.getTimekprConfigDir(), cons.TK_USER_CONFIG_FILE % (rUser))
# check if we have config for them
if not os.path.isfile(file):
log.log(cons.TK_LOG_LEVEL_INFO, "setting up user \"%s\" with id %i" % (rUser, users[rUser][0]))
# user config
timekprUserConfig(timekprConfigManager.getTimekprConfigDir(), rUser).initUserConfiguration()
# user control
timekprUserControl(timekprConfigManager.getTimekprWorkDir(), rUser).initUserControl()
log.log(cons.TK_LOG_LEVEL_DEBUG, "finishing setting up users")
# user list
return users | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init():\n create_user(app)\n get_all_user()",
"def init_default_users():\n from flask import current_app as app\n with app.app_context():\n notion_uname = app.config.get(\"NOTION_CRONJOB_USERNAME\")\n notion_passwd = app.config.get(\"NOTION_CRONJOB_PASSWORD\")\n\n if notion_uname and notion_passwd:\n try:\n User.createOne(\n username=notion_uname,\n password=notion_passwd\n )\n except NotUniqueError:\n app.logger.info(\"Notion Job User already exists!\")\n except Exception as err:\n app.logger.error(\"Notion Job User was not created!\", err)\n else:\n app.logger.info(\"Created Notion Job User Successfully!\")",
"def _initialize_users():\n if not USER_ACCOUNTS_PATH.exists():\n raise FileNotFoundError()\n\n with open(str(USER_ACCOUNTS_PATH)) as f:\n user_accounts = json.load(f)\n\n conn, c = _get_db_connection()\n\n c.execute('''DELETE FROM user''')\n\n for user in user_accounts['accounts']:\n c.execute(\"\"\"INSERT INTO user VALUES (?, ?)\"\"\", (user['email'], user['password']))\n\n conn.commit()",
"def create_users(self):\n if self.gl is None:\n print(\"No config found, please run connect first.\")\n exit(1)\n else:\n print(\"Starting Users creation.\")\n gl = self.gl\n config = self.config\n for username in config[\"users\"]:\n i = 0\n count = int(config[\"users\"][username][\"count\"])\n pw = config[\"users\"][username][\"pass\"]\n groups = config[\"users\"][username][\"groups\"]\n while i < count:\n i += 1\n print(\"creating user: \" + username + '-' + str(i) + \" ...\", end=' ')\n user = gl.users.create({'email': username + str(i) + '@example.com',\n 'password': pw,\n 'username': username + '-' + str(i),\n 'name': username + '-' + str(i),\n 'skip_confirmation': True})\n self.users.append(user)\n self.usergroups[user.id] = groups\n print(\"done.\")\n print(\"All Users created!\")",
"def test_0000_initiate_users( self ):\n self.login( email=common.test_user_1_email, username=common.test_user_1_name )\n test_user_1 = self.test_db_util.get_user( common.test_user_1_email )\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role( test_user_1 )\n self.login( email=common.admin_email, username=common.admin_username )\n admin_user = self.test_db_util.get_user( common.admin_email )\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role( admin_user )",
"def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.login(email=common.admin_email, username=common.admin_username)\n self.galaxy_login(email=common.admin_email, username=common.admin_username)",
"def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)",
"def test_0000_initiate_users(self):\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n test_user_1 = self.test_db_util.get_user(common.test_user_1_email)\n assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email\n self.test_db_util.get_private_role(test_user_1)\n self.login(email=common.admin_email, username=common.admin_username)\n admin_user = self.test_db_util.get_user(common.admin_email)\n assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email\n self.test_db_util.get_private_role(admin_user)",
"def init_config():\n global udata\n udata = UserConfig()",
"def load_users(self):\n for user_type in self.user_types:\n url_string = \"%s_url\" % user_type\n try:\n url = self.lookup(url_string)\n users = self._fetcher.get_entities(url)\n except AttributeError as ate:\n logger.err(str(ate))\n continue\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def load_users(self):\n for user_type in self.user_types:\n url = \"%s_url\" % user_type\n try:\n self.lookup(url)\n except AttributeError:\n continue\n users = self._fetcher.get_entities(self.lookup(url))\n user_list = []\n for user in users:\n if 'username' in user:\n user_list.append({'name': user['username']})\n if len(user_list) > 0:\n setattr(self, user_type, user_list)",
"def setusers(self, users=None):\n if users:\n self.users = users\n return\n import jsb.lib.users as u\n if not u.users: u.users_boot()\n self.users = u.users",
"def _fill_user_entries(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n # For every enabled verification parameter, set its value in its corresponding entry.\n for param in self.verify_params.enabled:\n self._fill_user_entry(self.computer, param)",
"def insert_default_users():\n user1 = User(email=current_app.config['ADMIN_EMAIL'],\n password=current_app.config['ADMIN_PW'],\n first_name=current_app.config['ADMIN_FIRST_NAME'],\n last_name=current_app.config['ADMIN_LAST_NAME'],\n confirmed=True)\n user1.role = Role.query.filter_by(name='Administrator').first()\n db.session.add(user1)\n\n user2 = User(email=current_app.config['USERMANAGER_EMAIL'],\n password=current_app.config['USERMANAGER_PW'],\n first_name=current_app.config['USERMANAGER_FIRST_NAME'],\n last_name=current_app.config['USERMANAGER_LAST_NAME'],\n confirmed=True)\n user2.role = Role.query.filter_by(name='Usermanager').first()\n db.session.add(user2)\n\n user3 = User(email=current_app.config['USER_EMAIL'],\n password=current_app.config['USER_PW'],\n first_name=current_app.config['USER_FIRST_NAME'],\n last_name=current_app.config['USER_LAST_NAME'],\n confirmed=True)\n user3.role = Role.query.filter_by(name='User').first()\n db.session.add(user3)\n\n db.session.commit()",
"def populate_users(self):\n synced = 0\n for user_dn, ldap_dict in self._get_users():\n _truncate('sn', 'last_name', ldap_dict)\n user = self._create_or_update_user(user_dn, ldap_dict)\n self.nested_groups.handle(user)\n synced += 1\n return synced",
"def setUp(self):\n self.users = [UserFactory.create() for i in range(5)]",
"def setUp(self):\n\n # Allocates users\n self.users = []\n self.user_session_tokens = []\n\n # Template for creating users\n user_template = {\n \"clientId\": 2,\n \"username\": \"user\",\n \"pwd\": \"password\",\n \"nameLast\": \"Last\",\n \"nameFirst\": \"First\",\n \"email\": \"[email protected]\",\n \"phone\": \"123-4567\",\n \"profile_picture_path\": \"/\",\n \"timezoneDefault\": \"EST\",\n \"languageDefault\": \"English\"\n }\n\n # Creates 'n' users and stores them\n n = 3\n for i in range(0, n):\n user = deepcopy(user_template)\n user['username'] += randstr()\n user['email'] += randstr()\n handler.user_create(event=user, context=None)\n self.users.append(user)\n self.user_session_tokens.append(None)",
"def generate_users(config: Config):\n users_by_id = {}\n users_by_alternative_id = {}\n for user_data in config.users:\n alternative_id = secrets.token_hex()\n user = User(user_data[\"user_id\"], user_data[\"password_hash\"], alternative_id)\n users_by_id[user.id] = user\n users_by_alternative_id[user.alternative_id] = user\n return users_by_id, users_by_alternative_id",
"def init_data_for_users(db_data):\n users = db_data.get('user')\n if users is not None:\n rows = users.get('data')\n for row in rows:\n user = User(name=row[0], password=generate_password_hash(row[1]))\n db_add_and_commit(db, user)",
"def setUp(self):\n self.new_users = User('Dennis', 'Kiplangat', 'kiplangat18')",
"def load_users(everyone):\n if user_list.loaded:\n return\n for user in iteritems(everyone):\n user_list.load(user[1])",
"def get_all_users():",
"def setUpTestUsers(self) -> None:\n self.password = \"thisisasecret\"\n self.other = get_user_model().objects.create_user(\"other\", password=self.password)\n self.user = get_user_model().objects.create_user(\"user\", password=self.password)\n self.admin = get_user_model().objects.create_superuser(\"admin\", password=self.password)\n self.anonymous = AnonymousUser()",
"def user_init(self):\n pass",
"def configure_users_for_client(ctx, config, client, everywhere=False):\n log.info('Configuring users...')\n log.info('for client %s', client)\n log.info('everywhere %s', everywhere)\n\n # For data sync the master zones and regions must have the\n # system users of the secondary zones. To keep this simple,\n # just create the system users on every client if regions are\n # configured.\n clients_to_create_as = [client]\n if everywhere:\n clients_to_create_as = list(config.keys())\n\n # extract the user info and append it to the payload tuple for the given\n # client\n for client, c_config in config.items():\n if not c_config:\n continue\n user_info = extract_user_info(c_config)\n if not user_info:\n continue\n\n for client_name in clients_to_create_as:\n log.debug('Creating user {user} on {client}'.format(\n user=user_info['system_key']['user'], client=client_name))\n rgwadmin(ctx, client_name,\n cmd=[\n 'user', 'create',\n '--uid', user_info['system_key']['user'],\n '--access-key', user_info['system_key']['access_key'],\n '--secret', user_info['system_key']['secret_key'],\n '--display-name', user_info['system_key']['user'],\n '--system',\n ],\n check_status=True,\n )\n yield",
"def test_initial_share_all_users(self) -> None:\n self.handler.search_all_users = True\n self.hs.config.userdirectory.user_directory_search_all_users = True\n\n u1 = self.register_user(\"user1\", \"pass\")\n self.register_user(\"user2\", \"pass\")\n u3 = self.register_user(\"user3\", \"pass\")\n\n shares_private = self.get_success(\n self.user_dir_helper.get_users_who_share_private_rooms()\n )\n public_users = self.get_success(\n self.user_dir_helper.get_users_in_public_rooms()\n )\n\n # No users share rooms\n self.assertEqual(public_users, set())\n self.assertEqual(shares_private, set())\n\n # Despite not sharing a room, search_all_users means we get a search\n # result.\n s = self.get_success(self.handler.search_users(u1, u3, 10))\n self.assertEqual(len(s[\"results\"]), 1)\n\n # We can find the other two users\n s = self.get_success(self.handler.search_users(u1, \"user\", 10))\n self.assertEqual(len(s[\"results\"]), 2)\n\n # Registering a user and then searching for them works.\n u4 = self.register_user(\"user4\", \"pass\")\n s = self.get_success(self.handler.search_users(u1, u4, 10))\n self.assertEqual(len(s[\"results\"]), 1)",
"async def statinit(client):\n conn = client.bot.dbs[client.server_tag]\n print(('Initializing stat columns in \\'users\\''\n f' in /persist/db/{client.server_tag}.db...'))\n for attr in usr_attributes:\n db.add_column(conn, 'users', attr)\n db.ccache()\n print('User stat initialization complete.')",
"def load_users():\n\n \n\n User.query.delete()\n\n with open(\"seed_data/seed_users.psv\") as users:\n for row in users:\n username, fname, lname, email, password, user_role = row.strip().split(\"|\")\n\n user = User(username=username,\n fname=fname,\n lname=lname,\n email=email,\n password=generate_password_hash(password),\n user_role=user_role)\n\n db.session.add(user)\n\n db.session.commit()",
"def atlas_users():\n pass",
"def _create_users(self):\r\n users = []\r\n for i in range(8):\r\n username = \"user{}\".format(i)\r\n email = \"test+user{}@edx.org\".format(i)\r\n user = User.objects.create_user(username, email, 'foo')\r\n user.is_active = True\r\n user.save()\r\n users.append(user)\r\n return users"
] | [
"0.7886939",
"0.71261805",
"0.7068532",
"0.6840538",
"0.6811883",
"0.6785962",
"0.67232174",
"0.67232174",
"0.6675994",
"0.6618686",
"0.66058826",
"0.66042155",
"0.65231067",
"0.6509159",
"0.65031755",
"0.64593905",
"0.6435049",
"0.64212024",
"0.64208126",
"0.6414819",
"0.64018977",
"0.6311287",
"0.6301321",
"0.62980765",
"0.6248342",
"0.6221875",
"0.61855364",
"0.61715144",
"0.6167449",
"0.6146259"
] | 0.7862209 | 1 |
Gets or creates directory for logs | def init_logs_directory(self):
return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setup_log_dir():\n log_dir = get_log_dir()\n if log_dir.endswith('latest'):\n shutil.rmtree(log_dir, ignore_errors=True)\n mkdirs(log_dir)\n return log_dir",
"def logs_directory(self):",
"def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])",
"def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir",
"def new_custom_log_dir(self) -> str:",
"def get_logdir(self):\n return self.event_writer.get_logdir()",
"def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname",
"def log_directory(self):\n\n return self.get_raw(\"log_directory\")",
"def _make_log_dir(self, path):\n\n try:\n os.makedirs('/'.join([self._logpath, path]))\n except OSError, e:\n # Return True if dir already exists\n if e.args[0] is 17:\n return\n\n # Some other error; raise exception\n raise e\n\n return",
"def __init_log_folder():\n try:\n os.makedirs(Logger.__log_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise e",
"def _create_logdir(self, job_id):\n job_logdir = os.path.join(self.log_dir, self.alias, job_id)\n if not os.path.isdir(job_logdir):\n self.logger.debug(\"creating log directory '%s'\" % job_logdir)\n os.makedirs(job_logdir)\n\n return job_logdir",
"def get_logging_dir(self):\n return self.logging_dir",
"def CreateLoggingDirectories(\n dataset_root: Path, model_name: str, analysis: str, run_id: str = None\n) -> Path:\n run_id = run_id or time.strftime(\"%y:%m:%dT%H:%M:%S\")\n log_dir = dataset_root / \"logs\" / model_name / analysis / run_id\n if log_dir.is_dir():\n raise OSError(\n f\"Logs directory already exists. Refusing to overwrite: {log_dir}\"\n )\n logging.info(\"Writing logs to %s\", log_dir)\n log_dir.mkdir(parents=True)\n (log_dir / \"epochs\").mkdir()\n (log_dir / \"checkpoints\").mkdir()\n (log_dir / \"graph_loader\").mkdir()\n return log_dir",
"def get_log_directory(self):\n\n return self.__config_parser__.get('SETTINGS', 'LOGFILE_DIRECTORY')",
"def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())",
"def createTargetDir():\n sLogDir = getConfig('system', 'log_dir')\n if not os.path.isdir(sLogDir):\n usageMsg(\"log directory does not exist: \" + sLogDir)\n\n # Target directory combines all pieces including the date/time\n sTargetDir = \"%s/%s\" % (sLogDir, getDates()['gmt'].strftime(\"%Y_%m_%d-%H_%M_%S_%Z\"))\n\n try:\n os.makedirs(sTargetDir)\n except OSError:\n errorMsg(\"unable to create target directory: \" + sTargetDir)\n return sTargetDir",
"def create_dirs():\n os.makedirs(ORIGINAL_LOG_DIR, exist_ok=True)",
"def createDirectories(self):\n # -- LOG\n thepath = os.path.dirname(self.settings.logfile)\n distutils.dir_util.mkpath(thepath)\n\n # -- SESSION \n thepath = self.settings.sessionpath\n distutils.dir_util.mkpath(thepath)\n\n # -- DATABASE\n thepath = self.settings.dbpath\n distutils.dir_util.mkpath(thepath)",
"def logdir(self) -> str:\n return self._logdir",
"def get_log_folder(cls, test_suite_name):\n if not test_suite_name:\n test_suite_name = os.path.splitext(os.path.basename(sys.modules['__main__'].__file__))[0]\n sdk_path = cls.get_sdk_path()\n log_folder = os.path.join(sdk_path, \"TEST_LOGS\",\n test_suite_name +\n time.strftime(\"_%m%d_%H_%M_%S\", time.localtime(LOG_FOLDER_TIMESTAMP)))\n if not os.path.exists(log_folder):\n os.makedirs(log_folder)\n return log_folder",
"def logdir(self) -> Path:\n assert (\n self._logdir\n ), \"Log provider has not been tied to a SummaryWriter yet\"\n return self._logdir",
"def log_dir():\r\n if LogOptions._LOG_DIR is None:\r\n LogOptions._LOG_DIR = app.get_options().twitter_common_log_log_dir\r\n return LogOptions._LOG_DIR",
"def pytest_logger_logsdir(self, config):",
"def setup_logdir(self, default_logdir: Union[str, Path]) -> Path:\n self._default_logdir = Path(default_logdir)\n\n if self._create_logdir:\n self.logdir_path.mkdir(parents=True, exist_ok=True)\n\n if not self.logdir_path.is_dir():\n raise ValueError(f\"logdir '{self.logdir_path}' must be a directory.\")\n\n return self.logdir_path",
"def outdir_str(d):\n f = folder_str(d)\n logs_dir = os.path.join(f, 'logs')\n try:\n if not os.path.exists(logs_dir):\n os.makedirs(logs_dir)\n except OSError:\n raise argparse.ArgumentTypeError('could not create \"%s\" directory' % logs_dir)\n return f",
"def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)",
"def createLogFolders():\n os.chdir(\"ARCHIVES\")\n logFolder = datetime.datetime.now().strftime(\"ARCHIVE_%d_%b_%Y_%H_%M_%S_0\")\n while logFolder in os.listdir():\n split = logFolder.split('_')\n curIndex = int(split[7])\n nextIndex = curIndex + 1\n split[7] = str(nextIndex)\n logFolder = '_'.join(split)\n os.mkdir(logFolder)\n os.chdir(logFolder)\n os.mkdir(\"Premigration\")\n os.mkdir(\"Migration\")\n os.mkdir(\"Postmigration\")\n os.mkdir(\"Other\")\n print(\"Storing All Logs in ARCHIVES/%s\"%logFolder)\n globs.ARCHIVEFOLDER = os.getcwd()\n os.chdir(globs.PROGDIR)",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")",
"def logdir(self):\n return osp.join('runs/', self.net_name, '')"
] | [
"0.7976153",
"0.7894984",
"0.7791716",
"0.7596984",
"0.75095797",
"0.74462134",
"0.7382162",
"0.73279244",
"0.72530645",
"0.71706456",
"0.71434605",
"0.7124528",
"0.7102195",
"0.70760244",
"0.7049274",
"0.704064",
"0.6969299",
"0.69350225",
"0.69172055",
"0.6914677",
"0.690569",
"0.6905316",
"0.6900001",
"0.68868303",
"0.68237036",
"0.67997146",
"0.6736116",
"0.66845673",
"0.658322",
"0.65813667"
] | 0.8063092 | 0 |
Gets or creates path to error log files | def init_error_files(self):
dir_path = self.init_logs_directory()
log_errors = self.join_path(dir_path, PATH_FOR_LOG_ERRORS)
return log_errors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stderr_path(self):\n return self.log_path\n # return self.path / 'stderr.txt'",
"def errorpath():\n stdoutfile=pdbid()+\".error.log\"\n stdout = os.path.join(output_dir(), stdoutfile)\n\n return stdout",
"def get_log_file_path(self):\n dir_path = self._get_log_file_dir()\n self._check_make_dirs(dir_path)\n return join(dir_path, self.LOG_FILE_NAME)",
"def get_error_file(self):\n pass",
"def get_log_path():\n return LOG_PATH",
"def getLogPath():\n pwd = os.path.dirname(os.path.abspath(__file__))\n log_file = os.path.join(pwd, 'log.txt')\n\n return log_file",
"def WriteErrorsToFile():\n if(not __errorsTracked__ is None):\n if(len(__errorsTracked__)>0):\n formattedLogName = '_'.join[\"ErrorLog\",\"GarageChecker\",datetime.date,datetime.time]\n WriteToFile(formattedLogName,__errorsTracked__)\n __errorsTracked__ = []",
"def error_log(self):\n if not self._error_log_text:\n self._error_log_text = self._cat('/tmp/errors')\n return self._error_log_text",
"def errpath(self):\n return None",
"def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")",
"def log_path(self):\n return LOGS_RESOURCES_PATH / (self.daemon_id + '.log')",
"def get_log_path():\n forch_log_dir = os.getenv('FORCH_LOG_DIR')\n if not forch_log_dir:\n return None\n return os.path.join(forch_log_dir, 'forch.log')",
"def new_custom_log_dir(self) -> str:",
"def errfile(self):\n\n return f\"{self.name}.err.out\"",
"def get_log_file():\n log_file = os.getenv(\"LOG_FILE\", \"\")\n if log_file != \"\":\n return log_file\n return os.path.dirname(os.path.abspath(__file__)) + \"/server.log\"",
"def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file",
"def log_path(self):\n return os.path.join(self._sandbox, 'log')",
"def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)",
"def init_log_files(self): \n \n dir_path = self.init_logs_directory()\n log_files = self.join_path(dir_path, PATH_FOR_LOG_FILES)\n \n return log_files",
"def _get_log_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".log.txt\")",
"def get_log_file(self):\n return self.log_file.read_text(errors=\"backslashreplace\")",
"def logs_directory(self):",
"def find_logs():\n dirname = os.path.normpath('./logs')\n d = 1\n\n while d < 5:\n if os.path.exists(dirname):\n return os.path.normpath(dirname)\n d += 1\n dirname = os.path.join('../', dirname)\n\n return dirname",
"def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE",
"def compute_log_file_paths(self, basename):\n log_file_template = os.path.join(self.output_folder, self.sample_name + '_{}.log')\n error_file_template = os.path.join(self.output_folder, self.sample_name + '_{}.err')\n log_file_path = log_file_template.format(basename)\n error_file_path = error_file_template.format(basename)\n return log_file_path, error_file_path",
"def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])",
"def set_error(self, error):\n if self.log_file_exist(self.file_path_name):\n logging.error(error)\n else:\n print \"The log \"+ self.name_log + \"does not exist in the directory\"",
"def get_log_filepath(config: configs.Config) -> str:\n return os.path.join(config.model_training.dir_out, _DEFAULT_FILENAME_LOG)",
"def getLogFile(self):\r\n return LOG.getLogFile().name",
"def setup_log_dir():\n log_dir = get_log_dir()\n if log_dir.endswith('latest'):\n shutil.rmtree(log_dir, ignore_errors=True)\n mkdirs(log_dir)\n return log_dir"
] | [
"0.7527874",
"0.73948807",
"0.702239",
"0.68975806",
"0.68153846",
"0.679226",
"0.6656828",
"0.66181743",
"0.6549538",
"0.65288526",
"0.6426263",
"0.6417242",
"0.6403863",
"0.6398437",
"0.6381421",
"0.63401043",
"0.63269967",
"0.63261133",
"0.6292237",
"0.6261665",
"0.6213658",
"0.61951053",
"0.61913764",
"0.6169734",
"0.6161",
"0.6155526",
"0.61467755",
"0.6143841",
"0.6113352",
"0.60886544"
] | 0.7725463 | 0 |
Initialize this histogram as a new dict; update with given items | def __init__(self, iterable=None):
super(Histogram, self).__init__()
self.types = 0 # the number of distinct item types in this histogram
self.tokens = 0 # the total count of all item tokens in this histogram
if iterable:
self.update(iterable) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _updateFromItem(self):\n item = self.getItem()\n\n if item is None:\n self.reset()\n return\n\n if not isinstance(item, self._SUPPORTED_ITEM_CLASS):\n _logger.error(\"Unsupported item\", item)\n self.reset()\n return\n\n # Compute histogram and stats\n array = item.getValueData(copy=False)\n\n if array.size == 0:\n self.reset()\n return\n\n xmin, xmax = min_max(array, min_positive=False, finite=True)\n if xmin is None or xmax is None: # All not finite data\n self.reset()\n return\n guessed_nbins = min(1024, int(numpy.sqrt(array.size)))\n\n # bad hack: get 256 bins in the case we have a B&W\n if numpy.issubdtype(array.dtype, numpy.integer):\n if guessed_nbins > xmax - xmin:\n guessed_nbins = xmax - xmin\n guessed_nbins = max(2, guessed_nbins)\n\n # Set default nbins\n self.__nbinsLineEdit.setDefaultValue(guessed_nbins, extend_range=True)\n # Set slider range: do not keep the range value, but the relative pos.\n previousPositions = self.__rangeSlider.getPositions()\n if xmin == xmax: # Enlarge range is none\n if xmin == 0:\n range_ = -0.01, 0.01\n else:\n range_ = sorted((xmin * .99, xmin * 1.01))\n else:\n range_ = xmin, xmax\n\n self.__rangeSlider.setRange(*range_)\n self.__rangeSlider.setPositions(*previousPositions)\n\n histogram = Histogramnd(\n array.ravel().astype(numpy.float32),\n n_bins=max(2, self.__nbinsLineEdit.getValue()),\n histo_range=self.__rangeSlider.getValues(),\n )\n if len(histogram.edges) != 1:\n _logger.error(\"Error while computing the histogram\")\n self.reset()\n return\n\n self.setHistogram(histogram.histo, histogram.edges[0])\n self.resetZoom()\n self.setStatistics(\n min_=xmin,\n max_=xmax,\n mean=numpy.nanmean(array),\n std=numpy.nanstd(array),\n sum_=numpy.nansum(array))",
"def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}",
"def construct_score_book(self, items_and_size: List[Tuple[str, float]]) -> None:\n self.score_book = {}\n\n for item, size in items_and_size:\n self.score_book[item] = size",
"def _additems(self, w,h):\n for idx in range(len(self.data['items'])):\n default={\n 'color': self.data['itemscolor'],\n 'textscale': self.data['itemsscale'],\n 'textfont': self.data['textfont'],\n 'width': w-(self.data['margin'][0]*2.),\n }\n self.data['items'][idx].update(default)\n self.addItem(idx, **self.data['items'][idx])",
"def initDictionnary(self):\n partitions = self.vocabulary.getPartitions()\n for partition in partitions:\n for mod in partition.modalities:\n self.summaryDict[partition.getAttName() + \" : \" + mod] = 0.0\n self.summaryFilteredDict[partition.getAttName() + \" : \" + mod] = 0.0",
"def __init__(self):\n self.num_counts = {}",
"def histogramintegrals(self):\n return {}",
"def __init__(self, items=[]):\n dict.__init__(self, items)",
"def __init__(self):\n\n self._dict = OrderedDict(zip(const.BFHCOLS, [0] * 111))",
"def __init__(self, items: List[T], min_freq: int = 1):\n counter_ = Counter(items)\n unique_items = [x for x, freq in counter_.items() if freq >= min_freq]\n self._dict = {item: i + 1 for i, item in enumerate(unique_items)}\n self._items: List[Union[str, T]] = [\"UNK\"]\n self._items.extend(unique_items)",
"def __init__(self):\n self.buckets = 1009\n self.table = [{} for _ in range(self.buckets)]",
"def __init__(self):\n self.buckets = collections.defaultdict(list)",
"def __init__(self):\n self.items = []\n self.indexes: Dict[int, Set] = defaultdict(set)",
"def initDictionary(bands):\r\n for x in bands:\r\n d[\"{}\".format(x)] = {ProdCost: [], AlbumSales: []}",
"def _augment_item_hist(item_df, event_df):\n return item_df.join(\n event_df[event_df['_holdout']==0]\n .groupby('ITEM_ID').size().to_frame('_hist_len')\n ).fillna({'_hist_len': 0})",
"def __init__(self):\n \n self.items = [] \n self.ind = defaultdict(set) # item -> index into the items array",
"def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)",
"def __init__(self):\n self.vals = {}",
"def _new_hist(self, _hist, memo=NOTHING):\n\n other = self.__class__(_hist)\n for item in self.__dict__:\n if item not in [\"axes\", \"_hist\"]:\n if memo is NOTHING:\n other.__dict__[item] = self.__dict__[item]\n else:\n other.__dict__[item] = copy.deepcopy(self.__dict__[item], memo)\n other.axes = other._generate_axes_()\n for ax in other.axes:\n if memo is NOTHING:\n ax._ax.metadata = copy.copy(ax._ax.metadata)\n else:\n ax._ax.metadata = copy.deepcopy(ax._ax.metadata, memo)\n return other",
"def __init__(self, items=None):\n\n if items is None:\n items = []\n self.set = dict((item, []) for item in items)\n self.heap = list(self.set.keys())\n hpq.heapify(self.heap)\n self.counter = itertools.count()",
"def set_data(self, data):\n self.data = {}\n self.bar_order = []\n for (name, label, n) in data:\n if name not in self.bar_order:\n self.bar_order.append(name)\n self.data[name] = {\"label\": label,\n \"n\": int(n),\n \"color\": COLORS[len(self.data) % len(COLORS)]}",
"def __init__(self):\n self.counts = {}",
"def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]",
"def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}",
"def update(self, items: Mapping[Any, Any]) -> None:\n self.extend(list(items.values()))\n return",
"def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict",
"def createDictInstance(self):\n\n dictinstance = {}\n for i in range(len(self.instancenumbers)):\n dictinstance.setdefault(self.instancenumbers[i], []).append(i)\n\n return dictinstance",
"def __init__(self, wordlist=None, path=None):\n super().__init__() # Initialize this as a new dict\n if path:\n some_words = self.get_words(path)\n for word in some_words:\n if word:\n self[word] = self.get(word, 0) + 1\n if wordlist:\n for word in wordlist:\n self[word] = self.get(word, 0) + 1\n # after creating key-value pairs create instance variable that contains the sum of all values\n self.sum = sum([self.get(key, 0) for key in self]) # sum of weights\n # set the amount of words in the list to the instance variable token\n # Count of distinct word types in this histogram\n self.types = len(self)\n self.tokens = sum(self.values())",
"def __init__(self, word_list=None):\n super(MarkovChain, self).__init__() # Initialize this as a new dict\n # Add properties to track useful word counts for this histogram\n self.types = 0 # Count of distinct word types in this histogram\n self.tokens = 0 # Total count of all word tokens in this histogram\n # Count words in given list, if any\n # Done: Initialize from parameter\n if word_list is not None:\n prev1 = word_list[0]\n prev2 = word_list[1]\n for curr in word_list[2:]:\n self.add_word((prev1, prev2), curr)\n prev1 = prev2\n prev2 = curr\n self.add_word((prev1, prev2))",
"def __init__(self):\n self.cnt = {}"
] | [
"0.6358941",
"0.6122818",
"0.60806197",
"0.5977958",
"0.5972093",
"0.5950355",
"0.5922684",
"0.5915707",
"0.58972573",
"0.5872561",
"0.58719796",
"0.58702004",
"0.58552015",
"0.5855035",
"0.5743081",
"0.5723025",
"0.57209456",
"0.57205206",
"0.5706214",
"0.5673024",
"0.56627065",
"0.5660021",
"0.558356",
"0.5580532",
"0.5574419",
"0.55732256",
"0.55592966",
"0.5545958",
"0.5536055",
"0.5529785"
] | 0.67038065 | 0 |
Return the count of the given item in this histogram, or 0 | def count(self, item):
if item in self:
return self[item]
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count(self, item):\n # type: (Any) -> int\n return list.count(self, self.ref(item))",
"def count(self, item):\n return _(self._.count(item))",
"def count(self, item: Any) -> int:\n curr = self._first\n count = 0\n\n while curr is not None:\n if curr.item == item:\n count += 1\n curr = curr.next\n\n return count",
"def count(self, item):\n # TODO: complete this function!\n if item not in self:\n return 0\n else:\n num_occur = 0\n if self._first == item:\n num_occur += 1\n num_occur += self._rest.count(item)\n return num_occur",
"def count(item):\n return len(item)",
"def count(self, item: Any) -> int:\n # If this recursive list is empty\n if self.is_empty():\n return 0\n # If there is a first and a rest.\n else:\n # Check if the first is equal and add the count on the rest of the list.\n return int(self._first == item) + self._rest.count(item)",
"def item_count(self):\n return self.items.shape[0]",
"def count(self, e):\n try:\n return self.vals[e]\n except:\n return 0",
"def getItemCount(self, ItemBase):\n Found = 0\n for CurrItem in self.List:\n if CurrItem.Base == ItemBase:\n Found = 1\n break\n\n if not Found: return 0\n else: return CurrItem.Count",
"def get_count(self, asset=None):\n if asset is None or 'pc:count' not in asset.properties:\n return self.item.properties.get('pc:count')\n else:\n return asset.properties.get('pc:count')",
"def get_count(self, entry):\n return entry.count",
"def get_num_values(self, item):\n\tnum_values = 1\n\t\n\t# Valor mas antiguo de la linked list\n\t# Siempre tiene valor, si no, no tenemos la entrada en el hashset\n\tvalue = item[\"tail\"][\"next\"]\n \twhile long(value) != 0:\n\t num_values += 1\n\t value = value[\"next\"]\n\n\treturn num_values",
"def count(self) -> Optional[float]:\n return pulumi.get(self, \"count\")",
"def count(self, value): # real signature unknown; restored from __doc__\n return 0",
"def item_count(item_id, arg):\n global database\n table = database.Tables.items\n upd = table.update(None).where(table.c.id == item_id).values(count=table.c.count+(int(arg)))\n database.conn.execute(upd)",
"def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")",
"def cfCount(self, key, item):\n params = [key, item]\n\n return self.execute_command(self.CF_COUNT, *params)",
"def count(self) -> int:\n return pulumi.get(self, \"count\")",
"def count(self):\n # TODO not implemented yet\n return 0",
"def count(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"count\")",
"def total(my_list, item):\n return my_list.count(item)",
"def count(self) -> float:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")",
"def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")"
] | [
"0.7501823",
"0.74777937",
"0.74387944",
"0.7385838",
"0.71182686",
"0.7001242",
"0.69319326",
"0.6839006",
"0.6803228",
"0.67443347",
"0.66951853",
"0.6672748",
"0.653674",
"0.6500396",
"0.64986986",
"0.6493515",
"0.6493515",
"0.6473139",
"0.64564973",
"0.645404",
"0.644886",
"0.64346737",
"0.6423806",
"0.6410198",
"0.6410198",
"0.6410198",
"0.6410198",
"0.6410198",
"0.6410198",
"0.6410198"
] | 0.8094787 | 0 |
Build a browser object a and pointer to the page body, of the search results. | def build_browser(searchurl):
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
try:
# webdriver_manager downloads a matching chromedriver if one is not already cached
browser = webdriver.Chrome(ChromeDriverManager().install(), options=options)
except Exception as e:
print('No chromedriver found in this environment.')
print(f'Install it on your machine. Exception: {e}')
sys.exit()
browser.set_window_size(1280, 1024)
browser.get(searchurl)
time.sleep(1)  # give the results page a moment to render
print('Getting you a lot of images. This may take a few moments...')
body = browser.find_element_by_tag_name('body')
return body, browser | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search():\n url = create_search_url()\n links = make_selenium_search(url)\n\n return links",
"def page_body():\r\n st.header(\"Search\")\r\n st.subheader(\"Search For SMEs With A Few Different Options\")\r\n\r\n search_mode_selection = st.radio(\r\n help=\"Search For SMEs That Have Particular Connections, Titles, Or Names...\",\r\n label=\"Search By\",\r\n options=(SearchMode.Connection.value, SearchMode.JobTitle.value, SearchMode.Name.value),\r\n )\r\n\r\n search_form = st.form(key=\"search_form\", clear_on_submit=False)\r\n search_query = search_form.text_input(label=\"\", value=\"Search...\", max_chars=50)\r\n search_button = search_form.form_submit_button(label=\"Search\")\r\n\r\n if search_button:\r\n results = get_search_results(search_query, SearchMode[str(search_mode_selection).replace(\" \", \"\")])\r\n\r\n # Loop through the results returned from the database query\r\n for result in results:\r\n result_dict = result.to_dict() # Convert internally to a Python dict\r\n\r\n # dict keys here are actually database keys in Firestore. You would need to be signed in to see the proper values\r\n with st.expander(result_dict[\"name\"] + \" - \" + str(result_dict[\"age\"]) + \" years old\"):\r\n st.header(result_dict[\"name\"])\r\n st.write(result_dict[\"jobTitle\"])\r\n\r\n st.subheader(\"Personal Summary\")\r\n st.write(result_dict[\"personalSummary\"])\r\n\r\n if result_dict[\"companyName\"]:\r\n st.subheader(\"Works At\")\r\n st.write(result_dict[\"companyName\"])\r\n\r\n if result_dict[\"connections\"]:\r\n st.subheader(result_dict[\"name\"] + \"'s Connections\")\r\n st.write(\", \".join(result_dict[\"connections\"]))",
"def search(self, query):\n opener = urllib2.build_opener()\n opener.addheaders = self.DEFAULT_HEADERS\n response = opener.open(self.SEARCH_URL + \"?q=\"+ urllib2.quote(query) + \"&hl=\" + \"en\")\n opener.close()\n\n mybytes = response.read()\n mystr = mybytes.decode(\"utf8\")\n soup = BeautifulSoup(mystr, \"html.parser\")\n\n for div in soup.find_all(\"div\", {\"class\": \"g\"}):\n div_link = div.find(\"h3\", {\"class\": \"r\"})\n div_text = div.find(\"span\", {\"class\": \"st\"})\n if div_link != None and div_text != None:\n link = div_link.a[\"href\"]\n title = div_link.text\n text = div_text.text\n yield GoogleResult(title, text, link)",
"def navigate_search_results(self):\n driver = self.driver\n search_results_exhausted = False\n results_page = self.results_page\n delay = 60\n date = get_date_time()\n # css elements to view job pages\n list_element_tag = '/descendant::a[@class=\"job-title-link\"]['\n print_num_search_results(driver, self.keyword, self.location)\n # go to a specific results page number if one is specified\n go_to_specific_results_page(driver, delay, results_page)\n results_page = results_page if results_page > 1 else 1\n\n while not search_results_exhausted:\n for i in range(1,26): # 25 results per page\n # define the css selector for the blue 'View' button for job i\n job_selector = list_element_tag + str(i) + ']'\n if search_suggestion_box_is_present(driver, \n job_selector, i, results_page):\n continue\n # wait for the selector for the next job posting to load.\n # if on last results page, then throw exception as job_selector \n # will not be detected on the page\n if not link_is_present(driver, delay, \n job_selector, i, results_page):\n continue\n robust_wait_for_clickable_element(driver, delay, job_selector)\n extract_transform_load(driver,\n delay,\n job_selector,\n date,\n self.keyword,\n self.location,\n self.filename)\n # attempt to navigate to the next page of search results\n # if the link is not present, then the search results have been \n # exhausted\n try:\n next_results_page(driver, delay)\n print(\"\\n**************************************************\")\n print(\"\\n\\n\\nNavigating to results page {}\" \\\n \"\\n\\n\\n\".format(results_page + 1))\n except ValueError:\n search_results_exhausted = True\n print(\"**************************************************\")\n print(\"\\n\\n\\n\\n\\nSearch results exhausted\\n\\n\\n\\n\\n\")\n else:\n results_page += 1",
"def get_search_results(self):\n sleep(10)\n try:\n addresses = self.driver.find_elements_by_class_name('details-title')\n for p in range(len(addresses)):\n address.append(addresses[p].text)\n prices = self.driver.find_elements_by_class_name('price-info')\n for p in range(len(prices)):\n price.append(prices[p].text)\n links = self.driver.find_element_by_tag_name('a.details-titleLink jsCardLinkGA')\n for p in range(len(links)):\n link.append(links[p].text)\n except NoSuchElementException:\n sleep(3)\n self.pop_up()",
"def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results",
"def construct(self):\n return self.as_search().construct()",
"def search(self, q):\n self.__query = q\n self.scrape_page()",
"def create_browser():\n\t#currently the one I use, but it should work\n\t#user_agent=\"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:38.0) Gecko/20100101 Firefox/38.0\"\n user_agent = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/41.0.2272.76 Chrome/41.0.2272.76 Safari/537.36\"\n br=mechanize.Browser()\n\t#makes br behave like a real browser\n\tcj=cookielib.LWPCookieJar()\n\tbr.set_cookiejar(cj)\n\tbr.set_handle_equiv(True)\n\tbr.set_handle_gzip(True)\n\t#temporarily changed to False due to unwanted mobile redirection\n\tbr.set_handle_redirect(False)\n\tbr.set_handle_referer(True)\n\tbr.set_handle_robots(False)\n\t#debug messages if desired\n\tbr.set_debug_http(False)\n\tbr.set_debug_redirects(True)\n\tbr.set_debug_responses(False)\n\t#adding user agent...this is kind of shady\n\tbr.addheaders=[('User-agent',user_agent)]\n\treturn br",
"def get_page(self):\n self.browser.get(self.url)",
"def __init__(self, config={}, html='', query=''):\n self.config = config\n self.searchtype = self.config.get('search_type', 'normal')\n assert self.searchtype in self.search_types, 'search type \"{}\" is not supported in {}'.format(\n self.searchtype,\n self.__class__.__name__\n )\n\n self.query = query\n self.html = html\n self.dom = None\n self.search_results = {}\n self.num_results_for_query = ''\n self.num_results = 0\n self.effective_query = ''\n self.page_number = -1\n self.no_results = False\n self.related_keywords = {}\n\n # to be set by the implementing sub classes\n self.search_engine = ''\n\n # short alias because we use it so extensively\n self.css_to_xpath = HTMLTranslator().css_to_xpath\n\n if self.html:\n self.parse()",
"def search(request):\n title = \"Voices search\"\n search_term = request.params.get('search_term','')\n form = Form(request)\n searchstring = u'%%%s%%' % search_term\n\n # generic_filter can be applied to all Node (and subclassed) objects\n\n generic_filter = or_(\n Content.title.like(searchstring),\n Content.body.like(searchstring),\n )\n\n results = DBSession.query(Content).filter(Content.type !='listing').filter(generic_filter).\\\n order_by(Content.title.asc()).all()\n\n\n page_url = PageURL_WebOb(request)\n page = int(request.params.get(\"page\", 1))\n paginator = Page(results,\n page=page,\n items_per_page=10,\n url=page_url)\n\n return render_to_response(\"buddy:templates/home/searchresult.mako\",\n dict(paginator=paginator,title=title,\n form=FormRenderer(form)),request=request)",
"def page12(self):\n self.token_query = \\\n 'search'\n result = request1201.POST('/Cars_Sample_App/search.do' +\n '?query=' +\n self.token_query,\n ( NVPair('criteria', 'Bobble'),\n NVPair('x', '57'),\n NVPair('y', '5'), ),\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Content-Type', 'application/x-www-form-urlencoded'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result",
"def create_search_results(p, r, s):\n search_url = create_cfoa_url(p, r, s)\n\n parsed_res = create_soup(search_url)\n\n d = list()\n\n res_div = parsed_res.find_all(\n 'div',\n {'class':\n ['enforce_result_container_complied',\n 'enforce_result_container_in_force']\n }\n )\n\n for i in res_div:\n entry = create_header(i)\n d.append(entry)\n return d",
"def parseSearchHtml(self):\n pass",
"def parseSearchHtml(self):\n pass",
"def create_page(self):",
"def search(self, query, engine=\"duckduckgo\"):\n response = self.get(\"https://duckduckgo.com/html/?q=%s&ia=web\" % query)\n if not response.text:\n return {}\n\n parsed = self.parse(response)\n results = parsed.duckduckgo_results()\n ret = {\n \"response\": response,\n \"query\": query,\n \"results\": results,\n \"parsed\": parsed,\n }\n\n return ret",
"def main(url, MY_OUTWARD_TIME_MINI, MY_OUTWARD_TIME_MAXI=\"23:59\"):\n MY_OUTWARD_TIME_MINI = MY_OUTWARD_TIME_MINI.replace(\"h\", \":\")\n MY_OUTWARD_TIME_MAXI = MY_OUTWARD_TIME_MAXI.replace(\"h\", \":\")\n # Create the web browser object\n b = RB(history=True, allow_redirects=True)\n # Open the page\n b.open(url)\n # Find the next page to go\n res = str(b.select(\"#url_redirect_proposals\")[0])\n\n # # - First solution: manual search\n # offset = 4 + res.index('hid=')\n # length = 3\n # key = res[offset: offset + length]\n # print(\"key =\", key)\n # next_url = url1 + str(key)\n # print(\"1. Next url =\", next_url)\n # - Second solution: search with a regexp\n m = url_finder.search(res)\n next_url = m.string[m.start() : m.end()]\n print(\"Next url =\", next_url, \"...\")\n # Follow this url\n b.open(next_url)\n # Get the data.query part\n script = b.select(\"#vsc-preloaded-data-snippet\")[0]\n content = script.contents[0]\n\n # 1. Search for the query to display it nicely again\n m = query_finder.search(content)\n jsontext = m.string[m.start() : m.end()]\n # print(jsontext)\n beginning = \"data.query = JSON.parse('\"\n end = \"');\"\n query = jsontext[len(beginning) : -len(end)]\n jsonrawstr = query.replace(r\"\\\"\", '\"').replace(r\"\\'\", \"'\") # \\\" > \", \\' > '\n # print(jsonrawstr)\n jsonobj = json.loads(jsonrawstr)\n # print(json.dumps(jsonobj, sort_keys=True, indent=4))\n\n # 2. Search for the result\n m = searchResponse_finder.search(content)\n jsontext = m.string[m.start() : m.end()]\n # print(jsontext)\n beginning = \"data.searchResponse = JSON.parse('\"\n end = \"');\"\n searchResponse = jsontext[len(beginning) : -len(end)]\n # print(searchResponse)\n jsonrawstr = searchResponse.replace(r\"\\\"\", '\"').replace(\n r\"\\'\", \"'\"\n ) # \\\" > \", \\' > '\n # print(jsonrawstr)\n jsonobj = json.loads(jsonrawstr)\n # print(json.dumps(jsonobj, sort_keys=True, indent=4))\n\n \"\"\"\n with open('output.json', 'w+') as f:\n json.dump(jsonobj, f, sort_keys=True, indent=4)\n \"\"\"\n\n # 3. Affichage des horaires\n print(\"\\nDifferents horaires :\")\n horaires = [i[\"departureDate\"] for i in jsonobj[\"trainProposals\"]]\n print(horaires)\n for number, h in enumerate(horaires):\n print(\"Pour un train partant a :\", h)\n prices = jsonobj[\"trainProposals\"][number][\"priceProposals\"]\n if len(prices) > 0:\n prix = prices[0][\"amount\"]\n print(\"\\tPrix TGV minimum\", \"=\", prix, \"euros.\")\n else:\n print(\"\\tTrain complet.\")",
"def crawle_body(self, urlSearch: str):\n\n OneBodyParser().get_page_with(\n ExampleUrl(urlSearch),\n MyRequester(),\n lambda result : ExampleScrapper().scrap_with(\n result,\n lambda result: ExampleScrapper().if_templatize_do(\n result,\n ScrapperTemplate(),\n lambda templateValue: Print().print_with(\n templateValue.bodyTemplatize,\n sys.stdout\n )\n )\n )\n )",
"def search_page():\n return render_template('page_query.html', search_label=g_search_type)",
"def doSearch(self):\n r = \"https://www.gumtree.pl/s-%s/%s/v1c9000l3200008p1?sort=dt&order=desc\" % (self.category, self.location)\n print (r)\n request = requests.get(\"https://www.gumtree.pl/s-%s/%s/v1c9000l3200008p1?sort=dt&order=desc\" % (self.category, self.location), headers=REQUEST_HEADERS)\n\n\n if request.status_code == 200:\n # Got a valid response\n\n listingResult = []\n\n souped = BeautifulSoup(request.text, \"html5lib\")\n for listings_wrapper in souped.find_all(\"li\", class_=\"result pictures\"):\n title = listings_wrapper.find(\"a\", class_=\"href-link\").string\n url = \"https://www.gumtree.pl\" + listings_wrapper.find(class_=\"href-link\").get(\"href\")\n price = listings_wrapper.find(\"span\", class_=\"amount\").string\n description = listings_wrapper.find(class_=\"description hidden\").string\n creationDate = listings_wrapper.find(class_=\"creation-date\").find_all(\"span\")[1].string\n category = listings_wrapper.find(class_=\"category-location\").find(\"span\").string\n print(title)\n searchResultItem = GTRoomItemMain(creationDate, title, category, url, price, description)\n listingResult.append(searchResultItem)\n return listingResult\n else:\n # TODO: Add error handling\n print (\"Server returned code: \" + request.status_code)\n return []",
"def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })",
"def _build_htmlpage_one(args):\n return build_htmlpage_one(*args)",
"def get_page(search):\n headers = {\n \"User-Agent\":\n \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:42.0) Gecko/20100101 Firefox/42.0\",\n }\n url = 'http://google.com/search?h1=en&q=' + search + \"&meta=&gws_rd=ssl\"\n page = requests.get(url, headers=headers)\n return page",
"def page11(self):\n self.token_query = \\\n 'search'\n result = request1101.POST('/Cars_Sample_App/search.do' +\n '?query=' +\n self.token_query,\n ( NVPair('criteria', 'Aston'),\n NVPair('x', '46'),\n NVPair('y', '19'), ),\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Content-Type', 'application/x-www-form-urlencoded'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 3 different values for token_cid found in response; the first matched\n # the last known value of token_cid - don't update the variable.\n\n grinder.sleep(95)\n request1102.GET('/Cars_Sample_App/images/cars/1.jpg', None,\n ( NVPair('Accept', '*/*'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/search.do?query=search'), ))\n\n return result",
"def __init__(self, driver, output_folder, search_parameters):\n self.driver = driver\n self.search_results = SearchResults(output_folder, search_parameters)\n\n self.version = search_parameters[\"version\"]\n self.region = search_parameters[\"community\"]\n self.province = search_parameters[\"province\"]\n self.entity_type = search_parameters[\"entity_type\"]\n self.name = search_parameters[\"name\"]\n self.cif = search_parameters[\"cif\"]\n\n self.do_search()",
"def browser():\n return render_template(\n 'browser.html',\n title='Browser',\n time=datetime.now(),\n message='search for knowladge',\n\n )",
"def _make_page(self):\n (_hbx_page, __, _fxd_right, ___, _x_pos_r, __,\n _y_pos_r) = RAMSTKWorkView._make_assessment_results_page(self)\n\n _fxd_right.put(self.txtModeCount, _x_pos_r, _y_pos_r[8] + 30)\n _fxd_right.show_all()\n\n self.txtActiveHt.set_sensitive(False)\n self.txtDormantHt.set_sensitive(False)\n self.txtSoftwareHt.set_sensitive(False)\n self.txtReliability.set_sensitive(False)\n self.txtMissionRt.set_sensitive(False)\n\n return _hbx_page",
"def _init_browser(self):\n # Initialize the browser\n br = mechanize.Browser()\n # Ignore the robots.txt\n br.set_handle_robots(False)\n return br"
] | [
"0.61708033",
"0.60043526",
"0.56796",
"0.5647723",
"0.56084704",
"0.56076986",
"0.5590576",
"0.5586558",
"0.5546072",
"0.55399984",
"0.55074215",
"0.54825324",
"0.5472605",
"0.54686517",
"0.5468585",
"0.5468585",
"0.54547846",
"0.54461205",
"0.5438007",
"0.5430138",
"0.54218704",
"0.5389806",
"0.538311",
"0.5379956",
"0.53747493",
"0.5363059",
"0.5358651",
"0.53569853",
"0.53524595",
"0.5347053"
] | 0.67708874 | 0 |
Collect all the image URLs from a collection of HTML image objects. | def get_url_from_images(html_images):
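# html_images is assumed to be an iterable of BeautifulSoup-style <img> tags that support ['src'] / ['data-src'] access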
urls = []
for image in html_images:
try:
url = image['data-src']
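# str.find() returns 0 only when "https://" sits at the very start, so 'not url.find(...)' keeps https URLs only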
if not url.find("https://"):
urls.append(url)
except Exception:
# fall back to the plain 'src' attribute when 'data-src' is missing
try:
url = image['src']
if not url.find("https://"):
urls.append(url)
except Exception as e:
print('No image sources found.')
print(e)
return urls | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def scrape(self):\n reg = re.compile(self.regex)\n images = self.soup.findAll('img')\n results = []\n for img in images:\n try:\n url = dict(img.attrs)['src']\n url = self._make_url_path(url)\n if reg.match(url):\n results.append(url)\n\n except:\n pass\n\n print 'Img tag scraping OK'\n return results",
"async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images",
"def getimgs():",
"def get_images(url):\n soup = make_soup(url)\n # this makes a list of bs4 element tags\n images = [img for img in soup.findAll('img')]\n print(str(len(images)) + \" images found.\")\n # compile our unicode list of image links\n image_links = [each.get('src') for each in images]\n # clean list\n image_links = [each for each in image_links if each is not None]\n # specific for test site\n if len(image_links) > 0 and image_links[0][:4] != 'http':\n links = [url + link for link in image_links]\n else:\n links = image_links\n return links",
"def get_images_urls(self, grab, parse_first_image=True):\n images = []\n if parse_first_image:\n first_image = grab.doc.select(\n '//figure[@class=\"item\"]' +\n '/img[@class=\"img-fluid\"]'\n )\n if first_image.exists() and 'http' in first_image.attr('src'):\n images.append(first_image.attr('src'))\n\n for image in grab.doc.select(\n '//figure[@class=\"item\"]' +\n '/img[@class=\"lazyOwl img-fluid\"]'):\n images.append(image.attr('data-src'))\n if len(images) < 1:\n logging.debug(\"Images not found in: %s\" % grab.doc.url)\n\n return images",
"def find_img_urls(mainURL):\n \n imglist = []\n \n class IMGParser(HTMLParser):\n def handle_starttag(self, tag, attrs):\n if tag == 'img':\n imglist.append(dict(attrs)[\"src\"])\n \n URL = urlopen(mainURL)\n html = URL.read()\n \n parser = IMGParser()\n parser.feed(html)\n parser.close()\n \n return imglist",
"def _get_images(self, fuzzable_request):\n res = []\n\n try:\n response = self._uri_opener.GET(fuzzable_request.get_uri(),\n cache=False)\n except:\n om.out.debug('Failed to retrieve the page for finding captchas.')\n else:\n # Do not use parser_cache here, it's not good since CAPTCHA implementations\n # *might* change the image name for each request of the HTML\n #dp = parser_cache.dpc.get_document_parser_for( response )\n try:\n document_parser = DocumentParser.DocumentParser(response)\n except BaseFrameworkException:\n return []\n \n image_path_list = document_parser.get_references_of_tag('img')\n\n GET = self._uri_opener.GET\n sha1 = hashlib.sha1\n \n result_iter = self.worker_pool.imap_unordered(GET, image_path_list)\n \n for image_response in result_iter:\n if image_response.is_image():\n img_src = image_response.get_uri()\n img_hash = sha1(image_response.get_body()).hexdigest()\n res.append((img_src, img_hash, response))\n\n return res",
"def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)",
"def _get_img_urls(self, jdict):\n\n # find photos inside the JSON file\n pics = jdict['props']['homeDetails']['media']['photos']\n urls = [pic['url']['mediumSrc'] for pic in pics]\n return urls",
"def get_image_urls(self):\n return self.get_extract_image_urls(is_first=False)",
"def get_images(self,soup,Images):\n \n img=soup.find_all('a',href=re.compile(\"/photo.php?fbid=\"))\n img1=soup.find_all('a',href=re.compile(\"/photo\"))\n m=' '\n if img !=[]:\n img_href='https://www.facebook.com'+img[0]['href']\n m+=img_href+'\\n'\n \n elif img1 !=[]:\n img_href='https://www.facebook.com'+img1[0]['href']\n m+=img_href+'\\n'\n \n else:\n img=soup.find_all('a',href=re.compile(\"pcb\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n' \n \n \n else:\n img=soup.find_all('a',href=re.compile(\"photos\"))\n if img !=[]:\n for i in img:\n img_href='https://www.facebook.com'+i['href']\n m+=img_href+'\\n'\n \n Images.append(m)\n \n return Images",
"def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))",
"def get_image_links(queries):\n images = []\n\n for query in queries:\n url = 'http://www.bing.com/images/search?q=' + urllib.quote_plus(query) + '&FORM=HDRSC2'\n soup = BeautifulSoup(requests.get(url).text, 'lxml')\n links = [a['src'] for a in soup.find_all('img', {'src': re.compile('mm.bing.net')})]\n images.extend(links)\n time.sleep(5) # wait 5 seconds before next scrape\n\n return images",
"def extract_images_url(url, source):\n if source == \"mangaseeonline\":\n r = s.post(\n \"http://playwright:5000/scrape\",\n json={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@id=\"TopPage\"]/descendant::img/@src')\n if source == \"nettruyen\":\n r = s.get(\n settings.SPLASH_URL, params={\n \"url\": url.replace(\"-page-1\", \"\"), \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[@class=\"reading-detail box_doc\"]/div/img/@src')\n if source == \"doctruyen3q\":\n r = s.get(\n settings.SPLASH_URL, params={\"url\": url, \"wait\": 1}\n )\n tree = html.fromstring(r.text)\n return tree.xpath('//*[contains(@id, \"page_\")]/img/@src')\n if source == \"truyenkinhdien\":\n r = s.get(\n settings.SPLASH_URL.replace(\"render.html\", \"execute\"),\n params={\"url\": url, \"lua_source\": lua_script, \"wait\": 1},\n )\n tree = html.fromstring(r.json()[\"html\"])\n return tree.xpath(\n '//*[@class=\"sgdg-gallery\"]/a[not(contains(@style,\"display:none\"))]/img/@src'\n )",
"def collect_images(self, html_body: str, encoding: str = \"UTF-8\") -> Tuple[str, List[Tuple[str, str, str, bytes]]]:\n images = []\n reader = etree.HTMLParser(recover=True, encoding=encoding)\n root = etree.fromstring(html_body, reader)\n self.init_cid()\n same_content = {} # type: Dict[bytes, str]\n # Search elements <img src=\"...\"> and <input type=\"image\" src=\"...\">\n for image in root.xpath(\"//img | //input[@type='image']\"):\n image_src = image.attrib[\"src\"]\n try:\n image_content = self.load_file(image_src)\n except ImageNotFound as err:\n self.log_error(err)\n self.conditionally_raise(err)\n continue\n content_hash = hashlib.md5(image_content).digest()\n if content_hash in same_content:\n cid = same_content[content_hash]\n else:\n cid = self.get_next_cid()\n same_content[content_hash] = cid\n maintype, subtype = self._get_mime_type(image_src)\n images.append((maintype, subtype, cid, image_content))\n image.attrib[\"src\"] = \"cid:{}\".format(cid)\n html_content = etree.tostring(root, encoding=encoding, pretty_print=self.pretty_print)\n return html_content.decode(encoding), images",
"def get_images(self, article: BeautifulSoup):\n images = []\n content = article.select_one(self.parsing_template.content)\n\n if content:\n body_images = content.select(self.parsing_template.image_element)\n else:\n body_images = None\n\n if body_images:\n for element in body_images:\n\n img = element.find('img')\n if not img:\n continue\n url = img.get(self.parsing_template.image_attribute) # TODO format url correctly\n\n try:\n text = self.get_text(element, self.parsing_template.image_text)\n except IndexError:\n text = ''\n\n try:\n photographer = self.get_text(element, self.parsing_template.image_photographer)\n except IndexError:\n photographer = ''\n\n # Image text and photographer is not separated.\n # Tries to separate out the photographer\n if self.parsing_template.photographer_delimiter:\n if text and not photographer:\n text, photographer = self.parse_photographer(text, text)\n if photographer:\n text, photographer = self.parse_photographer(text, photographer)\n\n if url:\n if photographer:\n # Removes unwanted text in the photographer\n for replace in self.parsing_template.photograph_ignore_text:\n photographer = photographer.replace(replace, '')\n photographer = photographer.replace('/', ',')\n\n if len(text) > 255:\n text = text[:254]\n\n # Separate each photograph\n photographers = []\n for photograph in photographer.split(','):\n photographer_name_split = list(filter(lambda x: x or x != ' ', photograph.split(' ')))\n if photographer_name_split:\n if len(photographer_name_split) == 1:\n lastName = photographer_name_split[0].strip(' ').strip('.')\n firstName = ''\n else:\n firstName = photographer_name_split[0].strip(' ')\n lastName = photographer_name_split[1].strip(' ').strip('.')\n photographers.append(Photographer(firstName=firstName, lastName=lastName))\n\n images.append((ArticleImage(url=url, text=text), photographers))\n\n return images",
"def get_images(url):\n \n # =============================================================================\n # Selenium.\n # =============================================================================\n\n chrome_options = Options()\n #chrome_options.add_argument('--incognito')\n #chrome_options.add_argument('--headless')\n #chrome_options.add_argument('--no-sandbox')\n \n driver = webdriver.Chrome(options=chrome_options,executable_path='/usr/local/bin/chromedriver') # Optional argument, if not specified will search path.\n driver.get('https://' + url)\n \n #scrolling to bottom to load all images on the page\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n #sleep to make sure everything loads\n time.sleep(5)\n \n \n html_source = driver.page_source\n \n img_alt_src(html_source)\n \n driver.close()\n driver.quit()",
"def get_images_relative_urls_from_page(page_content: str) -> list:\n\n soup = BeautifulSoup(page_content, 'lxml')\n\n return [img_tag.get('src') for img_tag in soup.findAll('img')]",
"async def getImageURLS(self, tags, fuzzy=False, singlePage=False):\n if fuzzy:\n tags = tags.split(\" \")\n for tag in tags:\n tag = tag + \"~\"\n temp = \" \"\n tags = temp.join(tags)\n print(tags)\n num = await self.totalImages(tags)\n if num != 0:\n PID = 0\n imgList = []\n XML = None\n t = True\n tempURL = self.urlGen(tags=tags, PID=PID)\n while t:\n with async_timeout.timeout(10):\n async with self.session.get(url=tempURL) as XML:\n XML = await XML.read()\n XML = ET.XML(XML)\n XML = self.ParseXML(XML)\n if XML is None:\n return None\n if len(imgList) >= int(XML['posts']['@count']): # \"if we're out of images to process\"\n t = False # \"end the loop\"\n else:\n for data in XML['posts']['post']:\n imgList.append(str(data['@file_url']))\n if singlePage:\n return imgList\n PID += 1\n return imgList\n else:\n return None",
"def img_urls(self, media, type = \"low_resolution\"):\n\n imgs = {}\n\n for item in media:\n if item[\"type\"] != \"image\":\n continue\n\n imgs[item[\"id\"]] = item[\"images\"][type][\"url\"]\n\n return imgs",
"def extract_images(content):\n\n return re.findall('src=\"([^\"]+)\"', content)",
"def _collect_img_links(self):\n raise NotImplementedError",
"def get_img_ref_from_attrs(attrs):\n\n for attr in attrs:\n if attr[0]== 'src':\n if isImage(attr[1]):\n list_of_img_refs.append(attr[1])\n\n if attr[0] == 'href':\n if isImage(attr[1]):\n list_of_img_refs.append(attr[1])",
"def get_apartment_images(self, soup, apartment_dict):\n\n image_urls = []\n images_container = soup.find('div', class_='photos')\n images_container = images_container.find('div')\n\n # Iterate over images in gallery\n for image_container in images_container.find_all('div'):\n anchor_tag = image_container.find('a')\n if anchor_tag:\n image_urls.append(self.base_url + anchor_tag['href'])\n apartment_dict['image_urls'] = image_urls",
"def get_extract_image_urls(self, is_first=False):\n additional_json = self.get_additional_json()\n image_urls = []\n\n try:\n image_list = additional_json['PicturePanel']['fsImgList']\n except Exception:\n image_list = []\n\n for image in image_list:\n image_urls.append(image.get('displayImgUrl'))\n\n image_urls = list(set(image_urls))\n if len(image_urls) < 1:\n try:\n raw = self.scrap(field='IMAGE_URL', data=IMAGE_URL)\n image_urls = list(set(raw['value']))\n except Exception:\n pass\n\n if is_first == True:\n return image_urls[0] if len(image_urls) > 0 else None\n return image_urls",
"def test_image_links(self):\r\n print('\\nTest image links: ', end='', flush=True)\r\n driver = self.driver\r\n driver.get(MY_URL)\r\n all_images = driver.find_elements_by_tag_name('img')\r\n for image in all_images:\r\n src = image.get_attribute('src')\r\n alt = image.get_attribute('alt')\r\n r = requests.get(src)\r\n assert r.status_code == 200, 'Bad http status (%d) for %s' % (r.status_code, src)\r\n assert len(alt) > 0, 'Missing or empty alt tag for %s' % (src)\r\n print('.', end=\"\", flush=True)\r\n if DEBUG:\r\n print ('Src=%s' % src)",
"def photo(el, base_url=''):\n # if element is an image use source if exists\n prop_value = get_attr(el, \"src\", check_name=\"img\")\n if prop_value is not None:\n return [urljoin(base_url, prop_value)]\n\n # if element is an object use data if exists\n prop_value = get_attr(el, \"data\", check_name=\"object\")\n if prop_value is not None:\n return [prop_value]\n\n # if element has one image child use source if exists and img is\n # not root class\n poss_imgs = el.find_all(\"img\", recursive=False)\n if len(poss_imgs) == 1:\n poss_img = poss_imgs[0]\n if mf2_classes.root(poss_img.get('class', [])) == []:\n prop_value = get_attr(poss_img, \"src\", check_name=\"img\")\n if prop_value is not None:\n return [urljoin(base_url, prop_value)]\n\n # if element has one object child use data if exists and object is\n # not root class\n poss_objs = el.find_all(\"object\", recursive=False)\n if len(poss_objs) == 1:\n poss_obj = poss_objs[0]\n if mf2_classes.root(poss_obj.get('class', [])) == []:\n prop_value = get_attr(poss_obj, \"data\", check_name=\"object\")\n if prop_value is not None:\n return [prop_value]\n\n children = el.find_all(True, recursive=False)\n # if only one child then repeat above in child\n if len(children) == 1:\n # if element has one image child use source if exists and img\n # is not root class\n poss_imgs = children[0].find_all(\"img\", recursive=False)\n if len(poss_imgs) == 1:\n poss_img = poss_imgs[0]\n if mf2_classes.root(poss_img.get('class', [])) == []:\n prop_value = get_attr(poss_img, \"src\", check_name=\"img\")\n if prop_value is not None:\n return [urljoin(base_url, prop_value)]\n\n # if element has one object child use data if exists and\n # object is not root class\n poss_objs = children[0].find_all(\"object\", recursive=False)\n if len(poss_objs) == 1:\n poss_obj = poss_objs[0]\n if mf2_classes.root(poss_obj.get('class', [])) == []:\n prop_value = get_attr(poss_obj, \"data\", check_name=\"object\")\n if prop_value is not None:\n return [prop_value]",
"def get_image_comic_url(session, response):\n soup = bs(response.text, 'lxml')\n for div in soup.find_all('div', class_=\"img-comic-container\"):\n for a in div.find_all('a', class_=\"img-comic-link\"):\n for img in a.find_all('img', src=True):\n return \"https:\" + img['src']",
"def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths",
"def _get_information(self):\n return [photo.attrs[\"data-src\"] for photo in self._tab.find_all(\"img\", class_=\"lazyload\")]"
] | [
"0.7710374",
"0.7701632",
"0.73915905",
"0.73700666",
"0.7355089",
"0.7310259",
"0.72975385",
"0.71919096",
"0.7167929",
"0.7150677",
"0.7149977",
"0.71022826",
"0.7074955",
"0.69532025",
"0.6945381",
"0.6927229",
"0.69269663",
"0.6885604",
"0.68824416",
"0.6851432",
"0.6836961",
"0.68347764",
"0.6834135",
"0.68340176",
"0.68185276",
"0.6815248",
"0.6782905",
"0.6680949",
"0.666239",
"0.6640233"
] | 0.7713752 | 0 |
Load all the environment variables defined in the `env_file`. This is equivalent to `. env_file` in bash. It is possible to define all the system-specific variables in the `env_file`. | def load_envs(env_file: Optional[str] = None) -> None:
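# Requires the python-dotenv package (import dotenv) and typing.Optional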
dotenv.load_dotenv(dotenv_path=env_file, override=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_envs_from_file(file_path=constants.ENV_FILE_DEFAULT_PATH.value):\n #pylint: disable=unspecified-encoding\n with open(file_path, \"r\") as file:\n for line in file:\n line = line.strip()\n if not line:\n continue\n if line.startswith(\"#\"):\n continue\n key, value = line.split(\"=\", 1)\n environ[key] = value",
"def load_evironment():\n environment = Utility.load_yaml(os.getenv(\"system_file\", \"./system.yaml\"))\n for key in environment:\n if key in os.environ:\n environment[key] = os.getenv(key)\n Utility.environment = environment",
"def load_env(env_path=''):\n if not env_path:\n env_path = Path.home() / '.pincidents'\n load_dotenv(dotenv_path=env_path)",
"def read_envdir():\n env_dir = \"env\"\n env_vars = glob.glob(os.path.join(env_dir, '*'))\n for env_var in env_vars:\n with open(env_var, 'r') as env_var_file:\n os.environ.setdefault(env_var.split(os.sep)[-1],\n env_var_file.read().strip())",
"def load_env():\n project_dir = abspath(join(dirname(__file__), '../..', '..', '..'))\n dotenv.read_dotenv(join(project_dir, 'nijanthan/practise/base_pro/.env'))",
"def load_env(env_files):\n env = {}\n for env_file in env_files:\n with open(env_file) as f:\n for line in f:\n if line and line[0] != \"#\":\n try:\n index = line.index(\"=\")\n env[line[:index].strip()] = line[index + 1 :].strip()\n except ValueError:\n # Ignore lines that don't have a '='\n pass\n return env",
"def load_config(env_file_path: str) -> None:\n if os.path.isfile(env_file_path):\n load_dotenv(dotenv_path=env_file_path)\n else:\n logger.info(f\".env file does not exist on {env_file_path}. Loading environment variable from the machine\")",
"def envload(self):\n\n check = load_dotenv(self.envpath, override=True)\n\n return check",
"def load_env():\n global api_key\n\n # Load the config file\n env_file = path.join(path.dirname(path.abspath(__file__)), 'env.yml')\n try:\n stream = open(env_file, 'r')\n y = yaml.safe_load(stream)\n except IOError:\n print(\"ERROR: Environment file {} not found\".format(env_file))\n sys.exit(3)\n except yaml.parser.ParserError as e:\n print(\"ERROR: Invalid Environment file\")\n print(e)\n sys.exit(3)\n\n api_key = y['api_key']",
"def load_environ(self):\n\t\tself.environ = {}\n\t\tf = file(\"/proc/%d/environ\" % self.pid)\n\t\tfor x in f.readline().split('\\0'):\n\t\t\tif len(x) > 0:\n\t\t\t\ty = x.split('=')\n\t\t\t\tself.environ[y[0]] = y[1]\n\t\tf.close()",
"def fix_dot_env_file():\n # Create path to the .env file\n env_file_path = Path(\".env\")\n\n # Ensure that the .env file exists\n env_file_path.touch(exist_ok=True)\n\n # Otherwise, extract all the lines in the .env file\n env_file_lines = env_file_path.read_text().splitlines(keepends=False)\n\n # Extract all the environment variables in the .env file\n env_vars = [line.split(\"=\")[0] for line in env_file_lines]\n\n # For each of the desired environment variables, check if it exists in the .env\n # file\n env_vars_missing = [\n env_var\n for env_var in DESIRED_ENVIRONMENT_VARIABLES.keys()\n if env_var not in env_vars\n ]\n\n # Create all the missing environment variables\n with env_file_path.open(\"a\") as f:\n for env_var in env_vars_missing:\n value = \"\"\n if env_var == \"GPG_KEY_ID\":\n gpg = subprocess.Popen(\n [\"gpg\", \"--list-secret-keys\", \"--keyid-format=long\"],\n stdout=subprocess.PIPE,\n )\n grep = subprocess.Popen(\n [\"grep\", \"sec\"], stdin=gpg.stdout, stdout=subprocess.PIPE\n )\n value = (\n subprocess.check_output(\n [\"sed\", \"-E\", \"s/.*\\\\/([^ ]+).*/\\\\1/\"],\n stdin=grep.stdout,\n )\n .decode()\n .strip(\"\\n\")\n )\n gpg.wait()\n grep.wait()\n if value == \"\":\n value = input(DESIRED_ENVIRONMENT_VARIABLES[env_var])\n f.write(f'{env_var}=\"{value}\"\\n')",
"def load_env_variables(self, environment):\n env_dir = os.path.join(self.__get_environments_root_dir(), environment)\n return read_and_combine_yamls_in_dir(env_dir)",
"def load_dot_env() -> None:\n dont_load_env = os.getenv(\"GITGUARDIAN_DONT_LOAD_ENV\", False)\n dotenv_path = os.getenv(\"GITGUARDIAN_DOTENV_PATH\", None)\n cwd_env = os.path.join(\".\", \".env\")\n if not dont_load_env:\n if dotenv_path and os.path.isfile(dotenv_path):\n load_dotenv(dotenv_path, override=True)\n return\n elif dotenv_path:\n display_error(\n \"GITGUARDIAN_DOTENV_LOCATION does not point to a valid .env file\"\n )\n if os.path.isfile(cwd_env):\n load_dotenv(cwd_env, override=True)\n return\n if is_git_dir() and os.path.isfile(os.path.join(get_git_root(), \".env\")):\n load_dotenv(os.path.join(get_git_root(), \".env\"), override=True)\n return",
"def load_local_into_env(cls, filename, stage=None):\n env_vars = cls.open_local(filename, stage=stage, for_env=True)\n\n for key, value in env_vars.items():\n if value is None:\n print('Environment variable: {0} has no value and will not be set.'.format(key))\n else:\n if isinstance(value, bool):\n value = str(value).lower()\n elif not isinstance(value, str):\n value = str(value)\n os.environ[key] = value\n\n return env_vars",
"def change_environment_variables():\n values = load('environment.yaml')\n\n for key in values.keys():\n os.environ[key] = values[key]\n\n info(f'Changed environment variables to {values}')",
"def loadenv(self):\n logging.debug('Loading OpenStack authentication information from environment')\n # Grab any OS_ found in environment\n for var in os.environ:\n if var[0:3] == 'OS_':\n value = os.environ[var]\n # Don't print out password or token to debug\n if 'PASSWORD' not in var or 'TOKEN' not in var:\n logging.debug('Using %s from environment for %s', value, var)\n self.creds[var[3:].lower()] = value",
"def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = 
self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n 
self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # 
Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0",
"def parse_env_file(env_file):\n environment = {}\n\n with open(env_file, 'r') as f:\n for line in f:\n\n if line[0] == '#':\n continue\n\n parse_line = line.strip().split('=')\n if len(parse_line) == 2:\n k, v = parse_line\n environment[k] = v\n else:\n raise errors.DockerException(\n 'Invalid line in environment file {0}:\\n{1}'.format(\n env_file, line))\n\n return environment",
"def parse_env_file_form_dict(env='base'):\r\n cur_path = os.path.abspath(os.path.dirname(__file__))\r\n file_path = os.path.join(cur_path, r\"./config/{}.env\".format(env))\r\n\r\n with open(file_path, 'r') as fh:\r\n logger.debug(\"Env file fetched is {}.env\".format(env))\r\n vars_dict = dict(\r\n tuple(line.split('='))\r\n for line in fh.readlines() if not line.startswith('#')\r\n )\r\n # print(\"Parsed dict values are {}\".format(vars_dict))\r\n return vars_dict",
"def GetBashEnvFromFile(this, filename):\n DB_RE = re.compile(\"export (.+)=(.+)\")\n ret = {}\n if filename is not None:\n with open( filename, \"r\" ) as f:\n for line in f:\n m = DB_RE.search(line.strip())\n if m:\n name = m.group(1)\n val = m.group(2)\n # Check for quotes\n if val[0] in \"'\\\"\" and val[0]==val[-1]:\n val = val[1:-1]\n ret[name] = val\n for name in (MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DATABASE):\n if name not in ret:\n try:\n ret[name] = os.environ[name]\n except KeyError as e:\n logging.error(\"%s not in environment not in %s\",name,filename)\n raise\n return ret",
"def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n env.use_ssh_config = True\n\n # Bug: when setting this inside a function. Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)",
"def get_environment_vars():\n return {env: os.environ[env] for env in\n params.ENV_DIRS if env in os.environ}",
"def load_environment(self, env):\n self.env = env",
"def load_environment(path: Optional[str] = None):\n environment = deserialize_environment_from_file(path=path)\n EnvironmentProvider().environment = environment",
"def load_dotenv(dotenv_path, verbose=False):\n if not os.path.exists(dotenv_path):\n if verbose:\n warnings.warn(f\"Not loading {dotenv_path}, it doesn't exist.\")\n return None\n for k, v in dotenv_values(dotenv_path).items():\n os.environ.setdefault(k, v)\n return True",
"def handle_dot_env_file(dot_env_file='.env') -> None:\n if os.path.isfile(dot_env_file):\n try:\n load_dotenv(dotenv_path=dot_env_file)\n except Exception:\n raise click.FileError('There was an error when processing the .env file, please check it out.')",
"def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)",
"def _get_env(self):\n env = {}\n for k, v in os.environ.items():\n k = k.decode() if isinstance(k, bytes) else k\n v = v.decode() if isinstance(v, bytes) else v\n env[k] = v\n return list(env.items())",
"def read_env(path=None, recurse=True, stream=None, verbose=False, override=False):\n # By default, start search from the same file this function is called\n if path is None:\n frame = inspect.currentframe().f_back\n caller_dir = os.path.dirname(frame.f_code.co_filename)\n start = os.path.join(os.path.abspath(caller_dir))\n else:\n start = path\n if recurse:\n for dirname in _walk_to_root(start):\n check_path = os.path.join(dirname, \".env\")\n if os.path.exists(check_path):\n return load_dotenv(\n check_path, stream=stream, verbose=verbose, override=override\n )\n else:\n if path is None:\n start = os.path.join(start, \".env\")\n return load_dotenv(start, stream=stream, verbose=verbose, override=override)",
"def get_env_vars():\n env_vars = []\n leapp_vars = {k: v for (k, v) in os.environ.items() if k.startswith('LEAPP_') and k not in ENV_IGNORE}\n for k, v in leapp_vars.items():\n if k in ENV_MAPPING:\n env_vars.append(EnvVar(name=ENV_MAPPING.get(k), value=v))\n continue\n env_vars.append(EnvVar(name=k, value=v))\n\n return env_vars"
] | [
"0.8037709",
"0.75521344",
"0.74704516",
"0.74306357",
"0.73876494",
"0.7366125",
"0.7323644",
"0.70755285",
"0.7024416",
"0.6894454",
"0.6866709",
"0.68233156",
"0.68144315",
"0.67741936",
"0.67297035",
"0.66447043",
"0.6612059",
"0.6598596",
"0.6559391",
"0.6450934",
"0.6385617",
"0.6255366",
"0.62519777",
"0.6221196",
"0.6210158",
"0.6196378",
"0.6129587",
"0.6123679",
"0.60912293",
"0.60778654"
] | 0.8404838 | 0 |
Update status and return the updated task | def update_status(request):
task_id = request.POST.get('task_id', 0)
new_status = request.POST.get('new_status', 0)
search_task = task_models.Task.query.filter(task_models.Task.id == task_id).first()
if not search_task:
return HttpResponse(simplejson.dumps({'success': False}))
search_task.update(user=request.user, status=new_status, lastModifiedBy=request.user.id,
lastModified=str(datetime.utcnow()))
return JsonResponse({
'status': new_status,
'lastModifiedBy': request.user.id,
'lastModified': str(datetime.utcnow())
}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_status(self, *args, **kwargs):\n # Get Future\n future = self.future\n\n # Do nothing if no Future\n if not future:\n return\n\n # Get the status\n dask_status = future.status.lower()\n\n try:\n # Translate to TethysJob status\n self._status = self.DASK_TO_STATUS_TYPES[dask_status]\n self.save()\n # Clean up client\n self.client.close()\n\n except KeyError:\n log.error('Unknown Dask Status: \"{}\"'.format(dask_status))",
"async def async_update(self):\n\n await self.status_request()",
"def updateStatus(self, status):\n pass",
"def _update():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\tfor ID in IDs:\n\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.finish_status: myOpt.f})\n\n\t\tif myOpt.vt:\n\t\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.version_time: myOpt.vt})\n\n\t#commit\n\tmyTaskSession.commit()\n\n\t\"\"\"\n\t#ERR: not given itsm id for update \n\tif not myOpt.id:\n\t\tprint('Error: no itsm id given for update finish_status to 1')\n\t\treturn 1\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\t\n\tquery.filter(WorkToolkitDB.db.Task.id == myOpt.id).update({'finish_status': myOpt.f})\n\tmyTaskSession.commit()\n\n\t\n\tdata = query.filter(WorkToolkitDB.db.Task.id == myOpt.id).all()\n\tfor record in data:\n\t\t\t#record_arr = record.to_array()\n\t\t\tpt.add_row(record.to_array())\n\n\tprint(pt)\n\t\"\"\"\n\n\treturn 0",
"def update_task_status(project_id, task_id):\n completion_status = request.get_json()['completion_status']\n\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n \n else:\n permission = has_project_permission(project, g.user)\n task = Task.query.filter_by(id=task_id).first()\n if not task:\n abort(404, f'There is no task with ID of {task_id}.')\n if task:\n task.completion_status = completion_status\n db_session.add(task)\n db_session.commit()\n return {\n 'success': True,\n 'result': task_schema.dump(task),\n 'message': f\"Successfully Updated the Completion Status of {task.name}.\"\n }",
"def task_status():\n pass",
"def task_update(self):\n try:\n self.task_stop()\n except:\n pass\n self.update()\n self.task_start()",
"def my_update_subtask_status(entry_id, current_task_id, new_subtask_status):\r\n entry = InstructorTask.objects.get(pk=entry_id)\r\n subtask_dict = json.loads(entry.subtasks)\r\n subtask_status_info = subtask_dict['status']\r\n current_subtask_status = SubtaskStatus.from_dict(subtask_status_info[current_task_id])\r\n current_retry_count = current_subtask_status.get_retry_count()\r\n new_retry_count = new_subtask_status.get_retry_count()\r\n if current_retry_count <= new_retry_count:\r\n update_subtask_status(entry_id, current_task_id, new_subtask_status)",
"def update_status(request):\n tasklist = request.GET.get(\"tasklist\")\n pk = request.GET.get(\"pk\")\n status = request.GET.get(\"status\")\n qs = Todo.objects.get(pk=pk)\n qs.status = status\n if status == \"Done\":\n qs.close()\n elif status == \"Undone\":\n qs.reopen()\n elif status == \"In-Progress\":\n qs.in_progress()\n qs.save()\n return redirect(\"tasks\", tasklist=tasklist)",
"def _update(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")",
"def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status",
"def update_task(self):\n row_id = self.get_valid_id('update')\n\n if row_id == -1:\n return\n\n task_title = self.display.ask_user_title()\n task_description = self.display.ask_user_description()\n task_due = self.display.ask_user_due()\n task_finished = self.display.ask_user_finished()\n\n # Call the db function to update data\n self.db_link.update_task(row_id, task_title, task_description, task_due, task_finished)\n self.display.print_success('\\nTask successfully updated.\\n')\n self.print_tasks()",
"def UpdateStatus(self, status):\r\n self.status.update(status)",
"def patch(self, id):\n try:\n task = update_status(get_db(), id, Status[api.payload[\"status\"]])\n if not task:\n api.abort(404, \"Invalid Task\")\n return task_to_dict(task)\n except ValueError:\n api.abort(422, \"Invalid Status\")",
"def _update_subtask_status(entry_id, current_task_id, new_subtask_status):\r\n TASK_LOG.info(\"Preparing to update status for subtask %s for instructor task %d with status %s\",\r\n current_task_id, entry_id, new_subtask_status)\r\n\r\n try:\r\n entry = InstructorTask.objects.select_for_update().get(pk=entry_id)\r\n subtask_dict = json.loads(entry.subtasks)\r\n subtask_status_info = subtask_dict['status']\r\n if current_task_id not in subtask_status_info:\r\n # unexpected error -- raise an exception\r\n format_str = \"Unexpected task_id '{}': unable to update status for subtask of instructor task '{}'\"\r\n msg = format_str.format(current_task_id, entry_id)\r\n TASK_LOG.warning(msg)\r\n raise ValueError(msg)\r\n\r\n # Update status:\r\n subtask_status_info[current_task_id] = new_subtask_status.to_dict()\r\n\r\n # Update the parent task progress.\r\n # Set the estimate of duration, but only if it\r\n # increases. Clock skew between time() returned by different machines\r\n # may result in non-monotonic values for duration.\r\n task_progress = json.loads(entry.task_output)\r\n start_time = task_progress['start_time']\r\n prev_duration = task_progress['duration_ms']\r\n new_duration = int((time() - start_time) * 1000)\r\n task_progress['duration_ms'] = max(prev_duration, new_duration)\r\n\r\n # Update counts only when subtask is done.\r\n # In future, we can make this more responsive by updating status\r\n # between retries, by comparing counts that change from previous\r\n # retry.\r\n new_state = new_subtask_status.state\r\n if new_subtask_status is not None and new_state in READY_STATES:\r\n for statname in ['attempted', 'succeeded', 'failed', 'skipped']:\r\n task_progress[statname] += getattr(new_subtask_status, statname)\r\n\r\n # Figure out if we're actually done (i.e. this is the last task to complete).\r\n # This is easier if we just maintain a counter, rather than scanning the\r\n # entire new_subtask_status dict.\r\n if new_state == SUCCESS:\r\n subtask_dict['succeeded'] += 1\r\n elif new_state in READY_STATES:\r\n subtask_dict['failed'] += 1\r\n num_remaining = subtask_dict['total'] - subtask_dict['succeeded'] - subtask_dict['failed']\r\n\r\n # If we're done with the last task, update the parent status to indicate that.\r\n # At present, we mark the task as having succeeded. In future, we should see\r\n # if there was a catastrophic failure that occurred, and figure out how to\r\n # report that here.\r\n if num_remaining <= 0:\r\n entry.task_state = SUCCESS\r\n entry.subtasks = json.dumps(subtask_dict)\r\n entry.task_output = InstructorTask.create_output_for_success(task_progress)\r\n\r\n TASK_LOG.debug(\"about to save....\")\r\n entry.save()\r\n TASK_LOG.info(\"Task output updated to %s for subtask %s of instructor task %d\",\r\n entry.task_output, current_task_id, entry_id)\r\n except Exception:\r\n TASK_LOG.exception(\"Unexpected error while updating InstructorTask.\")\r\n transaction.rollback()\r\n dog_stats_api.increment('instructor_task.subtask.update_exception')\r\n raise\r\n else:\r\n TASK_LOG.debug(\"about to commit....\")\r\n transaction.commit()",
"def update_task(self, name, fields):\n pass",
"def post(self):\n task = self.params.task\n task.completed = not task.completed\n task.put()\n render_json(self, obj=task.as_json())",
"def update_task(request, tid):\n try:\n slogger.task[tid].info(\"update task request\")\n labels = request.POST['labels']\n task.update(tid, labels)\n except Exception as e:\n slogger.task[tid].error(\"cannot update task\", exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return HttpResponse()",
"def commit(self):\n data = self._to_json()\n resp = self._connection._put(get_url('task update', uuid=self._uuid), json=data)\n self._auto_update = self._last_auto_update_state\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)",
"async def __await__(self):\n return await self._update_task",
"def _update_status(self):\n self._db_update({'status': self.status})",
"def update_status(request_id, status):\n pass",
"def update(self, **kwargs):\n self.status = status.parse(status.get(host=self._host, port=self._port))",
"def put(self, id):\n req = api.payload\n try:\n result = update_task(\n get_db(),\n id,\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid Status\")",
"def update_task(self, task):\n create = task.id == 0\n\n xml = self._serialise_task(task)\n\n method = ['PUT','POST'][create]\n\n if create:\n url = \"%s/tasks?%s\" % \\\n (self._get_base_url(), self._get_url_params())\n else:\n url = \"%s/tasks/%s?%s\" % \\\n (self._get_base_url(), task.id, self._get_url_params())\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(method, url, xml, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if not response.status == 200:\n raise Exception(\"Could not update/create task.\"\\\n \" Response was [%s]: %s\" % (response.status, data))\n\n return self._parse_task(ET.fromstring(data))",
"async def get_task_status(task_id: TaskId):",
"def update(task_id):\n data = request.get_json()\n try:\n if \"status\" in data:\n db_helper.update_status_entry(task_id)\n result = {'success': True, 'response': 'Status Updated'}\n elif \"first_name\" in data:\n db_helper.update_belt_entry(task_id, data)\n result = {'success': True, 'response': 'Task Updated'}\n else:\n result = {'success': True, 'response': 'Nothing Updated'}\n except:\n result = {'success': False, 'response': 'Something went wrong'}\n\n return jsonify(result)",
"def _update_instructor_task(instructor_task, task_result):\r\n # Pull values out of the result object as close to each other as possible.\r\n # If we wait and check the values later, the values for the state and result\r\n # are more likely to have changed. Pull the state out first, and\r\n # then code assuming that the result may not exactly match the state.\r\n task_id = task_result.task_id\r\n result_state = task_result.state\r\n returned_result = task_result.result\r\n result_traceback = task_result.traceback\r\n\r\n # Assume we don't always save the InstructorTask entry if we don't have to,\r\n # but that in most cases we will update the InstructorTask in-place with its\r\n # current progress.\r\n entry_needs_updating = True\r\n entry_needs_saving = False\r\n task_output = None\r\n\r\n if instructor_task.task_state == PROGRESS and len(instructor_task.subtasks) > 0:\r\n # This happens when running subtasks: the result object is marked with SUCCESS,\r\n # meaning that the subtasks have successfully been defined. However, the InstructorTask\r\n # will be marked as in PROGRESS, until the last subtask completes and marks it as SUCCESS.\r\n # We want to ignore the parent SUCCESS if subtasks are still running, and just trust the\r\n # contents of the InstructorTask.\r\n entry_needs_updating = False\r\n elif result_state in [PROGRESS, SUCCESS]:\r\n # construct a status message directly from the task result's result:\r\n # it needs to go back with the entry passed in.\r\n log.info(\"background task (%s), state %s: result: %s\", task_id, result_state, returned_result)\r\n task_output = InstructorTask.create_output_for_success(returned_result)\r\n elif result_state == FAILURE:\r\n # on failure, the result's result contains the exception that caused the failure\r\n exception = returned_result\r\n traceback = result_traceback if result_traceback is not None else ''\r\n log.warning(\"background task (%s) failed: %s %s\", task_id, returned_result, traceback)\r\n task_output = InstructorTask.create_output_for_failure(exception, result_traceback)\r\n elif result_state == REVOKED:\r\n # on revocation, the result's result doesn't contain anything\r\n # but we cannot rely on the worker thread to set this status,\r\n # so we set it here.\r\n entry_needs_saving = True\r\n log.warning(\"background task (%s) revoked.\", task_id)\r\n task_output = InstructorTask.create_output_for_revoked()\r\n\r\n # save progress and state into the entry, even if it's not being saved:\r\n # when celery is run in \"ALWAYS_EAGER\" mode, progress needs to go back\r\n # with the entry passed in.\r\n if entry_needs_updating:\r\n instructor_task.task_state = result_state\r\n if task_output is not None:\r\n instructor_task.task_output = task_output\r\n\r\n if entry_needs_saving:\r\n instructor_task.save()",
"def refresh_status() -> None:\n ...",
"def updatestatus(self):\n self.status = self.query()\n if self.status['success']:\n return True\n else:\n return False"
] | [
"0.74056137",
"0.72580117",
"0.7137461",
"0.70980364",
"0.7047253",
"0.69939744",
"0.69837",
"0.6919644",
"0.6919353",
"0.68925995",
"0.6863291",
"0.6858715",
"0.68347573",
"0.68258166",
"0.6808914",
"0.67726386",
"0.67489964",
"0.6748969",
"0.6720296",
"0.6717316",
"0.67113495",
"0.67048454",
"0.6658792",
"0.6654741",
"0.6640763",
"0.66178894",
"0.6613967",
"0.658962",
"0.6582693",
"0.65763545"
] | 0.7371842 | 1 |
Return a list of domains for a hostname (in args) | def return_domains(hostname, username):
myconnection = ssh_connection(hostname, username)
if myconnection == 1:
return "Connection to %s failed" % hostname
else:
# Send the command (non-blocking)
stdin, stdout, stderr = myconnection.exec_command("sudo /usr/sbin/postconf -P */unix/syslog_name | cut -d '/' -f 1")
        # Get the standard output
out=stdout.read().splitlines()
if not out:
return "No domains for this hostname"
else:
            # Return the list of domains
return out
# Disconnect from the host
myconnection.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_domain_names():\n pass",
"def resolv(hostname):\n\n ips = list()\n\n # Create resolver object\n res = resolver.Resolver()\n\n # Choose the correct DNS servers\n # Blue DNS servers\n if hostname.startswith('b-'):\n res.nameservers = ['172.16.2.10', '172.16.2.11']\n # Green DNS servers\n elif hostname.startswith('g-'):\n res.nameservers = ['10.0.2.10', '10.0.2.11']\n # Default to white DNS servers\n else:\n res.nameservers = ['194.47.252.134', '194.47.252.135']\n\n # Query\n try:\n query = res.query(hostname)\n for answer in query:\n ips.append(answer.address)\n except resolver.NXDOMAIN:\n raise CouldNotResolv\n\n # Return query result\n return ips",
"def domain_args(domains):\n return ' ' + ' '.join(['-d {0}'.format(domain) for domain in domains])",
"def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))",
"def _parse_domain_list(self, *cmd):\n if self._fail_domain_list:\n raise processutils.ProcessExecutionError(exit_code=1)\n elif self._empty_domain_list:\n return '', ''\n else:\n return \"thisserver\\nthatserver\\nanotherserver\\n\", ''",
"def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur",
"def get_domains(filename):\n with open(filename, 'r') as file:\n result = []\n for line in file.readlines():\n domain = line.strip()[1:]\n result.append(domain)\n return result",
"def list_domain_names(self) -> Dict:\n pass",
"def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)",
"def gethostbyname(self, hostname):\r\n\r\n self.log(\"NEW QUERY:\", hostname)\r\n serveriplist = [self.rootip]\r\n\r\n if self.caching:\r\n res, iplist, namelist = self.check_cache(hostname)\r\n if not (iplist == [] and namelist == []):\r\n self.log(\"CACHE HIT\")\r\n if res:\r\n return hostname, namelist, iplist\r\n else:\r\n serveriplist = iplist\r\n\r\n while len(serveriplist) != 0:\r\n res, iplist, namelist = self.resolve_request(serveriplist[0], Name(hostname))\r\n if res:\r\n self.log(\"END OF QUERY:\", hostname)\r\n if self.caching:\r\n self.cache.write_cache_file()\r\n return hostname, namelist, iplist\r\n elif len(iplist) == 0 and not len(namelist) == 0:\r\n newlist = []\r\n for x in namelist:\r\n newhostname, newaliases, newips= self.gethostbyname(str(x))\r\n newlist.extend(newips)\r\n newlist.extend(serveriplist)\r\n serveriplist = newlist\r\n else:\r\n iplist.extend(serveriplist[1:])\r\n serveriplist = iplist\r\n self.log(\"FAILURE\")\r\n return hostname, [], []\r\n return hostname, [], []",
"def hostnames(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"hostnames\")",
"def get_urls(self, queries):\n domains = defaultdict(list)\n for q in queries:\n q = \"\\\"\" + q + \"\\\"\"\n results = self.engine.search(q)\n\n for result in results: \n url = result.url\n domain = self.get_domain(url)\n domains[domain].append(q) \n return domains",
"def getHosts(**options):\n return search.HostSearch.byOptions(**options)",
"def __parse_domains(self, lines):\n domain_list = []\n for line in lines:\n if len(line) == 0:\n continue\n if line[0] == \"!\":\n continue\n if line[0] == \"|\":\n continue\n if line[0] == \"@\":\n continue\n if line[0] == \"[\":\n continue\n if line.find('zh.wikipedia.org') == 0:\n continue\n line = string.replace(line, \"||\", \"\").lstrip(\".\")\n # strip everything from \"/\" to the end\n if line.find(\"/\") != -1:\n line = line[0:line.find(\"/\")]\n if line.find(\"*\") != -1:\n continue\n if line.find(\".\") == -1:\n continue\n # if line in oklist:\n # \tcontinue\n domain_list.append(line)\n\n return domain_list",
"def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])",
"def extract_domains(line):\n return re.findall(r'(?:\\w+\\.)+\\w+', line) or None # or None can be omitted",
"def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply",
"def hostnames(self) -> Sequence[str]:\n return pulumi.get(self, \"hostnames\")",
"def main():\n argument_parser = argparse.ArgumentParser()\n argument_parser.add_argument(\"name\", nargs=\"+\",\n help=\"DNS name(s) to look up\")\n argument_parser.add_argument(\"-v\", \"--verbose\",\n help=\"increase output verbosity\",\n action=\"store_true\")\n program_args = argument_parser.parse_args()\n fuckall = []\n for a_domain_name in program_args.name:\n if a_domain_name not in fuckall:\n print_results(collect_results(a_domain_name))\n fuckall.append(a_domain_name)",
"def get_domains() -> List[str]:\n ret = _call_endpoint(\"v1/domains\")\n # Example response:\n # [{'createdAt': '2016-06-25T03:08:44.000Z',\n # 'domain': 'mydomain.com',\n # 'domainId': 12345678,\n # 'expirationProtected': False,\n # 'expires': '2020-06-25T03:08:44.000Z',\n # 'holdRegistrar': False,\n # 'locked': True,\n # 'nameServers': None,\n # 'privacy': False,\n # 'renewAuto': True,\n # 'renewDeadline': '2020-08-09T03:08:44.000Z',\n # 'renewable': True,\n # 'status': 'ACTIVE',\n # 'transferProtected': False},]\n domains = [d[\"domain\"] for d in ret]\n return domains",
"def list_hosts():\n task_run(\"/bin/hostname -f\",RING_1_dev__allnodes)",
"def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains",
"def getipaddrs(hostname):\n result = socket.getaddrinfo(hostname,None,0,socket.SOCK_STREAM)\n return [x[4][0] for x in result]",
"def GetSlavesForHost():\n hostname = os.getenv('TESTING_SLAVENAME')\n if not hostname:\n hostname = socket.getfqdn().split('.', 1)[0].lower()\n return [s for s in GetAllSlaves() if s.get('hostname') == hostname]",
"def list_computers(self, kwargs):\n resolve = \"resolve\" in kwargs and kwargs[\"resolve\"]\n dns = kwargs.get(\"dns\", \"\")\n dc = kwargs.get(\"dc\", False)\n\n hostnames = []\n if not dc:\n results = self.engine.query(self.engine.COMPUTERS_FILTER(), [\"name\"])\n else:\n results = self.engine.query(self.engine.DC_FILTER(), [\"name\"])\n for result in results:\n if \"name\" in result: # ugly\n computer_name = result[\"name\"]\n else:\n computer_name = result[:-1] # removing trailing $ sign\n\n hostnames.append(f\"{computer_name}.{self.engine.fqdn}\")\n # print only if resolution was not mandated\n if not resolve:\n print(f\"{computer_name}.{self.engine.fqdn}\")\n # do the resolution\n if resolve:\n for computer in utils_resolve(hostnames, dns):\n print(\"{addr:20} {name}\".format(addr=computer[\"address\"], name=computer[\"hostname\"]))",
"def _domain(self):\n return [self.args[0] >= 0, self.args[1] >= 0]",
"def domains(cls):\n return [cls.domain]",
"def get_input_domains():\n df = pandas.read_excel(\"AutoScrapy/files/EBE21 - Top 100 Onlineshops to scrapp.ods\", engine=\"odf\")\n list_of_addresses = df['Domain'].to_list()\n list_of_addresses = [(\"http://\" + address) for address in list_of_addresses]\n print(list_of_addresses)\n return list_of_addresses",
"def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]",
"def hostnames(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"hostnames\")"
] | [
"0.7465468",
"0.6850255",
"0.68328685",
"0.6611784",
"0.6570603",
"0.65482295",
"0.63765883",
"0.637582",
"0.63531",
"0.62842184",
"0.6282919",
"0.6246976",
"0.6245952",
"0.62338334",
"0.6204809",
"0.6178074",
"0.6166124",
"0.6165479",
"0.6160553",
"0.61244196",
"0.6105133",
"0.6091072",
"0.60748345",
"0.60617554",
"0.6060224",
"0.6040878",
"0.6026534",
"0.6022295",
"0.6010357",
"0.5990036"
] | 0.70557356 | 1 |
Add a domain extension (if it does not exist) for a domain name in the transport file | def return_add_transport(hostname, domain_name, domain_extension, username):
#Established the connection
myconnection = ssh_connection(hostname, username)
if myconnection == 1:
return "Connection to %s failed" % hostname
else:
        #Test whether the domain extension already exists in the postfix configuration
commandline="/bin/cat /etc/postfix/transport | grep %s | awk '{print $1}' | grep %s" % (domain_name, domain_extension)
print commandline
stdin, stdout, stderr = myconnection.exec_command(commandline)
if stdout.read():
            #The domain extension already exists, exit
return "This domain extension (%s) already exist for the domain name %s" % (domain_extension, domain_name)
else:
#Command to send to the host
commandline="echo \"%s %s:\" >> /etc/postfix/transport" % (domain_extension, domain_name)
stdin, stdout, stderr = myconnection.exec_command(commandline)
if stderr.read():
is_added=False
else:
is_added=True
if is_added == True:
stdin, stdout, stderr = myconnection.exec_command("sudo /usr/sbin/postmap /etc/postfix/transport")
#Reload conf postfix
stdin, stdout, stderr = myconnection.exec_command("sudo /etc/init.d/postfix restart")
if stderr.read():
return "The domain extension has not been added. Failed. The server postfix has not restarted. Please contact system administrator "
else:
return "This domain extension (%s) has been added for the domain name %s" % (domain_extension, domain_name)
else:
return "The domain extension has not been added. Failed. Please contact system administrator "
# Disconnect from the host
myconnection.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_email_domain(email, domain):\n if not domain:\n return email\n if '@' in email:\n return email\n at_domain = domain if domain.startswith('@') else '@' + domain\n if email.endswith(at_domain):\n return email\n if email.endswith(at_domain + '>'):\n return email\n return email + at_domain",
"def full_domain(self):\n if hasattr(self, 'domain'):\n if isinstance(self.domain, str):\n return self.domain\n if hasattr(self.domain, 'name') and hasattr(self.domain, 'extension'):\n return \"{0}.{1}\".format(self.domain.name, self.domain.extension)",
"def add_domain(user):\n if \"@\" not in user:\n user = user + \"@linaro.org\"\n return user",
"def add_ext_if_needed(fileName, ext):\n ls = fileName.split(\".\")\n if( ( len(ls)==1) or (not (ls[-1] == ext))):\n return fileName + \".\" + ext\n else:\n return fileName",
"def add(self, domain, port=DEFAULT_PORT, alias=[]):\n\n if self.has_domain(domain, port) is not True:\n self._content += \"\\n\"\n self._content += vhost_template.format(DOMAIN_NAME=domain,\n DOMAIN_DIR=self.get_domaindir(domain),\n PORT=port)\n\n new_content = []\n vhost_tag_open = False\n for line in self._content.split(\"\\n\"):\n new_content.append(line)\n\n if self.__get_vhost_line(port) in line:\n vhost_tag_open = True\n\n if \"</VirtualHost>\" in line:\n vhost_tag_open = False\n\n if vhost_tag_open and self.__get_servername_line(domain) in line: # Add alias\n for alias_ in alias:\n if alias_ not in self._content:\n new_content.append(vhost_alias_template.format(ALIAS=alias_))\n\n self._content = \"\\n\".join(new_content)\n self._write_file = True",
"def fixExt(ext):\n if not ext.startswith(\".\"):\n return \".{}\".format(ext)\n return ext",
"def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )",
"def add_ext(self, ext):\n assert m2.x509_type_check(self.x509), \"'x509' type error\"\n return m2.x509_add_ext(self.x509, ext.x509_ext, -1)",
"def addExtension(self, ext):\n self.files[ext] = SourceFile(self.path, self.name + ext)",
"def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)",
"def addSubdomain(self, name):\n reply = self.rpc.addSubdomain(self.username,\n self.password,\n self.domain,\n name)\n if reply != \"OK\":\n raise Exception(\"RPC returned error: \" + reply)",
"def append_before_ext(fullfile: Text, thing_to_append: Text):\n base, ext = os.path.splitext(fullfile)\n return '{}{}{}'.format(base, thing_to_append, ext)",
"def _add_file_extension(file_name: str, extension: str) -> str:\n fname = file_name.strip()\n slice_offset = -1 * (len(extension) + 1)\n if fname[slice_offset:] != f\".{extension}\":\n fname = fname + f\".{extension}\"\n return fname",
"def extFile(package, ext):\n\treturn 'debian/'+package+'.'+ext",
"def domain(self, domain):",
"def localizeForHostName(filename): \n hostname = socket.gethostname()\n if hostname in filename:\n updated_filename = filename.replace(hostname, '')\n return updated_filename.strip('-')\n return filename",
"def get_domain():\n domain=\"\"\n for item in re.split(\"\\.\", env.host)[1:]:\n domain = domain + \".\" + item\n return domain.lstrip(\".\")",
"def create_domain_name(self, name):\n return (\"%s.%s.%s\" % (name, \"net\", self.domain)).lower()",
"def _ensure_fqdn(self, name):\n if name[-1:] != \".\":\n return \"%s.\" % name\n else:\n return name",
"def replace_domain(address, old_domain, new_domain):\n old_domain_pattern = r'' + old_domain + '$'\n address = re.sub(old_domain_pattern, new_domain, address)\n return address",
"def with_extension(self, file_name: str) -> str:\n return f\"{file_name}.{self.extension_file()}\"",
"def __add_filename_suffix(filename, suffix):\n return \"{}{}.pdf\".format(filename.split(\".pdf\", 1)[0], suffix)",
"def set_extension(self, extension):\r\n pieces = self.path.split('/')\r\n dirs = pieces[:-1]\r\n base = pieces[-1].split('.')\r\n base = '.'.join(base[:-1] if len(base) > 1 else base)\r\n if extension:\r\n base += '.' + extension\r\n dirs.append(base)\r\n self.path = '/'.join(dirs)\r\n return self",
"def test_add_txt_record_fail_to_find_domain(self):\n self._register_response(\n \"/1/product?service_name=domain&customer_name={domain}\".format(domain=DOMAIN),\n data=[],\n )\n with self.assertRaises(PluginError):\n self.client.add_txt_record(\n DOMAIN, self.record_name, self.record_content, self.record_ttl\n )",
"def setup_domain(domain):\n bucket = BUCKET_MANAGER.get_bucket(domain)\n\n zone = DOMAIN_MANAGER.find_hosted_zone(domain) \\\n or DOMAIN_MANAGER.create_hosted_zone(domain)\n\n endpoint = util.get_endpoint(BUCKET_MANAGER.get_region_name(bucket))\n a_record = DOMAIN_MANAGER.create_s3_domain_record(zone, domain, endpoint)\n print(\"Domain configure: http://{}\".format(domain))\n print(\"A record created: {}\".format(a_record))",
"def writeDomainFile():\n writeTemplate(localTemplate)",
"def create_internal_elb_dns_name ( base_name, name ) :\n return 'lb.' + create_dns_name( base_name, name )",
"async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n allowedDomains.append(domain)\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")",
"def resolve_domain(host: str) -> str:\n parts = host.split('.')[-2:]\n return ''.join(parts)",
"def extension_name(ext):\n return \"script_extensions::%s\" % \"_\".join([e.upper() for e in ext])"
] | [
"0.6212772",
"0.59794325",
"0.5933907",
"0.59092826",
"0.58966273",
"0.5798986",
"0.5794832",
"0.5772177",
"0.57682776",
"0.57138413",
"0.5669241",
"0.5665553",
"0.5661082",
"0.5605815",
"0.5545321",
"0.55394465",
"0.55169743",
"0.54749495",
"0.54575026",
"0.54471385",
"0.542899",
"0.5354316",
"0.5353245",
"0.53466713",
"0.5323582",
"0.5317998",
"0.52798295",
"0.5277961",
"0.5275213",
"0.52731854"
] | 0.6211821 | 1 |
Returns the total number of subscribers for a given subreddit. | def number_of_subscribers(subreddit):
# Set the Default URL strings
base_url = 'https://www.reddit.com'
api_uri = '{base}/r/{subreddit}/about.json'.format(base=base_url,
subreddit=subreddit)
# Set an User-Agent
user_agent = {'User-Agent': 'Python/requests'}
# Get the Response of the Reddit API
res = requests.get(api_uri, headers=user_agent,
allow_redirects=False)
# Checks if the subreddit is invalid
if res.status_code in [302, 404]:
return 0
# Returns the total subscribers of the subreddit
return res.json().get('data').get('subscribers') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}.json\".format(subreddit)\n r = requests.get(url, headers={'User-agent': 'shoji'},\n allow_redirects=False)\n data = r.json()\n if not r.status_code == 200:\n return 0\n try:\n sub = data.get(\"data\")\n children = sub.get(\"children\")\n subreddit = children[0].get(\"data\")\n subscriber_count = subreddit.get(\"subreddit_subscribers\")\n except Exception as e:\n print(\"Something went wrong\\n {}\".format(e))\n return 0\n\n return subscriber_count",
"def number_of_subscribers(subreddit):\n url_rsubs = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n headers = {'User-Agent': 'Python3'}\n response = requests.get(url_rsubs, headers=headers,\n allow_redirects=False)\n if str(response) != \"<Response [200]>\":\n return 0\n r_json = response.json()\n subs_count = r_json.get('data').get('subscribers')\n return subs_count",
"def number_of_subscribers(subreddit):\n import requests\n headers = {'User-Agent': 'Godfather'}\n about = requests.get(\n 'https://www.reddit.com/r/{}/about.json'.format(\n subreddit), headers=headers).json()\n try:\n subscribers = about.get('data').get('subscribers')\n if subscribers is None:\n raise TypeError\n return subscribers\n except:\n return 0",
"def number_of_subscribers(subreddit):\n\n import requests\n\n resInf = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit),\n headers={\"User-Agent\": \"My-User-Agent\"},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n\n return resInf.json().get(\"data\").get(\"subscribers\")",
"def number_of_subscribers(subreddit):\n if subreddit is None or type(subreddit) is not str:\n return 0\n BASE_URL = 'http://www.reddit.com/r/{}/about.json'\n head = {'User-Agent': 'Mozilla/5.0'}\n r = requests.get(BASE_URL.format(subreddit), headers=head)\n return r.json().get('data', {}).get('subscribers', 0)",
"def number_of_subscribers(subreddit):\n URL = 'https://api.reddit.com/r/{}/about'.format(subreddit)\n header = {'User-Agent': 'Custom-User'}\n\n resp = requests.get(URL, headers=header).json()\n try:\n return resp['data']['subscribers']\n except Exception:\n return 0",
"def number_of_subscribers(subreddit):\n response = requests.get('https://www.reddit.com/r/{}/about.json'\n .format(subreddit),\n headers={'User-Agent': 'Camilo@holberton'},\n allow_redirects=False)\n if response.status_code == 200:\n response = response.json()\n data = response.get('data')\n subs_count = data.get('subscribers')\n if data and subs_count:\n return subs_count\n return 0",
"def number_of_subscribers(subreddit):\n header = {'User-Agent': 'Chrome/90.0.4430.212 Safari/537.36'}\n req = requests.get('https://www.reddit.com/r/{}/about.json'\n .format(subreddit), allow_redirects=False,\n headers=header)\n if req.status_code == 200:\n subscribers = req.json().get('data').get('subscribers')\n return subscribers\n else:\n return 0",
"def number_of_subscribers(subreddit):\n link = 'http://www.reddit.com/r/{}/about.json'.format(subreddit)\n red = requests.get(link, headers={'User-Agent': 'tope628'}).json()\n try:\n subs = red.get('data').get('subscribers')\n except:\n return 0\n if red is None:\n return 0\n return subs",
"def number_of_subscribers(subreddit):\n r = requests.get('https://api.reddit.com/r/{}/about.json'\n .format(subreddit),\n headers={'user-agent': 'ianscustomthing'},\n allow_redirects=False)\n rj = r.json()\n if rj.get('message') == 'Not Found':\n return 0\n s = rj.get('data').get('subscribers')\n return s",
"def number_of_subscribers(subreddit):\n url = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit), headers={\"User-Agent\": \"kalkidan\"})\n if url.status_code == 200:\n return url.json().get(\"data\").get(\"subscribers\")\n else:\n return 0",
"def number_of_subscribers(subreddit):\n url = \"https://api.reddit.com/r/{}/about\".format(subreddit)\n header = {'User-Agent': 'CustomClient/1.0'}\n request = requests.get(url, headers=header, allow_redirects=False)\n\n if request.status_code != 200:\n return 0\n jreq = request.json()\n\n if 'data' in jreq:\n return jreq.get(\"data\").get(\"subscribers\")\n else:\n return 0",
"def number_of_subscribers(subreddit):\n\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n\n headers = {'User-Agent': 'My User Agent 1.0'}\n\n request = requests.get(url, headers=headers)\n req = request.json()\n\n if request.status_code == 404:\n return 0\n\n subs = req.get('data').get('subscribers')\n return subs",
"def number_of_subscribers(subreddit):\n header = {\"User-agent\": \"darth\"}\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n response = (requests.get(url, headers=header))\n if response.status_code != 200:\n return 0\n return response.json().get('data').get('subscribers')",
"def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\"\n headers = {\n 'User-Agent': 'My User Agent 1.0',\n 'From': '[email protected]'\n }\n r_subs = requests.get(url.format(subreddit), headers=headers)\n if r_subs.status_code == 200:\n data = r_subs.json()['data']\n subscribers = data.get('subscribers')\n if subscribers is not None:\n return subscribers\n return 0",
"def number_of_subscribers(subreddit):\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n header = {\"Content-Type\": \"application/json\",\n \"User-Agent\": \"Mozilla/5.0\"}\n request = requests.get(\n url,\n headers=header,\n allow_redirects=False)\n if request.status_code >= 300:\n return 0\n return json.loads(request.content.decode(\"utf-8\"))[\"data\"][\"subscribers\"]",
"def number_of_subscribers(subreddit):\n url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)\n response = requests.get(url,\n allow_redirects=False,\n headers={'User-Agent': 'MyChromeBook'})\n if response:\n suscribers_number = response.json().get('data').get('subscribers')\n return suscribers_number\n else:\n return 0",
"def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {'user-agent': 'request'}\n response = requests.get(url, headers=headers, allow_redirects=False)\n if str(response) != '<Response [200]>':\n return 0\n response_json = response.json()\n subs = response_json.get('data').get('subscribers')\n return subs",
"def number_of_subscribers(subreddit):\n url = 'https://www.reddit.com/r/{}/about.json'\n headers = {'user-agent': 'X-Modhash'}\n url_format = requests.get(url.format(subreddit), headers=headers).json()\n try:\n name = url_format['data']['subscribers']\n return name\n except:\n return 0",
"def number_of_subscribers(subreddit):\n h = {'user-agent': 'GEEK1050'}\n link = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n req = requests.get(link, headers=h)\n\n req_data = req.json().get(\"data\").get(\"subscribers\")\n for element in req_data['children']:\n print(element['children']['title'])",
"def number_of_subscribers(subreddit):\n\n url = \"https://www.reddit.com/r/{}/about.json\".format(subreddit)\n headers = {\"User-Agent\": \"my-integration/1.2.3\"}\n\n response = get(url=url, headers=headers)\n\n if response.status_code == 200:\n # print(response.json())\n\n response_json = response.json()\n data = response_json.get('data')\n subscribers = data.get(\"subscribers\")\n\n return subscribers\n\n return 0",
"def get_subscriber_count(self, response):\n return response.css('.yt-subscriber-count')\\\n .extract_first(default='')",
"def subject_member_count(context, subject_id):\n session = get_session()\n\n if not subject_id:\n msg = _(\"Subject id is required.\")\n raise exception.Invalid(msg)\n\n query = session.query(models.SubjectMember)\n query = query.filter_by(deleted=False)\n query = query.filter(models.SubjectMember.subject_id == str(subject_id))\n\n return query.count()",
"def count_subscribers(self, topic_name):\n return self._count_publishers_or_subscribers(topic_name, _rclpy.rclpy_count_subscribers)",
"def count_subscribers(self):\n return self.request(\"count:Contact\", [ None ])",
"def count(self, sub, start=0, end=None):\n return count(self, sub, start, end)",
"def get_count(cls):\n total = 0\n for counter in SimpleCounterShard.objects.all():\n total += counter.count\n return total",
"def countSubDomain(subdomain):\r\n if not subdomain:\r\n return 0\r\n else:\r\n return len(subdomain.split('.'))",
"def count(self, sub) -> int:\n pass",
"def _count_subscriptions(self):\n for partner in self:\n subscriptions = self.env['subscription.subscription']\n count = subscriptions.sudo().search_count([('partner_id', '=', partner.id)])\n for child in partner.child_ids:\n count += subscriptions.sudo().search_count([('partner_id', '=', child.id)])\n partner.subscriptions_count = count"
] | [
"0.88452405",
"0.88001615",
"0.8758702",
"0.87437683",
"0.87410414",
"0.8737465",
"0.8723826",
"0.87215465",
"0.86755574",
"0.86608243",
"0.86076564",
"0.86025274",
"0.8579033",
"0.8564103",
"0.85612786",
"0.8550934",
"0.8545987",
"0.8475834",
"0.8413587",
"0.82184273",
"0.8203435",
"0.61095864",
"0.59731436",
"0.59585524",
"0.5905059",
"0.57802516",
"0.57707477",
"0.57604676",
"0.5708097",
"0.5691233"
] | 0.9056629 | 0 |
Filter out UN intervention nodes except the ones in the include list. | def filter_to_hume_interventions(stmts):
include = ['provision_of_free_food_distribution',
'provision_of_cash_transfer']
filter_out = [False] * len(stmts)
print('Filtering %d stmts' % len(stmts))
for idx, stmt in enumerate(stmts):
for agent in stmt.agent_list():
if 'UN' in agent.db_refs:
ug = agent.db_refs['UN'][0][0]
if ug.startswith('UN/interventions'):
if not ug.endswith(include[0]) and not \
ug.endswith(include[1]):
filter_out[idx] = True
stmts = [s for s, f in zip(stmts, filter_out) if not f]
print('Filtered to %d stmts' % len(stmts))
return stmts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def exclude_nodes(self, nodes):",
"def filter(self, include=None, exclude=None):\n if not include:\n include = ()\n if not exclude:\n exclude = ()\n\n return sorted([x for x in self.data.values() if (not include or any(isinstance(x, t) for t in include)) and\n (not exclude or all(not isinstance(x, t) for t in exclude))])",
"def clear_includepatterns(self):\n self._excludepatterns = []",
"def skip_tables(include=('*',), exclude=()):\n check = Filter()\n check.include(list(include))\n check.exclude(list(exclude))\n def _skip_handler(dispatcher, node):\n \"\"\"Process a node and skip based on table filter\"\"\"\n try:\n check('%s.%s' % (dispatcher.database, dispatcher.table))\n except FilteredItem:\n raise SkipNode()\n return node\n return _skip_handler",
"def reset(self):\n super().reset()\n whitelist = []\n for parent in self.cls.mro():\n whitelist.extend(getattr(parent, 'tab_whitelist', []))\n\n if getattr(parent, \"tab_component_names\", False):\n for cpt_name in parent.component_names:\n if getattr(parent, cpt_name).kind != Kind.omitted:\n whitelist.append(cpt_name)\n\n self._includes = set(whitelist)",
"def find_conclusions(self):\n conc = []\n self.rule.right.visit_find_premises(conc)\n self.conclusions = conc",
"def exclude_list(self):\n pass",
"def skip_engines(include=('*',), exclude=()):\n check = Filter()\n check.include([pat.lower() for pat in include])\n check.exclude([pat.lower() for pat in exclude])\n\n def _skip_handler(dispatcher, node):\n \"\"\"Process node and skip based on engine filters\"\"\"\n for token in node.tokens:\n if token.symbol is 'CreateTable':\n engine, = token.extract('^[)] ENGINE=([a-zA-Z]+)')\n elif token.symbol is 'CreateTmpView':\n engine = 'view'\n else:\n continue\n try:\n check(engine.lower())\n except FilteredItem:\n name = '%s.%s' % (dispatcher.database, dispatcher.table)\n # add a check for the dm - should be a one shot check\n if engine == 'view':\n dispatcher.register('view-ddl', skip_tables(exclude=[name]))\n else:\n dispatcher.register('table-dml', \n skip_tables(exclude=[name]))\n raise SkipNode()\n return node\n return _skip_handler",
"def filter_selected_nodes(tree) -> list:\n return [n for n in tree.nodes if n.select and n.bl_idname not in {'LNGroupInputsNode', 'LNGroupOutputsNode'}]",
"def skip_databases(include=('*',), exclude=()):\n check = Filter()\n check.include(list(include))\n check.exclude(list(exclude))\n def _skip_handler(dispatcher, node):\n \"\"\"Process a node and skip based on database filter\"\"\"\n try:\n check(dispatcher.database)\n except FilteredItem:\n raise SkipNode()\n return node\n return _skip_handler",
"def generate_exclusions(proteins):\n pass",
"def exclude_filter(excl_filter, paths):\n misses = set()\n for p in paths:\n if re.search(excl_filter, p) is None:\n misses.add(p)\n\n return misses",
"def apply_exclude(self, last_node, exclude):\n _id = last_node._id\n query = 'START s=node(%s)\\n' % _id + \\\n 'MATCH (s)-[:%s]->(m)\\n' % (RELATION_C2C) + \\\n 'RETURN (m)'\n records = neo4j.CypherQuery(self.db_handler, query).execute()\n _nodes = [record.values[0] for record in records.data]\n\n if not exclude:\n return _nodes\n\n nodes = []\n for node in _nodes:\n name = Name()\n name.set(node.get_properties()[PROPERTY_COMPONENT])\n comp = name.get(0)\n if not exclude.matches(comp):\n nodes.append(node)\n\n return nodes",
"def suppress_analyze(more_exclusions=None):\n return api.override_step_data(\n 'read filter exclusion spec',\n api.json.output({\n 'base': {\n 'exclusions': ['f.*'] + (more_exclusions or []),\n },\n 'chromium': {\n 'exclusions': [],\n },\n })\n )",
"def exclude_dirs(self, matches: Iterable[str]) -> List[str]:\n filters = [(\"ifmodule\", self.modules.keys()), (\"ifdefine\", self.variables)]\n\n valid_matches = []\n\n for match in matches:\n for filter_ in filters:\n if not self._pass_filter(match, filter_):\n break\n else:\n valid_matches.append(match)\n return valid_matches",
"def block_filter(include_tags, exclude_tags, block_tags, diag):\r\n diag.setdefault('tags_used', dict())\r\n diag.setdefault('tags_not_used', dict())\r\n include = None\r\n exclude = None\r\n\r\n # 1. Exclusion has priority over inclusion.\r\n # 2. Exclusion is the default rule\r\n for tag in block_tags:\r\n if tag in exclude_tags:\r\n exclude = True\r\n diag['tags_not_used'].pop(tag, None)\r\n diag['tags_used'].setdefault(tag, 1)\r\n elif tag in include_tags:\r\n include = True\r\n diag['tags_not_used'].pop(tag, None)\r\n diag['tags_used'].setdefault(tag, 1)\r\n elif tag not in diag['tags_used']:\r\n diag['tags_not_used'].setdefault(tag, 1)\r\n\r\n if exclude is not None:\r\n return not exclude\r\n return include or False",
"def remove_unused_influence(skin_node):\n influence_list = skin_node.getInfluence()\n weight_inf_list = skin_node.getWeightedInfluence()\n # Set skinCluster to HasNoEffect so it won't process after each removal\n skin_node.nodeState.set(1)\n zero_weight_inf_list = list(set(influence_list) - set(weight_inf_list))\n skin_node.removeInfluence(zero_weight_inf_list)\n skin_node.nodeState.set(0)\n return zero_weight_inf_list",
"def filter_selection_set(info: GraphQLResolveInfo):\n from graphql import Location\n from .pyutils import unfreeze\n\n excluded_field_nodes = []\n\n def _should_include(field_node: FieldNode):\n if not field_node.name:\n # Unknown field_node type\n return True\n if field_node.name.value == \"subscription_id\":\n return True\n\n # Location is a highly nested AST type\n excluded_field_nodes.append(unfreeze(field_node, ignore_types=[Location]))\n return False\n\n info.field_nodes[0].selection_set.selections = [\n x for x in info.field_nodes[0].selection_set.selections if _should_include(x)]\n\n return excluded_field_nodes",
"def filter_ignored(self, node):\n return node.getText() not in self.ignored_terms",
"def excluded(cls):\n return []",
"def Ignore(self, relative_file):\n return Whitelisted(relative_file)",
"def removeModulesNotOnAPathExcluding( process, keepList=() ):\n allMods=set((x for x in process.producers_().iterkeys()))\n allMods.update((x for x in process.filters_().iterkeys()))\n allMods.update((x for x in process.analyzers_().iterkeys()))\n allMods.update((x for x in process.outputModules_().iterkeys()))\n \n modulesOnPaths = set()\n for p in process.paths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames())) \n for p in process.endpaths_():\n modulesOnPaths.update( (x for x in getattr(process,p).moduleNames()))\n\n notOnPaths = allMods.difference(modulesOnPaths)\n \n keepModuleNames = set( (x.label_() for x in keepList) )\n \n getRidOf = notOnPaths.difference(keepModuleNames)\n \n for n in getRidOf:\n delattr(process,n)",
"def subset(\n self, \n include: Union[Sequence[Any], Any] = None, \n exclude: Union[Sequence[Any], Any] = None) -> Bunch:\n pass",
"def exclude_nodes(self, nodes):\n raise self.Error('qadapter failed to exclude nodes, not implemented yet in sge')",
"def exclude(self, *args, **kwargs):",
"def filter_non_one(self):\n G = nx.Graph()\n\n for u, v in self.edges:\n if self.graph[u][v][\"weight\"] == 1:\n # Add the nodes first in case they have data\n G.add_node(u, **self.nodes(data=True)[u])\n G.add_node(v, **self.nodes(data=True)[v])\n G.add_edge(u, v, **self.graph[u][v])\n\n self.graph = G",
"def exclude():\n data = list(request.files.values())[0].file.read() if len(request.files) else request.body.read()\n return excludeView(data, request.params)",
"def ignores(self):\n pass # make ignore_tags unaccessible",
"def getIncludes(self):\n return self.includes[:]",
"def _include_exclude_list(include, exclude):\n keys = []\n if include:\n for item in include:\n keys.append((item, 'included'))\n if exclude:\n for item in exclude:\n keys.append((item, 'excluded'))\n\n return keys"
] | [
"0.67418694",
"0.6117754",
"0.58795565",
"0.573762",
"0.56526923",
"0.55822784",
"0.55800045",
"0.5576209",
"0.55718714",
"0.5571381",
"0.5549623",
"0.5520182",
"0.55010605",
"0.54866487",
"0.54247105",
"0.5379925",
"0.5378728",
"0.53578",
"0.5351679",
"0.53441405",
"0.53338754",
"0.5325685",
"0.5323251",
"0.53174734",
"0.5306424",
"0.52951777",
"0.52625805",
"0.5244676",
"0.5244149",
"0.52385896"
] | 0.6273386 | 1 |
Returns 1 if X has won the game, -1 if O has won, 0 otherwise. | def utility(board):
winning_player = winner(board)
# Did X win?
if winning_player == X:
return 1
# Did O win?
if winning_player == O:
return -1
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_game_won(self) -> int:\n\n b = self.board\n for c1, c2, c3, c4 in _WINDOWS:\n if b[c1] and (b[c1] == b[c2] == b[c3] == b[c4]):\n print(\"win\", c1, c2, c3, c4)\n return b[c1]",
"def player(board):\n x_counter = 0\n o_counter = 0\n\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_counter += 1\n elif board[i][j] == O:\n o_counter += 1\n \n # print(f\"x: {x_counter}\")\n # print(f\"o: {o_counter}\")\n \n if x_counter > o_counter:\n return O\n else:\n return X",
"def player(board):\n x_turn = 0\n o_turn = 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_turn += 1\n elif board[i][j] == O:\n o_turn += 1\n if x_turn == 0 and o_turn == 0:\n return X\n elif x_turn > o_turn:\n return O\n elif x_turn == o_turn:\n return X\n return X",
"def utility(board):\n won = winner(board)\n\n if won == X:\n return 1\n elif won == O:\n return -1\n else:\n return 0",
"def check_for_game_won(self):\n all_moscuvites_captured = True\n king_captured = True\n king_escaped = True\n for piece in self.game_pieces:\n if piece.player == 2:\n all_moscuvites_captured = False\n elif piece.player == 3:\n king_captured = False\n king_coords = (piece.x,piece.y)\n escape_coords = [(0, 0), (0, 8),\n (8, 0), (8, 8)]\n if king_coords not in escape_coords:\n king_escaped = False\n if king_captured:\n return 2\n elif king_escaped or all_moscuvites_captured:\n return 1\n else:\n return 0",
"def player(board):\n\tif board == initial_state():\n\t\treturn X\n\n\tnumX=0\n\tnumO=0\n\n\tfor i in range(len(board)):\n\t\tfor j in range(len(board)):\n\t\t\tif(board[i][j]==X):\n\t\t\t\tnumX+=1\n\t\t\telif(board[i][j]==O):\n\t\t\t\tnumO+=1\n\n\tif numX > numO:\n\t\treturn O\n\telse:\n\t\treturn X",
"def utility(board):\n win = winner(board)\n if win == X: return 1\n elif win == O: return - 1\n else: return 0",
"def utility(board):\n winning_player = winner(board)\n\n if winning_player is X:\n return 1\n if winning_player is O:\n return -1\n \n return 0",
"def utility(board):\n winners = winner(board)\n if (X == winners):\n return 1\n elif (O == winners):\n return -1\n return 0",
"def player(board):\n xcount, ocount = 0, 0\n for row in board:\n xcount += row.count(X)\n ocount += row.count(O)\n if xcount > ocount:\n return O\n elif xcount == 0 and ocount == 0:\n return X\n elif xcount == ocount:\n return X",
"def check_game(self):\n gameOver = None\n if self.turn > 4:\n gameOver = self.check_x_won()\n if gameOver is True:\n self.game_x_won()\n return\n\n gameOver = None\n if self.turn > 5:\n gameOver = self.check_o_won()\n if gameOver is True:\n self.game_o_won()\n return\n\n if self.turn >= 9:\n self.game_tie()\n return",
"def check_over(self):\n if self.board.has_winner() == 1:\n return 1\n elif self.board.has_winner() == 2:\n return 2\n elif self.board.check_cats_game():\n return 0\n else:\n return -1",
"def winner(self):\n\n if self.game_ended():\n return self.winning()\n else:\n return 0",
"def player(board):\n if board == initial_state():\n return X\n \n # if board has lesser or eual X(s) than O(s)\n if sum([row.count(X) for row in board]) <= sum([row.count(O) for row in board]):\n return X\n else:\n return O",
"def player(board):\n numbofO = sum(row.count(\"O\") for row in board)\n numbofX = sum(row.count(\"X\") for row in board)\n if numbofX > numbofO:\n return O\n else:\n return X",
"def player(self, board):\n xCounter = 0\n oCounter = 0\n\n for i in range(0, len(board)):\n for j in range(0, len(board[0])):\n if board[i][j] == self.X:\n xCounter += 1\n elif board[i][j] == self.O:\n oCounter += 1\n \n return self.O if xCounter > oCounter else self.X",
"def utility(board):\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0",
"def utility(board):\n final = winner(board)\n if final == X:\n return 1\n elif final == O:\n return -1\n else:\n return 0",
"def who_won(self, board):\n winners = set()\n for x,y,z in self.wins:\n if board[x] == board[y] and board[y] == board[z]:\n winners.add(board[x])\n if 1 in winners and 2 in winners:\n return 3\n if 1 in winners:\n return 1\n if 2 in winners:\n return 2\n return 0",
"def player(board):\n # Check if board is in initial_state\n if board == initial_state():\n return X \n else:\n # Keep track of how many moves each player took\n x_moves = 0\n o_moves = 0\n # Loop over board list and count how many XO moves\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n x_moves += 1\n elif board[i][j] == O:\n o_moves += 1\n # If X has more moves its O's turn otherwise its X's turn\n return O if x_moves > o_moves else X",
"def player(board):\n num_x = sum([list.count(X) for list in board])\n num_o = sum([list.count(O) for list in board])\n if num_x == num_o:\n return X\n else:\n return O",
"def is_game_over(self):\r\n\r\n if self.winner != 0:\r\n return True\r\n\r\n return False",
"def utility(board):\n status = winner(board)\n if status == X:\n return 1\n elif status == O:\n return -1\n else:\n return 0",
"def utility(board) -> int:\n winner_player = winner(board)\n if winner_player == X:\n return 1\n elif winner_player == O:\n return -1\n else:\n return 0",
"def player(board):\n if terminal(board) == True:\n return None \n countO, countX = 0, 0\n for i in range(3):\n for j in range(3):\n if board[i][j] == X:\n countX += 1\n elif board[i][j] == O:\n countO += 1\n if countO >= countX:\n return X\n else:\n return O",
"def is_game_won(self):\n return True",
"def player(board):\n\n count_x = 0\n count_o = 0\n for i in board:\n for j in i:\n if (j == X):\n count_x += 1\n elif (j == O):\n count_o += 1\n if (count_x <= count_o):\n return X\n else:\n return O",
"def has_won(board, player):\r\n return False",
"def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0",
"def utility(board):\n if winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1\n else:\n return 0"
] | [
"0.7282075",
"0.72561866",
"0.72313863",
"0.7200964",
"0.7191572",
"0.7168141",
"0.7164547",
"0.71292514",
"0.7123542",
"0.71037567",
"0.7097756",
"0.7078315",
"0.70739603",
"0.7033477",
"0.7021798",
"0.701796",
"0.70142305",
"0.70036036",
"0.69928795",
"0.6981686",
"0.694844",
"0.6942827",
"0.69174474",
"0.6913314",
"0.6903751",
"0.6896415",
"0.6880004",
"0.68722606",
"0.6855937",
"0.6855937"
] | 0.73503214 | 0 |
Find words in word_set that have length greater than n. | def words_len_greater_than(n):
return {w for w in word_set if len(w) > n} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_long_words(words, n):\n words_longer_than_n = []\n for word in words:\n if len(word) > n:\n words_longer_than_n.append(word)\n\n return words_longer_than_n",
"def filter_long_words(list,n):\n numberlist=[]#set up a new list\n for i in range(0,len(list)):\n if len(list[i]) > n:#pick up the word that is longer than n\n numberlist.append(list[i])#count the length of each word\n else:\n continue\n return numberlist",
"def get_word_list_with_freq_at_least_n(text, n = 2):\n word_freq_dists = get_freq_dist_from_corpus(text)\n selected_word_list = [word for word in word_freq_dists.keys() if word_freq_dists.get(word) >= n]\n return selected_word_list",
"def longest_words(self, n=10):\n return sorted(set(self.text), key=len, reverse=True)[:n]",
"def hard_words(a_list):\n\n return [word for word in a_list if len(word) > 7]",
"def _filter_len(self):\n if max([len(x) for x in self._word_set]) == len(self._to_word):\n return self._word_set\n else:\n new_words = set()\n for word in self._word_set:\n if len(self._from_word) == len(word):\n new_words.add(word)\n\n return new_words",
"def find_long_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 15])",
"def longwords_Fil(strings):\n # write your code here\n return list(filter(lambda x:len(x)>4,strings))",
"def long_words(words):\n words_longer_than_four = []\n for word in words:\n if len(word) > 4:\n words_longer_than_four.append(word)\n return words_longer_than_four",
"def important_words(words):\n return [x for x in words if len(x) >= 3]",
"def most_words(self, n):\n return big_tags",
"def clean_words(words):\n return [clean_word(w) for w in words if len(clean_word(w)) >= 3]",
"def trim_words(word_set, data_sets, num):\n word_dict = {}\n for data in data_sets:\n for word_list, _ in data:\n for word in word_list:\n if word not in word_set:\n continue\n if word in word_dict:\n word_dict[word] += 1\n else:\n word_dict[word] = 1\n sorted_list = sorted(word_dict.keys(), key=lambda w: word_dict[w], reverse=True)\n\n result_set = set()\n result_set.update(sorted_list[:num])\n return result_set",
"def filter_by_max_length(self, nchars):\n\n self.docs = self._filter_by_length(nchars, 'max')\n return self",
"def small_word_filter(words, min_=1):\n new_words = []\n for w in words:\n if(len(w) > min_):\n new_words += [w]\n return new_words",
"def find_long_and_common_words(tokens):\n\n return sorted([word for word in set(tokens) if len(word) > 7 and FreqDist(tokens)[word] > 7])",
"def longwords(strings):\n # write your code here\n shorter_lst=[]\n for i in strings:\n if len(i)>4:\n shorter_lst.append(i)\n\n return shorter_lst",
"def most_words_and_longest(self, n):\n return big_tags",
"def find_long_words():\n f = open('session09/words.txt')\n \n for line in f:\n word = line.strip()\n if len(word) > 20:\n print(word, len(word))",
"def _filter_by_length(self, nchars, predicate):\n if nchars < 0:\n raise ValueError(\"`nchars` must be positive\")\n assert predicate in ('min', 'max')\n\n doc_lengths = self.doc_lengths\n\n filtered_docs = {}\n for dl, dt in self.docs.items():\n len_doc = doc_lengths[dl]\n if (predicate == 'min' and len_doc >= nchars) or (predicate == 'max' and len_doc <= nchars):\n filtered_docs[dl] = dt\n\n return filtered_docs",
"def filter_top_n_words(topic_words_dict, n, word_list):\n # First remove any redundant words in word_list\n words = set(word_list)\n # Now get the intersection with words, that appear as keys in the dict\n topic_words_intersect = set(topic_words_dict.keys()).intersection(words)\n # Now get the words with their scores, sort descending for the scores\n # and return the first n words:\n score_wordlist = [(x, topic_words_dict[x]) for x in topic_words_intersect]\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]",
"def filter_by_min_length(self, nchars):\n \n self.docs = self._filter_by_length(nchars, 'min')\n return self",
"def get_words_with_nplus_frequency(tokenized_sentences, count_threshold):\r\n\r\n closed_vocab = []\r\n \r\n\r\n word_counts = count_words(tokenized_sentences)\r\n \r\n\r\n for word, cnt in word_counts.items(): # complete this line\r\n \r\n\r\n if cnt >= count_threshold:\r\n \r\n # append the word to the list\r\n closed_vocab.append(word)\r\n \r\n return closed_vocab",
"def longwords_Li_Comp(strings):\n # write your code here\n return [string for string in strings if len(string)>4]",
"def calculate_most_frequent_n_words(self, input_string: str, n: int) \\\n -> List[WordFrequencyStructure]:\n results = \\\n self._typed_sorted_result(input_string=input_string)\n\n return results[:n]",
"def remove_shorts(word_list, minimum_length):\n\tworking_word_list = []\n\tfor word in word_list:\n\t\tif len(word) >= minimum_length:\n\t\t\tworking_word_list.append(word)\n\treturn working_word_list",
"def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]",
"def longwords_Li_Comp(strings):\n return [string for string in strings if len(string)>4 ]",
"def find_words(word_length: int, fhs: List[TextIO]) -> List[str]:\n\n words: List[str] = []\n clean = partial(re.sub, '[^a-zA-Z]', '')\n accept = lambda word: len(word) == word_length\n\n for fh in fhs:\n for line in fh:\n words.extend(filter(accept, map(clean, line.split())))\n\n return words",
"def length(self, length):\n result = Words()\n result.words = (word for word in self.words if len(word) == length)\n return result"
] | [
"0.78197736",
"0.7227532",
"0.69592094",
"0.69480854",
"0.6835576",
"0.67826504",
"0.6761459",
"0.6617565",
"0.65964276",
"0.63648957",
"0.62678367",
"0.6237345",
"0.61757463",
"0.6142094",
"0.61131424",
"0.60555744",
"0.603735",
"0.60353917",
"0.6034472",
"0.6024036",
"0.6015312",
"0.60017717",
"0.5982492",
"0.593361",
"0.5927323",
"0.59260416",
"0.5923936",
"0.59099317",
"0.5896541",
"0.58856535"
] | 0.8484499 | 0 |
Finds all words in word_set that do not have the letter l. | def words_without_letter(l):
return {w for w in word_set if has_no_letter(w, l)} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def words_uses_only(letters):\n\treturn {w for w in word_set if uses_only(w, letters)}",
"def known(words):\r\n return set(w for w in words if w in WORDS)",
"def exclude_words(self, words):\n idcs = []\n for i in range(len(self)):\n if not self.transcript(i) in words:\n idcs.append(i)\n subset = self.sub_set(idcs)\n return subset",
"def missingWords2(s, t):\n # missingWords = []\n \n new_s = s.split()\n # print(new_s)\n\n new_t = t.split()\n # print(new_t)\n\n missing = []\n\n while len(new_t) > 0:\n for word in new_s:\n if word not in new_t:\n missing.append(word)\n else:\n new_t.remove(word)\n\n return missing",
"def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]",
"def stopwordsRem(tokens):\n no_sw = [t for t in tokens if not t in stopwords.words('english')]\n return no_sw",
"def missingWords(s, t):\n missingWords = []\n \n new_t = t.split()\n new_s = s.split()\n\n for index, word in enumerate(new_s):\n if new_t[index] != word:\n missingWords.append(word)\n new_t[index] = word\n\n new_list = new_s - new_t\n\n return new_list",
"def find_letters_not_display(guess, secret_word):\n return set(secret_word) - set(guess)",
"def remove_unsuitable_words(words):\n\n max_length = Board.SIZE\n return [word for word in words if word and \"-\" not in word and len(word) <= max_length]",
"def known(self, words):\n return set(w for w in words if w in self.word_dict)",
"def missingWords3(s, t):\n # missingWords = []\n \n new_s = s.split()\n new_t = t.split()\n missing = []\n j = 0\n\n for i in range(len(new_s)):\n if j <= len(new_t) - 1:\n if new_s[i] != new_t[j]:\n missing.append(new_s[i])\n else:\n j += 1\n\n return missing",
"def find_unknown_words(vocab, wds):\n result = []\n for w in wds:\n # if search_linear(vocab, w) < 0:\n # result.append(w)\n if search_binary(vocab, w) < 0:\n result.append(w)\n return result",
"def ladder(word: str) -> List[str]:\n found_words = set()\n for i in range(len(word)):\n pattern = list(word)\n pattern[i] = '.'\n search_results = search(\"^\" + \"\".join(pattern) + \"$\")\n for result in search_results:\n if result != word:\n found_words.add(result)\n return found_words",
"def stopword_removal(words):\n stops = set(stopwords.words('english'))\n words = [w for w in words if w not in stops]\n return words",
"def avoids(word, forbidden):\n for letter in word:\n if letter in forbidden:\n return False\n return True",
"def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2",
"def remove_non_words(text_tokens):\n\n # define the set of Latin characters\n keep_list = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\n 'u', 'v', 'w', 'x', 'y', 'z']\n\n return [words for words in text_tokens if all(letter in keep_list for letter in list(words)) is True]",
"def wordset(word_list):\n\n unique_words = []\n\n for word in word_list:\n\n if word not in unique_words:\n unique_words.append(word)\n\n unique_words.sort()\n\n return unique_words",
"def check_unconformant(self, not_found, local_set):\n not_missing = set()\n for title in local_set:\n if \"|\" in title:\n not_missing.add(re.search(\"\\|.*\\|\", title).group()[1:-1])\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"\"))\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"_\"))\n not_missing.add(re.search(\"\\|.*$\", title).group()[1:].replace(\"|\", \"\") + \"+\")\n\n not_missing = not_missing.intersection(not_found)\n\n return not_missing",
"def removeNonDictionaryWords(self, words):\n\t\twordList = [w.strip() for w in words.split(' ')]\n\t\trtnWords = []\n\t\tfor word in wordList:\n\t\t\tif word.lower() in self.dictionary:\n\t\t\t\trtnWords.append(word)\n\t\treturn \" \".join(rtnWords)",
"def all_words( corpus, key, ignore_words = Ignore_words ) :\n return list(set(chain.from_iterable( (words(c,key,ignore_words) for c in corpus ) ) ) )",
"def test_unwanted_words(self) -> None:\n pad_open: bool = False\n for word in self.report.get_words():\n if word.text in self.rules.citation_delimiters:\n pad_open = not pad_open\n continue\n if pad_open:\n continue\n for u_word in self.rules.unwanted_words:\n if word.text == u_word[\"word\"]:\n self.add_error(\n f\"Ordet {word.text} är inte tillåtet, \"\n f\"använd {u_word['alternative']} istället.\",\n word=word,\n )\n break",
"def filter(q_words):\n filtered_words = [\"how\",\"what\"]\n for word in q_words:\n if word in filtered_words:\n q_words.remove(word)",
"def missing_letters(sentence):\n set1 = set(\"\".join(\n char for char in sentence if char not in punctuation + \" \").lower()\n )\n return sorted(ALPHA - set1)",
"def remove_non_wordnet(tokens):\n return [token for token in tokens if wn.synsets(token)]",
"def uses_only(w, letters):\n\treturn set(w).issubset(set(letters))",
"def remove_stopwords(tokens):\n\n return [t for t in tokens if t not in stopwords.words('english')]",
"def stopword_filter(words):\n new_words = []\n for w in words:\n if w in stopwords.words(\"german\"): continue\n else: new_words += [w]\n return new_words",
"def rm_stop_words(self, words):\n return [word for word in words if word.lower() not in self.stopwords]",
"def remove_stopwords_set(sentence: str, stop_words: Collection[str]) -> str:\n return \" \".join([w for w in word_tokenize(sentence) if w not in stop_words])"
] | [
"0.70398265",
"0.66386557",
"0.66319805",
"0.6586166",
"0.65605026",
"0.64877725",
"0.64639956",
"0.6449832",
"0.63439935",
"0.6343545",
"0.63153183",
"0.6264546",
"0.62233686",
"0.6147896",
"0.6138847",
"0.61236644",
"0.6107858",
"0.60502595",
"0.5988028",
"0.5979397",
"0.59717286",
"0.59687275",
"0.59684795",
"0.59591347",
"0.593129",
"0.5924978",
"0.5857856",
"0.5832677",
"0.5771374",
"0.57705015"
] | 0.8541773 | 0 |
Finds the percentage of words in word_set without the letter l. | def percent_without_letter(l):
return len(words_without_letter(l)) / len(word_set) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boostScore(self, result: str, words:set ):\n found = 0;\n for word in words:\n if result in self.invertedIndex[word]:\n found += 1\n return found/len(words)",
"def percent_frequencies(self):\n word_count = 0\n local = self.frequencies()\n for key in local.keys():\n i = local[key]\n word_count += int(i)\n for key in local.keys():\n i = local[key]\n percentage = float(i) / float(word_count)\n local[key] = percentage\n return local",
"def words_without_letter(l):\n\treturn {w for w in word_set if has_no_letter(w, l)}",
"def lexical_diversity(text):\n return len(set(text)) / len(text)",
"def stopword_percent(self, stop_words = 'english'): \n stops = set(stopwords.words(stop_words))\n percent = []\n for i in tqdm(self.text):\n text_l = i.lower()\n word_tokens = word_tokenize(text_l)\n stop_w = [word for word in word_tokens if word in stops]\n if len(stop_w) == 0:\n percent.append(0.0)\n else:\n percent.append(round((len(stop_w)/len(word_tokens)), 2))\n return percent",
"def find_words_no_e():\n f = open('session09/words.txt')\n num_no_e = 0\n num_words = 0\n for line in f:\n num_words += 1\n word = line.strip()\n if has_no_e(word):\n # print(word)\n num_no_e += 1\n # print(num_no_e, num_words)\n return num_no_e/num_words",
"def coverage(text: str) -> float:\n words = set(text.split(' '))\n return len([w for w in words if frequency(w) != 0]) / len(words) * 100",
"def user_avoid_count():\n\tforbidden = input('Enter a string of forbidden letters.\\n> ')\n\tprint(len({w for w in word_set if avoids(w, forbidden)}))",
"def get_proportion_of_unique_lemmas(self):\n lemmas = self.blob.words.lemmatize()\n return len(set(lemmas)) / float(len(self.blob.words))",
"def token_percentage(word, text):\n word_count = text.count(word)\n text_len = len(text)\n return percentage(word_count, text_len)",
"def s_words(words):\n\t\n\treturn words // 100 / 10",
"def monkey_typing(text: str, words: set) -> int:\n # simple solution\n # counter = [chunk for chunk in words if chunk in text.lower()]\n\n # my solution\n text_split = text.lower().split(\" \")\n if not all(len(w) >= 3 and w.islower() and w.isalpha for w in words):\n return 0\n score = 0\n eliminated = []\n for text_chunk in text_split:\n words_to_count = words\n for word in words_to_count:\n if word.lower() in text_chunk and word.lower() not in eliminated:\n score += 1\n eliminated.append(word)\n return score",
"def lp(word, category, unique, k, name=\"category\"):\n\t\tp1 = category.count(word) + k\n\t\tp2 = len(category) + unique\n\t\tprint(word + \" in \"+name+\": \" + str((p1 * 1.0) / (p2 * 1.0)))\n\t\treturn (p1 * 1.0) / (p2 * 1.0)",
"def get_word_containement_measure(self,l2,l1):\n count = 0\n found_idfs = []\n unfound_idfs = []\n for w in l1:\n val = self.idf.get_tfidf_val(w)\n if (val > 10):\n val = 10\n if w in l2:\n count += 1\n found_idfs.append(val)\n else:\n unfound_idfs.append(val)\n if (len(found_idfs) == 0):\n avg_found = 0\n else:\n avg_found = np.mean(found_idfs)\n if (len(unfound_idfs) ==0):\n avg_unfound = 0\n else:\n avg_unfound = np.mean(unfound_idfs)\n\n\n\n return count / self.normalize_factor, avg_found, avg_unfound",
"def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)",
"def _alnum_percent(line):\n total = len(line)\n\n test_set = set()\n for letter in string.ascii_letters:\n test_set.add(letter)\n test_set.add(' ')\n\n # Return a failure (no good characters) if there are no characters\n if total < 1:\n return 0\n\n alnum_count = 0\n star_count = 0\n bar_count = 0\n for letter in line:\n # if letter.isalnum():\n if letter in test_set:\n alnum_count += 1\n if letter == '*':\n star_count += 1\n if letter == 'I' or letter == 'i' or letter == 'l' or letter == '|':\n bar_count += 1\n\n # TODO(searow): properly implement this, but sticking this here for now.\n\n if star_count / total > 0.1:\n return 0\n\n if bar_count / total > 0.5:\n return 0\n\n return alnum_count / total",
"def letterFreq(words):\n dict = {}\n total = 0\n for word in words:#Iterate through words\n for letter in word:#Increment by letter\n count = 0\n for yearCount in words[word]:\n count += yearCount.count#Increment total instances of word\n total += count#Count total letters\n if letter in dict:\n dict[letter] += count#Add to existing entry\n else:\n dict[letter] = count#Create new entry\n \"\"\"CODE FOR THE WHOLE ALPHABET\"\"\"\n list = []\n for letter in ascii_lowercase:\n if letter in dict and dict[letter] != 0:\n list.append(dict[letter] / total)#Convert to relative\n else:\n list.append(0.0)#Fill alphabet\n return list",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def _score_word_set(self, word_set):\n if len(word_set) < self._min_words:\n return -1\n len_to_score = dict({\n 3: 3,\n 4: 4,\n 5: 2,\n 6: 1\n })\n return sum([ len_to_score[len(word)] for word in word_set ])",
"def calculatePenalty(words, M):\n tot_len = 0\n for word in words:\n tot_len += len(word) + 1\n tot_len -= 1\n if tot_len > M:\n return None\n return (M - tot_len) ** 2",
"def get_frequency_class_score_unique_lemmas(blob): \n lemmaList = {}\n frequencies = make_frequency_dict(self.frequency_dict_filepath)\n lemmas = blob.words.lemmatize()\n totalFrequency = 0.0\n numWords = 0\n for lemma in lemmas:\n if lemma.lower() not in lemmaList:\n lemmaList[lemma.lower()]\n totalFrequency += frequencies[lemma.lower()]\n numWords +=1\n return float(totalFrequency / numWords)",
"def word_probability(self, word: str) -> int:\n try:\n return self.fdist[word.lower()] / len(self.fdist.keys())\n except KeyError:\n return 0.0",
"def _calculate_ranking(self, files_found_by_word: Dict[str, int],\n words: List[str]) -> List[Tuple[str, float]]:\n size_words = len(words)\n words_percentage_hit = [(k, v / size_words) for (k, v) in files_found_by_word.items()]\n return words_percentage_hit",
"def letter_percent(s):\r\n\r\n alpha = 'abcdefghijklmnopqrstuvwxyz'\r\n s_lower = s.lower()\r\n s_length = 0\r\n letter_count = {} # empty dictionary\r\n keys = letter_count.keys()\r\n\r\n for char in s_lower:\r\n if char in alpha:\r\n s_length = s_length + 1\r\n if char in letter_count:\r\n letter_count[char] = letter_count[char] + 1\r\n else:\r\n letter_count[char] = 1\r\n\r\n for char in sorted(keys):\r\n letter_count[char] = (letter_count[char] / s_length) * 100\r\n print(char, \"{:.1f}%\".format(letter_count[char]))",
"def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies",
"def avg_word_length(s,wc):\n s = s.translate(string.maketrans('',''),string.whitespace)\n return len(s) / float(wc)",
"def get_word_weight(words, word):\n weight = 0\n for w in words:\n if not any(c in word for c in w):\n weight += 1\n\n return weight",
"def bad_start_rate(labelled,str):\n#\tlabelled = RawClaim.objects.exclude(correcttrim=\"\")\n\tfiltered = set([l for l in labelled if fixstring(l.sentence).startswith(str)])\n\twrong = set([l for l in filtered if l.correcttrim!=\"X\"])\n\tright = filtered - wrong\n\treturn (float(len(right))/len(filtered),wrong,right)",
"def count_words(title_pair: np.array) -> float:\r\n title_1, title_2 = title_pair\r\n # Transform into sets of words\r\n title_1 = set(title_1.split())\r\n title_2 = set(title_2.split())\r\n # Divide length of intersection by length of union\r\n ratio = len(title_1.intersection(title_2)) / len(title_1.union(title_2))\r\n return ratio"
] | [
"0.65205586",
"0.6472289",
"0.6440788",
"0.643002",
"0.63763565",
"0.6327741",
"0.62209934",
"0.6209515",
"0.6150368",
"0.61175454",
"0.6074386",
"0.6044777",
"0.60375136",
"0.6026406",
"0.6003252",
"0.59939384",
"0.5986944",
"0.5981807",
"0.5981807",
"0.59739184",
"0.59416336",
"0.5912105",
"0.5905027",
"0.5904836",
"0.5885518",
"0.58641344",
"0.5862405",
"0.5837191",
"0.582645",
"0.58168447"
] | 0.88551563 | 0 |
Prints the number of words from word_set that avoid the user-submitted string of letters. | def user_avoid_count():
forbidden = input('Enter a string of forbidden letters.\n> ')
print(len({w for w in word_set if avoids(w, forbidden)})) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_uses_all():\n str = input('Enter a string of letters, please:')\n\n count_uses_all = 0\n for line in fin:\n word = line.strip()\n if uses_all(word, str):\n print(word)\n count_uses_all += 1\n print('Number of words that use all letters in the string:', count_uses_all)",
"def print_uses_only():\n str = input('Enter a string of letters, please:')\n\n count_uses_only = 0\n for line in fin:\n word = line.strip()\n if uses_only(word, str):\n print(word)\n count_uses_only += 1\n print('Number of words that contain only letters in the string:', count_uses_only)",
"def get_main_words(idioms_set):\r\n main_words = Counter([idiom.split()[-1] for idiom in idioms_set])\r\n print('main words:', '\\n', main_words)\r\n print('top 50 main words:', '\\n', main_words.most_common(50)) \r\n return list(main_words)",
"def count_words(word_list, print_words=False):\n freq_dist = Counter(word_list)\n global global_word_freq_list\n\n if print_words:\n for (word, freq) in freq_dist.items():\n print('{:25}{:10}'.format(word, freq))\n\n global_word_freq_list = freq_dist.copy()\n return freq_dist",
"def count_words(subreddit, word_list):\n word_list = [str.lower() for str in word_list]\n\n my_list = get_hot_list(subreddit)\n my_dict = {}\n\n for word in word_list:\n my_dict[word] = 0\n try:\n for title in my_list:\n title_split = title.split(\" \")\n\n for iter in title_split:\n for iter_split in word_list:\n if iter.lower() == iter_split.lower():\n my_dict[iter_split] += 1\n\n for key, val in sorted(my_dict.items(), key=lambda x: x[1],\n reverse=True):\n if val != 0:\n print(\"{}: {}\".format(key, val))\n except Exception:\n return None",
"def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2",
"def word_count(self):\n from collections import Counter\n counts = Counter(self._replace_non_alnum().split())\n return counts",
"def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)",
"def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))",
"def words(self):\n pass",
"def print_words(words):\n print('Here are the words you entered:')\n for i, word in enumerate(words, start=1):\n print('{}. {}'.format(i, word.title()))",
"def test_getWordsSet(self):\n filename = 'Listwords.txt'\n self.testpzz.getAllWords(filename)\n\n self.expectedResult = {'a', 'ccc', 'ddd', '0', ' '}\n self.assertNotEqual(self.expectedResult, self.testpzz.wordSet)",
"def monkey_typing(text: str, words: set) -> int:\n # simple solution\n # counter = [chunk for chunk in words if chunk in text.lower()]\n\n # my solution\n text_split = text.lower().split(\" \")\n if not all(len(w) >= 3 and w.islower() and w.isalpha for w in words):\n return 0\n score = 0\n eliminated = []\n for text_chunk in text_split:\n words_to_count = words\n for word in words_to_count:\n if word.lower() in text_chunk and word.lower() not in eliminated:\n score += 1\n eliminated.append(word)\n return score",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def test_run():\n print count_words(\"cat bat mat cat bat cat\", 3)\n print count_words(\"betty bought a bit of butter but the butter was bitter\", 3)",
"def trim_words(word_set, data_sets, num):\n word_dict = {}\n for data in data_sets:\n for word_list, _ in data:\n for word in word_list:\n if word not in word_set:\n continue\n if word in word_dict:\n word_dict[word] += 1\n else:\n word_dict[word] = 1\n sorted_list = sorted(word_dict.keys(), key=lambda w: word_dict[w], reverse=True)\n\n result_set = set()\n result_set.update(sorted_list[:num])\n return result_set",
"def nwords(s: str):\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZÄÜÖabcdefghijklmnopqrstuvwxyzüäö\"\n take = 0\n skip = 0\n for i in s:\n if i not in letters:\n skip += 1\n #print(\"S:\", skip)\n else:\n take += 1\n #print(\"t:\", take)\n res = (len(s) - take) + 1\n return res",
"def count_words():\n paragraph = \"a distinct section of a piece of writing,\"\n # 替换\n paragraph.replace(\",\", \" \").replace(\":\", \" \").replace(\";\", \" \").replace(\".\", \" \").replace(\"?\", \" \")\n words = paragraph.split(\" \")\n nums = {}\n\n for word in words:\n nums[word] = nums[word]+1 if word in nums else 1\n # nums[word] = nums.get(word, 0) + 1\n\n for word, num in nums.items():\n print(word, \": \", num)",
"def print_abecedarian():\n count_abc = 0\n for line in fin:\n word = line.strip()\n if is_abecedarian(word):\n print(word)\n count_abc += 1\n print('Number of words which letters are in alphabetical order:', count_abc)",
"def _score_word_set(self, word_set):\n if len(word_set) < self._min_words:\n return -1\n len_to_score = dict({\n 3: 3,\n 4: 4,\n 5: 2,\n 6: 1\n })\n return sum([ len_to_score[len(word)] for word in word_set ])",
"def display(wordsDictionary):\n noOfWords = 0\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n for word in list(sorted(wordsDictionary.keys())):\n noOfWords += 1\n print(\"| %-20s | %15s |\" % (word, str(wordsDictionary.get(word)).center(15)))\n # Halt every 20 words (configurable)\n if (noOfWords != 0 and noOfWords % 20 == 0):\n print(\"\\n\" * 2)\n input(\"PRESS ENTER TO CONTINUE ... \")\n print(\"\\n\" * 5)\n print(\"-\" * 42)\n print(\"| %20s | %15s |\" % (\"WORDS\".center(20), \"FREQUENCY\".center(15)))\n print(\"-\" * 42)\n print(\"-\" * 42)\n print(\"\\n\" * 2)",
"def display_words(word_list,specifier):\n \n if specifier.lower() == 'score':\n print(\"{:>6s} - {:s}\".format(\"Score\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[1], tup[0]))\n \n \n elif specifier.lower() == 'length':\n print(\"{:>6s} - {:s}\".format(\"Length\", \"Word\"))\n if len(word_list) < 5:\n for tup in word_list:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))\n else:\n \n for tup in word_list[:5]:\n print(\"{:>6d} - {:s}\".format(tup[2], tup[0]))",
"def countWord(afz, word, output=True):\n count = 0\n for msg in msgs:\n if msg.afz == afz:\n count = count + msg.msg.lower().count(word.lower())\n print afz, 'heeft', count, 'keer', word, 'gezegd.'",
"def usedWord(afz, word, output=True):\n count = 0\n for msg in msgs:\n if msg.afz == afz:\n if word.lower() in msg.msg.lower():\n count = count + 1\n print afz, 'heeft', count, 'keer het woord', word, 'gebruikt.'",
"def print_anagrams(anagrams):\n for letter_set in anagrams:\n words = anagrams.get(letter_set)\n if len(words) > 1:\n print(words)",
"def get_words():\n\tprompts = []\n\tfor prompt in story.prompts:\n\t\tprompts.append(prompt.replace('_', ' '))\n\n\treturn render_template(\"get-words.html\", prompts = prompts, key_prompts = story.prompts, num_of_prompts = len(prompts))",
"def count_words(text):\n\n # Open a file and read the text\n with open(text) as file:\n # Split the file in to a list of words\n words = remove_punctuation(file.read()).split()\n # Create a set of unique words from the list words\n unique_words = {*words}\n\n # For each string in the new list\n for unique_word in unique_words:\n # Count the number of times the word appears\n count = words.count(unique_word)\n # Print the string and the number of times it appears.\n print(f'\"{unique_word.capitalize() }\" appears {count} times.')",
"def count(self, word):\n pass",
"def words_uses_only(letters):\n\treturn {w for w in word_set if uses_only(w, letters)}",
"def countCharacters(self, words, chars):\n chars_dict = self.count_to_dict(chars)\n chars_set = chars_dict.keys()\n len_chars_set = len(chars_set)\n \n good_string = ''\n for words_i in words:\n word_dict = self.count_to_dict(words_i)\n words_i_set = word_dict.keys()\n check = 0\n for i in words_i_set:\n if i not in chars_set:\n check+=1\n break\n if word_dict[i] > chars_dict[i]:\n check+=1\n break\n if check == 1:\n continue\n good_string = good_string+words_i\n return len(good_string)"
] | [
"0.69023",
"0.68986577",
"0.6628784",
"0.6303168",
"0.622313",
"0.6209866",
"0.61580354",
"0.6153805",
"0.6128608",
"0.6120367",
"0.6089993",
"0.606419",
"0.6043903",
"0.6039978",
"0.6039978",
"0.60386497",
"0.6035041",
"0.6030058",
"0.5978528",
"0.59610736",
"0.59212035",
"0.5890993",
"0.5864555",
"0.58514863",
"0.58506054",
"0.5847511",
"0.5845869",
"0.5839143",
"0.583234",
"0.5809462"
] | 0.7388498 | 0 |
Finds all words in word_set that use only the given letters. | def words_uses_only(letters):
return {w for w in word_set if uses_only(w, letters)} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def words_without_letter(l):\n\treturn {w for w in word_set if has_no_letter(w, l)}",
"def filter_by_lettersets(words):\n lettersets = {}\n\n for word in words:\n letterset = frozenset(word)\n length = len(word)\n if letterset not in lettersets or length > lettersets[letterset][0]:\n lettersets[letterset] = (length, word)\n\n return lettersets",
"def allWordsFromString(str):\n return set(re.findall(\"\\w+\", str.lower()))",
"def allPossibleWords(Rack):\n def checkWord(word):\n return stringInRack(word,Rack)\n return filter(checkWord, Dictionary)",
"def generate_words(combo,scrabble_words_dict):\n word_set = set()\n for w in itertools.permutations(combo):\n word = ''.join(w)\n if word in scrabble_words_dict:\n word_set.add(word)\n return word_set",
"def get_words(f: str, letters: List[str]) -> List[str]:\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n words_file = open(f)\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in words_file:\r\n word = word[:-1].lower()\r\n if len(word) >= 4:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list",
"def generate_words(self) -> set:\n\n words = set()\n max_word_length = int(self._restrictions['max-word-length'])\n\n def _reduce(seq):\n if self.is_word(seq):\n if len(seq) <= max_word_length and seq not in words:\n words.add(seq)\n else:\n if self.get_terminals_count(seq) <= max_word_length:\n first_non_terminal = seq[\n self.get_first_non_terminal_index(seq)]\n\n for transition in sorted(\n self._transitions[first_non_terminal]):\n _reduce(seq.replace(first_non_terminal, transition))\n\n for entry_transition in sorted(self._transitions[\n self._starting_non_terminal]):\n _reduce(entry_transition)\n\n return words",
"def search4letters(phrase:str, letters:str) -> set:\n return set(letters).intersection(set(phrase))",
"def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2",
"def word_forms(self, word):\n result = set()\n for dic_name in self.dictionaries.keys():\n for vector in self.dictionaries[dic_name].word_forms(word):\n result.add(tuple(vector))\n return filter(lambda x: len(x), result)",
"def known(words):\r\n return set(w for w in words if w in WORDS)",
"def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]",
"def search_for_letters(phrase:str, letters:str='aeiou') -> set:\n return set(letters).intersection(set(phrase))",
"def contains(self, letters):\n result = Words()\n result.words = self._internal_contains(letters)\n return result",
"def all_words( corpus, key, ignore_words = Ignore_words ) :\n return list(set(chain.from_iterable( (words(c,key,ignore_words) for c in corpus ) ) ) )",
"def search4letters(phrase:str, letters:str='aeyuio') -> set:\n letters_to_be_checked = set(letters)\n return letters_to_be_checked.intersection(set(phrase))",
"def ladder(word: str) -> List[str]:\n found_words = set()\n for i in range(len(word)):\n pattern = list(word)\n pattern[i] = '.'\n search_results = search(\"^\" + \"\".join(pattern) + \"$\")\n for result in search_results:\n if result != word:\n found_words.add(result)\n return found_words",
"def find_letter_indices(words, letter):\n\n return []",
"def findWordsInPattern (self, pattern, letters):\n\t\twords = []\n\t\tletters = ' ' + letters\n\t\twords = self.root.findWordsInPattern(pattern, letters, u'')\n\t\treturn words;",
"def search4letters(phrase: str, letters: str = 'aeiou') -> set:\n return set(letters).intersection(set(phrase))",
"def compute_possibles(letters, slots, dictionary_words, context):\n\n words = dictionary_words\n\n # if we have a known number of slots filter\n # our word list down to words w/ that manny letters\n if slots:\n words = ifilter(f.word_len(slots), words)\n\n # filter our word list down to words who's\n # letters are a subset of the given letters\n words = ifilter(f.letter_subset(letters), words)\n\n # we now have our final iterator of possible solutions\n return words",
"def _internal_contains(self, letters):\n len_letters = len(letters)\n for word in self.words:\n len_word = len(word)\n word_with_letters_removed = WordUtils.remove_letters_from_word(word, letters)\n if len(word_with_letters_removed) == (len_word - len_letters):\n yield word",
"def wordset(word_list):\n\n unique_words = []\n\n for word in word_list:\n\n if word not in unique_words:\n unique_words.append(word)\n\n unique_words.sort()\n\n return unique_words",
"def find_words_using_all_vowels():\n pass",
"def check_words_in_trie(self, trie, words):\n result = []\n # get the unique combinations for our search\n word_set = set(words)\n print('The Number of possible combinations is:', len(words), '.\\n The Number of unique combinations is:',\n len(word_set), '.')\n for word in word_set:\n checked = self.in_trie(trie, word)\n if checked:\n result.append(checked)\n return result",
"def get_possible_vowels(self, word_set):\r\n \r\n vowels = \"\"\r\n for word in word_set:\r\n # Check if existing vowel is in word.\r\n if any(vowel in word for vowel in vowels):\r\n continue\r\n # Find most common letter and assume it's a vowel\r\n vowel, probability = '', 0\r\n for c in word:\r\n _, number = self.letters.get_value(c)\r\n if number > probability:\r\n vowel = c\r\n probability = number\r\n vowels += vowel\r\n return vowels",
"def _get_words(self, sentence):\n _uniq_words = set()\n for word in sentence.split():\n word = normed_word(re.sub(\"\\W\", \"\", word)).lower()\n _uniq_words.add(word)\n return _uniq_words",
"def _seed_words(self, seed):\n valid_words = set()\n for i in range(3, 7):\n for _tuple in itertools.permutations(list(seed), i):\n if ''.join(_tuple) in self._word_set:\n valid_words.add(''.join(_tuple))\n return valid_words",
"def get_words(f, letters):\n # lettrs = []\n # okay = True\n # words = []\n # nline = ''\n # with open(f, 'r') as vocabulary:\n # for line in vocabulary.readlines():\n # nline = line.replace(\"\\n\", \"\").lower()\n # if 4 <= len(nline) <= 9 and letters[4] in nline:\n # lettrs = list(nline)\n # for lettr in lettrs:\n # if lettr not in letters:\n # okay = False\n # break\n # else:\n # okay = True\n # if okay is True:\n # words.append(nline)\n #\n # lettrs = copy.copy(letters)\n # nwords = []\n # okay = True\n # for word in words[::1]:\n # lettrs = copy.copy(letters)\n # for letter in word:\n # if letter in lettrs:\n # lettrs[lettrs.index(letter)] = '0'\n # else:\n # okay = False\n # break\n # if okay is True:\n # nwords.append(word)\n # okay = True\n #\n # unique = True\n # words = []\n # for word in nwords:\n # if nwords.count(word) > 1:\n # nwords.remove(word)\n # nwords.sort()\n # return nwords\n res = []\n cort_letters = []\n our_letters = []\n res = []\n f = open(f, 'r')\n for line in f:\n line = line.replace(\"\\n\", \"\").strip().lower()\n if 4 <= len(line) <= 9:\n if letters[4] in line:\n count = 0\n for each_letter in line:\n if each_letter in letters:\n count += 1\n if count == len(line):\n our_letters.append(line)\n f.close()\n for each_word in our_letters:\n count_let = 0\n for each_letter in each_word:\n if each_word.count(each_letter) <= letters.count(each_letter):\n count_let += 1\n if count_let == len(each_word):\n res.append(each_word)\n for each in res:\n if res.count(each) > 1:\n res.remove(each)\n return sorted(res)",
"def test_getWordsSet(self):\n filename = 'Listwords.txt'\n self.testpzz.getAllWords(filename)\n\n self.expectedResult = {'a', 'ccc', 'ddd', '0', ' '}\n self.assertNotEqual(self.expectedResult, self.testpzz.wordSet)"
] | [
"0.7162044",
"0.70259464",
"0.70036924",
"0.69394267",
"0.69267565",
"0.68268996",
"0.6785891",
"0.677334",
"0.67678404",
"0.67589533",
"0.67586404",
"0.6694646",
"0.66327876",
"0.6626484",
"0.66089416",
"0.6602606",
"0.6593278",
"0.65701854",
"0.65646875",
"0.65646625",
"0.65641505",
"0.65333533",
"0.6511669",
"0.64826405",
"0.64789855",
"0.6468189",
"0.64670944",
"0.64601374",
"0.64353025",
"0.643382"
] | 0.8051923 | 0 |
Use alias to find the actual wrapper, which contains the actor handler. | def find_alias(self, alias):
if hasattr(self, '_logger'):
self._logger.debug(alias)
self.check_alias(alias, True)
path = self.alias_path_map[alias]
path = self._absolute_path(path)
self.check_path(path, True)
nid = self.path_nid_map[path]
return self.get_node_wrapper(nid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_alias(self):",
"def resolveAlias(self, alias):",
"def __call__(self, alias):\n return self.get_by_alias(alias)",
"def _get_target(self):\n target = None\n lnw = self.wrapped_handler_ref()\n if lnw is not None:\n target_ref = getattr(lnw, \"object\", None)\n if target_ref is not None:\n target = target_ref()\n return target",
"def getAlias(self):\n pass;",
"def alias(self):\n return self._alias",
"def alias(self):\n return self._alias",
"def help_alias(self, mess, args):\n return self.help(mess,args)",
"def magic(self, alias):\n if alias in self.aliases:\n return self.aliases[alias]\n else:\n return \"%%{}\\n\".format(alias)",
"def getAliases(self):",
"def with_alias(self):\n return self.node.alias",
"def alias(option=None):\n return alias(option)",
"def alias(option=None):\n return alias(option)",
"def alias(option=None):\n return alias(option)",
"def __find_wrapper(self, fd):\n with self.__wrapper_lock:\n if fd in self.__wrappers:\n return self.__wrappers[fd]\n with self.__poll_lock:\n if self.__poll.is_known(fd):\n self.__logger.warning(\"Cannot find wrapper object belonging to \" \\\n \"file descriptor %d. Unregistering.\" % fd)\n\n # Try to unregister with poller\n self.__poll.unregister(fd)\n\n return None",
"def alias(self):\n\n return self._alias",
"def resolve_alias_cls(self):\n alias = self.alias\n if not callable(alias):\n return\n self.is_proxy = True\n env = XSH.env\n thable = env.get(\"THREAD_SUBPROCS\") and getattr(\n alias, \"__xonsh_threadable__\", True\n )\n cls = ProcProxyThread if thable else ProcProxy\n self.cls = cls\n self.threadable = thable\n # also check capturability, while we are here\n cpable = getattr(alias, \"__xonsh_capturable__\", self.captured)\n self.captured = cpable",
"def _resolve_wrapper(wos, *a2, **k2):\n if isinstance(wos, ewrap.EntryWrapperGetter):\n wos = wos.get()\n\n @retry.retry(argmod_func=retry.refresh_wrapper, tries=60,\n delay_func=retry.STEPPED_RANDOM_DELAY)\n def _retry_refresh(wrapper, *a3, **k3):\n \"\"\"Retry as needed, refreshing its wrapper each time.\"\"\"\n return func(wrapper, *a3, **k3)\n return _retry_refresh(wos, *a2, **k2)",
"def resolve_alias(self):\n cmd0 = self.cmd[0]\n\n if cmd0 in self.alias_stack:\n # Disabling the alias resolving to prevent infinite loop in call stack\n # and futher using binary_loc to resolve the alias name.\n self.alias = None\n return\n\n if callable(cmd0):\n alias = cmd0\n else:\n alias = XSH.aliases.get(cmd0, None)\n if alias is not None:\n self.alias_name = cmd0\n self.alias = alias",
"def get_stream_alias(self) -> str:",
"def as_(self, alias):\n return AliasedQuery(self, alias)",
"def getAnchorWrapperOfSlot(hub, slot_name):\n slot_to_io = hub['SlotIO']\n slot_to_rtl = hub['SlotWrapperRTL']\n io_list = slot_to_io[slot_name]\n\n if args.invert_non_laguna_anchor_clock:\n clock_edge = 'negedge'\n else:\n clock_edge = 'posedge'\n\n wrapper = addAnchorToNonTopIOs(hub, f'{slot_name}_ctrl', io_list, clock_edge)\n\n # add the rtl for the inner module (the slot wrapper)\n # discard the first line (time scale)\n assert 'timescale' in slot_to_rtl[slot_name][0]\n wrapper.append('\\n\\n')\n wrapper += slot_to_rtl[slot_name][1:]\n\n return wrapper",
"def get_stream_alias(self) -> str:\n return self.alias",
"def dispatch_handler(self, opts: argparse.Namespace) -> int:\n handler_name = getattr(opts, self.handler_dest, None)\n\n if self._prefix:\n handler_name = f\"{self._prefix}:{handler_name}\"\n handler = self._handlers.get(handler_name, self._default_handler)\n\n return handler(opts)",
"def _wrap(self, name):\n attr = self.pget(name)\n for cls, handler in WRAP_HANDLERS:\n if isinstance(attr, cls):\n return handler(self, name)\n\n # immediately delegate to self.pboj\n return self._delegate(name)",
"def _getTarget(self, name):\n\t\tif isinstance(name, str):\n\t\t\treturn super(WindowManager, self)._getTarget(name)\n\t\telse:\n\t\t\ttry:\n\t\t\t\t#get the display\n\t\t\t\tdisplay = self.displays[name[0]]\n\t\t\t\ttry:\n\t\t\t\t\t#get the window on the display\n\t\t\t\t\twindow = display[name[1]]\n\t\t\t\t\t#return the function\n\t\t\t\t\treturn getattr(window, name[2])\n\t\t\t\texcept KeyError:\n\t\t\t\t\tself.log('Unknown window ' + name[0] + ' for user ' + name[1])\n\t\t\texcept KeyError:\n\t\t\t\tself.log('Unknown user ' + name[0])",
"def get_coroutine_wrapper(): # real signature unknown; restored from __doc__\n pass",
"def load_alias(name):\n mod = importlib.import_module(\"umdone.commands.\" + name)\n main = getattr(mod, \"main\")\n builtins.aliases[name] = main\n builtins.aliases[name.replace(\"_\", \"-\")] = main",
"def alias_lookup(alias):\n try:\n s = (session.query(Series)\n .filter_by(alias=alias, following=True)\n .one())\n except NoResultFound:\n output.error('Could not find alias \"{}\"'.format(alias))\n exit(1)\n else:\n return s",
"def find_actor(self, needle):\n return self.__make_api_call('find/actor/{}'.format(needle))"
] | [
"0.60231894",
"0.59816706",
"0.5686326",
"0.5532191",
"0.5452447",
"0.53855836",
"0.53855836",
"0.53269017",
"0.5276283",
"0.5275559",
"0.5274242",
"0.52167267",
"0.52167267",
"0.52167267",
"0.5179842",
"0.51712525",
"0.51392704",
"0.51069885",
"0.50725645",
"0.50703895",
"0.5026281",
"0.50040543",
"0.49789897",
"0.49639958",
"0.49203992",
"0.49160823",
"0.49138662",
"0.49088302",
"0.4878994",
"0.4867747"
] | 0.63080287 | 0 |
Given A = [x2 ... xM] where set A contains x2 lots of 2, x3 lots of 3, etc. Yields all ProdFreqPairs. This algorithm does _not_ ensure that the products returned are distinct... | def prodgreqs_base(A):
choices = [ list(range(xi+1)) for xi in A ]
M = len(choices) + 1
for yi in itertools.product(*choices):
prod, freq = 1, 1
for a, y, x in zip(range(2, M+1), yi, A):
prod *= a ** y
freq *= math.factorial(x) // math.factorial(y) // math.factorial(x-y)
yield ProdFreqPair(prod, freq) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prodgreqs(A):\n pairs = list(prodgreqs_base(A))\n pairs.sort(key = lambda pfp : pfp.product)\n current_prod = -1\n current_freq = 0\n for pfp in pairs:\n if current_prod == -1:\n current_prod = pfp.product\n current_freq = pfp.freq\n else:\n if current_prod == pfp.product:\n current_freq += pfp.freq\n else:\n yield ProdFreqPair(current_prod, current_freq)\n current_prod = pfp.product\n current_freq = pfp.freq\n yield ProdFreqPair(current_prod, current_freq)",
"def probability(prods, prod_dict_As, count_dict):\n for p in prods:\n if p not in prod_dict_As:\n raise Exception(\"Think we cannot make the product {}.\".format(p))\n # Argh, Python, this is a reference!\n #possible_As = prod_dict_As[prods[0]]\n possible_As = set( prod_dict_As[prods[0]] )\n for p in prods[1:]:\n possible_As &= prod_dict_As[p]\n ret = []\n for A in possible_As:\n count = 1\n for p in prods:\n count *= count_dict[(p,A)]\n ret.append((A,count))\n return ret",
"def get_pairs(terms):\n return itertools.combinations(terms, 2)",
"def __iter__(self):\n return iproduct(*self.sets)",
"def pairs_of_factors(n):\n seq = factor(n)\n # indexes into seq\n i = set(range(len(seq)))\n # create pairs of subsets indexes into seq and their complements\n ps = [(ss, i-ss) for ss in powerset(i) if 0 in ss and ss<i]\n return frozenset(\n tuple(sorted((prod(seq[i] for i in a), prod(seq[i] for i in b))))\n for a, b in ps)",
"def gen_primes():\n D = defaultdict(list)\n q = 2\n while True:\n if q not in D:\n\n yield q \n D[q * q] = [q]\n else:\n for p in D[q]:\n D[p + q].append(p)\n del D[q]\n q += 1",
"def distr(self,X):\r\n return {x:X.count(x) for x in set(X)}",
"def dice_set_freqs(n, m):\n # The total number of permutations of N dice is N!\n permutations = factorial(n)\n\n # Use itertools to get the sets.\n for outcome in combinations_with_replacement(range(1, m+1), n):\n # For each set, the weight is N!/a!b!c!... where a, b, c... are\n # the sizes of each group of equal values.\n weight = permutations\n # We run through the set counting an dividing as we go\n run_len = 0\n prev_roll = None\n for roll in outcome:\n if roll != prev_roll:\n prev_roll = roll\n run_len = 0\n run_len = run_len + 1\n if run_len > 1:\n weight = weight // run_len\n yield outcome, weight",
"def ar_gen(frequentItemSets):\n# print frequentItemSets\n for fItemSet in frequentItemSets:\n if fItemSet:\n itemSets = fItemSet.keys()\n for itemSet in itemSets:\n subsets = subset_gen(itemSet)\n# print itemSet\n# print subsets\n if subsets:\n for subset in subsets:\n sptSubSet = supportItemSet(subset, frequentItemSets)\n sptSubSets = supportItemSet(itemSet, frequentItemSets)\n print subset,'->', itemSet.difference(subset), 'confidence=',sptSubSets/float(sptSubSet)",
"def _findRedundantProteins(protToPeps, pepToProts, proteins=None):\n if proteins is None:\n proteins = viewkeys(protToPeps)\n\n pepFrequency = _getValueCounts(pepToProts)\n protPepCounts = _getValueCounts(protToPeps)\n\n getCount = operator.itemgetter(1)\n getProt = operator.itemgetter(0)\n\n #TODO: quick and dirty solution\n #NOTE: add a test for merged proteins\n proteinTuples = list()\n for protein in proteins:\n if isinstance(protein, tuple):\n proteinTuples.append(protein)\n else:\n proteinTuples.append(tuple([protein]))\n\n sort = list()\n for protein in sorted(proteinTuples, reverse=True):\n if len(protein) == 1:\n protein = protein[0]\n\n protPepFreq = [pepFrequency[pep] for pep in protToPeps[protein]]\n if min(protPepFreq) > 1:\n sortValue = (len(protPepFreq)*-1, sorted(protPepFreq, reverse=True))\n sort.append((protein, sortValue))\n sortedProteins = map(getProt, sorted(sort, key=getCount, reverse=True))\n\n redundantProteins = set()\n for protein in sortedProteins:\n for pep in protToPeps[protein]:\n if pepFrequency[pep] <= 1:\n break\n else:\n protPepFrequency = Counter(protToPeps[protein])\n pepFrequency.subtract(protPepFrequency)\n redundantProteins.add(protein)\n return redundantProteins",
"def all_permutations(support):\n support = np.array(support)\n\n def gen(p):\n for perm in permutations(support):\n perm = np.array(perm)\n p[perm] = p[support]\n yield p\n p[support] = p[perm]\n return gen",
"def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()",
"def __query_pairs(self):\n\n probs = self.clf.predict_proba(self.all_features)[:,1] # unlabeled_features\n\n probs_df = pd.DataFrame(probs, index=self.all_features.index.values, columns=['proba'])\n probs_df['certainty'] = abs(0.5 - probs_df.proba)\n probs_df.sort_values(by='certainty', axis=0, inplace=True)\n\n uncertain_pairs = probs_df[:self.n_uncertain]\n match_pairs = probs_df[probs_df.proba > 0.5].sample(self.n_match)\n notmatch_pairs = probs_df[probs_df.proba < 0.5].sample(self.n_notmatch)\n\n pairs_to_label = pd.concat([uncertain_pairs,\n match_pairs,\n notmatch_pairs], axis=0, ignore_index=False)\n\n return pairs_to_label.index.values",
"def permutations(xs):\n if not xs:\n yield []\n else:\n for x, xs in selections(xs):\n for ys in permutations(xs):\n yield [x] + ys",
"def multinomial_pmf(sample, probabilities):\r\n # TODO\r\n a=[]\r\n b=[]\r\n i=0\r\n key_list=[]\r\n value_list=[]\r\n for key,value in sample.items():\r\n key_list.append(key)\r\n value_list.append(value)\r\n b=list(sample)\r\n while i< len(b):\r\n a.append(probabilities.keys()[probabilities.values().index(value_list[i])])\r\n\r\n\r\n return a",
"def find_prod_ids(dist_list, no_prod):\n list_of_prod = []\n for worker in dist_list:\n\n if len(list_of_prod) < no_prod:\n list_of_prod.append(worker)\n\n else:\n\n for i in list_of_prod:\n\n if i[1] > worker[1]:\n list_of_prod.remove(i)\n list_of_prod.append(worker)\n worker = i\n\n else:\n\n continue\n PIDs = []\n for i in list_of_prod:\n PIDs.append(i[0])\n PIDs.sort()\n return (PIDs)",
"def main(pairs, freq):\n total_dominant_offspring = 0\n pr_dom = [\n pr_dominant_offpring(offspring_zygosity(parent_1, parent_2))\n for parent_1, parent_2 in pairs\n ]\n for freq, pr_dom in zip(freq, pr_dom):\n pair_offspring = freq * 2\n total_dominant_offspring += pr_dom * pair_offspring\n\n return total_dominant_offspring",
"def myCombinations(iterable, r):\n for perm in itertools.permutations(iterable, r):\n if sorted(perm) == list(perm):\n yield perm",
"def apriori_gen(Ls):\n Lks = Ls[len(Ls) -1] #L(k-1)\n LLength = len(Ls)\n Lc = combinations(Lks, r = LLength+1)\n fs = frozenset([i for i in Lc])\n\n Ck =[] #L(k)\n for s in fs:\n ckItem = frozenset()\n for ss in s:\n ckItem = ckItem.union(ss)\n if not has_infrequent_subset(ckItem, Lks):\n Ck.append(ckItem)\n\n# print \"Ck:\",Ck\n return Ck",
"def reduce_pairs(pairs):\n return set(map(reduce_bits, filter(differ_by_one, pairs)))",
"def find_pairs(factors):\n singles = list(combinations([f for f, v in factors.items() if v >= 1], 2))\n # If factor f has multiplicity over 1, then (f, f) is also a pair\n doubles = [(f, f) for f, v in factors.items() if v >= 2]\n return singles + doubles",
"def getmulticombos(peeps):\n\n\tret = []\n\n\tfor p in peeps:\n\t\tu,s = getcombos(p)\n\n\t\tbestu = getbesttriplet(u)\n\t\tbests = getbesttriplet(s)\n\n\t\tret.append((bestu, bests))\n\n\treturn ret",
"def power_set(A):\n\n L = list()\n for i in range(len(A) + 1):\n L.extend([set(j) for j in itertools.combinations(A, i)])\n return L\n\n raise NotImplementedError(\"Problem 4 Incomplete\")",
"def joint_frequencies_combo(self, alleles):\n\n representations = [1 << i for i in range(len(alleles))]\n\n intrenal_hap_dict_per_group = {group2: self.build_intrenal_hap_dict(alleles, group2)\n for group2 in self.hap_dict_per_group}\n\n result = {}\n\n for c in representations:\n hap = {group2: internal[c] for group2, internal in intrenal_hap_dict_per_group.items()}\n result[c] = self.effective_joint_frequency(hap)\n\n for C in combinations(representations, 2):\n hap = {group2: internal[C[0]] & internal[C[1]] for group2, internal in intrenal_hap_dict_per_group.items()}\n result[C[0]|C[1]] = self.effective_joint_frequency(hap)\n\n for C in combinations(representations, 3):\n hap = {group2: internal[C[0]] & internal[C[1]] & internal[C[2]]\n for group2, internal in intrenal_hap_dict_per_group.items()}\n result[C[0]|C[1]|C[2]] = self.effective_joint_frequency(hap)\n\n for r in range(4,len(alleles)):\n for C in combinations(representations, r):\n hap = {group2: reduce(and_,itemgetter(*C)(internal))\n for group2, internal in intrenal_hap_dict_per_group.items()}\n result[sum(C)] = self.effective_joint_frequency(hap)\n\n if len(alleles)>=4:\n hap = {group2: reduce(and_,internal.values())\n for group2, internal in intrenal_hap_dict_per_group.items()}\n result[sum(representations)] = self.effective_joint_frequency(hap)\n\n return result",
"def cartesian_product(input_sets, elem_size=1):\n import itertools\n out = []\n # ::-1 reverse order to be backwards compatiable with old\n # function below\n for r in itertools.product(*input_sets[::-1]):\n out.append(r)\n out = np.asarray(out).T[::-1, :]\n return out\n\n # try:\n # from pyapprox.cython.utilities import cartesian_product_pyx\n # # # fused type does not work for np.in32, np.float32, np.int64\n # # # so envoke cython cast\n # # if np.issubdtype(input_sets[0][0],np.signedinteger):\n # # return cartesian_product_pyx(input_sets,1,elem_size)\n # # if np.issubdtype(input_sets[0][0],np.floating):\n # # return cartesian_product_pyx(input_sets,1.,elem_size)\n # # else:\n # # return cartesian_product_pyx(\n # # input_sets,input_sets[0][0],elem_size)\n # # always convert to float then cast back\n # cast_input_sets = [np.asarray(s, dtype=float) for s in input_sets]\n # out = cartesian_product_pyx(cast_input_sets, 1., elem_size)\n # out = np.asarray(out, dtype=input_sets[0].dtype)\n # return out\n # except:\n # print('cartesian_product extension failed')\n\n # num_elems = 1\n # num_sets = len(input_sets)\n # sizes = np.empty((num_sets), dtype=int)\n # for ii in range(num_sets):\n # sizes[ii] = input_sets[ii].shape[0]/elem_size\n # num_elems *= sizes[ii]\n # # try:\n # # from pyapprox.weave import c_cartesian_product\n # # # note c_cartesian_product takes_num_elems as last arg and cython\n # # # takes elem_size\n # # return c_cartesian_product(input_sets, elem_size, sizes, num_elems)\n # # except:\n # # print ('cartesian_product extension failed')\n\n # result = np.empty(\n # (num_sets*elem_size, num_elems), dtype=type(input_sets[0][0]))\n # for ii in range(num_elems):\n # multi_index = ind2sub(sizes, ii, num_elems)\n # for jj in range(num_sets):\n # for kk in range(elem_size):\n # result[jj*elem_size+kk, ii] =\\\n # input_sets[jj][multi_index[jj]*elem_size+kk]\n # return result",
"def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)",
"def bruteForcePopulation(N):\n return list(itertools.permutations(range(N), N))",
"def __iter__(self):\n from sage.combinat.posets.posets import FinitePosets_n\n n = 0\n while True:\n for P in FinitePosets_n(n):\n yield P\n n += 1",
"def slave_freq_pair_pc(args):\n pos1, x1 = args[0]\n pos2, x2 = args[1]\n return args, freq_pair_pc(pos1, pos2, x1, x2)",
"def pair_combos(iterable):\n pairs = set()\n for a in iterable:\n for b in iterable:\n pairs.add(a + b)\n return list(pairs)"
] | [
"0.780175",
"0.6109361",
"0.5713249",
"0.56769186",
"0.5640308",
"0.55529904",
"0.5530571",
"0.55021",
"0.5501083",
"0.54204476",
"0.54203075",
"0.5342395",
"0.532664",
"0.5325811",
"0.5297416",
"0.5295746",
"0.52800983",
"0.52746207",
"0.52708566",
"0.52666736",
"0.52485013",
"0.5244464",
"0.52432764",
"0.52170134",
"0.52066904",
"0.51952934",
"0.51804674",
"0.51778215",
"0.51446396",
"0.51289696"
] | 0.77215075 | 1 |
prods = list of products. Returns list of tuples (A, count) where A is a possible set, and count is the relative chance of seeing A. | def probability(prods, prod_dict_As, count_dict):
for p in prods:
if p not in prod_dict_As:
raise Exception("Think we cannot make the product {}.".format(p))
# Argh, Python, this is a reference!
#possible_As = prod_dict_As[prods[0]]
possible_As = set( prod_dict_As[prods[0]] )
for p in prods[1:]:
possible_As &= prod_dict_As[p]
ret = []
for A in possible_As:
count = 1
for p in prods:
count *= count_dict[(p,A)]
ret.append((A,count))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prodgreqs(A):\n pairs = list(prodgreqs_base(A))\n pairs.sort(key = lambda pfp : pfp.product)\n current_prod = -1\n current_freq = 0\n for pfp in pairs:\n if current_prod == -1:\n current_prod = pfp.product\n current_freq = pfp.freq\n else:\n if current_prod == pfp.product:\n current_freq += pfp.freq\n else:\n yield ProdFreqPair(current_prod, current_freq)\n current_prod = pfp.product\n current_freq = pfp.freq\n yield ProdFreqPair(current_prod, current_freq)",
"def _findRedundantProteins(protToPeps, pepToProts, proteins=None):\n if proteins is None:\n proteins = viewkeys(protToPeps)\n\n pepFrequency = _getValueCounts(pepToProts)\n protPepCounts = _getValueCounts(protToPeps)\n\n getCount = operator.itemgetter(1)\n getProt = operator.itemgetter(0)\n\n #TODO: quick and dirty solution\n #NOTE: add a test for merged proteins\n proteinTuples = list()\n for protein in proteins:\n if isinstance(protein, tuple):\n proteinTuples.append(protein)\n else:\n proteinTuples.append(tuple([protein]))\n\n sort = list()\n for protein in sorted(proteinTuples, reverse=True):\n if len(protein) == 1:\n protein = protein[0]\n\n protPepFreq = [pepFrequency[pep] for pep in protToPeps[protein]]\n if min(protPepFreq) > 1:\n sortValue = (len(protPepFreq)*-1, sorted(protPepFreq, reverse=True))\n sort.append((protein, sortValue))\n sortedProteins = map(getProt, sorted(sort, key=getCount, reverse=True))\n\n redundantProteins = set()\n for protein in sortedProteins:\n for pep in protToPeps[protein]:\n if pepFrequency[pep] <= 1:\n break\n else:\n protPepFrequency = Counter(protToPeps[protein])\n pepFrequency.subtract(protPepFrequency)\n redundantProteins.add(protein)\n return redundantProteins",
"def find_prod_ids(dist_list, no_prod):\n list_of_prod = []\n for worker in dist_list:\n\n if len(list_of_prod) < no_prod:\n list_of_prod.append(worker)\n\n else:\n\n for i in list_of_prod:\n\n if i[1] > worker[1]:\n list_of_prod.remove(i)\n list_of_prod.append(worker)\n worker = i\n\n else:\n\n continue\n PIDs = []\n for i in list_of_prod:\n PIDs.append(i[0])\n PIDs.sort()\n return (PIDs)",
"def get_most_combined_products(self):\n self.product_pairs = self.products.groupby(\"id\").agg({\"products\": lambda x: list(x)}).reset_index()\n self.product_pairs['product_pairs'] = self.product_pairs['products'].apply(\n lambda x: np.array(list(product(x, x))).tolist())\n self.product_pairs = pd.DataFrame(np.concatenate(list(self.product_pairs['product_pairs'])).tolist())\n self.product_pairs['total_pairs'] = 1\n self.product_pairs = self.product_pairs[self.product_pairs[0] != self.product_pairs[1]]\n self.product_pairs = self.product_pairs.groupby([0, 1]).agg({\"total_pairs\": \"sum\"}).reset_index()\n self.product_pairs = self.product_pairs.sort_values('total_pairs', ascending=False)\n self.product_pairs = self.product_pairs.rename(columns={0: \"pair_1\", 1: \"pair_2\"})\n self.product_pairs['product_pair'] = self.product_pairs.apply(\n lambda row: \" - \".join(list(sorted([row['pair_1'], row['pair_2']]))), axis=1)\n self.product_pairs = self.product_pairs.groupby(\"product_pair\").agg({\"total_pairs\": \"first\"}).reset_index()\n return self.product_pairs",
"def prodgreqs_base(A):\n choices = [ list(range(xi+1)) for xi in A ]\n M = len(choices) + 1\n for yi in itertools.product(*choices):\n prod, freq = 1, 1\n for a, y, x in zip(range(2, M+1), yi, A):\n prod *= a ** y\n freq *= math.factorial(x) // math.factorial(y) // math.factorial(x-y)\n yield ProdFreqPair(prod, freq)",
"def handle(self, *args, **options):\n\n ProductPair.objects.all().delete()\n person = Person.objects.get(name='klapshov')\n meals = Meal.objects.filter(person=person)\n for m in meals:\n products = [i.product for i in m.intake_set.all()]\n print(products)\n pairs = []\n for n in range(0, len(products)-1):\n for nn in range(n+1, len(products)):\n if n != nn:\n pairs.append([products[n], products[nn]])\n print(pairs)\n print(len(products), len(pairs))\n\n # REWRITE!!!!!!!!\n for p in pairs:\n try:\n pp = ProductPair.objects.get(product1=p[0], product2=p[1])\n except ProductPair.DoesNotExist:\n pp = ProductPair()\n pp.product1 = p[0]\n pp.product2 = p[1]\n pp.count = 1\n p2p = ProductToPerson.objects.get(\n person=person, product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n else:\n pp.count += 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n\n for p in pairs:\n p.reverse()\n try:\n pp = ProductPair.objects.get(product1=p[0], product2=p[1])\n except ProductPair.DoesNotExist:\n pp = ProductPair()\n pp.product1 = p[0]\n pp.product2 = p[1]\n pp.count = 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()\n else:\n pp.count += 1\n p2p = ProductToPerson.objects.get(\n person=person,\n product=p[0]\n )\n pp.ratio = round(pp.count/p2p.intakes_count*100)\n pp.save()",
"def product_counter_v1(products):\n counter_dict = create_counter(products)\n sorted_p = sort_counter(counter_dict)\n return sorted_p",
"def fine_substitutes(self, product_choice):\n substitutes = self.db.query(\"\"\"\n SELECT product.id, product.name, product.nutrition_grade,\n product.url, count(*) FROM product\n JOIN product_category ON product.id = product_category.product_id\n WHERE\n product.id != :product_choice_id\n\n AND product_category.category_id IN (\n SELECT category_id FROM product_category\n WHERE product_id = :product_choice_id\n )\n\n AND product.nutrition_grade < (\n SELECT nutrition_grade FROM product\n WHERE product.id = :product_choice_id\n )\n\n -- On groupe par nom de produit pour l'aggrégation\n GROUP BY product.id\n\n -- On ordonne par nombre décroissant de tags communs\n ORDER BY count(*) DESC, MAX(:product_choice_nutrition_grade) ASC\n \"\"\", product_choice_id=product_choice.id,\n product_choice_nutrition_grade=product_choice.nutrition_grade)\n return [self.model(**substitute) for substitute in substitutes]",
"def collect_type_tuples(products, strict_products=False):\n result = defaultdict(list)\n \"\"\"@type: dict of (ProductType, list[unicode])\"\"\"\n i_count = 0\n if ProductTypeDict.VERBOSE:\n print(u'Collecting type tuples from products')\n for product in products:\n context = ProductTypeDict.get_product_tag_context(product)\n product_tuples = ProductTypeDict.collect_sqn_type_tuples(product.sqn, with_spellings=not strict_products, context=context)\n\n for type_tuple, sqn in product_tuples.viewitems():\n result[type_tuple].append(sqn)\n\n i_count += 1\n if ProductTypeDict.VERBOSE and i_count % 100 == 0: print(u'.', end='')\n if ProductTypeDict.VERBOSE:\n print()\n print(u\"Collected %d type tuples\" % len(result))\n return result",
"def _findSamesetProteins(protToPeps, proteins=None):\n proteins = viewkeys(protToPeps) if proteins is None else proteins\n\n equalEvidence = ddict(set)\n for protein in proteins:\n peptides = protToPeps[protein]\n equalEvidence[tuple(sorted(peptides))].add(protein)\n equalProteins = list()\n for proteins in viewvalues(equalEvidence):\n if len(proteins) > 1:\n equalProteins.append(tuple(sorted(proteins)))\n return equalProteins",
"def count_products(list_products):\n for each_item in ADD_PRODUCTS: #This iterates in the dictionary\n num_of_products = list_products.count(each_item) #This count each product\n if num_of_products > 0:\n price = ADD_PRODUCTS[each_item]\n print num_of_products, each_item + \"(s)\", \"a\", (\"Q%.2f c/u\") % price",
"def _compute_count_product_negations(self):\n\n for record in self:\n record.count_product_negations = len(record.products_rejected_ids)",
"def _compute_unique_approval_scores(self, profile: list[set[int]]) -> list[int]:\n unique_approval_scores = np.zeros(self.m, dtype=int)\n for party in range(0, self.m):\n for ballot in profile:\n if ballot == {party}:\n unique_approval_scores[party] += 1\n return list(unique_approval_scores)",
"def count_by_product(self, **query):\n return self._do_count_by_product(query)",
"def probability(freqlst):\n\tproblist = []\n\ttotal = 0\n\ttotes = 0\n\tfor elem in freqlst:\n\t\ttotal = total + elem\n\tfor item in freqlst:\n\t\tprob = item / total\n\t\tproblist.append(prob)\n\tfor la in problist:\n\t\ttotes = totes + la\n\treturn problist",
"def products(self):\r\n return self._products",
"def distr(self,X):\r\n return {x:X.count(x) for x in set(X)}",
"def inventory_report(prod_list):\n prod_list = list(set(prod_list))\n x = 0\n price = 0\n weight = 0\n flammability = 0\n stealability = 0\n for item in prod_list:\n x += 1\n price += item.price\n weight += item.weight\n flammability += item.flammability\n if stealability != 'Not so stealable...':\n stealability += 1\n\n avg_price = price / x\n avg_weight = weight / x\n avg_flammability = flammability / x\n print(f'There are {x} unique products in this list. The average price is {avg_price}, '\n f'average weight is {avg_weight},'\n f'and the average flammability is {avg_flammability}.')\n if stealability >= len(prod_list) / 2:\n print('Many of these items are highly stealable!')\n return avg_price, avg_weight, avg_flammability",
"def __query_pairs(self):\n\n probs = self.clf.predict_proba(self.all_features)[:,1] # unlabeled_features\n\n probs_df = pd.DataFrame(probs, index=self.all_features.index.values, columns=['proba'])\n probs_df['certainty'] = abs(0.5 - probs_df.proba)\n probs_df.sort_values(by='certainty', axis=0, inplace=True)\n\n uncertain_pairs = probs_df[:self.n_uncertain]\n match_pairs = probs_df[probs_df.proba > 0.5].sample(self.n_match)\n notmatch_pairs = probs_df[probs_df.proba < 0.5].sample(self.n_notmatch)\n\n pairs_to_label = pd.concat([uncertain_pairs,\n match_pairs,\n notmatch_pairs], axis=0, ignore_index=False)\n\n return pairs_to_label.index.values",
"def get_participation_in_pairing(self):\n entries = self.c.select(pairing=1)\n\n frequency = dict()\n pairs = []\n for e in entries:\n c1, c2 = e.data['parents']\n pairs.append(tuple(sorted([c1, c2])))\n if c1 not in frequency.keys():\n frequency[c1] = 0\n frequency[c1] += 1\n if c2 not in frequency.keys():\n frequency[c2] = 0\n frequency[c2] += 1\n return (frequency, pairs)",
"def get_prices(name,products,sales):\r\n return tuple((products[0],products[1]*((1-tuple(filter(lambda x: x[0]==name, sales))[0][1]))) for products in products)",
"def check_proportion_list(proportions):\r\n \r\n if str(type(proportions[0])) == \"<class 'float'>\":\r\n prop_type = 'list'\r\n count = 0.00\r\n for element in proportions:\r\n count += float(element)\r\n \r\n if count != float(1):\r\n diff = 1 - count\r\n bad_prop = proportions[-1]\r\n proportions[-1] = round(float(proportions[-1]) + diff,6)\r\n print('Proportion Set 0:\\n----------------\\n' +\r\n 'Entered proportions not equivalent to 1,\\n' \r\n + str(bad_prop) + ' changed to ' + str(proportions[-1])\r\n + '\\n')\r\n \r\n \r\n \r\n \r\n elif str(type(proportions[0])) == \"<class 'list'>\":\r\n for i in range(len(proportions)):\r\n prop_type = 'list/list'\r\n count = 0.00\r\n for element in proportions[i]:\r\n count += float(element)\r\n \r\n if count != float(1):\r\n diff = 1 - count\r\n bad_prop = proportions[i][-1]\r\n proportions[i][-1] = round(float(proportions[i][-1]) + diff,6)\r\n print('Proportion Set ' + str(i) + ':\\n----------------\\n' +\r\n 'Entered proportions not equivalent to 1,\\n' \r\n + str(bad_prop) + ' changed to ' + str(proportions[i][-1])\r\n + '\\n')\r\n \r\n \r\n\r\n return proportions, prop_type",
"def get_beverage_ids_list(self):\n\n def insert_in_category_has_product(beverage_product_id):\n \"\"\"Insert products Ids in category_has_product.\"\"\"\n try:\n # Get beverage id in category table\n self.cursor.execute(sql_queries.SELECT_BEVERAGE_CATEGORY_ID)\n beverage_category_id = self.cursor.fetchone()[0]\n # [0] returns \"int\" not tuple\n\n data_product_id = {'product_id': beverage_product_id,\n 'category_id': beverage_category_id,\n }\n self.cursor.execute(sql_queries.INSERT_PRODUCT_CATEGORY,\n data_product_id)\n self.mydb.commit()\n\n except mysql.connector.Error as err:\n print(f\"Erreur lors de l'execution de \"\n f\"'insert_in_category_has_product'. \"\n f\"Détails de l'erreur : {err}\")\n\n try:\n # Select ids in product table.\n self.cursor.execute(sql_queries.SELECT_BEVERAGE_PRODUCT_ID)\n beverage_ids = self.cursor.fetchall() # list of tuples\n beverage_ids_list = [i[0] for i in beverage_ids]\n # transform list of tuples into list of integers\n\n for beverage_product_id in beverage_ids_list:\n insert_in_category_has_product(beverage_product_id)\n\n except mysql.connector.Error as err:\n print(f\"Erreur lors de l'exécution de 'get_beverage_ids_list'. \"\n f\"Détails de l'erreur : {err}\")",
"def generate_products():\n # initialize list of noun and adj\n num_products = 30\n products = [0] * num_products\n prices = [0] * num_products\n weights = [0] * num_products\n flammabilities = [0] * num_products\n\n # initlize random word object\n random = RandomWords()\n\n adj = [random.get_random_word(includePartOfSpeech=\"adjective\")\n for product in products]\n noun = [random.get_random_word(includePartOfSpeech=\"noun\")\n for product in products]\n products = [noun + \" \" + adj for noun, adj in zip(adj, noun)]\n\n prices = [random.randint(5, 100) for price in prices]\n weights = [random.randint(5, 100) for weight in weights]\n flammabilities = [random.randint(0.0, 2.5)\n for flammability in flammabilities]\n\n return products, prices, weights, flammabilities",
"def test_enumerating_protomers(self):\n\n mol = Molecule.from_smiles(\"Oc2ccc(c1ccncc1)cc2\")\n\n # there should be three protomers for this molecule so restrict the output\n protomers = mol.enumerate_protomers(max_states=2)\n\n assert mol not in protomers\n assert len(protomers) == 2\n\n # now make sure we can generate them all\n protomers = mol.enumerate_protomers(max_states=10)\n\n assert mol not in protomers\n assert len(protomers) == 3\n\n # make sure each protomer is unique\n unique_protomers = set(protomers)\n assert len(protomers) == len(unique_protomers)",
"def test_enumerating_protomers(self):\n\n mol = Molecule.from_smiles(\"Oc2ccc(c1ccncc1)cc2\")\n\n # there should be three protomers for this molecule so restrict the output\n protomers = mol.enumerate_protomers(max_states=2)\n\n assert mol not in protomers\n assert len(protomers) == 2\n\n # now make sure we can generate them all\n protomers = mol.enumerate_protomers(max_states=10)\n\n assert mol not in protomers\n assert len(protomers) == 3\n\n # make sure each protomer is unique\n unique_protomers = set(protomers)\n assert len(protomers) == len(unique_protomers)",
"def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result",
"def probs(self) -> List:\n return self._probs",
"def product_peps(dp, occ):\n\n L1,L2 = len(dp),len(dp[0])\n peps = zeros(dp, 1)\n for i in range(L1):\n for j in range(L2):\n peps[i,j][0,0,0,0,occ[i][j]] = 1.\n\n return peps",
"def recommend_next_product(self, prod_list):\n scores = defaultdict(float)\n for prod in prod_list:\n for item in self._purchased.find({PROD1: prod}):\n if not item[PROD2] in prod_list:\n scores[item[PROD2]] += math.log(item[TIMES])\n if len(scores) == 0:\n return None\n max_tuple = max(scores.items(), key = operator.itemgetter(1))\n return max_tuple[0]"
] | [
"0.63840234",
"0.59333855",
"0.5842505",
"0.5733728",
"0.5702081",
"0.5599455",
"0.5538646",
"0.54229885",
"0.5393193",
"0.5324072",
"0.52548593",
"0.52327603",
"0.5222254",
"0.5216476",
"0.5201859",
"0.5191278",
"0.5166486",
"0.5156769",
"0.5152277",
"0.5132815",
"0.5082037",
"0.5076626",
"0.5073941",
"0.5064823",
"0.50500154",
"0.50500154",
"0.5047237",
"0.50427115",
"0.50154215",
"0.50122046"
] | 0.6677144 | 0 |
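A minimal usage sketch for the probability helper in the row above. The prod_dict_As and count_dict values below are invented for illustration only; they are not part of the dataset.

# Hypothetical inputs: products 6 and 4, plus the candidate sets A that can produce each.
prod_dict_As = {6: {(1, 2, 3), (1, 6)}, 4: {(1, 4), (2, 2), (1, 2, 3)}}
count_dict = {(6, (1, 2, 3)): 1, (6, (1, 6)): 1,
              (4, (1, 4)): 1, (4, (2, 2)): 1, (4, (1, 2, 3)): 1}
# Only (1, 2, 3) lies in both candidate sets, so it is the sole result, weighted by
# count_dict[(6, (1, 2, 3))] * count_dict[(4, (1, 2, 3))] = 1.
print(probability([6, 4], prod_dict_As, count_dict))  # [((1, 2, 3), 1)]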
Given the root dir of the images tree, get a list of all of the files that contain cards. | def get_cards_in_deck(root, region_codes):
# TODO: speed up matching in some way to allow for all regions at once
# TODO: it should be possible to multithread some matching with Pool
root_parts = pathlib.Path(root).parts
all_files = []
for code in region_codes:
all_files.extend(glob.glob(f'{root}/{code}/**/*', recursive=True))
card_paths = [f for f in all_files if not os.path.isdir(f)]
# path as key replicates data but is a good optimization for lookups
cards_l = {}
for n in card_paths:
rel = str(pathlib.Path(n).relative_to(*root_parts))
this_card = Card(rel, cv2.imread(n))
this_card.load_keypoints(SIFT_OBJ)
cards_l[rel] = this_card
print('Read', len(cards_l), 'images.')
return cards_l | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_card_files(card_dir: str = \"./cards\") -> List[Path]:\n p: Path = Path(card_dir)\n return list(p.glob('**/*.yaml'))",
"def my_root_listdir(root_dir):\n root_listdir = [\n images_dir\n for images_dir in os.listdir(root_dir)\n if not any(\n characters in images_dir for characters in [\".\", \"test\", \"train\", \"valid\"]\n )\n ]\n summ = 0\n for images_dir in root_listdir:\n summ += len(os.listdir(root_dir + \"/\" + images_dir)) / 2 - 2\n print(\"Sum of images in directories: \", int(summ))\n return root_listdir",
"def get_files_list(tree):\n result = list()\n for (dir_path, _, file_names) in walk(tree):\n if file_names:\n for file in file_names:\n if file.lower().endswith(('.png', '.jpg', '.jpeg')):\n result.append(path.join(dir_path, file))\n\n return result",
"def get_nifty_list(root_dir, name=\"\"):\n \n file_list = glob(root_dir + \"/**/*.nii.gz\", recursive=True)\n file_list = sorted([file for file in file_list if name in file])\n return file_list",
"def make_image_list(directory):\r\n\tonly_files = [file for file in listdir(directory) if isfile(join(directory, file))]\r\n\treturn only_files",
"def recursive_glob(self, rootdir='.', suffix=''):\n\n valid_image_files = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_path = os.path.join(looproot, filename)\n label_path = image_path.replace(\"images\", \"labels\").replace(\"bmp\", \"txt\")\n if os.path.isfile(label_path):\n valid_image_files.append(image_path)\n\n return valid_image_files",
"def get_image_bases(image_root: str) -> list:\n return list(sorted(os.listdir(image_root), key=lambda x: tuple(\n int(x.split('.')[0].split('-')[i]) for i in range(1, len(x.split('-'))))))",
"def find_candidate_images(images_path):\n images = []\n for root, dirs, files in os.walk(images_path):\n for name in files:\n file_path = os.path.abspath(os.path.join(root, name))\n if (os.path.splitext(name)[1]).lower() in [\".jpg\", \".png\", \".jpeg\"]:\n images.append(file_path)\n return images",
"def collect_image_files():\n negs = [] # Non image files found\n for filename in os.listdir('.'):\n if filename.lower().endswith('.jpg') or filename.lower().\\\n endswith('.jpeg'):\n jpg_files.append(filename)\n elif filename.lower().endswith('.gif'):\n gif_files.append(filename)\n elif filename.lower().endswith('.png'):\n png_files.append(filename)\n else:\n negs.append(filename)\n return negs",
"def list_image_files(dir, filter=None):\n for entry in os.listdir(dir):\n path = os.path.join(dir, entry)\n if os.path.isdir(path):\n for p in list_image_files(path, filter):\n yield p\n elif any((entry.lower().endswith(ext) for ext in image_exts)):\n if filter and not filter(path):\n continue\n yield path",
"def return_images(directory):\r\n allfiles = os.listdir(directory)\r\n image_list = [im for im in allfiles if '.jpg' in str(im)]\r\n image_list = [directory + im for im in image_list]\r\n return image_list",
"def loadimages(root):\n imgs = []\n\n def add_json_files(path,):\n for imgpath in glob.glob(path+\"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('png',\"json\")))\n for imgpath in glob.glob(path+\"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg',\"json\")):\n imgs.append((imgpath,imgpath.replace(path,\"\").replace(\"/\",\"\"),\n imgpath.replace('jpg',\"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path) \n if os.path.isdir(os.path.join(path,o))]\n if len(folders)>0:\n for path_entry in folders: \n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs",
"def compile_files(root):\n files = [os.path.join(root, f) for f in os.listdir(root) if not f.startswith(\".\")]\n \n return files",
"def get_lists_in_dir(dir_path):\n image_list = []\n\n for filename in glob.glob(dir_path + '/*.jpg'):\n image_list.append(filename)\n return image_list",
"def loadimages(root):\n imgs = []\n\n def add_json_files(path, ):\n for imgpath in glob.glob(path + \"/*.png\"):\n if exists(imgpath) and exists(imgpath.replace('png', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('png', \"json\")))\n for imgpath in glob.glob(path + \"/*.jpg\"):\n if exists(imgpath) and exists(imgpath.replace('jpg', \"json\")):\n imgs.append((imgpath, imgpath.replace(path, \"\").replace(\"/\", \"\"),\n imgpath.replace('jpg', \"json\")))\n\n def explore(path):\n if not os.path.isdir(path):\n return\n folders = [os.path.join(path, o) for o in os.listdir(path)\n if os.path.isdir(os.path.join(path, o))]\n if len(folders) > 0:\n for path_entry in folders:\n explore(path_entry)\n else:\n add_json_files(path)\n\n explore(root)\n\n return imgs",
"def list_all_files(dir):\n\n result = []\n for root, _, filenames in os.walk(dir):\n for name in filenames:\n filename, ext = os.path.splitext(name)\n if ext == '.cs' or ext == '.xaml':\n result.append(os.path.join(root, name))\n return result",
"def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths",
"def recursive_glob(rootdir=\".\", suffix=\"\"):\n image_paths = []\n for looproot, _, filenames in os.walk(rootdir):\n for filename in filenames:\n if filename.endswith(suffix):\n image_paths.append(os.path.join(looproot, filename))\n return image_paths",
"def parse_dir_imgs(root_pth):\n def visit(imgpths, pth, names):\n # Appends detected image filenames to a list.\n imgpths.extend([os.path.join(pth, name) for name in names\n if os.path.splitext(name)[1].lower() in img_exts])\n # Walk down directory tree and get the image file paths\n imgpaths = []\n for dp, foo, names in os.walk(root_pth):\n visit(imgpaths, dp, names)\n # Make lowercased list of imagefilenames\n imgnames = [os.path.split(pth)[1].lower() for pth in imgpaths]\n return imgnames, imgpaths",
"def get_files(imagedir, ext='jpg|jpeg|bmp|png'):\n rex = re.compile(r'^.*\\.({})$'.format(ext), re.I)\n return [os.path.join(imagedir,base) for base in os.listdir(imagedir)\n if rex.match(base)]",
"def getfiles(path): \n global picture_list\n try:\n # dir_list has all files and directories in path\n # any directory is WITHOUT ending '/'\n dir_list = os.listdir(path)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getfiles, picture_list:\", picture_list\n picture_list = None\n return\n \n for line in dir_list:\n file = path + \"/\" + line\n if os.path.isdir(file):\n getfiles( file) # dig into subdirectory\n elif isPicture(file):\n picture_list.append(file)\n else: \n # neither picture file nor directory; ignore \n pass\n return",
"def get_image_list(source_dir):\n\n dir_list = os.path.os.listdir(source_dir)\n# print(dir_list)\n image_list = []\n os.chdir(source_dir)\n for file in dir_list:\n print(\"Inspecting.... : {}\".format(file))\n\n try:\n if Image.open(file).format:\n image_list.append(file)\n print(\"{} : is an image\".format(file))\n except Exception as e:\n print(\"{} : failed the imageness test.i \\n {}\".format(file, e))\n continue\n\n# print(image_list)\n return image_list",
"def get_data_images(path):\n\n return sorted(\n [os.path.join(root, filename) for root, dirnames, filenames in os.walk(path) for filename in\n filenames if\n filename.endswith('.jpg') and os.path.getsize(os.path.join(root, filename)) > 0]\n )",
"def get_lst_images(file_path):\n return [i for i in os.listdir(file_path) if i != '.DS_Store']",
"def get_file_list(rootdir): #{{{\n file_list = []\n for f in os.listdir(rootdir):\n if f == None or not f.endswith(\".csv\"):\n continue\n file_list.append(os.path.join(rootdir, f))\n \n return file_list",
"def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths",
"def readPlayerImageFiles(self):\n currentPath = os.path.dirname(os.path.abspath(__file__))\n listOfFileNames=[]\n for i in os.listdir(currentPath):\n if re.match(\"player\\_\\d+\",i): #i.endswith(\".gif\")\n listOfFileNames.append(currentPath+'/'+i)\n return listOfFileNames",
"def filelist(root):\n allfiles = []\n for path, subdirs, files in os.walk(root):\n for name in files:\n if name.find(\"xls\") >= 0:\n allfiles.append(os.path.join(path, name))\n return allfiles",
"def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels",
"def getcatalogs():\n \n # default path for the gthumb catalogs of the logged in user\n gpath = os.environ['HOME'] + \"/.local/share/gthumb/catalogs\"\n\n cats = [] \n cat_list = [] \n try:\n # dir_list has all files and directories in path\n # directories are WITHOUT ending '/'\n dir_list = os.listdir(gpath)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getcatalogs, gpath:\", gpath\n return []\n \n # get only the directories \n for line in dir_list:\n file = gpath + \"/\" + line\n #print file \n if os.path.isdir(file):\n cats.append(file)\n else: \n # not a directory; ignore \n #print \"not a directory:\", file \n pass\n\n # now get each catalog file from each directory\n for cat in cats:\n try:\n # dir_list has all files and directories in path\n # any directory is WITHOUT ending '/'\n dir_list = os.listdir(cat)\n except:\n # path may not be a directory or permission error\n print \"ERROR: in getcatalogs, cat:\", cat\n return []\n \n for line in dir_list:\n file = cat + \"/\" + line\n #print os.path.splitext(file)[1][1:]\n # append file only if it has catalog extension\n if os.path.splitext(file)[1][1:] == \"catalog\":\n cat_list.append(file)\n \n cat_list.sort() \n\n if random_mode:\n random.shuffle(cat_list)\n \n return cat_list"
] | [
"0.75481945",
"0.6519376",
"0.63496464",
"0.6327218",
"0.62881887",
"0.62469524",
"0.6230385",
"0.6211273",
"0.618625",
"0.60530376",
"0.6047469",
"0.60347384",
"0.6007315",
"0.60035497",
"0.5998598",
"0.59846973",
"0.5981891",
"0.5981891",
"0.59797746",
"0.59771097",
"0.5967962",
"0.596785",
"0.5891107",
"0.586967",
"0.5854754",
"0.5833618",
"0.5824804",
"0.581384",
"0.5807049",
"0.5801416"
] | 0.70987046 | 1 |
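A short, hedged usage sketch for the get_cards_in_deck row above. The directory layout and region codes are assumptions, and the function also needs Card, SIFT_OBJ, and cv2 from its own module, so treat this as illustrative only.

# Assumed layout: <root>/<region_code>/**/<card image file>
cards = get_cards_in_deck('data/card_images', ['en', 'de'])
for rel_path, card in cards.items():
    # Keys are paths relative to the root; values are Card objects with SIFT keypoints loaded.
    print(rel_path)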
Test json_loads_array validates result. | def test_json_loads_array() -> None:
assert json_loads_array('[{"c":1.2}]') == [{"c": 1.2}]
with pytest.raises(
ValueError, match="Expected JSON to be parsed as a list got <class 'dict'>"
):
json_loads_array("{}")
with pytest.raises(
ValueError, match="Expected JSON to be parsed as a list got <class 'bool'>"
):
json_loads_array("true")
with pytest.raises(
ValueError, match="Expected JSON to be parsed as a list got <class 'NoneType'>"
):
json_loads_array("null") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_loads_an_array_json_file(self):\n from test.resources import array_json\n self.assertEqual(array_json._data, [1, 2, 3])\n self.assertEqual(len(array_json), 3)\n self.assertEqual(array_json[0], 1)",
"def test_load_json_array_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('[{\"a\": 1, \"B\": \"two\"}]')\n\n assert load_json(fname) == [{\"a\": 1, \"B\": \"two\"}]\n assert load_json_array(fname) == [{\"a\": 1, \"B\": \"two\"}]\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a dict\"\n ):\n load_json_object(fname)",
"def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")",
"def test_json_loads_object() -> None:\n assert json_loads_object('{\"c\":1.2}') == {\"c\": 1.2}\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'list'>\"\n ):\n json_loads_object(\"[]\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'bool'>\"\n ):\n json_loads_object(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a dict got <class 'NoneType'>\"\n ):\n json_loads_object(\"null\")",
"def test_from_json_string(self):\n string = '[{\"id\": 4, \"width\": 3, \"height\": 4, \"x\": 1, \"y\": 3}, \\\n {\"id\": 3, \"width\": 6, \"height\": 2, \"x\": 1, \"y\": 9}]'\n jsonized = Base.from_json_string(string)\n self.assertEqual(len(jsonized), 2)\n self.assertTrue(type(jsonized) is list)",
"def test_list_2(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4}]')\n self.assertTrue(check_json_array(jdic, jobj))",
"def test_from_json_string(self):\n json_str = '[{\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8}, \\\n {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0}]'\n jason_list = Base.from_json_string(json_str)\n self.assertTrue(type(jason_list) is list)\n self.assertEqual(len(jason_list), 2)\n self.assertTrue(type(jason_list[0]) is dict)\n self.assertTrue(type(jason_list[1]) is dict)\n self.assertEqual(jason_list[0],\n {\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8})\n self.assertEqual(jason_list[1],\n {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0})",
"def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty], Base.from_json_string(json.dumps([dicty])))",
"def test_load_from_file_to_array_length(self):\n self.assertEqual(len(self.loaded_json_list), 620042)",
"def test_list(self):\n jobj = JList(parent = 'some', keys = [])\n jdic = json.loads('[]')\n self.assertTrue(check_json_array(jdic, jobj))",
"def test_list_2f(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test9\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))",
"def test_simplef(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"resultd\": \"yes\"}')\n self.assertFalse(check_json_object(j, samp1))",
"def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))",
"def test_data_parse_vanilla_json(self):\n lines = ['{\"a\": \"val\", \"b\": \"val2\"}']\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual({\"a\": \"val\", \"b\": \"val2\"}, dat)",
"def assertValidJSON(self, data):\r\n # Just try the load. If it throws an exception, the test case will fail.\r\n self.serializer.from_json(data)",
"def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data",
"def test_empty_array(self):\n req = '[]'\n resp = '{\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32600, \"message\": \"InvalidRequestError: Recieved an empty batch message\"}, \"id\": null}'\n status = 400\n r_status, r_resp = self.exec_handler(req)\n self.assertEqual(r_status, status)\n self.assertEqual(simplejson.loads(r_resp), simplejson.loads(resp))",
"def test_list_3(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test2\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))",
"def test_return_type(self):\n self.assertEqual(type(self.s0.from_json_string(self.string)), list)",
"def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty, dicty],\n Base.from_json_string(json.dumps([dicty, dicty])))",
"def test_simple(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"result\": \"yes\"}')\n self.assertTrue(check_json_object(j, samp1))",
"def test_load_json_object_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('{\"a\": 1, \"B\": \"two\"}')\n\n assert load_json(fname) == {\"a\": 1, \"B\": \"two\"}\n assert load_json_object(fname) == {\"a\": 1, \"B\": \"two\"}\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)",
"def test_list_4(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2']),\n JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test2\":4}]')\n self.assertTrue(check_json_array(jdic, jobj))",
"def test_array_abc_sequence(parser):\n obj = parser.parse(b'[1, 2, 3, 4, 5]')\n assert isinstance(obj, simdjson.Array)\n\n # __iter__\n assert list(iter(obj)) == [1, 2, 3, 4, 5]\n # __len__\n assert len(obj) == 5\n # __contains__\n assert 3 in obj\n assert 7 not in obj\n # __getitem__\n assert obj[2] == 3\n with pytest.raises(IndexError):\n obj[99]\n # __reversed__, implemented via __len__ and __getitem__ for now.\n assert list(reversed(obj)) == [5, 4, 3, 2, 1]",
"def test_parse_results_error():\n error_result = [{\"error\": \"test\"}]\n assert [{\"title\": \"Error\",\n \"subtitle\": \"test\",\n \"valid\": False}] == parse_results(error_result)",
"def test_load_json_value_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('\"two\"')\n\n assert load_json(fname) == \"two\"\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a dict\"\n ):\n load_json_object(fname)\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)",
"def test_list(self, array: dict) -> None:\r\n item = read_items(array)\r\n if read_type(item) == 'object':\r\n logger.debug('list -> dict')\r\n self.test_dict(obj=item)\r\n elif read_type(item) == 'array':\r\n logger.debug('list -> list')\r\n self.test_list(array=item)",
"def test_listf(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[]')\n self.assertFalse(check_json_array(jdic, jobj))",
"def test_lti20_good_json(self):\r\n for json_str, expected_comment in self.GOOD_JSON_INPUTS:\r\n score, comment = self.xmodule.parse_lti_2_0_result_json(json_str)\r\n self.assertEqual(score, 0.1)\r\n self.assertEqual(comment, expected_comment)",
"def test_list_4f(self):\n jobj = JList(parent = 'some', keys = [JObject(parent = None, keys = ['test1', 'test2']),\n JObject(parent = None, keys = ['test1', 'test2'])])\n jdic = json.loads('[{\"test1\":3, \"test2\":4},{\"test1\":3, \"test23\":4}]')\n self.assertFalse(check_json_array(jdic, jobj))"
] | [
"0.77304274",
"0.725045",
"0.67286676",
"0.67286676",
"0.66942936",
"0.667589",
"0.6655001",
"0.65915096",
"0.6579132",
"0.65585667",
"0.64578795",
"0.6457878",
"0.64489824",
"0.6448039",
"0.6438813",
"0.6417333",
"0.6408036",
"0.6396981",
"0.6394695",
"0.6379384",
"0.63767636",
"0.6362762",
"0.6362421",
"0.6351288",
"0.6329074",
"0.63249016",
"0.63242537",
"0.63215524",
"0.629356",
"0.6240383"
] | 0.8247545 | 0 |
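A standalone sketch of the kind of helper the test above exercises. This is an assumed re-implementation for illustration, not Home Assistant's actual json_loads_array.

import json

def json_loads_array_sketch(data):
    # Parse JSON and insist on a top-level list, mirroring the error message asserted above.
    value = json.loads(data)
    if not isinstance(value, list):
        raise ValueError(f"Expected JSON to be parsed as a list got {type(value)}")
    return value

assert json_loads_array_sketch('[{"c":1.2}]') == [{"c": 1.2}]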
Test json_loads_object validates result. | def test_json_loads_object() -> None:
assert json_loads_object('{"c":1.2}') == {"c": 1.2}
with pytest.raises(
ValueError, match="Expected JSON to be parsed as a dict got <class 'list'>"
):
json_loads_object("[]")
with pytest.raises(
ValueError, match="Expected JSON to be parsed as a dict got <class 'bool'>"
):
json_loads_object("true")
with pytest.raises(
ValueError, match="Expected JSON to be parsed as a dict got <class 'NoneType'>"
):
json_loads_object("null") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_simplef(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"resultd\": \"yes\"}')\n self.assertFalse(check_json_object(j, samp1))",
"def test_simple(self):\n samp1 = JObject(keys = ['status', 'result'])\n j = json.loads('{\"status\": \"success\", \"result\": \"yes\"}')\n self.assertTrue(check_json_object(j, samp1))",
"def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty], Base.from_json_string(json.dumps([dicty])))",
"def testfromjson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual([dicty, dicty],\n Base.from_json_string(json.dumps([dicty, dicty])))",
"def assertValidJSON(self, data):\r\n # Just try the load. If it throws an exception, the test case will fail.\r\n self.serializer.from_json(data)",
"def test_from_json_string(self):\n json_str = '[{\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8}, \\\n {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0}]'\n jason_list = Base.from_json_string(json_str)\n self.assertTrue(type(jason_list) is list)\n self.assertEqual(len(jason_list), 2)\n self.assertTrue(type(jason_list[0]) is dict)\n self.assertTrue(type(jason_list[1]) is dict)\n self.assertEqual(jason_list[0],\n {\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8})\n self.assertEqual(jason_list[1],\n {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0})",
"def test_load_an_object_json_file(self):\n from test.resources import object_json\n self.assertEqual(object_json._data, {'answer': 42})\n self.assertEqual(len(object_json), 1)\n self.assertEqual(object_json['answer'], 42)",
"def test_loads_a_non_object_json_file(self):\n from test.resources import simple_json\n self.assertEqual(simple_json._data, 'test')",
"def test_return_value(self):\n self.assertEqual(self.r0.from_json_string(self.string), self.d)",
"def test_base_case_json(self):\n json_data = '{\"a\": 1}'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a\" : 1}'))",
"def test_from_json_string(self):\n string = '[{\"id\": 4, \"width\": 3, \"height\": 4, \"x\": 1, \"y\": 3}, \\\n {\"id\": 3, \"width\": 6, \"height\": 2, \"x\": 1, \"y\": 9}]'\n jsonized = Base.from_json_string(string)\n self.assertEqual(len(jsonized), 2)\n self.assertTrue(type(jsonized) is list)",
"def test_load_json_object_data(tmp_path: Path) -> None:\n fname = tmp_path / \"test5.json\"\n with open(fname, \"w\", encoding=\"utf8\") as handle:\n handle.write('{\"a\": 1, \"B\": \"two\"}')\n\n assert load_json(fname) == {\"a\": 1, \"B\": \"two\"}\n assert load_json_object(fname) == {\"a\": 1, \"B\": \"two\"}\n with pytest.raises(\n HomeAssistantError, match=\"Expected JSON to be parsed as a list\"\n ):\n load_json_array(fname)",
"def test_dump_load(self):\n payload = {\"a\": [1, 2, 3]}\n self.assertEqual(load_json(dump_json(payload)), payload)",
"def test_data_parse_vanilla_json(self):\n lines = ['{\"a\": \"val\", \"b\": \"val2\"}']\n dat, dat_type = parser._parse_data(lines)\n self.assertEqual({\"a\": \"val\", \"b\": \"val2\"}, dat)",
"def test_json_loads_array() -> None:\n assert json_loads_array('[{\"c\":1.2}]') == [{\"c\": 1.2}]\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'dict'>\"\n ):\n json_loads_array(\"{}\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'bool'>\"\n ):\n json_loads_array(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'NoneType'>\"\n ):\n json_loads_array(\"null\")",
"def test_json_loads_array() -> None:\n assert json_loads_array('[{\"c\":1.2}]') == [{\"c\": 1.2}]\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'dict'>\"\n ):\n json_loads_array(\"{}\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'bool'>\"\n ):\n json_loads_array(\"true\")\n with pytest.raises(\n ValueError, match=\"Expected JSON to be parsed as a list got <class 'NoneType'>\"\n ):\n json_loads_array(\"null\")",
"def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))",
"def test_loader_loads_from_str():\n base_json = '{\"foo\": \"bar\"}'\n json_test = {\"foo\": \"bar\"}\n assert whenzat.loader(base_json, from_file=False) == json_test",
"def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object",
"def test_decode(self):\n result = User.objects.from_json(json.dumps(self.users_dict))\n self.assertEqual(self.users, result)",
"def test_deserialize(self):\r\n\r\n # test that from_json produces no exceptions\r\n self.assertDeserializeEqual('10:20:30', '\"10:20:30\"')",
"def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))",
"def is_json(my_object):\n try:\n json.loads(my_object)\n except ValueError:\n return False\n\n return True",
"def test_load_an_object_json_file(self):\n from test.resources import malaga\n self.assertEqual(len(malaga.data), 5018112)\n self.assertEqual(malaga.Model, 'iPhone 4')",
"def test_dictionary(self):\n self.assertIsInstance(self.test1json, dict)",
"def validate_json(self):\n pass",
"def json_loads(self, string: str) -> object:\n return json.loads(string)",
"def test_json(self):\n\t\tdecoded = json.loads(json.dumps(self.filter.to_js_obj()))\n\t\tself.assertIsNotNone(decoded, \"JSON conversion failed!\")\n\t\tself.assertEqual(self.filter.to_js_obj(), decoded, \"JSON conversion mismatch!\")",
"def test_load_json_fobj():\n\n file_name = 'test_fooof_all'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'r') as f_obj:\n data = load_json(f_obj, '')\n\n assert data",
"def test_load_json_str():\n\n file_name = 'test_fooof_all'\n\n data = load_json(file_name, TEST_DATA_PATH)\n\n assert data"
] | [
"0.7466225",
"0.7436751",
"0.7261869",
"0.7108797",
"0.707524",
"0.7068392",
"0.7052709",
"0.69498384",
"0.6922862",
"0.68458074",
"0.679536",
"0.6768142",
"0.674017",
"0.6738646",
"0.66757935",
"0.66757935",
"0.66631764",
"0.66617775",
"0.66461724",
"0.65988183",
"0.6568571",
"0.6558547",
"0.6552933",
"0.6524331",
"0.6522027",
"0.6489396",
"0.6478069",
"0.6476712",
"0.6426723",
"0.642228"
] | 0.8140474 | 0 |
Test deprecated test_find_unserializable_data logs a warning. | async def test_deprecated_test_find_unserializable_data(
caplog: pytest.LogCaptureFixture,
) -> None:
# pylint: disable-next=hass-deprecated-import,import-outside-toplevel
from homeassistant.util.json import find_paths_unserializable_data
find_paths_unserializable_data(1)
assert (
"uses find_paths_unserializable_data from homeassistant.util.json"
in caplog.text
)
assert "should be updated to use homeassistant.helpers.json module" in caplog.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_serialize_no_metadata(self):\n pass # pragma: no cover",
"def test_serialize_sinfo(self):\n self.assert_raises(RuntimeError, self.instance.serialize,\n self.testing_options['objects'][0],\n add_serializer_info=True)",
"def test_ignore_unrecognized_fields():\n\n class Foo(DumpableAttrs):\n foo: int\n\n s = \"\"\"\\\n!Foo\nfoo: 1\nbar: 2\n\"\"\"\n with pytest.warns(CorrWarning):\n assert yaml.load(s) == Foo(1)",
"def test__pickle_unpickle(self):\n pass",
"def find_json_unserializable(data, quickcheck=False):\n needs_check = True\n if quickcheck:\n try:\n # Might be a more efficient way to do this check. We duplicate a lot of\n # work by doing the check for unserializable data this way.\n json.dumps(data)\n except Exception:\n # If there is unserializable data, find out where it is.\n # is_serializable = False\n pass\n else:\n # is_serializable = True\n needs_check = False\n\n if needs_check:\n # mode = 'new'\n # if mode == 'new':\n scalar_types = (int, float, str, type(None))\n container_types = (tuple, list, dict)\n serializable_types = scalar_types + container_types\n walker = ub.IndexableWalker(data)\n for prefix, value in walker:\n *root, key = prefix\n if not isinstance(key, scalar_types):\n # Special case where a dict key is the error value\n # Purposely make loc non-hashable so its not confused with\n # an address. All we can know in this case is that they key\n # is at this level, there is no concept of where.\n yield {'loc': root + [['.keys', key]], 'data': key}\n elif not isinstance(value, serializable_types):\n yield {'loc': prefix, 'data': value}",
"def test_data_object_vaporise(self):\n pass",
"def test_default_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = sy.serialize(obj, to_proto=True)\n\n assert sy.serialize(obj) == blob",
"def test_serialize(self):\n self.assert_raises(TypeError, self.instance.serialize, (1,))",
"def test_default_deserialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid))\n\n obj2 = sy.deserialize(blob=blob)\n assert obj == obj2",
"def test_serialization():\n\n # Class is serializable.\n ray.put(DummyPredictor)\n\n # Instance is not serializable.\n predictor = DummyPredictor()\n with pytest.raises(PredictorNotSerializableException):\n ray.put(predictor)",
"def test_instances(self):\n\n @deprecate(bar=\"use baz instead\")\n def foo(bar=None, baz=None):\n pass\n\n @deprecate(baz=\"use bar instead\")\n def food(bar=None, baz=None):\n pass\n\n with warnings.catch_warnings(record=True) as w:\n foo(bar=True)\n food(baz=True)\n self.assertEqual(len(w), 2, \"Not all warnings preserved.\")",
"def test_ds(self, obj):\n pass",
"def test_no_deprecated_traits_in_table(self):\n # Set the ssv for three datasets to deprecated.\n for ds in self.datasets[1:3]:\n ssv = ds.source_study_version\n ssv.i_is_deprecated = True\n ssv.save()\n response = self.client.get(self.get_url())\n context = response.context\n table = context['source_dataset_table']\n for ds in self.datasets:\n if ds.source_study_version.i_is_deprecated:\n self.assertNotIn(ds, table.data)\n else:\n self.assertIn(ds, table.data)",
"def test_fix_odk_sunmission(self):\n data = {\n \"@a\": \"a\",\n \"b\": \"b\",\n \"orx:meta\": \"should_not_be_there\"\n }\n\n fixed_data = sources.__fix_odk_data(data)\n\n self.assertEqual(fixed_data, {\n \"a\": \"a\",\n \"b\": \"b\"\n })",
"def test_DL_import_wrong_file_serialized(self):\n filepath = '5.txt'\n with open(filepath, 'wb') as file:\n pickle.dump([\"This is a wrong dataset\"], file)\n # Check if exception was raised for wrong data type\n with self.assertRaises(Exception):\n flow_processing_input.DetectorsLocation(9999, filepath)\n os.remove(filepath)",
"def test_deserialize_bad_data(self):\n data = \"this is not a dictionary\"\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)",
"def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)",
"def test_default_serializer_cleanup():\n path = _dump([0])\n assert os.path.exists(path)\n list(_load(path))\n assert not os.path.exists(path)",
"def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False",
"def test_serialize_object(self):\n test_obj = self.TestObject(prop1='x', prop2=1234)\n\n with self.assertRaises(TypeError):\n serialize(test_obj)",
"def check_pickle() -> list:\n try:\n with open(\"data.pkl\", mode='r+b') as open_pickle:\n data = pickle.load(open_pickle)\n except FileNotFoundError as _:\n data = load_data()\n with open(\"data.pkl\", mode='w+b') as open_pickle:\n pickle.dump(data, open_pickle)\n return data",
"def __getstate__(self):\n raise IOError(\"You tried to serialize something that should not\"\n \" be serialized.\")",
"def test_pickle_load(self):\n l = [1, 2, 3, 4, 5]\n self.plugin.save_data(l)\n\n l = self.plugin.load_data()\n self.assertIn(4, l)",
"def test_deprecated_private_variables(attr):\n with pytest.warns(AstropyDeprecationWarning):\n resolve_name(\"astropy\", \"cosmology\", \"flrw\", attr)",
"def _TestReadSerialized(self, serializer_object, json_dict):\n # We use json.dumps to make sure the dict does not serialize into\n # an invalid JSON string such as one that contains Python string prefixes\n # like b'' or u''.\n json_string = json.dumps(json_dict)\n unserialized_object = serializer_object.ReadSerialized(json_string)\n\n self.assertIsNotNone(unserialized_object)\n return unserialized_object",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_missing_data_sources(self):",
"def deserialize(self, data):\n return NotImplementedError",
"def test_bad_valuetype():\n test = [{'key': {'key1': 'val'}}, ['key']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'Bad data found' in str(t_result.failure())",
"def _delicious_data_test(self):\r\n # Blatant copy/paste, but I'm on a plane right now so oh well.\r\n # Now let's do some db sanity checks.\r\n res = Bmark.query.all()\r\n self.assertEqual(\r\n len(res),\r\n 19,\r\n \"We should have 19 results, we got: \" + str(len(res)))\r\n\r\n # verify we can find a bookmark by url and check tags, etc\r\n check_url = u'http://www.ndftz.com/nickelanddime.png'\r\n check_url_hashed = generate_hash(check_url)\r\n found = Bmark.query.filter(Bmark.hash_id == check_url_hashed).one()\r\n\r\n self.assertTrue(\r\n found.hashed.url == check_url, \"The url should match our search\")\r\n self.assertEqual(\r\n len(found.tags),\r\n 7,\r\n \"We should have gotten 7 tags, got: \" + str(len(found.tags)))\r\n self.assertEqual(\r\n 'importer',\r\n found.inserted_by,\r\n \"The bookmark should have imported: \" + found.inserted_by)\r\n\r\n # and check we have a right tag or two\r\n self.assertTrue(\r\n 'canonical' in found.tag_string(),\r\n 'Canonical should be a valid tag in the bookmark')\r\n\r\n # and check the long description field\r\n self.assertTrue(\r\n \"description\" in found.extended,\r\n \"The extended attrib should have a nice long string in it\")"
] | [
"0.64343673",
"0.6035663",
"0.5727462",
"0.57251585",
"0.5675099",
"0.5574621",
"0.551062",
"0.54578316",
"0.5383934",
"0.5342037",
"0.53340733",
"0.5258394",
"0.52225083",
"0.5216664",
"0.5208067",
"0.5180121",
"0.51705754",
"0.5160198",
"0.5145018",
"0.5122322",
"0.5117121",
"0.51168454",
"0.5100411",
"0.50904137",
"0.50895596",
"0.5071966",
"0.50577193",
"0.50387037",
"0.50252706",
"0.50198716"
] | 0.69288146 | 0 |
Test deprecated save_json logs a warning. | async def test_deprecated_save_json(
caplog: pytest.LogCaptureFixture, tmp_path: Path
) -> None:
# pylint: disable-next=hass-deprecated-import,import-outside-toplevel
from homeassistant.util.json import save_json
fname = tmp_path / "test1.json"
save_json(fname, TEST_JSON_A)
assert "uses save_json from homeassistant.util.json" in caplog.text
assert "should be updated to use homeassistant.helpers.json module" in caplog.text | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(obj):\n import warnings\n warnings.warn(\"simplejson.dumps(s) should be used instead of write(s)\",\n DeprecationWarning)\n return dumps(obj)",
"def test_save_json_with_invalid_step(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n\n with pytest.raises(ValueError):\n save_json(temp_dir, data, step={\"invalid\": \"dict\"})",
"def _save_document(keep_path, keep_json):\n with open(keep_path, 'w+') as keep_file:\n keep_file.write(json.dumps(keep_json))",
"def test_json_dump(self, force_field):\n import json\n\n json.dumps(force_field._to_smirnoff_data())",
"def test_save_to_file(self):\n self.assertFalse(os.path.exists(\"file.json\"))",
"def test_save_json(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n save_json(temp_dir, data, step=1)\n\n assert os.path.exists(os.path.join(temp_dir, \"json\", \"1.json\"))",
"def test_valid_ld(self):\n self.assertEqual(self.obj.to_json_string(self.valid_ld),\n json.dumps(self.valid_ld))",
"def test_regular_dump(self):\n try:\n _build_test_dirs()\n dicti = {\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, json.load(fileobj))\n finally:\n _dismantle_test_dirs()",
"def save_json(node):\n return _api_internal._save_json(node)",
"def test_update_to_non_json():\n starting_db = create_db(STARTING_DB_INPUT)\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n \"this isn't json :(\"\n )",
"def test_save_method(self):\n\n models.storage.save()\n self.assertTrue(os.path.exists('file.json'))",
"def test_save(self):\n\n expected = {\n self.file_to_test: {\n \"example.com\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.invalid,\n },\n },\n }\n\n self.inactive_db.database = expected.copy()\n self.inactive_db.save()\n\n self.assertEqual(\n expected, PyFunceble.helpers.Dict().from_json_file(self.storage_file)\n )",
"def test_save_fg_fobj(tfg):\n\n file_name = 'test_fooof_fileobj'\n\n with open(os.path.join(TEST_DATA_PATH, file_name + '.json'), 'w') as f_obj:\n save_fg(tfg, f_obj, TEST_DATA_PATH, False, True, False, False)\n\n assert os.path.exists(os.path.join(TEST_DATA_PATH, file_name + '.json'))",
"def test_save_json_not_existed_dir(temp_dir):\n data = json.dumps({\"k\": \"v\", \"list\": [1, 2, 3]})\n dist = os.path.join(temp_dir, 'not_existed')\n save_json(dist, data, step=1)\n\n assert os.path.exists(os.path.join(dist, \"json\", \"1.json\"))",
"def save_json_to_file(i):\n\n import json\n import ck.strings\n\n fn = i['json_file']\n\n if i.get('safe', '') == 'yes':\n d = i['dict']\n\n sd = {}\n\n # Check main unprintable keys\n for k in d:\n try:\n json.dumps(d[k])\n except Exception as e:\n pass\n else:\n sd[k] = d[k]\n\n i['dict'] = sd\n\n r = ck.strings.dump_json(i)\n if r['return'] > 0:\n return r\n s = r['string'].replace('\\r', '')+'\\n'\n\n return save_text_file({'text_file': fn, 'string': s})",
"def testtojson(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual(json.dumps([dicty]), Base.to_json_string([dicty]))",
"def test_non_list_of_dicts_arg(self):\n self.assertEqual(self.obj.to_json_string(666), '666')",
"def test_json_string_to_file_empty(self):\n Square.save_to_file([])\n with open(\"Square.json\") as a_file:\n self.assertEqual(json.loads(a_file.read()), [])",
"def test_badFormat(self):\n with open(os.path.join(self.directory, \"service2.json\"), \"w\") as f:\n f.write(\"this is not json\")\n self.pump()\n self.assertNodesEqual(knownNodes(self.disco, \"service2\", \"staging\"), [])",
"def save_json(dict_obj, path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'w') as json_file:\n json.dump(dict_obj, json_file)",
"def test_sktime_save_model_raises_invalid_serialization_format(auto_arima_model, model_path):\n with pytest.raises(MlflowException, match=\"Unrecognized serialization format: \"):\n flavor.save_model(\n sktime_model=auto_arima_model, path=model_path, serialization_format=\"json\"\n )",
"def save_vuln_json(self, data):\n self.helper.store_json_content(data, \"snyk-feed/vulnerability-data.json\")",
"async def test_update_with_json_attrs_bad_json(\n hass: HomeAssistant,\n mqtt_mock_entry: MqttMockHAClientGenerator,\n caplog: pytest.LogCaptureFixture,\n) -> None:\n await help_test_update_with_json_attrs_bad_json(\n hass,\n mqtt_mock_entry,\n caplog,\n select.DOMAIN,\n DEFAULT_CONFIG,\n )",
"def testtojson2(self):\n dicty = {\"id\": 5, \"class\": \"string\", \"list\": [], \"set\": {}}\n self.assertEqual(json.dumps([dicty, dicty]),\n Base.to_json_string([dicty, dicty]))",
"def test_dumps_datetime(self):\n try:\n _build_test_dirs()\n dicti = {\n 'datetime': datetime.datetime.now(),\n 'array': [1, 2, 3],\n 'string': 'trololo',\n 'int': 1,\n 'float': 4.32,\n 'true': True,\n 'false': False,\n 'null': None\n }\n with open(_TEST_FILE, 'w+') as fileobj:\n morejson.dump(dicti, fileobj)\n with open(_TEST_FILE, 'r') as fileobj:\n self.assertEqual(dicti, morejson.load(fileobj))\n finally:\n _dismantle_test_dirs()",
"def test_save_to_file(self):\n rect = Rectangle(1, 1)\n types = (int, float, str, tuple, list, dict, bool)\n insts = [rect] + [Rectangle(1, 1, id=t()) for t in types]\n fname = 'Rectangle.json'\n try:\n remove(fname)\n except FileNotFoundError:\n pass\n self.assertIsNone(Rectangle.save_to_file(None))\n with open(fname) as ifile:\n self.assertEqual(ifile.read(), '[]')\n for index in range(len(insts)):\n self.assertIsNone(Rectangle.save_to_file(insts[index:]))\n with open(fname) as ifile:\n self.assertEqual(ifile.read(), Rectangle.to_json_string(\n [obj.to_dictionary() for obj in insts[index:]]\n ))",
"def save_json(self, stock_name, stock_dict, market):\n with open(f\"{self.json_path}/{market}_{stock_name}.json\", 'w') as f:\n json.dump(stock_dict, f)\n if self.debug:\n print(f\" JsonHelper.save_json() --> save {self.json_path}/{market}_{stock_name}.json success\")",
"def test_serialize_no_metadata(self):\n pass # pragma: no cover",
"def json(filepath, mode=DataSaver.MODE_OVERWRITE, date_format=None,\n double_precision=10, force_ascii=True, date_unit='ms'):\n format_file = DataSaver.FORMAT_JSON\n kwargs = locals()\n _apply_datasaver(format_file, kwargs, last_uuid)\n return None",
"def test_save_npy_with_invalid_step(temp_dir):\n data = np.array([[1, 2, 3], [4, 5, 6]])\n\n with pytest.raises(ValueError):\n save_npy(temp_dir, data, step={\"invalid\": \"dict\"})"
] | [
"0.6194405",
"0.5898329",
"0.58882964",
"0.5858988",
"0.58045053",
"0.5784072",
"0.56817424",
"0.567576",
"0.56687623",
"0.5630771",
"0.5492047",
"0.5476971",
"0.54741037",
"0.5469141",
"0.54161847",
"0.54124707",
"0.53882706",
"0.5366019",
"0.53471386",
"0.53345484",
"0.53281057",
"0.53098315",
"0.53082645",
"0.53071344",
"0.5304724",
"0.52949005",
"0.5256671",
"0.52416277",
"0.5240902",
"0.5240283"
] | 0.7213232 | 0 |
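Note (illustrative sketch, not dataset content): the row above pairs the query "Test deprecated save_json logs a warning." with a Home Assistant test that leans entirely on pytest's built-in caplog fixture — call the deprecated helper, then assert the warning text appears in caplog.text. A minimal, self-contained version of that pattern is sketched below; the legacy_save_json helper is a hypothetical stand-in, not a real Home Assistant or pytest API.

import json
import logging
from pathlib import Path

import pytest

_LOGGER = logging.getLogger(__name__)


def legacy_save_json(path: Path, data: dict) -> None:
    # Hypothetical legacy helper that warns about its own deprecation
    # before writing the payload to disk.
    _LOGGER.warning("legacy_save_json is deprecated; use the newer json helpers")
    path.write_text(json.dumps(data))


def test_legacy_save_json_logs_warning(
    caplog: pytest.LogCaptureFixture, tmp_path: Path
) -> None:
    fname = tmp_path / "test1.json"
    legacy_save_json(fname, {"a": 1})
    # caplog.text aggregates all captured log records as plain text,
    # so the assertion only needs a distinctive substring of the message.
    assert "legacy_save_json is deprecated" in caplog.text
    assert json.loads(fname.read_text()) == {"a": 1}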
Filter subreads from input_fofn using pls2fasta, and create all_reads_fasta. | def _filter_subreads(self):
logging.info("Start to filter subreads in fofn.")
if op.exists(self.ori_all_reads_fasta) and self.force_redo is not True:
msg = "{fa} already exists, skip pls2fasta".format(fa=self.ori_all_reads_fasta)
logging.warn(msg)
else:
logging.debug("{f} does not exist, call pls2fasta".
format(f=self.ori_all_reads_fasta))
filter_summary = op.join(self.filtered_region_dir,
"filtered_summary.csv")
cmd = "filter_plsh5.py --debug " + \
"--filter='MinReadScore=0.80,MinSRL=500,MinRL=100' " + \
"--trim='True' --outputDir={fr} ".format(
fr=self.filtered_region_dir) + \
"--outputSummary={sm} ".format(sm=filter_summary) + \
"--outputFofn={rgn} ".format(rgn=self.region_fofn) + \
"{in_fofn}".format(in_fofn=self.input_fofn)
logging.info("CMD: {cmd}".format(cmd=cmd))
_o, _c, _m = backticks(cmd)
if _c != 0:
raise RuntimeError("CMD failed. " + str(_o) + ' ' + str(_m))
cmd = "pls2fasta -trimByRegion " + \
"-regionTable {rgn} ".format(rgn=self.region_fofn) + \
"{fofn} {fa} ".format(fofn=self.input_fofn,
fa=self.ori_all_reads_fasta)
logging.info("CMD: {cmd}".format(cmd=cmd))
_o, _c, _m = backticks(cmd)
if _c != 0:
raise RuntimeError("CMD failed. " + str(_o) + ' ' + str(_m))
logging.info("{f} created.".format(f=self.ori_all_reads_fasta))
logging.debug("Copying {ori_f} to {f}.".format(
ori_f=self.ori_all_reads_fasta, f=self.all_reads_fasta))
shutil.copyfile(self.ori_all_reads_fasta, self.all_reads_fasta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter_reads(alignment_file, readdb, read_dirs, quality_threshold=7, recursive=False, trim=False):\n assert alignment_file.endswith(\"bam\"), \"Alignment file must be in BAM format: {}\".format(alignment_file)\n # grab aligned segment\n if trim:\n assert isinstance(trim, int), \"Trim needs to be an integer: {}\".format(trim)\n else:\n trim = np.inf\n n_bases = 0\n n_files = 0\n with closing(pysam.AlignmentFile(alignment_file, 'rb')) as bamfile:\n name_indexed = pysam.IndexedReads(bamfile)\n name_indexed.build()\n for name, fast5 in parse_read_name_map_file(readdb, read_dirs, recursive=recursive):\n try:\n if trim < n_bases:\n print(\"Filtered {} files for {} bases\".format(n_files, n_bases))\n break\n iterator = name_indexed.find(name)\n for aligned_segment in iterator:\n if aligned_segment.is_secondary or aligned_segment.is_unmapped \\\n or aligned_segment.is_supplementary or aligned_segment.has_tag(\"SA\"):\n continue\n # get data and sanity check\n if aligned_segment.query_qualities is not None:\n if np.mean(aligned_segment.query_qualities) < quality_threshold:\n continue\n n_files += 1\n n_bases += aligned_segment.query_length\n yield fast5, aligned_segment\n except KeyError:\n print(\"Found no alignments for {}\".format(fast5))",
"def filter_fasta_fp(input_seqs_fp, output_seqs_fp, seqs_to_keep, negate=False):\r\n input_seqs = parse_fasta(open(input_seqs_fp, 'U'))\r\n output_f = open(output_seqs_fp, 'w')\r\n return filter_fasta(input_seqs, output_f, seqs_to_keep, negate)",
"def separate_amplicons( subread_input, reference_fofn, locus, output=None ):\n # Convert input to list if needed\n if isinstance(subread_input, str):\n file_list = read_list_file( subread_input )\n if output is None:\n output = subread_input\n elif isinstance(subread_input, list):\n file_list = subread_input\n if output is None:\n msg = 'Output file must be specified with file-list input!'\n log.error( msg )\n raise ValueError( msg )\n # If the inputs are valid, check that the files haven't already been split\n if _split_exists( file_list, locus ):\n log.info(\"Separating subreads by amplicon for Locus %s\" % locus)\n return\n # Otherwise, separate the sequences and write the results\n log.info(\"Separating subreads by amplicon for Locus %s\" % locus)\n reference_fasta = _parse_reference_fofn( reference_fofn, locus )\n new_file_list = _separate_amplicons( file_list, reference_fasta, locus)\n write_list_file( new_file_list, output )",
"def process_fasta(in_fh, args, cluster_size_re, rna_seq_objs):\n for record in SeqIO.parse(in_fh, 'fasta'):\n sequence = '%s%s%s'.replace('T', 'U') % (\n args.prefix, str(record.seq), args.suffix\n )\n cluster_size = 1\n try:\n cluster_size = cluster_size_re.search(record.description)\n cluster_size = cluster_size.group(1)\n except AttributeError:\n print 'Not able to find cluster size. Setting to 1.'\n if cluster_size is None:\n cluster_size = 1\n\n # find structure\n curr_seq = RNASequence(record.id, cluster_size, sequence)\n if args.run_mfold:\n curr_seq.structure, curr_seq.energy_dict = run_mfold(\n sequence, args\n )\n curr_seq.free_energy = curr_seq.energy_dict['dG']\n else:\n rnafold_out = run_rnafold(sequence, args)\n rnafold_out = rnafold_out.split('\\n')\n try:\n curr_seq.structure, curr_seq.free_energy = (\n rnafold_out[1].split(' (')\n )\n except (ValueError, IndexError):\n print 'Error running RNAfold:\\n%s\\nExiting.' % rnafold_out\n sys.exit(1)\n\n print '%s\\n' % rnafold_out\n try:\n curr_seq.free_energy = abs(\n float(curr_seq.free_energy.replace(')', ''))\n )\n curr_seq.ensemble_free_energy = abs(\n float(rnafold_out[2].split('[')[1].replace(']', ''))\n )\n curr_seq.ensemble_probability = abs(float(\n rnafold_out[4].split(';')[0].replace(\n ' frequency of mfe structure in ensemble ', ''\n )\n ))\n curr_seq.ensemble_diversity = abs(float(\n rnafold_out[4].split(';')[1].replace(\n ' ensemble diversity ', ''\n )\n ))\n except IndexError:\n print (\n 'Error parsing RNAfold output. '\n '(Couldn\\'t find statistics.) Please check '\n 'RNAfold options.'\n )\n sys.exit(1)\n rna_seq_objs.append(curr_seq)",
"def _write_assigned_reads( input_fasta, assignments ):\n log.info(\"Separating subreads based on their amplicon assignments\")\n output_files = []\n writers = {}\n root_name = '.'.join( input_fasta.split('.')[:-1] )\n # Open up output writers for each group\n for group in assignments:\n output_file = \"%s_%s.fasta\" % (root_name, group)\n output_files.append( output_file )\n writers[group] = FastaWriter( output_file )\n # Write each record to it's appropriate group(s)\n for record in FastaReader( input_fasta ):\n name = record.name.split()[0]\n for group in assignments:\n if name in assignments[group]:\n writers[group].writeRecord( record )\n break\n # Close all of the output writers\n for group in writers:\n writers[group].close()\n return output_files",
"def filter_fasta(input_seqs, output_seqs_f, seqs_to_keep, negate=False):\r\n seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0]\r\n for seq_id in seqs_to_keep])\r\n # Define a function based on the value of negate\r\n if not negate:\r\n def keep_seq(seq_id):\r\n return seq_id.split()[0] in seqs_to_keep_lookup\r\n else:\r\n def keep_seq(seq_id):\r\n return seq_id.split()[0] not in seqs_to_keep_lookup\r\n\r\n for seq_id, seq in input_seqs:\r\n if keep_seq(seq_id):\r\n output_seqs_f.write('>%s\\n%s\\n' % (seq_id, seq))\r\n output_seqs_f.close()",
"def seqff(self):\r\n\r\n start = time.time()\r\n\r\n # load bininfo\r\n bininfo = load_bininfo(self.bininfodata_loc)\r\n\r\n # load input files\r\n if os.path.isdir(self.input_loc):\r\n input_list = [self.input_loc + x for x in os.listdir(self.input_loc)]\r\n\r\n elif os.path.isfile(self.input_loc):\r\n input_list = [self.input_loc]\r\n\r\n else:\r\n raise FileNotFoundError(\"error occurred : inputData is not a Directory or File\")\r\n\r\n for i, file in enumerate(input_list):\r\n filetype = file.split(\".\")[-1]\r\n # filetype : 'sam' or 'bam' or 'newtemp'\r\n if 'sam' in filetype:\r\n bincount = load_sam(file)\r\n\r\n elif 'newtemp' in filetype:\r\n bincount = load_counts(file)\r\n file = file.replace(\".newtemp\", \"\") # TEMP .newtemp -> .bam\r\n\r\n elif 'bam' in filetype:\r\n bincount = load_bam(file)\r\n\r\n else:\r\n continue\r\n\r\n #CREATE newtemp file in \"output_loc\"/newtemp/\r\n create_newtemp(bincount, file, self.newtemp_loc)\r\n\r\n newtemp = pd.DataFrame.from_dict(bincount, orient='index')\r\n newtemp.reset_index(level=0, inplace=True)\r\n newtemp.rename(columns={'index': 'binName', 0: 'counts'}, inplace=True)\r\n\r\n temp_bininfo = bininfo.copy(deep=True)\r\n temp_bininfo = temp_bininfo.merge(newtemp, on='binName',\r\n how='left') # missing value : NaN, not NA in pandas\r\n temp_bininfo['counts'] = temp_bininfo['counts'].fillna(0)\r\n\r\n temp_bininfo.sort_values(by='binorder', inplace=True)\r\n temp_bininfo.reset_index(drop=True)\r\n\r\n ####DATA PROCESSING #######################\r\n autosomebinsonly = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != 'NA') and \\\r\n (float(temp_bininfo['GC'][index]) > 0.316) and \\\r\n (temp_bininfo['CHR'][index] != 'chrX') and \\\r\n (temp_bininfo['CHR'][index] != 'chrY')\r\n autosomebinsonly.append(boolean)\r\n autosomebinsonly = pd.Series(autosomebinsonly)\r\n\r\n alluseablebins = []\r\n for index in range(61927):\r\n boolean = (temp_bininfo['FRS'][index] != \"NA\") and (float(temp_bininfo['GC'][index]) > 0.316)\r\n alluseablebins.append(boolean)\r\n alluseablebins = pd.Series(alluseablebins)\r\n\r\n #CREATE alluseablebins file in \"output_loc\"/alluseablebins\r\n #create_alluseablebins(alluseablebins, file, self.alluseablebins_loc)\r\n\r\n sum_counts = pd.Series(temp_bininfo['counts'])\r\n sum_counts = sum_counts[autosomebinsonly].sum(skipna=True)\r\n\r\n autoscaledtemp = pd.Series(temp_bininfo['counts'].loc[(autosomebinsonly)],\r\n copy=True) / sum_counts # NA-related code removed\r\n allscaledtemp = pd.Series(temp_bininfo['counts'].loc[(alluseablebins)], copy=True) / sum_counts\r\n\r\n gc_index = {}\r\n cnt = 0\r\n for index, isauto in enumerate(autosomebinsonly):\r\n if isauto:\r\n if temp_bininfo['GC'].iat[index] in gc_index:\r\n gc_index[temp_bininfo['GC'].iat[index]].append(float(autoscaledtemp.iat[cnt]))\r\n cnt += 1\r\n\r\n else:\r\n gc_index[temp_bininfo['GC'].iat[index]] = [float(autoscaledtemp.iat[cnt])]\r\n cnt += 1\r\n\r\n key_list = []\r\n val_list = []\r\n for key, val in gc_index.items():\r\n key_list.append(key)\r\n val_list.append(np.median(val))\r\n\r\n loess_var = loess(key_list, val_list) # default span : 0.75\r\n loess_var.fit()\r\n # y = loess.loess_prediction(newData, loessVar)\r\n # temp_loessPredict.loess_debugging(loessVar)\r\n\r\n ###prediction###\r\n loess_x = [float(gc) for index, gc in enumerate(temp_bininfo['GC']) if (alluseablebins[index])]\r\n # print(temp_bininfo['GC'])\r\n loess_fitted = loess_var.predict(loess_x)\r\n loess_fitted = list(loess_fitted.values)\r\n # 
print(loess_fitted)\r\n\r\n median_autoscaledtemp = np.median(autoscaledtemp)\r\n median_autoscaledtemp = float(median_autoscaledtemp) # for fixed constant\r\n\r\n normalizedbincount = [(x + (median_autoscaledtemp - loess_fitted[index])) for index, x in\r\n enumerate(allscaledtemp)]\r\n\r\n #CREATE normalizedbincount in \"output_loc\"/normalizedbincount\r\n create_normalizedbincount(normalizedbincount, file, self.normalizedbincount_loc)\r\n\r\n bincounts = pd.Series(data=np.repeat(a=0.0, repeats=61927), index=temp_bininfo['binName'], dtype=np.float64)\r\n\r\n sum_normalizedbincount = sum([val for val in normalizedbincount if not math.isnan(val)])\r\n sum_normalizedbincount = float(sum_normalizedbincount) # deep copy temporarily\r\n\r\n cnt = 0\r\n for index, x in enumerate(alluseablebins):\r\n if x == True:\r\n data = (normalizedbincount[cnt] / sum_normalizedbincount) * len(normalizedbincount)\r\n bincounts.iat[index] = data\r\n cnt += 1\r\n\r\n #CREATE bincounts in \"output_loc\"/bincounts\r\n create_bincounts(bincounts, file, self.bincounts_loc)\r\n\r\n wrsc = self.prediction(bincounts, self.B, self.mu, self.parameter_1, self.parameter_2)\r\n enet = np.dot(bincounts, (self.elnetbeta)) + (self.elnetintercept)\r\n ff = (wrsc+enet) / 2\r\n\r\n result_lines = list()\r\n result_lines.append(\"SeqFF\\tEnet\\tWRSC\")\r\n result_lines.append(\"{}\\t{}\\t{}\".format(ff, enet, wrsc))\r\n\r\n #CREATE results of seqff (seqff paper result covered) in \"output_loc\"/results\r\n create_results(result_lines, file, self.results_loc)\r\n\r\n end = time.time()\r\n elapsed = end - start\r\n h = int(elapsed) // 3600\r\n m = (int(elapsed) - (h * 3600)) // 60\r\n s = (int(elapsed) % 60)\r\n print(\"elapsed time: %d hr %d min %d sec\" % (h, m, s))\r\n print(\"elapsed :\", elapsed)\r\n print(\"progress : {} / {}\".format(i + 1, self.progress))",
"def load_feather(protein_feather, length_filter_pid=None, copynum_scale=False, copynum_df=None):\n protein_df = pd.read_feather(protein_feather).set_index('index')\n\n # Combine counts for residue groups\n from ssbio.protein.sequence.properties.residues import _aa_property_dict_one, EXTENDED_AA_PROPERTY_DICT_ONE\n aggregators = {\n 'aa_count_bulk' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Bulky'],\n 'subseqs' : ['metal_2_5D', 'metal_3D']},\n 'aa_count_carb' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Carbonylation susceptible'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},\n 'aa_count_chrg' : {'residues': _aa_property_dict_one['Charged'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'csa_2_5D', 'sites_2_5D', 'acc_2D', 'acc_3D',\n 'surface_3D']},\n 'aa_count_poschrg' : {'residues': _aa_property_dict_one['Basic'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},\n 'aa_count_negchrg' : {'residues': _aa_property_dict_one['Acidic'],\n 'subseqs' : ['metal_2_5D', 'metal_3D', 'acc_2D', 'acc_3D', 'surface_3D']},\n 'aa_count_tmstab' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM stabilizing'],\n 'subseqs' : ['tm_2D', 'tm_3D']},\n 'aa_count_tmunstab': {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['TM to Thr stabilizing'],\n 'subseqs' : ['tm_2D', 'tm_3D']},\n 'aa_count_dis' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Disorder promoting'],\n 'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',\n 'dna_2_5D']},\n 'aa_count_ord' : {'residues': EXTENDED_AA_PROPERTY_DICT_ONE['Order promoting'],\n 'subseqs' : ['disorder_2D', 'ss_disorder_2D', 'disorder_3D', 'ss_disorder_3D',\n 'dna_2_5D']}}\n\n # Do combination counts for all types of subsequences\n for suffix, info in aggregators.items():\n agg_residues = info['residues']\n for prefix in info['subseqs']:\n to_add_idxes = []\n for agg_res in agg_residues:\n to_add_idx = prefix + '_aa_count_' + agg_res\n if to_add_idx in protein_df.index:\n to_add_idxes.append(to_add_idx)\n subseq_agged_col = protein_df.loc[to_add_idxes, :].sum() # Add each residue series\n protein_df.loc[prefix + '_' + suffix] = subseq_agged_col # Append to df\n\n ## REMOVE OTHER STRAINS WITH DELETIONS (use float -- length_filter_pid=0.8 to get only strains with >80% length\n ## alternative to atlas2.calculate_residue_counts_perstrain wt_pid_cutoff param -- works a little differently just considering length\n if length_filter_pid:\n keep_cols = protein_df.loc['aa_count_total'][protein_df.loc['aa_count_total'] > protein_df.at['aa_count_total', 'K12'] * length_filter_pid].index\n protein_df = protein_df[keep_cols]\n\n # Multiply by proteomics copy number?\n if copynum_scale:\n if not isinstance(copynum_df, pd.DataFrame):\n raise ValueError('Please supply copy numbers')\n protein_id = op.basename(protein_feather).split('_protein')[0]\n if protein_id in copynum_df.index:\n copynum = copynum_df.at[protein_id, 'copynum']\n if copynum > 0: # TODO: currently keeping one copy of proteins with 0, is that ok?\n protein_df = protein_df * copynum\n\n return protein_df",
"def _apply_fasta_regex_func(infa, regex_func, outfa=None):\n # move the original file to a tmp folder\n out_dir = os.path.dirname(infa)\n tmp_dir = mkdtemp(dir=out_dir)\n old_fname = os.path.join(tmp_dir, \"original\") if outfa is None else infa\n new_fname = os.path.join(tmp_dir, \"filtered\")\n shutil.move(infa, old_fname)\n\n # perform the filtering\n excluded_contigs = []\n keep_contig = True\n with open(old_fname) as old, open(new_fname, \"w\") as new:\n for line in tqdm(old, desc=\"Filtering Fasta\", unit_scale=1, unit=\" lines\"):\n if line[0] == \">\":\n keep_contig = regex_func(line)\n if keep_contig is False:\n excluded_contigs.append(line[1:].split(\" \")[0].strip())\n if keep_contig:\n new.write(line)\n\n # move the filtered file to the original folder\n shutil.move(new_fname, outfa if outfa else infa)\n rm_rf(tmp_dir)\n\n return excluded_contigs",
"def filter_sequences(parent_directory, filename, label = 'Pfam:'):\n uniprot, repeated_seqs = load_uniprot('{}/{}'.format(parent_directory, filename))\n seq_fam = {}\n \n for u in uniprot:\n # Only select those sequences\n # - With the correct label\n # - That do not contain illegal characters\n # - Do not include sequences that are in multiple families\n index = find_index(u.dbxrefs, lambda x: x.startswith(label))\n if index != -1 and len(set(u._seq._data) - set(alphabet)) == 0 and u._seq._data not in repeated_seqs:\n seq_fam[u._seq._data] = u.dbxrefs[index][len(label):]\n \n filtered_fam = filtered_families(seq_fam)\n \n # Write processed output to a separate file\n with open('{}/processed.txt'.format(parent_directory), 'w') as f:\n f.write('\\n'.join(' '.join((fam, seq))\n for seq, fam in seq_fam.items()\n if fam in filtered_fam))",
"def splitFaFq(input_fa_or_fq, reads_per_split, out_dir, out_format, is_fq, reads_in_first_split=None):\n obj = FaFqSplitter(input_fa_or_fq=input_fa_or_fq,\n reads_per_split=reads_per_split,\n out_dir=out_dir, out_format=out_format,\n is_fq=is_fq)\n return obj.split(reads_in_first_split=reads_in_first_split)",
"def get_rappas_input_reads(pruning):\n output_dir = os.path.join(_working_dir, \"R\")\n\n # one read per fasta\n if cfg.get_mode(config) == cfg.Mode.LIKELIHOOD:\n return [os.path.join(output_dir, \"{query}_r0.fasta\")]\n # multiple reads per fasta\n else:\n # FIXME:\n # This is a dependency on pewo.templates.get_common_queryname_template result.\n # Look for a decent way to get rid of it.\n return [os.path.join(output_dir, pruning + \"_r\" + str(l) + \".fasta\")\n for l in config[\"read_length\"]]",
"def main():\n import logging\n from pbtranscript.__init__ import get_version\n log = logging.getLogger(__name__)\n args = get_args()\n from pbtranscript.Utils import setup_log\n setup_log(alog=log, level=logging.DEBUG)\n log.info(\"Running {f} v{v}.\".format(f=op.basename(__file__),\n v=get_version()))\n\n splitFaFq(input_fa_or_fq=args.input_fa_or_fq,\n reads_per_split=args.reads_per_split,\n out_dir=args.out_dir,\n out_format=args.out_format,\n is_fq=args.is_fq)",
"def filter_fastq_fp(input_seqs_fp, output_seqs_fp, seqs_to_keep, negate=False):\r\n input_seqs = parse_fastq(open(input_seqs_fp, 'U'), strict=False)\r\n output_f = open(output_seqs_fp, 'w')\r\n return filter_fastq(input_seqs, output_f, seqs_to_keep, negate)",
"def parse_reads_and_select_candidates(self, reads):\n st_time = time.time()\n # read_id_list = []\n total_reads = 0\n read_unique_id = 0\n for read in reads:\n # check if the read is usable\n if read.mapping_quality >= DEFAULT_MIN_MAP_QUALITY and read.is_secondary is False \\\n and read.is_supplementary is False and read.is_unmapped is False and read.is_qcfail is False:\n\n read.query_name = read.query_name + '_' + str(read_unique_id)\n if self.find_read_candidates(read=read):\n # read_id_list.append(read.query_name)\n total_reads += 1\n read_unique_id += 1\n\n if total_reads == 0:\n return []\n\n selected_allele_list = []\n postprocess_read_id_list = set()\n for pos in self.positional_allele_dictionary:\n if pos < self.region_start_position or pos > self.region_end_position:\n continue\n ref = self.reference_dictionary[pos]\n\n all_allele_dictionary = self.positional_allele_dictionary[pos]\n all_mismatch_count = 0\n for allele in all_allele_dictionary:\n all_mismatch_count += all_allele_dictionary[allele]\n\n # pick the top 2 most frequent allele\n allele_frequency_list = list(sorted(all_allele_dictionary.items(), key=operator.itemgetter(1, 0),\n reverse=True))[:PLOIDY]\n allele_list = self._filter_alleles(pos, allele_frequency_list)\n alt1 = allele_list[0] if len(allele_list) >= 1 else None\n alt2 = allele_list[1] if len(allele_list) >= 2 else '.'\n if alt1 is None:\n continue\n mq_rms = round(math.sqrt(self.rms_mq[pos]/self.coverage[pos]), 3) if self.coverage[pos] > 0 else 0\n dp = self.coverage[pos]\n ref_count = self.coverage[pos] - all_mismatch_count\n candidate_record = [self.chromosome_name] + self._get_record(pos, alt1, alt2, ref, ref_count) + [mq_rms] + [dp]\n postprocess_read_id_list.update(self.read_id_by_position[pos])\n selected_allele_list.append(candidate_record)\n\n postprocess_read_id_list = list(postprocess_read_id_list)\n if len(selected_allele_list) > 0:\n self.postprocess_reference()\n self.postprocess_reads(postprocess_read_id_list)\n\n return selected_allele_list",
"def discoverFromVCF(cls, inputFname, outputFname, refFastaFname=None, VCFOutputType=2, \\\n\t\t\t\t\tminMinorAlleleCoverage=1/4., maxMinorAlleleCoverage=3/4.,\\\n\t\t\t\t\tmaxNoOfReads=2., minNoOfReads=1/4., \\\n\t\t\t\t\tmaxNoOfReadsForGenotypingError=1, maxMajorAlleleCoverage=7/8., maxNoOfReadsForAllSamples=1000,\\\n\t\t\t\t\tnt_set = set(['a','c','g','t','A','C','G','T']), isqID2coverage=None, defaultCoverage=10, \\\n\t\t\t\t\toutputDelimiter='\\t',\\\n\t\t\t\t\treport=0, site_type=1):\n\t\timport csv\n\t\tfrom pymodule.utils import runLocalCommand, getColName2IndexFromHeader\n\t\tsys.stderr.write(\"Looking for heterozygous SNPs in %s (%s<=MAC<=%s).\\n\"%(os.path.basename(inputFname), \\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tminMinorAlleleCoverage, maxMinorAlleleCoverage))\n\t\treader =csv.reader(open(inputFname), delimiter='\\t')\n\t\t\n\t\t\n\t\tread_group2col_index = {'ref':0}\t#ref is at column 0. \"ref\" must not be equal to any read_group.\n\t\tread_group2coverage = {}\t#2011-9-2\n\t\tlocus_id2row_index = {}\n\t\tdata_matrix = []\n\t\t\n\t\ttid2refName = {}\t#dictionary storing the target references which have SNP calls\n\t\trefNameSet = set()\n\t\t\"\"\"\n\t\twriter = csv.writer(open(outputFname, 'w'), delimiter='\\t')\n\t\theader = ['sample', 'snp_id', 'chr', 'pos', 'qual', 'DP', 'minDP4', 'DP4_ratio', 'MQ']\n\t\tmoreHeader = ['GQ', 'GL', 'SB', 'QD', 'sndHighestGL', 'deltaGL']\n\t\t#['AF', 'AC','AN', 'Dels', 'HRun', 'HaplotypeScore','MQ0', 'QD']\t#2011-3-4 useless\n\t\tif VCFOutputType==2:\n\t\t\theader += moreHeader\n\t\tchr_pure_number_pattern = re.compile(r'[a-z_A-Z]+(\\d+)')\n\t\tchr_number_pattern = re.compile(r'chr(\\d+)')\n\t\t\"\"\"\n\t\t\n\t\tindividual_name2col_index = None\n\t\tcol_name2index = None\n\t\tcounter = 0\n\t\treal_counter = 0\n\t\t\n\t\t\n\t\tfor row in reader:\n\t\t\tif row[0] =='#CHROM':\n\t\t\t\trow[0] = 'CHROM'\t#discard the #\n\t\t\t\theader = row\n\t\t\t\tcol_name2index = getColName2IndexFromHeader(header, skipEmptyColumn=True)\n\t\t\t\tindividual_name2col_index = cls.getIndividual2ColIndex(header, col_name2index)\n\t\t\t\tcontinue\n\t\t\telif row[0][0]=='#':\t#2011-3-4\n\t\t\t\tcontinue\n\t\t\t\"\"\"\n\t\t\tif chr_number_pattern.search(row[0]):\n\t\t\t\tchr = chr_number_pattern.search(row[0]).group(1)\n\t\t\telif chr_pure_number_pattern.search(row[0]):\n\t\t\t\tchr = chr_pure_number_pattern.search(row[0]).group(1)\n\t\t\telse:\n\t\t\t\tsys.stderr.write(\"Couldn't parse the chromosome number/character from %s.\\n Exit.\\n\"%(row[0]))\n\t\t\t\tsys.exit(4)\n\t\t\t\"\"\"\n\t\t\tchr = row[0]\n\t\t\trefNameSet.add(chr)\n\t\t\t\n\t\t\tpos = row[1]\n\t\t\tquality = row[5]\n\t\t\t\n\t\t\toutputHet= False\n\t\t\t\n\t\t\tinfo = row[7]\n\t\t\tinfo_ls = info.split(';')\n\t\t\tinfo_tag2value = {}\n\t\t\tfor info in info_ls:\n\t\t\t\ttry:\n\t\t\t\t\ttag, value = info.split('=')\n\t\t\t\texcept:\n\t\t\t\t\t#sys.stderr.write(\"Error in splitting %s by =.\\n\"%info)\t###Error in splitting DS by =.\n\t\t\t\t\tcontinue\n\t\t\t\tinfo_tag2value[tag] = value\n\t\t\t\n\t\t\tcurrent_locus = '%s_%s'%(chr, pos)\n\t\t\trefBase = row[col_name2index['REF']]\n\t\t\taltBase = row[col_name2index['ALT']]\n\t\t\tif VCFOutputType==2:\t#2011-3-4 GATK\n\t\t\t\tformat_column = row[col_name2index['FORMAT']]\n\t\t\t\tformat_column_ls = format_column.split(':')\n\t\t\t\tformat_column_name2index = getColName2IndexFromHeader(format_column_ls)\n\t\t\t\tdata_row = ['NA']*(len(individual_name2col_index)+1)\t# extra 1 for the ref\n\t\t\t\tallele2count = {}\n\t\t\t\tfor individual_name, 
individual_col_index in individual_name2col_index.iteritems():\n\t\t\t\t\tread_group = individual_name\n\t\t\t\t\tif read_group not in read_group2col_index:\n\t\t\t\t\t\tread_group2col_index[read_group] = len(read_group2col_index)\n\t\t\t\t\t\t#2011-9-2\n\t\t\t\t\t\tif isqID2coverage:\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tisqID = read_group.split('_')[1]\n\t\t\t\t\t\t\t\tisqID = int(isqID)\n\t\t\t\t\t\t\t\tcoverage = isqID2coverage.get(isqID, defaultCoverage)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tsys.stderr.write('Except type: %s\\n'%repr(sys.exc_info()))\n\t\t\t\t\t\t\t\timport traceback\n\t\t\t\t\t\t\t\ttraceback.print_exc()\n\t\t\t\t\t\t\t\tsys.stderr.write(\"Coverage for %s not available. use default=%s.\\n\"%(read_group, defaultCoverage))\n\t\t\t\t\t\t\t\tcoverage = defaultCoverage\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcoverage = defaultCoverage\n\t\t\t\t\t\tread_group2coverage[read_group] = coverage\n\t\t\t\t\t\n\t\t\t\t\tcoverage = read_group2coverage[read_group]\n\t\t\t\t\tgenotype_data = row[individual_col_index]\n\t\t\t\t\tgenotype_data_ls = genotype_data.split(':')\n\t\t\t\t\tgenotype_call_index = format_column_name2index.get('GT')\n\t\t\t\t\tgenotype_quality_index = format_column_name2index.get('GQ')\n\t\t\t\t\tif genotype_quality_index is None:\n\t\t\t\t\t\tgenotype_quality_index = format_column_name2index.get('DP')\n\t\t\t\t\tdepth_index = format_column_name2index.get(\"DP\")\n\t\t\t\t\t#GL_index = format_column_name2index.get('GL')\n\t\t\t\t\tif len(genotype_data_ls)<len(format_column_name2index):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif depth_index is None or genotype_call_index is None:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#genotype_quality = genotype_data_ls[genotype_quality_index]\n\t\t\t\t\tgenotype_call = genotype_data_ls[genotype_call_index]\n\t\t\t\t\tdepth = int(genotype_data_ls[depth_index])\n\t\t\t\t\tif depth>maxNoOfReads*coverage or depth<minNoOfReads*coverage:\t#2011-3-29 skip. 
coverage too high or too low\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tallele = 'NA'\n\t\t\t\t\tif genotype_call=='0/1' or genotype_call =='1/0':\t#heterozygous, the latter notation is never used though.\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tGL_list = genotype_data_ls[GL_index]\n\t\t\t\t\t\tGL_list = GL_list.split(',')\n\t\t\t\t\t\tGL_list = map(float, GL_list)\n\t\t\t\t\t\tGL = GL_list[1]\n\t\t\t\t\t\tsndHighestGL = max([GL_list[0], GL_list[2]])\n\t\t\t\t\t\tdeltaGL = GL-sndHighestGL\n\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\tAD = genotype_data_ls[format_column_name2index.get('AD')]\n\t\t\t\t\t\tAD = map(int, AD.split(','))\n\t\t\t\t\t\tminorAlleleCoverage = min(AD)\n\t\t\t\t\t\tmajorAlleleCoverage = max(AD)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif minorAlleleCoverage<=maxMinorAlleleCoverage*coverage and minorAlleleCoverage>=minMinorAlleleCoverage*coverage \\\n\t\t\t\t\t\t\t\tand majorAlleleCoverage<=maxMajorAlleleCoverage*coverage:\n\t\t\t\t\t\t\tDP4_ratio = float(AD[0])/AD[1]\n\t\t\t\t\t\t\tallele = '%s%s'%(refBase, altBase)\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\t\t\tdata_row = [individual_name, 'chr%s:%s'%(chr, pos), chr, pos, quality, \\\n\t\t\t\t\t\t\t\t\tdepth, minorAlleleCoverage, DP4_ratio,\\\n\t\t\t\t\t\t\t\t\tinfo_tag2value.get('MQ'), genotype_quality, GL,\\\n\t\t\t\t\t\t\t\t\tinfo_tag2value.get('SB'), info_tag2value.get('QD'), sndHighestGL, deltaGL]\n\t\t\t\t\t\t\t#for i in range(3, len(moreHeader)):\n\t\t\t\t\t\t\t#\tinfo_tag = moreHeader[i]\n\t\t\t\t\t\t\t#\tdata_row.append(info_tag2value.get(info_tag))\n\t\t\t\t\t\t\twriter.writerow(data_row)\n\t\t\t\t\t\t\t\"\"\"\n\t\t\t\t\telif genotype_call=='./.':\t#missing\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telif genotype_call =='1/1':\n\t\t\t\t\t\tallele = '%s%s'%(altBase, altBase)\n\t\t\t\t\telif genotype_call =='0/0':\n\t\t\t\t\t\tallele = '%s%s'%(refBase, refBase)\n\t\t\t\t\tcol_index = read_group2col_index.get(read_group)\n\t\t\t\t\tdata_row[col_index] = allele\n\t\t\t\t\tif allele!='NA':\n\t\t\t\t\t\tif allele not in allele2count:\n\t\t\t\t\t\t\tallele2count[allele] = 0\n\t\t\t\t\t\tallele2count[allele] += 1\n\t\t\t\t\n\t\t\t\tif len(allele2count)>site_type-1:\t#whether polymorphic across samples or all sites in vcf\n\t\t\t\t\treal_counter += 1\n\t\t\t\t\tlocus_id2row_index[current_locus] = len(locus_id2row_index)\n\t\t\t\t\tdata_matrix.append(data_row)\n\t\t\t\"\"\"\n\t\t\telif VCFOutputType==1:\t#samtools. 
2011-7-20 outdated.\n\t\t\t\tsample_id = row[8]\n\t\t\t\tfor tag in info_tag2value.keys():\n\t\t\t\t\tvalue = info_tag2value.get(tag)\n\t\t\t\t\tif tag=='DP4':\n\t\t\t\t\t\ttag = 'DP4_ratio'\n\t\t\t\t\t\tvalue = value.split(',')\n\t\t\t\t\t\tvalue = map(int, value)\n\t\t\t\t\t\tno_of_ref_allele = sum(value[0:2])\n\t\t\t\t\t\tno_of_non_ref_allele = sum(value[2:])\n\t\t\t\t\t\tMAC = min(no_of_ref_allele, no_of_non_ref_allele)\n\t\t\t\t\t\tif MAC<=maxMinorAlleleCoverage and MAC>=minMinorAlleleCoverage:\n\t\t\t\t\t\t\toutputHet = True\n\t\t\t\t\t\t\tvalue = float(no_of_ref_allele)/no_of_non_ref_allele\n\t\t\t\t\t\t\tinfo_tag2value['minDP4'] = min(no_of_ref_allele, no_of_non_ref_allele)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tvalue = None\n\t\t\t\t\t\tinfo_tag2value[tag] = value\n\t\t\t\tif outputHet:\n\t\t\t\t\treal_counter += 1\n\t\t\t\t\toutput_row = [sample_id, 'chr%s:%s'%(chr, pos), chr, pos, quality, info_tag2value.get('DP'), \\\n\t\t\t\t\t\t\t\tinfo_tag2value.get('minDP4'), info_tag2value.get('DP4_ratio'), info_tag2value.get('MQ')]\n\t\t\t\t\twriter.writerow(output_row)\n\t\t\t\"\"\"\n\t\t\tcounter += 1\n\t\t\tif counter%2000==0 and report:\n\t\t\t\tsys.stderr.write(\"%s\\t%s\\t%s\"%(\"\\x08\"*80, counter, real_counter))\n\t\tdel reader\n\t\t\n\t\tcls.outputCallMatrix(data_matrix, refFastaFname, outputFname=outputFname, refNameSet=refNameSet, \\\n\t\t\t\t\tread_group2col_index=read_group2col_index, \\\n\t\t\t\t\tlocus_id2row_index=locus_id2row_index, outputDelimiter=outputDelimiter)\n\t\t\n\t\tsys.stderr.write(\"%s\\t%s\\t%s.\\n\"%(\"\\x08\"*80, counter, real_counter))",
"def cut_seq_fasta_file(listOfFasta, PATH_FASTA_CUTOFF, INFO_folder, file_cutoff=None) :\n\n\tif file_cutoff == True :\n\t\tDICT_CUTOFF=set_dict_cutoff_init(listOfFasta, INFO_folder)\n\telse :\n\t\tDICT_CUTOFF=set_dict_cutoff(cutoff_file)\n\n\n\tprint \"\\n#################\"\n\tprint \"# Cutoff file\"\n\tprint \"#################\\n\"\n\n\tcreate_folder(PATH_FASTA_CUTOFF)\n\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionnary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor my_file in listOfFasta :\n\t\tcurrent_file = os.path.basename(my_file)\n\t\tif current_file in DICT_CUTOFF:\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\t\t\tnumber_seq = len(list(seqiter))\n\t\t\tprogression = 1\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\n\t\t\tfor seq in seqiter :\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences read\\r\".format(current_file, progression/float(number_seq)*100, progression, number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\n\t\t\t\tif \"_D_\" in seq.id :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\t\t\t\telse :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"V\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tcontinue\n\t\t\t\telif len(seq) > DICT_CUTOFF[current_file][1] or len(seq) < DICT_CUTOFF[current_file][0] :\n\t\t\t\t\tif len(seq) > DICT_CUTOFF[current_file][1] :\n\t\t\t\t\t\tdict_remove[id_seq]=[seq.id,[], \"long\"]\n\t\t\t\t\telse :\n\t\t\t\t\t\tdict_remove[id_seq]=[seq.id,[], \"short\"]\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(current_file))\n\n\tprint \"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor my_file in listOfFasta :\n\t\tcurrent_file = os.path.basename(my_file)\n\t\twith open(os.path.join(PATH_FASTA_CUTOFF, current_file), \"w\") as writing_file :\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\t\t\tnumber_seq = len(list(seqiter))\n\t\t\tprogression = 1\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\t\t\tfor seq in seqiter :\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences read\\r\".format(current_file, progression/float(number_seq)*100, progression, number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\n\t\t\t\tif \"_D_\" in seq.id :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\t\t\t\telse :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"V\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, writing_file,\"fasta\")\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(current_file))\n\n\twrite_remove_cutoff(dict_remove, INFO_folder)\n\n\treturn",
"def run_fasta_checks(input_fasta_fp,\r\n mapping_fp,\r\n tree_fp=None,\r\n tree_subset=False,\r\n tree_exact_match=False,\r\n same_seq_lens=False,\r\n all_ids_found=False,\r\n suppress_barcode_checks=False,\r\n suppress_primer_checks=False):\r\n\r\n # Stores details of various checks\r\n fasta_report = {}\r\n\r\n # get sets of data for testing fasta labels/seqs\r\n sample_ids, barcodes, linkerprimerseqs = get_mapping_details(mapping_fp,\r\n suppress_barcode_checks, suppress_primer_checks)\r\n\r\n fasta_labels = get_fasta_labels(input_fasta_fp)\r\n\r\n total_seq_count = len(fasta_labels)\r\n\r\n fasta_report['duplicate_labels'], fasta_report['duplicate_ids'] =\\\r\n get_dup_labels_perc(fasta_labels)\r\n\r\n fasta_report['invalid_labels'], fasta_report['nosample_ids_map'] =\\\r\n check_labels_sampleids(fasta_labels, sample_ids, total_seq_count)\r\n\r\n fasta_report['invalid_seq_chars'], fasta_report['barcodes_detected'],\\\r\n fasta_report['linkerprimers_detected'],\\\r\n fasta_report['barcodes_at_start'] = check_fasta_seqs(input_fasta_fp,\r\n barcodes, linkerprimerseqs, total_seq_count)\r\n\r\n if same_seq_lens:\r\n fasta_report['same_seq_lens'] = check_fasta_seqs_lens(input_fasta_fp)\r\n else:\r\n fasta_report['same_seq_lens'] = False\r\n\r\n if all_ids_found:\r\n fasta_report['all_ids_found'] = check_all_ids(fasta_labels, sample_ids)\r\n else:\r\n fasta_report['all_ids_found'] = False\r\n\r\n if tree_subset:\r\n fasta_report['tree_subset'] = check_tree_subset(fasta_labels, tree_fp)\r\n else:\r\n fasta_report['tree_subset'] = False\r\n\r\n if tree_exact_match:\r\n fasta_report['tree_exact_match'] =\\\r\n check_tree_exact_match(fasta_labels, tree_fp)\r\n else:\r\n fasta_report['tree_exact_match'] = False\r\n\r\n return fasta_report",
"def filter_fastq(input_seqs, output_seqs_f, seqs_to_keep, negate=False):\r\n seqs_to_keep_lookup = {}.fromkeys([seq_id.split()[0]\r\n for seq_id in seqs_to_keep])\r\n # Define a function based on the value of negate\r\n if not negate:\r\n def keep_seq(seq_id):\r\n return seq_id.split()[0] in seqs_to_keep_lookup\r\n else:\r\n def keep_seq(seq_id):\r\n return seq_id.split()[0] not in seqs_to_keep_lookup\r\n\r\n for seq_id, seq, qual in input_seqs:\r\n if keep_seq(seq_id):\r\n output_seqs_f.write('@%s\\n%s\\n+\\n%s\\n' % (seq_id, seq, qual))\r\n output_seqs_f.close()",
"def build(self, fasta_files, subtype_file):\n\n # Check subtype file\n subtypes = {}\n\n for row in csv.reader(open(subtype_file,'r'),delimiter='\\t'):\n name = row[0]\n subt = row[1]\n\n subtypes[name] = subt.lower()\n \n if isinstance(fasta_files, str):\n # Create list\n fasta_files = [fasta_files]\n\n if len(fasta_files) != self._nloci:\n raise Exception(\"Missing fasta file. {} fasta files provided for {} number of loci.\".format(len(fasta_files), self._nloci))\n\n concat = LociConcat()\n sequences = concat.collect(fasta_files)\n \n for name,seqslist in sequences.iteritems():\n this_subt = subtypes[name]\n\n for seqs in seqslist:\n self.add(seqs, name, this_subt)",
"def extract_sequences(self, new_fasta, ids):\n assert isinstance(new_fasta, FASTA)\n new_fasta.create()\n for seq in self:\n if seq.id in ids: new_fasta.add_seq(seq)\n new_fasta.close()",
"def filter_sff_reads(sff_data, ids_to_keep=None, ids_to_remove=None):\r\n # TODO: Move to PyCogent\r\n header, reads = sff_data\r\n # Force evaluation of all reads. We have no choice, since we need\r\n # the total number of reads to be returned with the header.\r\n # Another design choice would be to go back and correct the header\r\n # when we write the binary SFF file to disk -- maybe we'll switch\r\n # to that strategy in the future.\r\n if ids_to_keep is not None:\r\n reads = [r for r in reads if r['Name'] in ids_to_keep]\r\n if ids_to_remove is not None:\r\n reads = [r for r in reads if r['Name'] not in ids_to_remove]\r\n header['number_of_reads'] = len(reads)\r\n return header, reads",
"def fasta_reader(fasta):\n # ditch the boolean (x[0]) and just keep the header/seq grouping\n fa_iter = (x[1] for x in itertools.groupby(fasta, lambda line: line[0] == \">\"))\n for header in fa_iter:\n # drop the \">\"\n name = next(header)[1:].strip()\n # join all sequence lines to one by iterating until the next group.\n read = \"\".join(s.strip() for s in next(fa_iter))\n yield name, read",
"def fastq_to_fasta(input_file, wanted_set):\n file_name = os.path.splitext(os.path.basename(input_file))[0]\n with open(file_name + \"_filtered.fasta\", \"w\") as out:\n for record in SeqIO.parse(input_file, \"fastq\"):\n ID = str(record.id)\n SEQ = str(record.seq)\n if ID in wanted_set:\n out.write(\">\" + ID + \"\\n\" + SEQ + \"\\n\")",
"def discoverFromBAM(cls, inputFname, outputFname, refFastaFname=None, monomorphicDiameter=100, \\\n\t\t\t\t\t\tmaxNoOfReads=300, minNoOfReads=2, minMinorAlleleCoverage=3, maxMinorAlleleCoverage=7,\\\n\t\t\t\t\t\tmaxNoOfReadsForGenotypingError=1, maxMajorAlleleCoverage=30, maxNoOfReadsForAllSamples=1000,\\\n\t\t\t\t\t\tnt_set = set(['a','c','g','t','A','C','G','T']), VCFOutputType=None, \\\n\t\t\t\t\t\toutputDelimiter='\\t',\\\n\t\t\t\t\t\tisqID2coverage=None, defaultCoverage=10, report=0, site_type=1):\n\t\timport pysam, csv\n\t\tsys.stderr.write(\"Looking for heterozygous SNPs in %s (%s<=MinorAC<=%s), maxNoOfReads=%s, \\\n\t\t\t\tmaxNoOfReadsForGenotypingError=%s, maxMajorAlleleCoverage=%s, maxNoOfReadsForAllSamples=%s.\\n\"%\\\n\t\t\t\t\t(os.path.basename(inputFname), minMinorAlleleCoverage, maxMinorAlleleCoverage ,\\\n\t\t\t\t\tmaxNoOfReads, maxNoOfReadsForGenotypingError, maxMajorAlleleCoverage, maxNoOfReadsForAllSamples))\n\t\tsamfile = pysam.Samfile(inputFname, \"rb\" )\n\t\tcurrent_locus = None\t# record of polymorphic loci\n\t\tprevious_locus = None\n\t\tcandidate_locus = None\n\t\tgood_polymorphic_loci = []\n\t\tread_group2no_of_snps_with_trialleles = {}\n\t\tread_group2no_of_snps_with_quad_alleles = {}\n\t\tread_group2no_of_snps_with_penta_alleles = {}\n\t\tread_group2no_of_good_hets = {}\n\t\tread_group2no_of_good_tris = {}\n\t\tcounter = 0\n\t\treal_counter = 0\n\t\t\n\t\tread_group2col_index = {'ref':0}\t#ref is at column 0. \"ref\" must not be equal to any read_group.\n\t\tlocus_id2row_index = {}\n\t\tdata_matrix = []\n\t\t\n\t\ttid2refName = {}\t#dictionary storing the target references which have SNP calls\n\t\trefNameSet = set()\t#reverse map of tid2refName\n\t\tfor pileupcolumn in samfile.pileup():\n\t\t\t#print\n\t\t\t#print 'coverage at base %s %s = %s'%(pileupcolumn.tid, pileupcolumn.pos , pileupcolumn.n)\n\t\t\tcounter += 1\n\t\t\trefName = samfile.getrname(pileupcolumn.tid)\n\t\t\tcurrent_locus = '%s_%s'%(refName, pileupcolumn.pos+1)\n\t\t\tif pileupcolumn.tid not in tid2refName:\n\t\t\t\ttid_str = str(pileupcolumn.tid)\n\t\t\t\ttid2refName[tid_str] = refName\n\t\t\t\trefNameSet.add(refName)\n\t\t\t\n\t\t\tread_group2base2count = {}\n\t\t\tread_group2depth = {}\n\t\t\tif pileupcolumn.n<=maxNoOfReadsForAllSamples:\n\t\t\t\tfor pileupread in pileupcolumn.pileups:\n\t\t\t\t\tread_group = None\n\t\t\t\t\t# find the read group\n\t\t\t\t\tfor tag in pileupread.alignment.tags:\n\t\t\t\t\t\tif tag[0]=='RG':\n\t\t\t\t\t\t\ttag_value = tag[1]\n\t\t\t\t\t\t\tif tag_value.find('sorted')==-1:\t# sometimes one read has >1 RGs, take the one without 'sorted'\n\t\t\t\t\t\t\t\tread_group = tag_value\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif read_group is None:\n\t\t\t\t\t\tsys.stderr.write(\"This read (tags:%s) has no non-sorted-embedded RG. 
Exit.\\n\"%(repr(pileupread.alignment.tags)))\n\t\t\t\t\t\tsys.exit(3)\n\t\t\t\t\tif read_group not in read_group2base2count:\n\t\t\t\t\t\tread_group2base2count[read_group] = {}\n\t\t\t\t\t\tread_group2depth[read_group] = 0\n\t\t\t\t\tif read_group not in read_group2col_index:\n\t\t\t\t\t\tread_group2col_index[read_group] = len(read_group2col_index)\n\t\t\t\t\t\n\t\t\t\t\tread_group2depth[read_group] += 1\n\t\t\t\t\tif pileupread.qpos<0 or pileupread.qpos>=len(pileupread.alignment.seq):\t#2011-7-13 need to investigate what happens here??\n\t\t\t\t\t\tcontinue\t#\n\t\t\t\t\tbase = pileupread.alignment.seq[pileupread.qpos]\n\t\t\t\t\tbase2count = read_group2base2count.get(read_group)\n\t\t\t\t\tif base in nt_set:\t#make sure it's a nucleotide\n\t\t\t\t\t\tif base not in base2count:\n\t\t\t\t\t\t\tbase2count[base] = 0\n\t\t\t\t\t\tbase2count[base] += 1\n\t\t\t\t\t#print '\\tbase in read %s = %s' % (pileupread.alignment.qname, base)\n\t\t\t\tdata_row = ['NA']*len(read_group2col_index)\n\t\t\t\t\n\t\t\t\tfound_one_het = False\t#2011 flag to see if any het in all samples is called at this locus.\n\t\t\t\tallele2count = {}\t#2011-3-29\n\t\t\t\tfor read_group, base2count in read_group2base2count.iteritems():\n\t\t\t\t\tdepth = read_group2depth.get(read_group)\n\t\t\t\t\tcol_index = read_group2col_index.get(read_group)\n\t\t\t\t\t\n\t\t\t\t\tif depth>maxNoOfReads:\t#2011-3-29 skip. coverage too high.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tallele = 'NA'\t#default\n\t\t\t\t\tif len(base2count)>=2:\n\t\t\t\t\t\titem_ls = base2count.items()\n\t\t\t\t\t\titem_ls.sort(cmp=sortCMPBySecondTupleValue)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif len(item_ls)==3:\n\t\t\t\t\t\t\tcls.addCountToDictionaryByKey(read_group2no_of_snps_with_trialleles, read_group)\n\t\t\t\t\t\t\tif item_ls[0][1]>maxNoOfReadsForGenotypingError:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif len(item_ls)==4:\n\t\t\t\t\t\t\tcls.addCountToDictionaryByKey(read_group2no_of_snps_with_quad_alleles, read_group)\n\t\t\t\t\t\t\tif item_ls[1][1]>maxNoOfReadsForGenotypingError:\t# because sorted, count_ls[0] < count_ls[1]\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\telif len(item_ls)>4:\t#shouldn't happen. 
but maybe deletion/insertion + 4 nucleotides\n\t\t\t\t\t\t\tcls.addCountToDictionaryByKey(read_group2no_of_snps_with_penta_alleles, read_group)\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tMinorAllele = item_ls[-2][0]\n\t\t\t\t\t\tMinorAC = item_ls[-2][1]\n\t\t\t\t\t\t\n\t\t\t\t\t\tMajorAllele = item_ls[-1][0]\n\t\t\t\t\t\tMajorAC = item_ls[-1][1]\n\t\t\t\t\t\tif MinorAC>=minMinorAlleleCoverage and MinorAC<=maxMinorAlleleCoverage and MajorAC<=maxMajorAlleleCoverage:\n\t\t\t\t\t\t\treal_counter += 1\n\t\t\t\t\t\t\tfound_one_het = True\n\t\t\t\t\t\t\t#pysam position is 0-based.\n\t\t\t\t\t\t\tallele = min(MinorAllele, MajorAllele) + max(MinorAllele, MajorAllele)\n\t\t\t\t\t\t\t#data_row = [read_group, pileupcolumn.tid, pileupcolumn.pos+1, MinorAC, MajorAC]\n\t\t\t\t\t\t\t#writer.writerow(data_row)\n\t\t\t\t\t\t\tif len(item_ls)>2:\n\t\t\t\t\t\t\t\tcls.addCountToDictionaryByKey(read_group2no_of_good_tris, read_group)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tcls.addCountToDictionaryByKey(read_group2no_of_good_hets, read_group)\n\t\t\t\t\t\telif MinorAC<=maxNoOfReadsForGenotypingError:\t#2011-3-29 it's homozygous with the major allele\n\t\t\t\t\t\t\tallele = MajorAllele+MajorAllele\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telif len(base2count)==1:\n\t\t\t\t\t\tbase = base2count.keys()[0]\n\t\t\t\t\t\tcount = base2count.get(base)\n\t\t\t\t\t\tif count>=minMinorAlleleCoverage:\n\t\t\t\t\t\t\tallele = '%s%s'%(base, base)\n\t\t\t\t\t\n\t\t\t\t\tdata_row[col_index] = allele\n\t\t\t\t\tif allele!='NA':\n\t\t\t\t\t\tif allele not in allele2count:\n\t\t\t\t\t\t\tallele2count[allele] = 0\n\t\t\t\t\t\tallele2count[allele] += 1\n\t\t\t\tif len(allele2count)>site_type-1:\t#polymorphic across samples\n\t\t\t\t\tlocus_id2row_index[current_locus] = len(locus_id2row_index)\n\t\t\t\t\tdata_matrix.append(data_row)\n\t\t\tif counter%1000==0:\n\t\t\t\tsys.stderr.write(\"%s\\t%s\\t%s\"%('\\x08'*80, counter, real_counter))\n\t\t\t\t\"\"\"\n\t\t\t\tif previous_locus!=None and previous_locus[0]==current_locus[0]:\n\t\t\t\t\tgap = current_locus[1]-previous_locus[1]\n\t\t\t\t\tif gap>=monomorphicDiameter:\n\t\t\t\t\t\tif candidate_locus is not None and candidate_locus==previous_locus:\n\t\t\t\t\t\t\t#prior candidate locus is in proper distance. 
there's no polymorphic locus in between.\n\t\t\t\t\t\t\tgood_polymorphic_loci.append(candidate_locus)\n\t\t\t\t\t\tcandidate_locus = current_locus\n\t\t\t\t\telse:\n\t\t\t\t\t\tcandidate_locus = None\n\t\t\t\tprevious_locus = current_locus\n\t\t\t\t\"\"\"\n\t\tsamfile.close()\n\t\tcls.outputCallMatrix(data_matrix, refFastaFname, outputFname=outputFname, \\\n\t\t\t\t\trefNameSet=refNameSet, read_group2col_index=read_group2col_index, \\\n\t\t\t\t\tlocus_id2row_index=locus_id2row_index, outputDelimiter=outputDelimiter)\n\t\t\n\t\tunique_read_group_ls = read_group2col_index.keys()\n\t\tunique_read_group_ls.sort()\n\t\tcls.reportValueOfDictionaryByKeyLs(read_group2no_of_good_hets, unique_read_group_ls, title=\"No of good hets\")\n\t\tcls.reportValueOfDictionaryByKeyLs(read_group2no_of_good_tris, unique_read_group_ls, title=\"No of good SNPs with tri-or-more alleles\")\n\t\tcls.reportValueOfDictionaryByKeyLs(read_group2no_of_snps_with_trialleles, unique_read_group_ls, title=\"No of SNPs with tri alleles\")\n\t\tcls.reportValueOfDictionaryByKeyLs(read_group2no_of_snps_with_quad_alleles, unique_read_group_ls, title=\"No of SNPs with 4 alleles\")\n\t\tcls.reportValueOfDictionaryByKeyLs(read_group2no_of_snps_with_penta_alleles, unique_read_group_ls, title=\"No of SNPs with 5-or-more alleles\")",
"def search_sequences(input_fasta_filepath, \n sequence_length,\n exclude_fasta_filepath,\n verbose,\n percent_match,\n full_primer_length,\n output_f,\n specificity_threshold,\n log_filepath, \n standard_index_file, \n search_range):\n \n # Check input and output files before generating data\n\n if isdir(output_f):\n raise IOError('%s is a directory, please specify a file path.' \\\n % output_f)\n \n try:\n output_filepath=open(output_f, 'w')\n except IOError:\n raise IOError('Unabled to open output filepath %s' %\\\n output_f)\n \n if standard_index_file:\n try:\n test_alignment_file = open(standard_index_file, \"U\")\n test_alignment_file.close()\n except IOError:\n raise IOError('Unable to open standard index file %s'%\\\n standard_index_file)\n \n if log_filepath:\n if isdir(log_filepath):\n raise IOError('log_filepath %s is a directory, please specify '+\\\n 'a filepath.' % log_filepath)\n try:\n test_log_f = open(log_filepath, 'w')\n except IOError:\n raise IOError('Unable to open log file %s' %\\\n log_filepath)\n \n region_slice=full_primer_length-sequence_length\n \n \n if log_filepath:\n log_f = open(log_filepath, 'w')\n if verbose:\n print(\"Building prospective primers\")\n if log_filepath:\n log_f.write(\"Building prosective primers\\n\")\n \n input_fasta_files=input_fasta_filepath.split(\":\")\n initial_primers=iterate_target_sequences(input_fasta_files,sequence_length,\\\n percent_match, search_range)\n \n if verbose:\n print(\"Constructing primer objects\")\n if log_filepath:\n log_f.write(\"Constructing primer objects\\n\")\n\n primers=construct_primers(initial_primers)\n\n if exclude_fasta_filepath:\n exclude_fasta_files=exclude_fasta_filepath.split(\":\")\n else:\n if not exclude_fasta_filepath:\n # Setting variable to 1 in case no exclusion files\n # Limits need for redundant functions\n seq_total_exclude=1\n \n if verbose and exclude_fasta_filepath:\n print(\"Counting sequences for excluded fasta file(s)\")\n if log_filepath:\n log_f.write(\"Counting sequences for excluded fasta file(s)\\n\")\n\n if exclude_fasta_filepath:\n seq_total_exclude=get_sequence_count(exclude_fasta_files)\n if verbose and exclude_fasta_filepath:\n print(\"Total sequences: %d\" % seq_total_exclude)\n if log_filepath and exclude_fasta_filepath:\n log_f.write(\"Total sequences: %d\\n\" % seq_total_exclude)\n \n if verbose and exclude_fasta_filepath:\n print(\"Finding specific hits\")\n if log_filepath and exclude_fasta_filepath:\n log_f.write(\"Finding specific hits\\n\")\n \n if exclude_fasta_filepath:\n primers=get_specific_hits(primers,exclude_fasta_files,\\\n specificity_threshold,sequence_length,region_slice,\\\n seq_total_exclude)\n \n seq_total_target=get_sequence_count(input_fasta_files)\n if verbose:\n print(\"Total number of target sequences: %d\" % seq_total_target)\n if log_filepath:\n log_f.write(\"Total number of target sequences: %d\\n\" \\\n % seq_total_target)\n\n if verbose:\n print(\"Finding sensitive primer regions.\")\n if log_filepath:\n log_f.write(\"Finding sensitive primer regions.\\n\")\n \n primers=get_sensitive_hits(primers,input_fasta_files,\\\n percent_match,sequence_length,region_slice)\n primers=calculate_percent_match(primers,seq_total_target,seq_total_exclude)\n \n if standard_index_file:\n standard_index_fasta = open(standard_index_file, \"U\")\n # Only read first file\n for label, seq in MinimalFastaParser(standard_index_fasta):\n standard_index_seq = seq\n break\n primers = append_std_aligned_index(primers, standard_index_seq,\n region_slice)\n \n else:\n 
standard_index_seq = None\n \n \n generate_denovo_output_file(primers,output_filepath,\\\n specificity_threshold, region_slice, standard_index_seq, percent_match,\n bool(exclude_fasta_filepath))\n \n if verbose:\n print(\"Module complete\")\n if log_filepath:\n log_f.write(\"Module complete\\n\")",
"def rc_all_reads_fasta(self):\n return op.join(self.out_dir, \"rc_all_reads.fasta\")",
"def test_filtered_scan(self):\n self.run_scan(self.tempdir, self.root_fcount + self.nest_fcount, ext=\".txt\")",
"def process_ssearch36_df(name, ssearch_df, fasta_inputs, out_dir):\n\n fasta_in = [x for x in fasta_inputs if name.split(\".fasta.ss\")[0] == re.sub(\".fasta$\",\"\",os.path.basename(x))][0]\n fasta_file = pyfasta.Fasta(fasta_in)\n print(name) \n try:\n os.mkdir(out_dir)\n except:\n pass\n with open(os.path.join(out_dir, os.path.basename(fasta_in)),\"w\") as out_f:\n for gene in (ssearch_df[\"gene\"].unique()):\n ssearch_tmp = ssearch_df[ssearch_df[\"gene\"] == gene]\n gene_match = (ssearch_tmp.sort_values(by=\"11\",ascending=False).head(n=1)[\"strain\"])\n if (any(gene_match.isin([gene]))):\n out_f.write(\">\" + gene +\"\\n\")\n out_f.write(str(fasta_file[gene]) + \"\\n\")",
"def main_SS(maf_file, segment_file, vaf_threshold = 1.05, filterSegments = False):\n all_mutations = pd.read_csv(maf_file, low_memory=False, delimiter='\\t')\n all_segments = pd.read_csv(segment_file, low_memory=False, delimiter='\\t')\n\n if not os.path.exists(\"./sample_mutations_withCN\"):\n os.makedirs(\"./sample_mutations_withCN\")\n if not os.path.exists(\"./pyclone_input\"):\n os.makedirs(\"./pyclone_input\")\n\n for i, sample in enumerate(all_mutations.Tumor_Sample_Barcode.unique()):\n print(\"Processing sample {}: {}\".format(i+1, sample))\n\n # Subset the mutations and segments to those belonging to the patient\n sample_mutations = all_mutations[all_mutations['Tumor_Sample_Barcode'] == sample]\n sample_segments = all_segments[all_segments['Tumor_Sample_Barcode'] == sample]\n\n patient_VAF = sample_mutations.loc[:, 'VAF']\n filter_VAF_index = (patient_VAF > vaf_threshold)\n\n # Remove the mutations where the condition is true for ALL segments, i.e. it has to be below\n # 0.05 for all sectors. If it's above 0.05 in any sector, keep the mutations. This will keep most\n # of the private mutations.\n num_filtered = filter_VAF_index.loc[filter_VAF_index == False, ]\n print(\"Patient {} has {} mutations with average VAF < {} removed\".format(sample, num_filtered.shape[0], vaf_threshold))\n # Filter out the variants\n sample_mutations = sample_mutations.loc[filter_VAF_index, ]\n # Get the segments dictionary for the patient.\n seg_dict = segments_to_dict(sample_segments)\n\n overlap_seg = pd.DataFrame()\n filtered_seg = pd.DataFrame()\n for _, mut_row in sample_mutations.iterrows():\n # Skip X and Y chromosome\n if (mut_row['Chromosome'] == \"X\" or mut_row['Chromosome'] == \"Y\"):\n continue\n\n # Search for the segment\n buf = search_overlap_singleSample(mut_row, seg_dict)\n # Skip if no overlapping segments\n if (buf.empty):\n continue\n elif filterSegments:\n print(\"--filterSegments specified. 
Will filter segments of low quality.\")\n if (buf.iloc[0]['numMarker'] < 100) or (buf.iloc[0]['end.pos'] - buf.iloc[0]['start.pos'] < 5000000) or (buf.iloc[0]['CNt'] >= 8):\n if (filtered_seg.empty):\n filtered_seg = buf.iloc[0].to_frame()\n else:\n filtered_seg = pd.concat([filtered_seg, buf.iloc[0]], axis=1)\n else:\n # Get copy number for mutations\n assigned_row = mut_row.copy(deep=True)\n assigned_row['CNt'] = buf.iloc[0]['CNt']\n assigned_row['Major_CN'] = buf.iloc[0]['A']\n assigned_row['Minor_CN'] = buf.iloc[0]['B']\n assigned_row['adjustedCN'] = buf.iloc[0]['adjustedCN']\n # Initialize dataframe for merging.\n if (overlap_seg.empty):\n overlap_seg = assigned_row.to_frame()\n else:\n overlap_seg = pd.concat([overlap_seg, assigned_row], axis=1)\n\n overlap_seg = overlap_seg.transpose()\n overlap_seg.to_csv(\"./sample_mutations_withCN/{}_SNV_withCN.maf\".format(sample),sep=\"\\t\", index=False)\n\n filtered_seg = filtered_seg.transpose()\n print(\"Sample {} has {} segments with marker<100 or smaller than 5 Mb or >= 8 copy number (Canopy guideline)\".format(sample, filtered_seg.shape[0]))\n filtered_seg.to_csv(\"./sample_mutations_withCN/{}_filtered_seg.maf\".format(sample),sep=\"\\t\", index=False)\n\n pyclone_input = overlap_seg.loc[:, ['Hugo_Symbol', 'Chromosome',\n 'Start_position', 'ref_count', 'alt_count', 'VAF', 'Major_CN',\n 'Minor_CN']]\n pyclone_input['mutation_id'] = pyclone_input['Hugo_Symbol'].map(str) + \"_\" + pyclone_input['Chromosome'].map(str) + \":\" + pyclone_input['Start_position'].map(str)\n pyclone_input['normal_cn'] = 2\n towrite = pyclone_input.loc[:, ['mutation_id', 'ref_count', 'alt_count', 'normal_cn', 'Minor_CN', 'Major_CN']]\n towrite.columns = ['mutation_id', 'ref_counts', 'var_counts', 'normal_cn', 'minor_cn', 'major_cn']\n towrite['ref_counts'] = towrite['ref_counts'].map(int)\n towrite['var_counts'] = towrite['var_counts'].map(int)\n towrite.to_csv(\"./pyclone_input/{}_mutations.tsv\".format(sample), sep='\\t', index=False)"
] | [
"0.59555817",
"0.5874914",
"0.5856849",
"0.57773864",
"0.5676103",
"0.5649386",
"0.56342393",
"0.55829614",
"0.5536078",
"0.55328315",
"0.54585654",
"0.5444906",
"0.5354874",
"0.5350444",
"0.53398484",
"0.53377277",
"0.5320953",
"0.5305111",
"0.52870345",
"0.5270289",
"0.52476585",
"0.5239773",
"0.5218237",
"0.5199463",
"0.5167171",
"0.51497173",
"0.5132311",
"0.5129633",
"0.5119797",
"0.51012784"
] | 0.79045886 | 0 |
Return tmp file for all_reads_fasta. | def tmp_all_reads_fasta(self):
return op.join(self.out_dir, "all_reads.fasta.tmp") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def rc_all_reads_fasta(self):\n return op.join(self.out_dir, \"rc_all_reads.fasta\")",
"def ori_all_reads_fasta(self):\n return op.join(self.out_dir, \"all_reads.fasta.ori\")",
"def createSequenceFile(sequences, tmpDir, filename='seq.fa'):\n seqfile = os.path.join(tmpDir, filename)\n with open(seqfile, 'w') as f:\n for name, sequence in sequences.iteritems():\n f.write(\">{}\\n{}\\n\".format(name, sequence))\n subprocess.call(\"pyfasta flatten {}\".format(seqfile), shell=True)\n return seqfile",
"def _tmp(self):\n tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',\n suffix='.out',\n delete=False)\n return tmpfn.name",
"def writeTmpFastq(self, fw_reads_path, rev_reads_path):\n try:\n fq1 = open(fw_reads_path, \"w+\")\n fq1.write(reads1_string)\n fq1.close()\n fq2 = open(rev_reads_path, \"w+\")\n fq2.write(reads2_string)\n fq2.close()\n except OSError:\n pass",
"def writeTmpFastq(self, fw_reads_path, rev_reads_path):\n try:\n fq1 = open(fw_reads_path, \"w+\")\n fq1.write(reads1_string)\n fq1.close()\n fq2 = open(rev_reads_path, \"w+\")\n fq2.write(reads2_string)\n fq2.close()\n except OSError:\n pass",
"def palindrome_reads_fasta(self):\n return op.join(self.out_dir, \"palindrome_subreads.fasta\")",
"def make_temp_file():\n with tempfile.NamedTemporaryFile() as f:\n return f.name",
"def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def _create_temp_batch_file(self):\n return tempfile.NamedTemporaryFile(delete=False)",
"def tempfile():\n return mkstemp()[1]",
"def create_temporary_file():\n f = NamedTemporaryFile(delete=False)\n return f.name",
"def test_write_trunc_fasta(self):\r\n\r\n seq_order = ['seq1', 'seq2', 'seq3']\r\n\r\n seqs = {'seq1': 'ATCG', 'seq3': 'ACCC', 'seq2': 'GGACC'}\r\n\r\n output_dir = '/tmp/truncate_fasta_qual_test/'\r\n\r\n create_dir(output_dir)\r\n\r\n fasta_out_fp = output_dir + 'seqs_filtered.fna'\r\n\r\n write_trunc_fasta(seqs, fasta_out_fp, seq_order)\r\n\r\n expected_seqs = ['>seq1', 'ATCG', '>seq2', 'GGACC', '>seq3', 'ACCC']\r\n\r\n actual_fasta = open(fasta_out_fp, \"U\")\r\n\r\n actual_fasta = [line.strip() for line in actual_fasta]\r\n\r\n self.assertEqual(actual_fasta, expected_seqs)",
"def test_force_fasta_output(tmp_path, cores):\n\n out_path = os.fspath(tmp_path / \"out.fasta\")\n with open(out_path, \"w\") as out_file:\n py = subprocess.Popen(\n [\n sys.executable,\n \"-m\",\n \"cutadapt\",\n \"--fasta\",\n \"-o\",\n \"-\",\n \"--cores\",\n str(cores),\n \"-a\",\n \"TTAGACATATCTCCGTCG\",\n datapath(\"small.fastq\"),\n ],\n stdout=out_file,\n )\n _ = py.communicate()\n assert_files_equal(cutpath(\"small.fasta\"), out_path)",
"def getTinyFasta():\n return _getAbsPath('Fluidigm_human_amplicons_tiny.fasta')",
"def test_write_seqs_to_fasta(self):\r\n fd, output_fp = mkstemp(\r\n prefix=\"qiime_util_write_seqs_to_fasta_test\",\r\n suffix='.fasta')\r\n close(fd)\r\n self.files_to_remove.append(output_fp)\r\n seqs = [('s1', 'ACCGGTTGG'), ('s2', 'CCTTGG'),\r\n ('S4 some comment string', 'A')]\r\n exp = \">s1\\nACCGGTTGG\\n>s2\\nCCTTGG\\n>S4 some comment string\\nA\\n\"\r\n # works in write mode\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # calling again in write mode overwrites original file\r\n write_seqs_to_fasta(output_fp, seqs, 'w')\r\n self.assertEqual(open(output_fp).read(), exp)\r\n # works in append mode\r\n exp2 = exp + exp\r\n write_seqs_to_fasta(output_fp, seqs, 'a')\r\n self.assertEqual(open(output_fp).read(), exp2)",
"def TemporaryFile(mode='w+b',bufsize=_1,suffix='',prefix='tmp',dir=None):\n\tpass",
"def write_degapped_fasta_to_file(seqs, tmp_dir=\"/tmp/\"):\r\n fd, tmp_filename = mkstemp(dir=tmp_dir, prefix=\"degapped_\",\r\n suffix=\".fasta\")\r\n close(fd)\r\n\r\n with open(tmp_filename, 'w') as fh:\r\n for seq in degap_fasta_aln(seqs):\r\n fh.write(seq.to_fasta())\r\n\r\n return tmp_filename",
"def _tempfile(self):\n fd, path = tempfile.mkstemp(dir = os.path.join(self.root, \"temporary\"))\n try:\n return os.fdopen(fd, \"wb\"), path\n except:\n os.unlink(path)\n os.close(fd)\n raise",
"def _generate_to_tempfile(self, generator):\r\n (output_fd, output_path) = tempfile.mkstemp()\r\n with os.fdopen(output_fd, 'w') as output:\r\n generator.write(output)\r\n return output_path",
"def export_fasta(self, metadata, analysistype, reportpath, cutoff, program):\n logging.info('Creating FASTA-formatted files of outputs')\n for sample in metadata:\n # Set the name of the FASTA output file\n sample[analysistype].fasta_output = os.path.join(reportpath, '{sn}_{prog}.fasta'.format(sn=sample.name,\n prog=analysistype))\n # Remove the file if it exists. Otherwise, if the samples are processed by the pipeline more than\n # once, the same results will be appended to the file\n try:\n os.remove(sample[analysistype].fasta_output)\n except FileNotFoundError:\n pass\n # Process the sample only if the script could find targets\n if sample[analysistype].blastresults != 'NA' and sample[analysistype].blastresults:\n # Open the FASTA output file in append mode\n with open(sample[analysistype].fasta_output, 'a+') as fasta_output:\n for target in sorted(sample[analysistype].targetnames):\n index = 0\n for hit in sample[analysistype].blastlist:\n if hit['subject_id'] == target:\n # Set the name and percent id to avoid writing out the dictionary[key] multiple times\n if float(hit['percent_match']) >= cutoff:\n # If the 'align' option was not specified, the .dnaseq attribute will be an empty\n # dictionary. Populate this attribute as required\n try:\n # The .dnaseq attribute will not exist for amino-acid based searches\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n # The .targetsequence attribute will be sufficient\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except (KeyError, IndexError):\n # Align the protein (and nucleotide) sequences to the reference\n sample = self.alignprotein(sample=sample,\n analysistype=analysistype,\n target=target,\n program=program,\n index=index,\n hit=hit)\n try:\n if program == 'blastn':\n fasta = sample[analysistype].dnaseq[target][index]\n else:\n fasta = Seq(sample[analysistype].targetsequence[target][index])\n except IndexError:\n fasta = str()\n # Create the SeqRecord of the FASTA sequence\n if fasta:\n try:\n record = SeqRecord(fasta,\n id='{name}_{target}'\n .format(name=sample.name,\n target=target),\n description='')\n # Write the FASTA-formatted record to file\n fasta_output.write(record.format('fasta'))\n except (AttributeError, TypeError):\n pass\n index += 1\n # Return the updated metadata object\n return metadata",
"def _testfile():\r\n import tempfile\r\n return os.path.join(tempfile.gettempdir(), 'trash-%s' % os.getpid())",
"def create_temporary_ca_file(anchor_list):\n try:\n f, fname = tempfile.mkstemp()\n for a in anchor_list:\n s = a.output(fmt=\"PEM\")\n l = os.write(f, s)\n os.close(f)\n except:\n return None\n return fname",
"def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')",
"def generate_fasta(seq_file, out_dir):\n\n LOGGER.info(\"Generating fasta file\", seq_file)\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, \"missing_seqs.log\")\n logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO)\n\n cnx = RfamDB.connect()\n cursor = cnx.cursor(raw=True)\n\n # fetch clan specific family full_region data and sequence description\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"ORDER BY fr.rfam_acc\")\n\n cursor.execute(query)\n\n for region in cursor:\n\n # new family\n if str(region[RFAM_ACC]) != rfam_acc:\n # check if there's no open file\n if fp_out is not None:\n fp_out.close()\n\n # open new fasta file\n fp_out = gzip.open(\n os.path.join(out_dir, str(region[RFAM_ACC]) + \".fa.gz\"), 'w')\n\n rfam_acc = region[RFAM_ACC]\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(sequence)\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def combine_fasta_files(fastas_paths, out_file):\n with open(out_file, 'w') as out:\n for filename in fastas_paths:\n for seq_record in SeqIO.parse(filename, \"fasta\"):\n out.write('>' + str(seq_record.id) + '\\n' + str(seq_record.seq) + '\\n')",
"def indexed(filename):\n\n with tempfile.NamedTemporaryFile() as tmp:\n with gzip.open(filename, \"r\") as raw:\n SeqIO.write(corrected_records(raw), tmp, \"fasta\")\n\n tmp.flush()\n yield SeqIO.index(tmp.name, \"fasta\")",
"def test_compute_seqs_per_file(self):\r\n fd, temp_fasta_fp = mkstemp(prefix='QiimeScriptUtilTests',\r\n suffix='.fasta')\r\n close(fd)\r\n temp_fasta = ['>seq', 'AAACCCCAAATTGG'] * 25\r\n open(temp_fasta_fp, 'w').write('\\n'.join(temp_fasta))\r\n\r\n actual_25 = self.pw._compute_seqs_per_file(temp_fasta_fp, 25)\r\n actual_2 = self.pw._compute_seqs_per_file(temp_fasta_fp, 2)\r\n actual_10 = self.pw._compute_seqs_per_file(temp_fasta_fp, 10)\r\n actual_5 = self.pw._compute_seqs_per_file(temp_fasta_fp, 5)\r\n actual_40 = self.pw._compute_seqs_per_file(temp_fasta_fp, 40)\r\n\r\n remove_files([temp_fasta_fp])\r\n\r\n self.assertEqual(actual_25, 1)\r\n self.assertEqual(actual_2, 13)\r\n self.assertEqual(actual_10, 3)\r\n self.assertEqual(actual_5, 5)\r\n self.assertEqual(actual_40, 1)",
"def write_fasta(self):\n patched_otus = get_patched_otus(\n self.db,\n self.settings,\n self.params[\"manifest\"]\n )\n\n sequence_otu_map = dict()\n\n sequences = get_sequences_from_patched_otus(\n patched_otus,\n self.params[\"data_type\"],\n sequence_otu_map\n )\n\n fasta_path = os.path.join(self.params[\"index_path\"], \"ref.fa\")\n\n write_sequences_to_file(fasta_path, sequences)\n\n index_id = self.params[\"index_id\"]\n\n self.db.indexes.update_one({\"_id\": index_id}, {\n \"$set\": {\n \"sequence_otu_map\": sequence_otu_map\n }\n })\n\n self.dispatch(\"indexes\", \"update\", [index_id])",
"def getTempFile():\n root = getDir(tempDir)\n for i in range(100):\n path = os.path.join(root, '%d-%d' % (\n os.getpid(), random.randint(100000, 999999)))\n if not os.path.isfile(path):\n return path\n raise NotImplementedError(\"getTempFile() appears to be failing\")"
] | [
"0.6950289",
"0.65887046",
"0.6245512",
"0.6187316",
"0.60011536",
"0.60011536",
"0.59201795",
"0.59051627",
"0.58881676",
"0.58435273",
"0.57945323",
"0.5721843",
"0.571925",
"0.57050365",
"0.5691162",
"0.5662805",
"0.56540394",
"0.5609064",
"0.5592718",
"0.55707186",
"0.55594414",
"0.55572003",
"0.55495983",
"0.5545288",
"0.5526402",
"0.55144787",
"0.55082196",
"0.5504606",
"0.549931",
"0.54937524"
] | 0.87710905 | 0 |
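The pair above matches the docstring "Return tmp file for all_reads_fasta." to a one-line path helper, return op.join(self.out_dir, "all_reads.fasta.tmp"). A minimal sketch of that property pattern, assuming a hypothetical ReadsPaths class with an out_dir attribute (the class name, the all_reads_fasta companion property, and the usage lines are illustrative assumptions, not part of the dataset row):

import os.path as op

class ReadsPaths(object):
    # Hypothetical holder for per-run output paths (name is an assumption).
    def __init__(self, out_dir):
        self.out_dir = out_dir

    @property
    def all_reads_fasta(self):
        # Final FASTA of all reads.
        return op.join(self.out_dir, "all_reads.fasta")

    @property
    def tmp_all_reads_fasta(self):
        # Temporary FASTA written first, then moved over all_reads_fasta.
        return op.join(self.out_dir, "all_reads.fasta.tmp")

paths = ReadsPaths("out")
assert paths.tmp_all_reads_fasta.endswith("all_reads.fasta.tmp")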
Return palindrome reads to palindrome_subreads.fasta. | def palindrome_reads_fasta(self):
return op.join(self.out_dir, "palindrome_subreads.fasta") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _split_palindrome(self):\n if not op.exists(self.sdp_out_file) or self.force_redo is True:\n self._self_align()\n\n logging.debug(\"Parsing sdp and detect plindrome reads\")\n split_table = {}\n with SDPReader(self.sdp_out_file) as reader:\n for sdp in reader:\n if sdp.score <= self.palindrome_score_cutoff:\n split_table[str(sdp.qID)] = sdp\n\n logging.debug(\"Splitting palindrom reads.\")\n with FastaReader(self.ori_all_reads_fasta) as reader, \\\n FastaWriter(self.tmp_all_reads_fasta) as writer, \\\n FastaWriter(self.palindrome_reads_fasta) as palindrome_writer:\n for r in reader:\n if r.name in split_table:\n # found a palindrome\n sdp = split_table[r.name]\n # Write palindrome subreads to palindrome_subreads.fasta\n palindrome_writer.writeRecord(r.name, r.sequence)\n#\n# # split this read in the middle\n# split_point = int(sdp.qstart +\n# (sdp.alnqstart + sdp.alnqend)/2)\n# # Write the first half\n# rname_1 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw, s=sdp.qstart,\n# e=split_point)\n# writer.writeRecord(rname_1,\n# r.sequence[0:(split_point-sdp.qstart)])\n#\n# # Write the second half\n# rname_2 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw,\n# s=(split_point+1), e=sdp.qend)\n# writer.writeRecord(rname_2,\n# r.sequence[(split_point-sdp.qstart):])\n else:\n writer.writeRecord(r.name, r.sequence)\n\n logging.debug(\"Moving {i} to {o}.\".format(i=self.tmp_all_reads_fasta,\n o=self.all_reads_fasta))\n shutil.move(self.tmp_all_reads_fasta, self.all_reads_fasta)",
"def palindrome(self):\n vas = []\n file = self.read1()\n print(file[0])\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n s_ii = s_i[::-1]\n if s_ii == s_i and s_i!= \"\":\n vas.append(s_i)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas",
"def _self_align(self):\n logging.info(\"Splitting palindrome.\")\n logging.debug(\"Making reverse complement sequences of reads in \" +\n \"{i} to {o}\".format(i=self.ori_all_reads_fasta,\n o=self.rc_all_reads_fasta))\n num_reads = revcmp_fasta(self.ori_all_reads_fasta,\n self.rc_all_reads_fasta)\n\n reads_per_split = max(1, int(num_reads/self.nproc) + 1)\n logging.debug(\"Splitting {f} to small files each containing {n} reads.\".\n format(f=self.ori_all_reads_fasta, n=reads_per_split))\n fs = FastaSplitter(input_fasta=self.ori_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"reads.split.\")\n fs.split()\n sp_fasta_files = fs.out_fns\n\n logging.debug(\"Splitting {f} to smaller files.\".\n format(f=self.rc_all_reads_fasta))\n rc_fs = FastaSplitter(input_fasta=self.rc_all_reads_fasta,\n reads_per_split=reads_per_split,\n out_dir=self.out_dir,\n out_prefix=\"rc_reads.split.\")\n rc_fs.split()\n rc_sp_fasta_files = rc_fs.out_fns\n\n logging.debug(\"Aligning each read in {i} to its revese compelement \" +\n \"read using sdpMatcher.\".format(i=self.ori_all_reads_fasta))\n\n sdps = [\"{f}.sdp\".format(f=f) for f in sp_fasta_files]\n jobs = []\n for f, rc_f, sdp in zip(sp_fasta_files, rc_sp_fasta_files, sdps):\n cmd = \"sdpMatcher {f} {rc_f} \".format(f=f, rc_f=rc_f) + \\\n \"10 -local > {sdp} \".format(sdp=sdp)\n logging.debug(\"CMD: {cmd}\".format(cmd=cmd))\n jobs.append(cmd)\n\n pool = Pool(processes=self.nproc)\n rets = pool.map(backticks, jobs)\n pool.close()\n pool.join()\n\n for i, job in enumerate(jobs):\n if rets[i][1] != 0:\n errMsg = \"Job {j} failed.\".format(j=job) + str(rets[i][2])\n raise RuntimeError(errMsg)\n\n logging.debug(\"Concatenating all sdp outputs to {f}\".\n format(f=self.sdp_out_file))\n cat_files(src=sdps, dst=self.sdp_out_file)\n\n logging.debug(\"Cleaning intermediate fasta & sdp files.\")\n fs.rmOutFNs()\n rc_fs.rmOutFNs()\n\n for f in sdps:\n os.remove(f)",
"def palindrom():\r\n pal = []\r\n\r\n sub_str = gen_substring(\"abaabbaab\")\r\n\r\n for i in range(len(sub_str)):\r\n\r\n rev = reverse_string(sub_str[i])\r\n\r\n if rev == sub_str[i]:\r\n\r\n pal.append(rev)\r\n\r\n return pal",
"def test_check_seqs_reverse_primers(self):\r\n\r\n # Initial test, should truncate all seqs\r\n in_seqs = self.in_seqs_reverse_primers\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Second test with a mismatch in seq a, should not find reverse primer\r\n # and will write out entire sequence.\r\n\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # With reverse_primer_mismatches allowed set to 1,\r\n # should restore truncation.\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch_allowed\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n 
retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False,\r\n reverse_primer_mismatches=1)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Testing truncate_remove, which should not write sequences where\r\n # the reverse primer is not found\r\n in_seqs = self.in_seqs_reverse_primers\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_full_remove\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_remove',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Testing truncate_remove, with reverse_primer_mismatches set to 1\r\n # should allow all 4 sequences to be written, truncated\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch_allowed\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=1,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_remove',\r\n rev_primers=rev_primers_test,\r\n qual_out=False,\r\n reverse_primer_mismatches=1)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)",
"def find_reverse_palindromes(dna: str, min_len: int=4, max_len: int=12, zero_based: bool=True):\n def helper_for_non_zero_based(indexes: List[Tuple[int, int]]):\n if not zero_based:\n return [(i + 1, l) for i, l in indexes]\n else:\n return indexes\n\n length = len(dna)\n result = []\n for i in range(length):\n for l in range(min(min_len, length - i), min(max_len + 1, length - i + 1)):\n if l > max_len or l < min_len:\n continue\n sub_dna = dna[i: i + l]\n if sub_dna == reverse_complement(sub_dna):\n result.append((i, l))\n return helper_for_non_zero_based(result)",
"def search_palindromes(src_file, min_len):\n #Get digit source\n source = NumReader(src_file)\n #Old digits. Should always be length 100-200, unless there aren't enough digits.\n old_d = []\n #Current digit (possibly None)\n cur_d = source.read(1)[0]\n #Future digits. Should always be length 100-200, unless there aren't enough digits.\n next_d = source.read(100)\n #List of accumulated palindromes as strings\n pals = []\n\n #Keep running until out of digits\n while source.has_digits:\n #Look for palindrome centered at current digit\n branch_len = pal_length(old_d, next_d)\n cur_length = 1 + 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Look for \"even\" palindrome centered at current digit\n #Shift current digit into old buffer\n old_d.insert(0, cur_d)\n cur_d = None\n branch_len = pal_length(old_d, next_d)\n cur_length = 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Pull next digit\n cur_d = next_d.pop(0)\n\n #Maintain buffers\n if len(old_d) > 50:\n old_d = old_d[:50]\n if len(next_d) < 50:\n next_d += source.read(50)\n return pals",
"def check_palindrome():",
"def no_abab():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"abab\", prompt=False\n ).stdout(\"NO\", regex=False\n ).exit()",
"def find_palindromes(self, start_file: str, result_file: str) -> list:\n input_words = self.read_file(start_file)\n result_words = []\n stack = ArrayStack()\n\n for word in input_words:\n for letter in word:\n stack.push(letter)\n\n reversed_line = ''\n\n while not stack.isEmpty():\n reversed_line += stack.pop()\n\n if word == reversed_line:\n result_words.append(word)\n\n if len(result_words) != 0:\n self.write_file(result_words, result_file)\n return result_words",
"def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n",
"def back_translate(self):\n base = Bio.Alphabet._get_base_alphabet(self.alphabet)\n if not isinstance(base, Bio.Alphabet.ProteinAlphabet):\n raise ValueError(\"Nucleic acids cannot be back translated!\")\n\n # right now this just uses the most-prevalent codon for each AA\n # TODO: select codons with a weighted average using random.choice\n return Seq(\n \"\".join([CodonUsage.SynonymousCodons[seq3(AA).upper()][0] for AA in str(self)]),\n IUPAC.unambiguous_dna,\n )",
"def palindrome_itertive(a):\n # TODO make this less crappy\n start = 0 \n end = len(a) - 1\n while start != end:\n # print(end)\n # print('start: ', start, ' a: ', a[start])\n # print('end: ', end, ' a: ', a[end])\n if not a[start] == a[end]:\n return False\n else:\n start += 1\n end -= 1\n return True",
"def is_palindrome(sub):\n for i in range(len(sub)):\n if sub[i] != sub[len(sub) - i - 1]:\n return False\n return True",
"def revtranslate_align(aaseqs, dnaseqs, check=False, trim=False):\n\n align = new_align(aaseqs)\n\n for name, seq in aaseqs.iteritems():\n try:\n dna = dnaseqs[name].upper()\n dnalen = len(dna)\n aalen = sum(int(a != \"-\") for a in seq)\n\n if len(dna) != aalen * 3:\n if trim:\n # make dna a multiple of three\n dna = dna[:(len(dna) // 3) * 3]\n\n if len(dna) > aalen * 3:\n # trim dna\n dna = dna[:aalen*3]\n else:\n # trim peptide to match nucleotide\n j = 0\n for i in xrange(len(seq)):\n if seq[i] != '-':\n j += 1\n if j > len(dna) // 3:\n seq = seq[:i] + \"-\" * (len(seq) - i)\n break\n\n aalen2 = sum(int(a != \"-\") for a in seq)\n assert len(dna) == aalen2 * 3, (\n len(dna), aalen2 * 3)\n\n util.logger(\"trim dna (%d) and pep (%d)\" %\n (dnalen - len(dna), aalen - aalen2))\n\n else:\n # is last residue X?\n for i in xrange(len(seq)-1, -1, -1):\n if seq[i] == \"-\":\n continue\n if seq[i] == \"X\":\n # repair\n seq = seq[:i] + \"-\" * (len(seq)-i)\n dna = dna[:-3]\n break\n\n align[name] = seqlib.revtranslate(seq, dna, check=check)\n except seqlib.TranslateError:\n raise\n\n return align",
"def reverse_complement_RNA(RNAsequence):\n complement = \"\"\n for nucleotide in RNAsequence:\n if nucleotide == \"A\":\n complement = \"U\" + complement\n if nucleotide == \"C\":\n complement = \"G\" + complement\n if nucleotide == \"G\":\n complement = \"C\" + complement\n if nucleotide == \"U\":\n complement = \"A\" + complement\n return complement",
"def get_shortest_palindrome(text):\n strlen = len(text)\n unique_chars = len(set(text))\n print(set(text))\n if unique_chars == strlen:\n return (\"\".join(list(reversed(text[1:])))+text)\n if text==\"\" or strlen==1 or unique_chars==1:\n return text\n if is_palindrome(text):\n return text\n if strlen//unique_chars > 100:\n d = {}\n for char in set(text):\n \n left_pad = []\n #print(strlen)\n i = strlen-1\n while(i!=0):\n left_pad.append(text[i])\n #print(left_pad)\n #print(\"text[:i-1]: \",text[:i],i)\n if is_palindrome(text[:i]):\n # print(\"\".join(left_pad)+text)\n return (\"\".join(left_pad)+text)\n i = i -1",
"def get_reverse(sequence):\n #Convert all rna_sequence to upper case:\n sequence=sequence.upper()\n #reverse rna sequence:\n rna_rev_list=sequence[::-1]\n return rna_rev_list",
"def correctfasta(vectint, records):\n\n\n# go through each sequence in genome file\n for record in records:\n if record in vectint:\n # We have the remove keyword. Do not process sequence record\n recordseq = records[record]\n if \"remove\" in vectint[record]:\n continue\n if \"trim3\" in vectint[record]:\n # We cannot work directly on the records hash\n # duplicate the sequence, and modify it\n recordseq = recordseq[:vectint[record][\"trim3\"]]\n if \"trim5\" in vectint[record]:\n # We cannot work directly on the records hash\n # duplicate the sequence, and modify it\n recordseq = recordseq[vectint[record][\"trim5\"]:]\n # print modified sequence\n if len(recordseq.seq) > 0:\n print(\">\"+record)\n print(recordseq.seq)\n else:\n # print unmodified sequence\n print(\">\"+record)\n print(records[record].seq)",
"def palindrome_search(sequence, min_len, max_len, alphabet, prob_cutoff=None):\n # get the sequence complement\n trans_table = str.maketrans('ACGT', 'TGCA')\n seq_complement = sequence.translate(trans_table)\n # gets the base composition\n nucs = base_stats(sequence, alphabet, False, True)\n # define maches bases\n matches = ['AT', 'TA', 'GC', 'CG']\n # probability of a match according tho the background\n p_match = 0\n # iterates tohrough the bases matches\n for b in matches:\n # calculate the probabilities\n p_match += nucs[b[0]] * nucs[b[1]]\n # checks if the results matches\n assert p_match == sum([nucs[b[0]] * nucs[b[1]] for b in matches])\n # initialize the container of possible probability using length and mismatches\n # as the indexes\n prob_dict = defaultdict(float)\n # iterates through the range of lengths\n for length in range(min_len, max_len):\n # iterates throught the half of the sequence\n for mismatches in range(0, (length // 2) + 1):\n # get the probabilities and number the mismatches\n p = probability(length, mismatches, p_match)\n prob_dict[(length, mismatches)] = prob_dict.get((length, mismatches), 0.0) + p\n # create an container for the results\n palindromes = []\n # iterates through the range of lengths\n for length in range(min_len, max_len):\n # defined mismatch threshold\n half_l = length // 2\n mismatches_cutoff = 0.5 * half_l\n half_list = range(half_l)\n # iterates throught to find the starts\n for start in range(0, (len(sequence) - length + 1)):\n # gets the putative palindromes\n seq = sequence[start:start + length]\n # gets the complement\n seq_comp = seq_complement[start:start + length]\n mismatches = 0\n # iterates throught the half lengths\n for i in half_list:\n # check for mismatches and increment the counts\n if seq[i] != seq_comp[-i - 1]:\n mismatches += 1\n # check if the number of mismatches is allowed\n if mismatches <= mismatches_cutoff:\n # look up the probability,\n pr = prob_dict[(length, mismatches)]\n # if it passes the cutoff\n if pr <= prob_cutoff:\n # add the results into the container\n # count the number of the palindrome in the sequence\n cnt_pal = get_pattern_count(sequence, seq)\n palindromes += [[length, start, pr, mismatches, cnt_pal, seq]]\n return palindromes",
"def reverse_read(read):\n reversed_read = \"\"\n for i in range(len(read)-1, -1, -1):\n if read[i] == \"A\":\n reversed_read += \"T\"\n elif read[i] == \"T\":\n reversed_read += \"A\"\n elif read[i] == \"G\":\n reversed_read += \"C\"\n elif read[i] == \"C\":\n reversed_read += \"G\"\n else:\n raise ValueError(\"One of the read contains wrong characters.\")\n\n return reversed_read",
"def ReverseComplement(self):\n if (self.translated == False):\n for i in range(len(self.alignment)):\n self.alignment[i].seq = self.alignment[i].seq.reverse_complement()\n self.Show(self.displayedColumn)\n self.BackupAlignment()\n else:\n self.AlertMessage(\"Can't reverse-complement protein sequences.\", 'medium')",
"def back_translate(aln_file, seqdict):\n aln = SeqIO.parse(aln_file.name, 'fasta')\n bt_seq = []\n for prot_seq in aln:\n codon = 0\n bt = ''\n nuc = seqdict[prot_seq.id]\n for aa in prot_seq:\n if aa == '-':\n bt += '---'\n else:\n bt += nuc[codon*3:(codon*3)+3]\n codon += 1\n bt_seq.append(bt)\n return bt_seq",
"def revcomp(self, seq):\n tab = self.maketrans(b'ACNGT', b'TGNCA')\n return seq.translate(tab)[::-1]",
"def invert_seq(self):\n if not self.data['DNAseq']:\n self.invert_seq_var.set(0)\n self.warning('No DNA sequence loaded','You have to load a DNA sequence first')\n return\n inverted=''\n for count in range(len(self.data['DNAseq'])):\n pos=-count-1\n inverted=inverted+self.data['DNAseq'][pos]\n self.data['DNAseq']=inverted\n #\n # Update\n #\n self.update_sequence_window()\n return",
"def reverse_rna_complement(seq):\n\n seq_upper = seq.isupper()\n\n seq = seq[::-1]\n\n seq = seq.upper()\n\n #compute complement\n seq = seq.replace('A','u')\n seq = seq.replace('T','a')\n seq = seq.replace('G','c')\n seq = seq.replace('C','g')\n\n if seq_upper:\n return seq.upper()\n else:\n return seq",
"def shortestPalindrome(self, string):\n\t\tif not string:\n\t\t\treturn ''\n\t\tright = 0\n\t\tcenter = 0\n\t\tdataString = string\n\t\tstring = self.interleave(string)\n\t\tdps = [0] * len(string)\n\t\t\n\t\tfor i in range(1, len(string)):\n\t\t\tmirror = 2*center - i\n\t\t\tif i + dps[mirror] < right:\n\t\t\t\tdps[i] = dps[mirror]\n\t\t\telse:\n\t\t\t\tcenter = i\n\t\t\t\tmirror = 2 * center - right - 1\n\t\t\t\tridx = right + 1\n\t\t\t\t# print (i, center, right, mirror)\n\t\t\t\twhile ridx < len(string):\n\t\t\t\t\tif mirror >= 0 and string[mirror] == string[ridx]:\n\t\t\t\t\t\tmirror -= 1\n\t\t\t\t\t\tridx += 1\n\t\t\t\t\telse :\n\t\t\t\t\t\tbreak\n\t\t\t\t# print (i, center, ridx, mirror)\n\t\t\t\tright = ridx - 1\n\t\t\t\tdps[i] = right - i\n\n\t\t# print (string)\n\t\tidx = len(dps) - 1\n\t\twhile idx > 0:\n\t\t\tif idx == dps[idx]:\n\t\t\t\tbreak\n\t\t\tidx -= 1\n\t\t# print (idx, 'idx')\n\t\treturn dataString[:idx - 1 - len(dataString): -1] + dataString",
"def countPalindromicSubsequences(self, s: str) -> int:\n MOD = 10 ** 9 + 7\n \n def dp(i, j) -> (int, set):\n distinct = set()\n if i > j:\n return (0, distinct)\n if i == j:\n distinct.add(s[i])\n return (1, distinct)\n ret = 0\n for c in 'abcd':\n l = s.find(c, i, j)\n if l < 0:\n continue\n r = s.rfind(c, i, j)\n sub_ret, sub_set = dp(l, r)\n print(sub_ret, sub_set)\n # print(f'{c}-{sub_set}-{c}')\n ret += sub_ret + 1\n ret %= MOD\n distinct.union(sub_set)\n distinct.add(c)\n\n return ret, distinct\n return dp(0, len(s))[0]",
"def reverse_complement_strand(dna):\n assert (is_dna(dna))\n return ''.join(_rev_mapping[nn] for nn in dna[::-1])",
"def palindrome(x):\n pass"
] | [
"0.7124422",
"0.64792925",
"0.6309654",
"0.6244965",
"0.61357105",
"0.605843",
"0.6032168",
"0.5964192",
"0.57816607",
"0.5762167",
"0.5736995",
"0.5725491",
"0.5662916",
"0.5592227",
"0.5575531",
"0.55341953",
"0.5533105",
"0.5491442",
"0.5485938",
"0.5482197",
"0.5467225",
"0.54530895",
"0.5408092",
"0.538706",
"0.5384978",
"0.5384443",
"0.5382793",
"0.53645885",
"0.5360634",
"0.53306204"
] | 0.89094687 | 0 |
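The pair above maps "Return palindrome reads to palindrome_subreads.fasta." to another one-line path helper, and the surrounding negatives deal with reverse complements and palindromic reads. A small self-contained sketch, with a plain function standing in for the class property and a toy reverse_complement helper (both illustrative, not taken from the dataset), of the reverse-complement relation that makes a read "palindromic":

import os.path as op

def reverse_complement(seq):
    # Reverse complement of a DNA string; unknown symbols map to N.
    complement = {"A": "T", "C": "G", "G": "C", "T": "A"}
    return "".join(complement.get(base, "N") for base in reversed(seq.upper()))

def palindrome_reads_fasta(out_dir):
    # Output path for reads whose sequence closely matches its own reverse complement.
    return op.join(out_dir, "palindrome_subreads.fasta")

assert reverse_complement("ACGT") == "ACGT"  # a perfect reverse-complement palindrome
assert palindrome_reads_fasta("out").endswith("palindrome_subreads.fasta")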
Call SDPMatcher to align every read to its reverse complementary reads. | def _self_align(self):
logging.info("Splitting palindrome.")
logging.debug("Making reverse complement sequences of reads in " +
"{i} to {o}".format(i=self.ori_all_reads_fasta,
o=self.rc_all_reads_fasta))
num_reads = revcmp_fasta(self.ori_all_reads_fasta,
self.rc_all_reads_fasta)
reads_per_split = max(1, int(num_reads/self.nproc) + 1)
logging.debug("Splitting {f} to small files each containing {n} reads.".
format(f=self.ori_all_reads_fasta, n=reads_per_split))
fs = FastaSplitter(input_fasta=self.ori_all_reads_fasta,
reads_per_split=reads_per_split,
out_dir=self.out_dir,
out_prefix="reads.split.")
fs.split()
sp_fasta_files = fs.out_fns
logging.debug("Splitting {f} to smaller files.".
format(f=self.rc_all_reads_fasta))
rc_fs = FastaSplitter(input_fasta=self.rc_all_reads_fasta,
reads_per_split=reads_per_split,
out_dir=self.out_dir,
out_prefix="rc_reads.split.")
rc_fs.split()
rc_sp_fasta_files = rc_fs.out_fns
logging.debug("Aligning each read in {i} to its reverse complement "
"read using sdpMatcher.".format(i=self.ori_all_reads_fasta))
sdps = ["{f}.sdp".format(f=f) for f in sp_fasta_files]
jobs = []
for f, rc_f, sdp in zip(sp_fasta_files, rc_sp_fasta_files, sdps):
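# Each job pairs one chunk of the original reads with the matching chunk of
# their reverse complements and shells out to sdpMatcher, redirecting the
# per-chunk alignment output to a .sdp file; the literal "10" and "-local"
# arguments are passed through to sdpMatcher unchanged (their semantics are
# not documented in this snippet).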
cmd = "sdpMatcher {f} {rc_f} ".format(f=f, rc_f=rc_f) + \
"10 -local > {sdp} ".format(sdp=sdp)
logging.debug("CMD: {cmd}".format(cmd=cmd))
jobs.append(cmd)
pool = Pool(processes=self.nproc)
rets = pool.map(backticks, jobs)
pool.close()
pool.join()
for i, job in enumerate(jobs):
if rets[i][1] != 0:
errMsg = "Job {j} failed.".format(j=job) + str(rets[i][2])
raise RuntimeError(errMsg)
logging.debug("Concatenating all sdp outputs to {f}".
format(f=self.sdp_out_file))
cat_files(src=sdps, dst=self.sdp_out_file)
logging.debug("Cleaning intermediate fasta & sdp files.")
fs.rmOutFNs()
rc_fs.rmOutFNs()
for f in sdps:
os.remove(f) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _split_palindrome(self):\n if not op.exists(self.sdp_out_file) or self.force_redo is True:\n self._self_align()\n\n logging.debug(\"Parsing sdp and detect plindrome reads\")\n split_table = {}\n with SDPReader(self.sdp_out_file) as reader:\n for sdp in reader:\n if sdp.score <= self.palindrome_score_cutoff:\n split_table[str(sdp.qID)] = sdp\n\n logging.debug(\"Splitting palindrom reads.\")\n with FastaReader(self.ori_all_reads_fasta) as reader, \\\n FastaWriter(self.tmp_all_reads_fasta) as writer, \\\n FastaWriter(self.palindrome_reads_fasta) as palindrome_writer:\n for r in reader:\n if r.name in split_table:\n # found a palindrome\n sdp = split_table[r.name]\n # Write palindrome subreads to palindrome_subreads.fasta\n palindrome_writer.writeRecord(r.name, r.sequence)\n#\n# # split this read in the middle\n# split_point = int(sdp.qstart +\n# (sdp.alnqstart + sdp.alnqend)/2)\n# # Write the first half\n# rname_1 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw, s=sdp.qstart,\n# e=split_point)\n# writer.writeRecord(rname_1,\n# r.sequence[0:(split_point-sdp.qstart)])\n#\n# # Write the second half\n# rname_2 = \"{movie}/{zmw}/{s}_{e}\".format(\n# movie=sdp.movie, zmw=sdp.zmw,\n# s=(split_point+1), e=sdp.qend)\n# writer.writeRecord(rname_2,\n# r.sequence[(split_point-sdp.qstart):])\n else:\n writer.writeRecord(r.name, r.sequence)\n\n logging.debug(\"Moving {i} to {o}.\".format(i=self.tmp_all_reads_fasta,\n o=self.all_reads_fasta))\n shutil.move(self.tmp_all_reads_fasta, self.all_reads_fasta)",
"def mergeChainedAlignedSegments(chainedAlignedSegments, refSequence, readSequence):\n cAR = pysam.AlignedSegment()\n aR = chainedAlignedSegments[0]\n cAR.query_name = aR.query_name\n \n #Parameters we don't and therefore set properly\n #cAR.flag = aR.flag\n #cAR.mapq = aR.mapq\n #cAR.mrnm = 0\n #cAR.mpos=0\n #cAR.isize=0\n #cAR.qual = \"<\" * len(readSequence)\n #cAR.tags = aR.tags \n cAR.next_reference_id = -1\n cAR.reference_start = aR.reference_start #Reference start\n cAR.is_reverse = aR.is_reverse\n cAR.query_sequence = reverseComplement(readSequence) if cAR.is_reverse else readSequence\n cAR.reference_id = aR.reference_id\n cigarList = []\n pPos = aR.reference_start\n #Iterate from the other end of the sequence if reversed\n pQPos = -(len(readSequence)-1) if cAR.is_reverse else 0 \n \n for aR in chainedAlignedSegments:\n assert cAR.is_reverse == aR.is_reverse\n #Add a deletion representing the preceding unaligned reference positions\n assert aR.reference_start >= pPos\n if aR.reference_start > pPos:\n cigarList.append((2, aR.reference_start - pPos))\n pPos = aR.reference_start \n \n #Add an insertion representing the preceding unaligned read positions\n #make it a soft clip if it is the first chained alignment\n qPos = getFirstNonClippedPositionInRead(aR, readSequence)\n assert qPos >= pQPos\n if qPos > pQPos:\n cigarList.append((4 if aR == chainedAlignedSegments[0] else 1, qPos - pQPos)) \n pQPos = qPos\n \n #Add the operations of the cigar, filtering hard and soft clipping\n for op, length in aR.cigar:\n assert op in (0, 1, 2, 4, 5)\n if op in (0, 1, 2):\n cigarList.append((op, length))\n if op in (0, 2): #Is match or deletion\n pPos += length\n if op in (0, 1): #Is match or insertion\n pQPos += length\n \n assert pPos <= len(refSequence)\n \n #Set reference end coordinate (which is exclusive)\n #cAR.reference_end = pPos #We don't do this because it is set by cigar string\n \n #Now add any trailing, necessary soft clipping\n if cAR.is_reverse:\n assert pQPos <= 1\n if pQPos < 1:\n cigarList.append((4, -pQPos + 1))\n else:\n assert pQPos <= len(readSequence)\n if pQPos < len(readSequence):\n cigarList.append((4, len(readSequence) - pQPos))\n \n cAR.cigar = tuple(cigarList)\n \n #Check ops\n for op, length in cAR.cigar: #We should have no hard clipped ops\n assert op in (0, 1, 2, 4)\n \n #Reference sequence check coordinates\n assert sum([ length for op, length in cigarList if op in (0, 2)]) == cAR.reference_end - cAR.reference_start\n assert cAR.reference_start >= 0 and cAR.reference_start < len(refSequence)\n assert cAR.reference_end >= 0 and cAR.reference_end <= len(refSequence)\n \n #Read sequence check coordinates\n assert cAR.query_alignment_start >= 0 and cAR.query_alignment_start < len(readSequence)\n assert cAR.query_alignment_end >= 0 and cAR.query_alignment_end <= len(readSequence)\n assert cAR.query_alignment_start + sum([ length for op, length in cigarList if op in (0, 1)]) == cAR.query_alignment_end\n \n return cAR",
"def test_process_barcode_paired_end_data_orientation_reverse_in_read1(\r\n self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TTTTT']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'CGATCGA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a forward primer match in read 2, should reverse read order\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'GGTTCATC', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'CAA', '+', ')*+', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'GATCGATCGATCGATCG', '+',\r\n \"'()*+,-./01234567\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def align_reads(self):\n self._test_folder_existance(\n self._pathcreator.required_read_alignment_folders()\n )\n assert self._args.paired_end in [True, False]\n self._pathcreator.set_ref_seq_paths_by_species()\n self._ref_seq_files = self._pathcreator.get_ref_seq_files()\n self._pathcreator.set_ref_seq_path_list()\n self._test_align_file_existance()\n if not self._args.paired_end:\n # Single end reads\n self._read_files = self._pathcreator.get_read_files()\n self._lib_names = self._pathcreator.get_lib_names_single_end()\n self._pathcreator.set_read_files_dep_file_lists_single_end(\n self._read_files, self._lib_names\n )\n self._prepare_reads_single_end()\n print(f\"controller align_single_end_reads start {datetime.now()}\")\n self._align_single_end_reads()\n print(f\"controller align_single_end_reads stop {datetime.now()}\")\n else:\n # Paired end reads\n self._read_file_pairs = self._pathcreator.get_read_file_pairs()\n self._lib_names = self._pathcreator.get_lib_names_paired_end()\n self._pathcreator.set_read_files_dep_file_lists_paired_end(\n self._read_file_pairs, self._lib_names\n )\n self._prepare_reads_paired_end()\n print(f\"controller align_paired_end_reads start {datetime.now()}\")\n self._align_paired_end_reads()\n print(f\"controller align_paired_end_reads stop {datetime.now()}\")\n print(\n f\"controller generate_read_alignment_stats start {datetime.now()}\"\n )\n self._generate_read_alignment_stats(\n self._lib_names,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n self._pathcreator.read_alignments_stats_path,\n self._args.paired_end,\n )\n print(f\"controller generate_read_alignment_stats stop {datetime.now()}\")\n if self._args.crossalign_cleaning:\n self._remove_crossaligned_reads()\n\n if self._args.paired_end:\n # Build a bam file containing fragments merged from read\n # pairs\n if not self._args.no_fragment_building:\n fragments = True\n # sort the bam files by name and sam tag hit index to\n # accelerate fragment building\n print(\n f\"controller sort bams by name and index start {datetime.now()}\"\n )\n self._sort_bams_by_name_and_index()\n print(\n f\"controller sort bams by name and index end {datetime.now()}\"\n )\n # build the fragments bam file\n print(f\"controller build_fragments start {datetime.now()}\")\n self._build_fragments()\n print(f\"controller build_fragments stop {datetime.now()}\")\n # generate fragment alignment stats\n print(\n f\"controller generate_fragment_alignmnet_stats start {datetime.now()}\"\n )\n self._generate_read_alignment_stats(\n self._lib_names,\n self._pathcreator.aligned_fragments_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n self._pathcreator.fragment_alignments_stats_path,\n self._args.paired_end,\n fragments,\n )\n print(\n f\"controller generate_fragment_alignmnet_stats stop {datetime.now()}\"\n )\n # write fragment stats table\n print(\n f\"controller write_alignment_stats_table fragments start {datetime.now()}\"\n )\n self._write_alignment_stat_table(\n self._pathcreator.fragment_alignments_stats_path,\n self._pathcreator.fragment_alignment_stats_table_path,\n self._pathcreator.fragment_alignment_stats_table_transposed_path,\n fragments,\n )\n print(\n f\"controller write_alignment_stats_table fragments stop {datetime.now()}\"\n )\n print(\n f\"controller write_alignment_stats_table reads start {datetime.now()}\"\n )\n self._write_alignment_stat_table(\n self._pathcreator.read_alignments_stats_path,\n self._pathcreator.read_alignment_stats_table_path,\n 
self._pathcreator.read_alignment_stats_table_transposed_path,\n )\n print(\n f\"controller write_alignment_stats_table reads stop {datetime.now()}\"\n )",
"def test_process_barcode_paired_stitched_reverse_primer_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'AAAAAA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'GATCG']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n\r\n # With reverse primer match, should write in order of read2, read 1\r\n process_barcode_paired_stitched(fastq1_data,\r\n bcs_out, reads1_out, bc1_len=3, bc2_len=4,\r\n rev_comp_bc1=True, rev_comp_bc2=False,\r\n attempt_read_orientation=True,\r\n forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq_out_not_oriented=fastq1_out_not_oriented,\r\n switch_bc_order=False)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'TCGCGAT', '+', \"567'&%$\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGAT', '+',\r\n '43210/.-,+*)(', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def test_process_barcode_paired_end_data_orientation_forward_in_read2(\r\n self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TTCCA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a forward primer match in read 2, should reverse read order\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'GGTTCATC', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'CAA', '+', ')*+', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'GATCGATCGATCGATCG', '+',\r\n \"'()*+,-./01234567\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def read_pair_align(read1, read2):\n r1pos = [x+1 for x in read1.positions]\n r2pos = [x+1 for x in read2.positions]\n if read1.mate_is_reverse and r1pos[0] < r2pos[0]: # read1 is earlier\n read = [r1pos[0], r1pos[-1], r2pos[0], r2pos[-1]]\n elif read2.mate_is_reverse and r2pos[0] < r1pos[0]: # read2 is earlier\n read = [r2pos[0], r2pos[-1], r1pos[0], r1pos[-1]]\n else:\n read = []\n # print(\"Skipping read pair from error in alignment.\")\n # print(\"%s--%s> <%s--%s\" % tuple(read))\n return read",
"def ReverseComplement(self):\n if (self.translated == False):\n for i in range(len(self.alignment)):\n self.alignment[i].seq = self.alignment[i].seq.reverse_complement()\n self.Show(self.displayedColumn)\n self.BackupAlignment()\n else:\n self.AlertMessage(\"Can't reverse-complement protein sequences.\", 'medium')",
"def test_process_barcode_paired_end_data_orientation_forward_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'GATCGA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a match to the forward primer, should parse out primers in\r\n # the given order of read 1 and read 2.\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def _align_paired_end_reads(self):\n read_aligner = ReadAligner(self._args.segemehl_bin, self._args.progress)\n if self._file_needs_to_be_created(self._pathcreator.index_path):\n read_aligner.build_index(\n self._pathcreator.ref_seq_path_list,\n self._pathcreator.index_path,\n )\n for read_path_pair, output_path, nomatch_path in zip(\n self._pathcreator.processed_read_path_pairs,\n self._pathcreator.read_alignment_bam_paths,\n self._pathcreator.unaligned_reads_paths,\n ):\n if not self._file_needs_to_be_created(output_path):\n continue\n read_aligner.run_alignment(\n read_path_pair,\n self._pathcreator.index_path,\n self._pathcreator.ref_seq_path_list,\n output_path,\n nomatch_path,\n int(self._args.processes),\n int(self._args.segemehl_accuracy),\n float(self._args.segemehl_evalue),\n self._args.split,\n paired_end=True,\n )",
"def test_check_seqs_reverse_primers(self):\r\n\r\n # Initial test, should truncate all seqs\r\n in_seqs = self.in_seqs_reverse_primers\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Second test with a mismatch in seq a, should not find reverse primer\r\n # and will write out entire sequence.\r\n\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # With reverse_primer_mismatches allowed set to 1,\r\n # should restore truncation.\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch_allowed\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n 
retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False,\r\n reverse_primer_mismatches=1)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Testing truncate_remove, which should not write sequences where\r\n # the reverse primer is not found\r\n in_seqs = self.in_seqs_reverse_primers\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_full_remove\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_remove',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Testing truncate_remove, with reverse_primer_mismatches set to 1\r\n # should allow all 4 sequences to be written, truncated\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch_allowed\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=1,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_remove',\r\n rev_primers=rev_primers_test,\r\n qual_out=False,\r\n reverse_primer_mismatches=1)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)",
"def process_barcode_paired_end_data(read1_data,\r\n read2_data,\r\n output_bc_fastq,\r\n output_fastq1,\r\n output_fastq2,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n attempt_read_orientation=False,\r\n forward_primers=None,\r\n reverse_primers=None,\r\n output_bc_not_oriented=None,\r\n fastq1_out_not_oriented=None,\r\n fastq2_out_not_oriented=None):\r\n\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n found_primer_match = False\r\n # Break from orientation search as soon as a match is found\r\n if attempt_read_orientation:\r\n # First check forward primers\r\n for curr_primer in forward_primers:\r\n if curr_primer.search(read1_data[sequence_index]):\r\n read1 = read1_data\r\n read2 = read2_data\r\n found_primer_match = True\r\n break\r\n if curr_primer.search(read2_data[sequence_index]):\r\n read1 = read2_data\r\n read2 = read1_data\r\n found_primer_match = True\r\n break\r\n # Check reverse primers if forward primers not found\r\n if not found_primer_match:\r\n for curr_primer in reverse_primers:\r\n if curr_primer.search(read1_data[sequence_index]):\r\n read1 = read2_data\r\n read2 = read1_data\r\n found_primer_match = True\r\n break\r\n if curr_primer.search(read2_data[sequence_index]):\r\n read1 = read1_data\r\n read2 = read2_data\r\n found_primer_match = True\r\n break\r\n else:\r\n read1 = read1_data\r\n read2 = read2_data\r\n\r\n if not found_primer_match and attempt_read_orientation:\r\n read1 = read1_data\r\n read2 = read2_data\r\n output_bc = output_bc_not_oriented\r\n output_read1 = fastq1_out_not_oriented\r\n output_read2 = fastq2_out_not_oriented\r\n else:\r\n output_bc = output_bc_fastq\r\n output_read1 = output_fastq1\r\n output_read2 = output_fastq2\r\n\r\n bc_read1 = read1[sequence_index][0:bc1_len]\r\n bc_read2 = read2[sequence_index][0:bc2_len]\r\n bc_qual1 = read1[quality_index][0:bc1_len]\r\n bc_qual2 = read2[quality_index][0:bc2_len]\r\n if rev_comp_bc1:\r\n bc_read1 = str(DNA(bc_read1).rc())\r\n bc_qual1 = bc_qual1[::-1]\r\n if rev_comp_bc2:\r\n bc_read2 = str(DNA(bc_read2).rc())\r\n bc_qual2 = bc_qual2[::-1]\r\n\r\n bc_lines = format_fastq_record(read1[header_index],\r\n bc_read1 + bc_read2,\r\n np.hstack([bc_qual1, bc_qual2]))\r\n output_bc.write(bc_lines)\r\n seq1_lines = format_fastq_record(read1[header_index],\r\n read1[sequence_index][bc1_len:], read1[quality_index][bc1_len:])\r\n output_read1.write(seq1_lines)\r\n seq2_lines = format_fastq_record(read2[header_index],\r\n read2[sequence_index][bc2_len:], read2[quality_index][bc2_len:])\r\n output_read2.write(seq2_lines)\r\n\r\n return",
"def test_process_barcode_paired_end_data_orientation_rev_in_read2(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TTTTTT']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TCCAA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a reverse primer in read 2, should write in current order.\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def test_process_barcode_paired_stitched_forward_primer_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'GATCGA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n\r\n # With forward primer match, should write in order of read 1, read 2\r\n process_barcode_paired_stitched(fastq1_data,\r\n bcs_out, reads1_out, bc1_len=3, bc2_len=4,\r\n rev_comp_bc1=True, rev_comp_bc2=True,\r\n attempt_read_orientation=True,\r\n forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq_out_not_oriented=fastq1_out_not_oriented,\r\n switch_bc_order=True)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'CGATGAT', '+', '7654&%$', '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'GATCGATCGATCG', '+',\r\n \"'()*+,-./0123\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def process(\n self,\n name_and_reads: Tuple[str, Iterable[reads_pb2.Read]],\n ) -> Iterable[Tuple[str, List[reads_pb2.Read]]]:\n\n name, reads = name_and_reads[0], list(name_and_reads[1])\n # Note, examples will only be included in one of the initial counters since\n # we are returning early.\n if not reads:\n self.no_reads_counter.inc()\n return\n\n # Do not error for labels that have multiple alignments to correct molecule.\n # One of the alignments may be a supplementary alignment.\n if self.is_label and len(reads) > 1:\n logging.info('Unexpected: %d labels for %s', len(reads),\n reads[0].fragment_name)\n self.multiple_alignments_counter.inc()\n\n reads_copy = copy.deepcopy(reads)\n for read in reads_copy:\n assert read.aligned_sequence\n base_index = 0\n expanded_sequence = ''\n expanded_cigar_str = ''\n new_cigar_ops = []\n if not self.is_label:\n pw = struct_utils.get_int_field(read.info, 'pw')\n ip = struct_utils.get_int_field(read.info, 'ip')\n new_pw = []\n new_ip = []\n\n for op in read.alignment.cigar:\n # Skip over ops we don't want, such as soft clips.\n if op.operation not in dc_constants.OPS_TO_CONSIDER:\n base_index += op.operation_length\n continue\n if op.operation in dc_constants.READ_ADVANCING_OPS:\n start = base_index\n end = start + op.operation_length\n expanded_sequence += read.aligned_sequence[start:end]\n base_index += op.operation_length\n if not self.is_label:\n new_pw += pw[start:end]\n new_ip += ip[start:end]\n else:\n # Add a special token in sequence where we have deletion.\n expanded_sequence += dc_constants.GAP_OR_PAD * op.operation_length\n\n new_cigar_ops.append(op)\n op_char = cigar_utils.CIGAR_OPS_TO_CHAR[op.operation]\n expanded_cigar_str += op_char * op.operation_length\n\n # Update the read sequence.\n read.aligned_sequence = expanded_sequence\n assert len(read.aligned_sequence) == len(expanded_cigar_str)\n\n # Update the read cigar to only include ops that were kept.\n del read.alignment.cigar[:]\n read.alignment.cigar.extend(new_cigar_ops)\n\n # Save pw, ip, and expanded cigar string to be used downstream.\n if not self.is_label:\n struct_utils.set_int_field(read.info, 'pw', new_pw)\n struct_utils.set_int_field(read.info, 'ip', new_ip)\n # PW and IP won't be the same length as read.aligned_sequence here\n # because we haven't yet spaced out PW and IP based on gaps/padding.\n struct_utils.set_string_field(read.info, 'expanded_cigar',\n expanded_cigar_str)\n yield name, reads_copy",
"def process_barcode_paired_stitched(read_data,\r\n output_bc_fastq,\r\n output_fastq,\r\n bc1_len=6,\r\n bc2_len=6,\r\n rev_comp_bc1=False,\r\n rev_comp_bc2=False,\r\n attempt_read_orientation=False,\r\n forward_primers=None,\r\n reverse_primers=None,\r\n output_bc_not_oriented=None,\r\n fastq_out_not_oriented=None,\r\n switch_bc_order=False):\r\n\r\n header_index = 0\r\n sequence_index = 1\r\n quality_index = 2\r\n\r\n read_seq = read_data[sequence_index]\r\n read_qual = read_data[quality_index]\r\n\r\n found_primer_match = False\r\n # Break from orientation search as soon as a match is found\r\n if attempt_read_orientation:\r\n for curr_primer in forward_primers:\r\n if curr_primer.search(read_data[sequence_index]):\r\n found_primer_match = True\r\n break\r\n if not found_primer_match:\r\n for curr_primer in reverse_primers:\r\n if curr_primer.search(read_data[sequence_index]):\r\n read_seq = str(DNA(read_seq).rc())\r\n read_qual = read_qual[::-1]\r\n found_primer_match = True\r\n break\r\n\r\n if not found_primer_match and attempt_read_orientation:\r\n output_bc = output_bc_not_oriented\r\n output_read = fastq_out_not_oriented\r\n else:\r\n output_bc = output_bc_fastq\r\n output_read = output_fastq\r\n\r\n bc_read1 = read_seq[0:bc1_len]\r\n bc_read2 = read_seq[-bc2_len:]\r\n bc_qual1 = read_qual[0:bc1_len]\r\n bc_qual2 = read_qual[-bc2_len:]\r\n\r\n if rev_comp_bc1:\r\n bc_read1 = str(DNA(bc_read1).rc())\r\n bc_qual1 = bc_qual1[::-1]\r\n if rev_comp_bc2:\r\n bc_read2 = str(DNA(bc_read2).rc())\r\n bc_qual2 = bc_qual2[::-1]\r\n\r\n if switch_bc_order:\r\n bc_read1, bc_read2 = bc_read2, bc_read1\r\n bc_qual1, bc_qual2 = bc_qual2, bc_qual1\r\n\r\n bc_lines = format_fastq_record(read_data[header_index],\r\n bc_read1 + bc_read2,\r\n np.hstack([bc_qual1, bc_qual2]))\r\n output_bc.write(bc_lines)\r\n seq_lines = format_fastq_record(read_data[header_index],\r\n read_seq[bc1_len:-bc2_len], read_qual[bc1_len:-bc2_len])\r\n output_read.write(seq_lines)\r\n\r\n return",
"def translations(self):\n rc = self.reverseComplement().sequence\n for reverseComplemented in False, True:\n for frame in 0, 1, 2:\n seq = rc if reverseComplemented else self.sequence\n # Get the suffix of the sequence for translation. I.e.,\n # skip 0, 1, or 2 initial bases, depending on the frame.\n # Note that this makes a copy of the sequence, which we can\n # then safely append 'N' bases to to adjust its length to\n # be zero mod 3.\n suffix = seq[frame:]\n lengthMod3 = len(suffix) % 3\n if lengthMod3:\n suffix += ('NN' if lengthMod3 == 1 else 'N')\n yield TranslatedRead(self, translate(suffix), frame,\n reverseComplemented)",
"def _rc_seq(self):\n logger.debug(\"Extracting sequences on the reverse strand\")\n sequences_rc = []\n table = str.maketrans({'a': 't', 'c': 'g', 'g': 'c', 't': 'a',\n 'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'})\n for sequence in self.sequences:\n sequences_rc.append(sequence.translate(table)[::-1])\n self.sequences_rc = sequences_rc",
"def strip_adapters(self, reads):\n for a_set in self.adapter_tables:\n M = len(reads[0][1])\n N = min(M, len(a_set[0][0]))\n start = string_trimming.overlap(\n a_set[0][0], reads[0][1][:N][::-1], a_set[0][1])\n stop = M - string_trimming.overlap(\n a_set[1][0], reads[0][1][-N:], a_set[1][1])\n if stop - start < M:\n reads[0][1] = reads[0][1][start:stop]\n reads[0][3] = reads[0][3][start:stop]",
"def revtranslate_align(aaseqs, dnaseqs, check=False, trim=False):\n\n align = new_align(aaseqs)\n\n for name, seq in aaseqs.iteritems():\n try:\n dna = dnaseqs[name].upper()\n dnalen = len(dna)\n aalen = sum(int(a != \"-\") for a in seq)\n\n if len(dna) != aalen * 3:\n if trim:\n # make dna a multiple of three\n dna = dna[:(len(dna) // 3) * 3]\n\n if len(dna) > aalen * 3:\n # trim dna\n dna = dna[:aalen*3]\n else:\n # trim peptide to match nucleotide\n j = 0\n for i in xrange(len(seq)):\n if seq[i] != '-':\n j += 1\n if j > len(dna) // 3:\n seq = seq[:i] + \"-\" * (len(seq) - i)\n break\n\n aalen2 = sum(int(a != \"-\") for a in seq)\n assert len(dna) == aalen2 * 3, (\n len(dna), aalen2 * 3)\n\n util.logger(\"trim dna (%d) and pep (%d)\" %\n (dnalen - len(dna), aalen - aalen2))\n\n else:\n # is last residue X?\n for i in xrange(len(seq)-1, -1, -1):\n if seq[i] == \"-\":\n continue\n if seq[i] == \"X\":\n # repair\n seq = seq[:i] + \"-\" * (len(seq)-i)\n dna = dna[:-3]\n break\n\n align[name] = seqlib.revtranslate(seq, dna, check=check)\n except seqlib.TranslateError:\n raise\n\n return align",
"def test_process_barcode_paired_end_data_orientation_no_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'AYA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With no matches, should write to the not_oriented files, and keep\r\n # in the same order of file 1 and file 2\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def main (fastq):\n\t\n\t\n\t\n\tfor record in SeqIO.parse(fastq, \"fastq\"):\n\t\t\n\t\tQ = record.letter_annotations[\"phred_quality\"]\n\n\t\tif record.id[-2:]==\"_1\":\n\t\t\n\t\t\tupperseq = SeqRecord( record.seq.reverse_complement(), id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q[::-1]\n\t\t\tprint upperseq.format(\"fastq\"),\n\t\t\n\t\telse:\n\t\t\tupperseq = SeqRecord( record.seq, id = record.id, description = \"\" )\n\t\t\tupperseq.letter_annotations[\"phred_quality\"] = Q\t\t\t\n\t\t\tprint upperseq.format(\"fastq\"),",
"def chainFn(alignedSegments, refSeq, readSeq, scoreFn=\\\n lambda alignedSegment, refSeq, readSeq : \\\n sum([ length for op, length in alignedSegment.cigar if op == 0]), maxGap=200):\n #Score function is number of aligned pairs\n def getStartAndEndCoordinates(alignedSegment):\n \"\"\"Gets the start and end coordinates in both the reference and query, using coordinates\n relative to the original read and reference equence\n \"\"\"\n return alignedSegment.reference_start, getFirstNonClippedPositionInRead(alignedSegment, readSeq), \\\n alignedSegment.reference_end-1, getLastNonClippedPositionInRead(alignedSegment, readSeq) \n \n alignedSegmentToScores = dict([ (aR, scoreFn(aR, refSeq, readSeq)) for aR in alignedSegments])\n alignedSegmentToCoordinates = dict([ (aR, getStartAndEndCoordinates(aR)) for \\\n aR in alignedSegments])\n alignedSegmentPointers = {}\n \n #Currently uses sloppy quadratic algorithm to find highest chain\n alignedSegments = sorted(alignedSegments, key=lambda aR : alignedSegmentToCoordinates[aR][0]) \n #Sort by reference coordinate\n for i in xrange(len(alignedSegments)):\n aR = alignedSegments[i]\n rStart, qStart, rEnd, qEnd = alignedSegmentToCoordinates[aR]\n score = alignedSegmentToScores[aR]\n for j in xrange(i): #Look at earlier alignments in list\n aR2 = alignedSegments[j]\n rStart2, qStart2, rEnd2, qEnd2 = alignedSegmentToCoordinates[aR2]\n assert rStart2 <= rStart\n if rStart > rEnd2 and qStart > qEnd2 and aR.is_reverse == aR2.is_reverse and \\\n rStart - rEnd2 + qStart - qEnd2 <= maxGap and \\\n score + alignedSegmentToScores[aR2] > alignedSegmentToScores[aR]: \n #Conditions for a chain\n alignedSegmentToScores[aR] = score + alignedSegmentToScores[aR2]\n alignedSegmentPointers[aR] = aR2\n \n #Now find highest scoring alignment \n aR = max(alignedSegments, key=lambda aR : alignedSegmentToScores[aR])\n \n #Construct chain of alignedSegments\n chain = [ aR ]\n while aR in alignedSegmentPointers:\n aR = alignedSegmentPointers[aR]\n chain.append(aR)\n chain.reverse()\n \n return chain",
"def _pair_reads_sorted(join_direction=\"left\"):\n\n def _join(forward_reads, reverse_reads, cmp_regex):\n counter = count(1)\n r1s, r2s = forward_reads, reverse_reads\n if join_direction.lower() == \"left\":\n def _discard(r1, r2, k1, k2):\n logging.debug(\"Dropped read: %s\", k2)\n i, r2 = counter.next(), r2s.next()\n k2 = extract_compare_key([r2], cmp_regex, track=i).next()\n return i, r1, r2, k1, k2\n\n else:\n def _discard(r1, r2, k1, k2):\n logging.debug(\"Dropped read: %s\", k1)\n i, r1 = counter.next(), r1s.next()\n k1 = extract_compare_key([r1], cmp_regex, track=i).next()\n return i, r1, r2, k1, k2\n\n while True:\n i, r1, r2 = counter.next(), r1s.next(), r2s.next()\n k1, k2 = extract_compare_key((r1, r2), cmp_regex, track=i)\n while k1 != k2:\n i, r1, r2, k1, k2 = _discard(r1, r2, k1, k2)\n yield r1, r2\n\n\n return _join",
"def run_demultiplex_and_trim(self, opts, **kwargs):\n \n import logging\n self.logger = logging.getLogger('demultip')\n \n sample_primer_dict = {}\n \n if not opts:\n sys.exit(\"command line options not getting to main method\")\n \n metafile = opts.m\n \n # extract .gz to temp file location\n if 'gzipFilename' in kwargs:\n self.logger.info(\"Incoming kwargs detected...gzip file?\")\n #sequence_file = kwargs.get('gzipFilename')\n else:\n self.logger.info(\"No kwargs, normal Fastq file\")\n #sequence_file = opts.f\n self.logger.info(\"processing {0} total sequences\".format(str((self.r1_tot+self.r2_tot)/4)))\n self.logger.info(\"using the first {0} bases of primer in search\".format(self.search_length))\n\n \n #extract the relevant data from the metadata file, can maybe change this to non-qiime1\n self.logger.info(\"Getting header and mapping data...\")\n header, mapping_data, run_description, errors, warnings = process_id_map(metafile)\n self.logger.debug(\"metadata headers {0}\".format(header))\n self.logger.debug(\"csv mapping data from {0}...\\n{1}\".format(metafile, \"\\n\".join([str(x) for x in mapping_data])))\n \n # get the primer regex search patterns\n self.logger.info(\"Generating regex search patterns...\")\n forward_primers, forward_primers_rc, reverse_primers, reverse_primers_rc = self.create_primer_regex_patterns(header, mapping_data)\n self.primer_pattern_dict_list = {'fp' : forward_primers, 'fprc' : forward_primers_rc, 'rp' : reverse_primers, 'rprc' : reverse_primers_rc}\n \n \n self.logger.debug(\"forward_primer patterns\\n{0}\\n\".format(\"\\n\".join([str(x.pattern) for x in self.primer_pattern_dict_list.get('fp')])))\n self.logger.debug(\"reverse_primers patterns\\n{0}\\n\".format(\"\\n\".join([str(x.pattern) for x in self.primer_pattern_dict_list.get('rp')])))\n \n # replace all extra characters in header with underscore\n intab = '.-+|=:;,&$'\n outtab = '__________'\n trantab = maketrans(intab, outtab)\n \n for samples in mapping_data:\n try:\n sample_primer_dict[samples[header.index('SampleID')].translate(trantab)] = (samples[header.index('LinkerPrimerSequence')], samples[header.index('ReversePrimer')])\n except Exception as e:\n self.logger.error(\"Can not find {0} in header fields, please make sure metadata file has the required fields\".format(e))\n \n self.logger.debug(\"sample_primer_dict...{0}\".format(\"\\n\".join(x) for x in sample_primer_dict.items()))\n self.logger.info(\"Starting demultiplex process...\")\n \n bar = progressbar.ProgressBar(max_value=(self.r1_tot+self.r2_tot)/4,redirect_stdout=True)\n \n for r1, r2 in itertools.izip(self.R1.itervalues(), self.R2.itervalues()):\n #self.logger.debug(\"r1 {0}\".format(r1))\n #self.logger.debug(\"r2 {0}\".format(r1))\n\n pair_seq_dict = {'r1' : r1, 'r2' : r2}\n self.logger.debug(\"new read pair\\n\")\n self.logger.debug(\"processing new read pair {0}\".format(pair_seq_dict.keys()))\n \n self.logger.debug(\"processing seq ID - R1 {0}... 
R2 {1}\".format(r1.id, r2.id))\n self.logger.debug(\"R1 sequence - {0}...\".format(r1.seq[0:50]))\n self.logger.debug(\"R2 sequence - {0}...\".format(r2.seq[0:50]))\n\n self.sample_id = \"\"\n # because we process two sequences at a time (R1 and R2)\n self.processed_seqs += 2\n\n self.f_primer_found = []\n self.r_primer_found = []\n \n self.logger.debug(\"Looking in pair read for patterns...\")\n \n search_result = self.regex_search_through_sequence(pair_seq_dict, self.primer_pattern_dict_list)\n #self.logger.debug(\"pre read correction search_result - {0}\".format(search_result))\n #search_result = self.correct_orientation_of_reads(search_result)\n #self.logger.debug(\"post read correction search_result - {0}\".format(search_result))\n \n try:\n if type(search_result) == list and len(search_result) > 1:\n self.logger.debug(\"search result - {0}\".format(search_result[0]))\n self.logger.debug(\"search result - {0}\".format(search_result[1]))\n except IndexError as e:\n self.logger.debug(\"search result - {0}\".format(search_result))\n self.logger.debug(\"error in list index {0}\".format(e))\n \n \n read_pair_proceed = self.screen_read_pair_suitability(search_result)\n \n self.logger.debug(\"proceed with read pair ? {0}\".format(read_pair_proceed))\n \n if read_pair_proceed != 'failed':\n try:\n sample_id = self.get_sample_id_from_primer_sequence(sample_primer_dict, \n search_result[0].get('pattern'), \n search_result[1].get('pattern'))\n self.logger.debug(\"- R1 ID -> {0} & R2 ID -> {1} from sample {2}\".format(r1.id, r2.id, sample_id))\n except IndexError as e:\n # sample is missing one or both the patterns keys\n self.logger.debug(\"Sample seq is missing a pattern, {0}- discarding read\".format(e))\n output = self.record_buffer_and_writer({'discarded' : pair_seq_dict})\n self.unmapped_count += 2\n continue\n try:\n new_seq = self.clip_primers_from_seq(search_result, self.primer_pattern_dict_list, pair_seq_dict, sample_primer_dict, sample_id)\n self.logger.debug(\"clipped read returned...{0} seqs\".format(len(new_seq)))\n output = self.record_buffer_and_writer(new_seq)\n self.both_primers_count += 2\n except Exception as e:\n output = self.record_buffer_and_writer({'discarded' : pair_seq_dict})\n self.logger.debug(\"attempt to clip sequence failed - errmsg - {0} - discarding read {1}\".format(e, output))\n self.unmapped_count += 2\n continue\n \n bar.update(self.processed_seqs) \n \n if output == \"cleared\":\n self.record_buffer = {}\n self.logger.debug(\"buffer check {0}\".format(self.record_buffer))\n\n elif read_pair_proceed == 'failed':\n self.unmapped_count += 2\n output = self.record_buffer_and_writer({'discarded' : pair_seq_dict})\n bar.update(self.processed_seqs) \n \n self.logger.info(\"__________________________\")\n self.logger.info(\"Samples successfully mapped (F+R found): {0}\".format(self.both_primers_count))\n self.logger.info(\"Read pairs in alternate orientation - {0}\".format(str(len(self.alternate_orientation))))\n self.logger.info(\"Sequences not mapped: {0}\".format(self.unmapped_count))\n self.logger.info(\"Total sequences checked: {0}\".format(self.processed_seqs))\n \n self.logger.info(\"writing alternate record IDs...\")\n with open(\"alternate_orientation_records.txt\", 'w') as f:\n for sequence_id in self.alternate_orientation:\n output_id = ''.join(sequence_id)\n f.write(output_id) \n \n self.logger.info(\"Run finished\")",
"def parse_match(self, read_id, alignment_position, length, read_sequence, ref_sequence, qualities):\n start = alignment_position\n stop = start + length\n for i in range(start, stop):\n\n self.coverage[i] += 1\n allele = read_sequence[i-alignment_position]\n ref = ref_sequence[i-alignment_position]\n self.base_dictionary[read_id][i] = (allele, qualities[i-alignment_position])\n # self._update_base_dictionary(read_id, i, allele, qualities[i-alignment_position])\n if allele != ref:\n self.mismatch_count[i] += 1\n self._update_read_allele_dictionary(read_id, i, allele, MISMATCH_ALLELE, qualities[i-alignment_position])\n else:\n self.match_count[i] += 1\n # this slows things down a lot. Don't add reference allele to the dictionary if we don't use them\n # self._update_read_allele_dictionary(i, allele, MATCH_ALLELE)",
"def test_align_unaligned_seqs(self):\n res = align_unaligned_seqs(self.seqs1_fp, RNA)\n self.assertEqual(res.toFasta(), self.seqs1_aln)",
"def test_align_invert(self):\n al = align(self.amp1, self.amp2, inverse=False)\n\n al_inv = align(self.amp2, self.amp1, inverse=True)\n\n print(al.R)\n print(al_inv.R)\n\n print(al.T)\n print(al_inv.T)",
"def test_demultiplex_sequences_alternate_settings(self):\r\n\r\n file_data = {}\r\n file_data['mapping_file'] = self.valid_mapping_data_golay_upper\r\n file_data['fasta_files'] = [self.valid_fasta_file_with_bc_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n file_data['unassigned_seqs_f'] = FakeOutFile()\r\n file_data['unassigned_qual_f'] = FakeOutFile()\r\n\r\n keep_barcode = True,\r\n barcode_type = 12\r\n max_bc_errors = 1\r\n start_index = 500\r\n write_unassigned_reads = True\r\n disable_bc_correction = False\r\n added_demultiplex_field = 'Added_Demultiplex'\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n demultiplex_sequences(file_data, keep_barcode, barcode_type,\r\n max_bc_errors, start_index, write_unassigned_reads,\r\n disable_bc_correction, added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_500 ABCD0001 orig_bc=TACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=1\\nTACTCGTCGATGCAGGACGAGACGAGGTT\\n'\r\n expected_demultiplexed_qual_seq = '>s1_500 ABCD0001 orig_bc=TACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=1\\n29 13 24 14 10 14 16 13 30 10 13 11 30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'AGCAGCACTTGT,1,s2': 0, 'ACCGCAGAGTCA,1,s3': 0,\r\n 'AACTCGTCGATG,1,s1': 1}\r\n expected_bc_freqs = {'TACTCGTCGATG': 1, 'GCCGCAGAGTCA': 1,\r\n 'AGCAGCACTTGT': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [1, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)",
"def find_read_candidates(self, read):\n self.read_allele_dictionary = {}\n ref_alignment_start = read.reference_start\n ref_alignment_stop = self.get_read_stop_position(read)\n # if the region has reached a very high coverage, we are not going to parse through all the reads\n if self.coverage[ref_alignment_start] > 300:\n return False\n cigar_tuples = read.cigartuples\n read_sequence = read.query_sequence\n read_id = read.query_name\n read_quality = read.query_qualities\n ref_sequence = self.fasta_handler.get_sequence(chromosome_name=self.chromosome_name,\n start=ref_alignment_start,\n stop=ref_alignment_stop+10)\n\n self.read_info[read_id] = (ref_alignment_start, ref_alignment_stop, read.mapping_quality, read.is_reverse)\n for pos in range(ref_alignment_start, ref_alignment_stop):\n self.read_id_by_position[pos].append((read_id, ref_alignment_start, ref_alignment_stop))\n for i, ref_base in enumerate(ref_sequence):\n self.reference_dictionary[ref_alignment_start + i] = ref_base\n\n # read_index: index of read sequence\n # ref_index: index of reference sequence\n read_index = 0\n ref_index = 0\n found_valid_cigar = False\n for cigar in cigar_tuples:\n cigar_code = cigar[0]\n length = cigar[1]\n # get the sequence segments that are effected by this operation\n ref_sequence_segment = ref_sequence[ref_index:ref_index+length]\n read_quality_segment = read_quality[read_index:read_index+length]\n read_sequence_segment = read_sequence[read_index:read_index+length]\n\n if cigar_code != 0 and found_valid_cigar is False:\n read_index += length\n continue\n found_valid_cigar = True\n\n # send the cigar tuple to get attributes we got by this operation\n ref_index_increment, read_index_increment = \\\n self.parse_cigar_tuple(cigar_code=cigar_code,\n length=length,\n alignment_position=ref_alignment_start+ref_index,\n ref_sequence=ref_sequence_segment,\n read_sequence=read_sequence_segment,\n read_id=read_id,\n quality=read_quality_segment)\n\n # increase the read index iterator\n read_index += read_index_increment\n ref_index += ref_index_increment\n\n # after collecting all alleles from reads, update the global dictionary\n for position in self.read_allele_dictionary.keys():\n if position < self.region_start_position or position > self.region_end_position:\n continue\n self.rms_mq[position] += read.mapping_quality * read.mapping_quality\n for record in self.read_allele_dictionary[position]:\n # there can be only one record per position in a read\n allele, allele_type = record\n\n if allele_type == MATCH_ALLELE or allele_type == MISMATCH_ALLELE:\n # If next allele is indel then group it with the current one, don't make a separate one\n if position + 1 <= ref_alignment_stop and position + 1 in self.read_allele_dictionary.keys():\n next_allele, next_allele_type = list(self.read_allele_dictionary[position + 1].keys())[0]\n if next_allele_type == INSERT_ALLELE or next_allele_type == DELETE_ALLELE:\n continue\n self.positional_read_info[position].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position, allele, allele_type,\n read.mapping_quality)\n else:\n # it's an insert or delete, so, add to the previous position\n self.positional_read_info[position-1].append(\n (read_id, ref_alignment_start, ref_alignment_stop, read.mapping_quality))\n self._update_positional_allele_dictionary(read_id, position-1, allele, allele_type,\n read.mapping_quality)\n return True"
] | [
"0.6444837",
"0.5993634",
"0.59170055",
"0.5713154",
"0.569583",
"0.564132",
"0.56328523",
"0.552024",
"0.5510628",
"0.55094206",
"0.54319066",
"0.5429327",
"0.5372529",
"0.5343879",
"0.5330321",
"0.5324704",
"0.52589524",
"0.52114356",
"0.51807946",
"0.5178548",
"0.51682466",
"0.51433194",
"0.51354975",
"0.5133801",
"0.5119044",
"0.5080689",
"0.50769943",
"0.50716734",
"0.5057038",
"0.5055425"
] | 0.656948 | 0 |
There exist some chimeric reads in which adapters are either missing or not recognizable. These are called palindrome reads, which conform to the | def _split_palindrome(self):
    if not op.exists(self.sdp_out_file) or self.force_redo is True:
        self._self_align()
    logging.debug("Parsing sdp and detecting palindrome reads")
    split_table = {}
    with SDPReader(self.sdp_out_file) as reader:
        for sdp in reader:
            if sdp.score <= self.palindrome_score_cutoff:
                split_table[str(sdp.qID)] = sdp
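    # Reads whose self-alignment score falls at or below the palindrome cutoff are
    # recorded in split_table; they are written to palindrome_subreads.fasta below
    # instead of the regular all-reads output.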
logging.debug("Splitting palindrom reads.")
with FastaReader(self.ori_all_reads_fasta) as reader, \
FastaWriter(self.tmp_all_reads_fasta) as writer, \
FastaWriter(self.palindrome_reads_fasta) as palindrome_writer:
for r in reader:
if r.name in split_table:
# found a palindrome
sdp = split_table[r.name]
# Write palindrome subreads to palindrome_subreads.fasta
palindrome_writer.writeRecord(r.name, r.sequence)
#
# # split this read in the middle
# split_point = int(sdp.qstart +
# (sdp.alnqstart + sdp.alnqend)/2)
# # Write the first half
# rname_1 = "{movie}/{zmw}/{s}_{e}".format(
# movie=sdp.movie, zmw=sdp.zmw, s=sdp.qstart,
# e=split_point)
# writer.writeRecord(rname_1,
# r.sequence[0:(split_point-sdp.qstart)])
#
# # Write the second half
# rname_2 = "{movie}/{zmw}/{s}_{e}".format(
# movie=sdp.movie, zmw=sdp.zmw,
# s=(split_point+1), e=sdp.qend)
# writer.writeRecord(rname_2,
# r.sequence[(split_point-sdp.qstart):])
else:
writer.writeRecord(r.name, r.sequence)
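    # The filtered copy (palindrome reads removed) replaces the combined reads FASTA.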
logging.debug("Moving {i} to {o}.".format(i=self.tmp_all_reads_fasta,
o=self.all_reads_fasta))
shutil.move(self.tmp_all_reads_fasta, self.all_reads_fasta) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_palindrome():",
"def test_process_barcode_paired_end_data_orientation_reverse_in_read1(\r\n self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TTTTT']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'CGATCGA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a forward primer match in read 2, should reverse read order\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'GGTTCATC', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'CAA', '+', ')*+', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'GATCGATCGATCGATCG', '+',\r\n \"'()*+,-./01234567\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def palindrome(self):\n vas = []\n file = self.read1()\n print(file[0])\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n s_ii = s_i[::-1]\n if s_ii == s_i and s_i!= \"\":\n vas.append(s_i)\n self.print(vas)\n self.write(vas)\n logging.debug(\"Starting with to\")\n return vas",
"def test_palendrome_long_list_true():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('c')\n test_ll.push('d')\n test_ll.push('c')\n test_ll.push('b')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is True",
"def palindrome_reads_fasta(self):\n return op.join(self.out_dir, \"palindrome_subreads.fasta\")",
"def palCheck(input_string):\n\n # ADD NECESSARY LINES OF CODE SO THAT ALL UNITTESTS PASS\n\n d = Deque()\n for char in input_string:\n d.addFront(char)\n\n while d.size() > 1:\n firstChar = d.removeRear()\n lastChar = d.removeFront()\n if firstChar != lastChar:\n print(\"No, '\" + input_string + \"', is not a palindrom\")\n return False\n\n print(\"Yes, '\" + input_string + \"', is a palindrom!!\")\n return True",
"def search_palindromes(src_file, min_len):\n #Get digit source\n source = NumReader(src_file)\n #Old digits. Should always be length 100-200, unless there aren't enough digits.\n old_d = []\n #Current digit (possibly None)\n cur_d = source.read(1)[0]\n #Future digits. Should always be length 100-200, unless there aren't enough digits.\n next_d = source.read(100)\n #List of accumulated palindromes as strings\n pals = []\n\n #Keep running until out of digits\n while source.has_digits:\n #Look for palindrome centered at current digit\n branch_len = pal_length(old_d, next_d)\n cur_length = 1 + 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Look for \"even\" palindrome centered at current digit\n #Shift current digit into old buffer\n old_d.insert(0, cur_d)\n cur_d = None\n branch_len = pal_length(old_d, next_d)\n cur_length = 2 * branch_len\n #If long enough, add to list\n if cur_length >= min_len:\n p = pal_str(cur_d, old_d[:branch_len])\n pals.append((p, source.digits_read - len(next_d)))\n\n #Pull next digit\n cur_d = next_d.pop(0)\n\n #Maintain buffers\n if len(old_d) > 50:\n old_d = old_d[:50]\n if len(next_d) < 50:\n next_d += source.read(50)\n return pals",
"def checkPalindrome(self, s: str) -> str:\n # return s[:len(s) // 2] == s[(len(s) + 1) // 2::-1]\n return s == s[::-1]",
"def test_palendrome_returns_false():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('c')\n assert linked_palendrome(test_ll) is False",
"def test_palendrome_even_list_false():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('c')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is False",
"def test_process_barcode_paired_stitched_reverse_primer_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'AAAAAA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'GATCG']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n\r\n # With reverse primer match, should write in order of read2, read 1\r\n process_barcode_paired_stitched(fastq1_data,\r\n bcs_out, reads1_out, bc1_len=3, bc2_len=4,\r\n rev_comp_bc1=True, rev_comp_bc2=False,\r\n attempt_read_orientation=True,\r\n forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq_out_not_oriented=fastq1_out_not_oriented,\r\n switch_bc_order=False)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'TCGCGAT', '+', \"567'&%$\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGAT', '+',\r\n '43210/.-,+*)(', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def test_process_barcode_paired_end_data_orientation_forward_in_read2(\r\n self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TTCCA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a forward primer match in read 2, should reverse read order\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'GGTTCATC', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'CAA', '+', ')*+', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'GATCGATCGATCGATCG', '+',\r\n \"'()*+,-./01234567\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def part3():\n Input = raw_input('please enter a string:')\n for i in range(len(Input)):\n if Input[i] != Input[len(Input)-i-1]:\n print('It is not a palidrome')\n break\n else:\n i = i + 1",
"def test_pal_list_len_two_false():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n assert linked_palendrome(test_ll) is False",
"def test_palendrome_returns_true():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is True",
"def test_palendrome_even_list_true():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('b')\n test_ll.push('b')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is True",
"def test_check_seqs_reverse_primers(self):\r\n\r\n # Initial test, should truncate all seqs\r\n in_seqs = self.in_seqs_reverse_primers\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Second test with a mismatch in seq a, should not find reverse primer\r\n # and will write out entire sequence.\r\n\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # With reverse_primer_mismatches allowed set to 1,\r\n # should restore truncation.\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch_allowed\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n 
retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_only',\r\n rev_primers=rev_primers_test,\r\n qual_out=False,\r\n reverse_primer_mismatches=1)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Testing truncate_remove, which should not write sequences where\r\n # the reverse primer is not found\r\n in_seqs = self.in_seqs_reverse_primers\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_full_remove\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_remove',\r\n rev_primers=rev_primers_test,\r\n qual_out=False)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Testing truncate_remove, with reverse_primer_mismatches set to 1\r\n # should allow all 4 sequences to be written, truncated\r\n in_seqs = self.in_seqs_reverse_primers_mismatch\r\n bc_map = self.bc_map_fixed_len_bc1\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_in_seqs_reverse_primers_mismatch_allowed\r\n rev_primers_test = self.reverse_primers_fixed_len_bc1\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=1,\r\n disable_primer_check=False,\r\n reverse_primers='truncate_remove',\r\n rev_primers=rev_primers_test,\r\n qual_out=False,\r\n reverse_primer_mismatches=1)\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)",
"def no_abab():\n check50.run(\"python3 palindrome.py\"\n ).stdout(\"Word? \", regex=False\n ).stdin(\"abab\", prompt=False\n ).stdout(\"NO\", regex=False\n ).exit()",
"def palindromes():\n for n in count(1):\n if str(n) == str(n)[::-1]:\n yield n",
"def test_process_barcode_paired_end_data_orientation_rev_in_read2(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TTTTTT']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'TCCAA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a reverse primer in read 2, should write in current order.\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def check_palindrome_using_reverse(self):\n slow = self.head\n fast = self.head\n midnode = None\n prev_to_slow = None\n while fast and fast.next:\n prev_to_slow = slow\n slow = slow.next\n fast = fast.next.next\n if fast:\n midnode = slow\n slow = slow.next\n prev_to_slow.next = None\n second_half = slow\n second_half = LinkedListReverse.iterative_reverse(second_half)\n res = CheckPalindrome.compare_list(self.head, second_half)\n second_half = LinkedListReverse.iterative_reverse(second_half)\n if midnode:\n prev_to_slow.next = midnode\n midnode.next = second_half\n else:\n prev_to_slow.next = second_half\n return res",
"def test_process_barcode_paired_end_data_orientation_forward_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'GATCGA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With a match to the forward primer, should parse out primers in\r\n # the given order of read 1 and read 2.\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def isPalindrome(s):\r\n return isPal(toChars(s))",
"def test_pal_list_len_two_true():\n from kth_to_last import LinkedList\n from palendrome import linked_palendrome\n test_ll = LinkedList()\n test_ll.push('a')\n test_ll.push('a')\n assert linked_palendrome(test_ll) is True",
"def strip_adapters(self, reads):\n for a_set in self.adapter_tables:\n M = len(reads[0][1])\n N = min(M, len(a_set[0][0]))\n start = string_trimming.overlap(\n a_set[0][0], reads[0][1][:N][::-1], a_set[0][1])\n stop = M - string_trimming.overlap(\n a_set[1][0], reads[0][1][-N:], a_set[1][1])\n if stop - start < M:\n reads[0][1] = reads[0][1][start:stop]\n reads[0][3] = reads[0][3][start:stop]",
"def test_process_barcode_paired_end_data_orientation_no_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n fastq2_data = [\"HWI-ST830\", \"GGTTCCAA\", np.arange(3, 11, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n reads2_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'AYA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n fastq2_out_not_oriented = FakeOutFile()\r\n\r\n # With no matches, should write to the not_oriented files, and keep\r\n # in the same order of file 1 and file 2\r\n process_barcode_paired_end_data(fastq1_data, fastq2_data,\r\n bcs_out, reads1_out, reads2_out, bc1_len=5, bc2_len=3,\r\n rev_comp_bc1=False, rev_comp_bc2=False,\r\n attempt_read_orientation=True, forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq1_out_not_oriented=fastq1_out_not_oriented,\r\n fastq2_out_not_oriented=fastq2_out_not_oriented)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_reads = reads2_out.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'ATCGAGGT', '+', \"$%&'($%&\", '']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCGATCGATCGATCG', '+',\r\n ')*+,-./01234567', '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)\r\n\r\n actual_reads_not_oriented = fastq2_out_not_oriented.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'TCCAA', '+', \"'()*+\", '']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def is_palindrome2(some_string):\n return reverse_string(some_string) == some_string",
"def test_process_barcode_paired_stitched_forward_primer_match(self):\r\n\r\n fastq1_data = [\"HWI-ST830\", \"ATCGATCGATCGATCGATCG\",\r\n np.arange(3, 23, dtype=np.int8)]\r\n reads1_out = FakeOutFile()\r\n bcs_out = FakeOutFile()\r\n forward_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'GATCGA']))]\r\n reverse_primers = [compile(''.join([self.iupac[symbol] for\r\n symbol in 'ATA']))]\r\n output_bc_not_oriented = FakeOutFile()\r\n fastq1_out_not_oriented = FakeOutFile()\r\n\r\n # With forward primer match, should write in order of read 1, read 2\r\n process_barcode_paired_stitched(fastq1_data,\r\n bcs_out, reads1_out, bc1_len=3, bc2_len=4,\r\n rev_comp_bc1=True, rev_comp_bc2=True,\r\n attempt_read_orientation=True,\r\n forward_primers=forward_primers,\r\n reverse_primers=reverse_primers,\r\n output_bc_not_oriented=output_bc_not_oriented,\r\n fastq_out_not_oriented=fastq1_out_not_oriented,\r\n switch_bc_order=True)\r\n\r\n actual_bcs = bcs_out.data.split('\\n')\r\n expected_bcs = ['@HWI-ST830', 'CGATGAT', '+', '7654&%$', '']\r\n self.assertEqual(actual_bcs, expected_bcs)\r\n\r\n actual_reads = reads1_out.data.split('\\n')\r\n expected_reads = ['@HWI-ST830', 'GATCGATCGATCG', '+',\r\n \"'()*+,-./0123\", '']\r\n self.assertEqual(actual_reads, expected_reads)\r\n\r\n actual_bcs_not_oriented = output_bc_not_oriented.data.split('\\n')\r\n expected_bcs = ['']\r\n self.assertEqual(actual_bcs_not_oriented, expected_bcs)\r\n\r\n actual_reads_not_oriented = fastq1_out_not_oriented.data.split('\\n')\r\n expected_reads = ['']\r\n self.assertEqual(actual_reads_not_oriented, expected_reads)",
"def reverse_read(read):\n reversed_read = \"\"\n for i in range(len(read)-1, -1, -1):\n if read[i] == \"A\":\n reversed_read += \"T\"\n elif read[i] == \"T\":\n reversed_read += \"A\"\n elif read[i] == \"G\":\n reversed_read += \"C\"\n elif read[i] == \"C\":\n reversed_read += \"G\"\n else:\n raise ValueError(\"One of the read contains wrong characters.\")\n\n return reversed_read",
"def check_palindrome(s):\n return s[::-1] == s"
] | [
"0.6330099",
"0.5880869",
"0.58668864",
"0.58557117",
"0.5848267",
"0.5844541",
"0.5809427",
"0.57850903",
"0.57663035",
"0.57657886",
"0.57587874",
"0.57221115",
"0.5686201",
"0.5674231",
"0.56126064",
"0.5611727",
"0.5600394",
"0.5596763",
"0.55905193",
"0.5541581",
"0.55363095",
"0.55249715",
"0.54912955",
"0.5480581",
"0.54795325",
"0.5473565",
"0.54717267",
"0.5443273",
"0.54416686",
"0.54322314"
] | 0.6142979 | 1 |
Detach the hidden state, and optionally zero the hidden data | def detach_hidden(self, zero=False):
if zero:
self.hidden = self._make_hidden(self.batch_size)
else:
self.hidden[0].detach() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detach_hidden(self, zero=False):\n if zero:\n self.hidden = self._make_hidden(self.batch_size)\n else:\n self.hidden = self.hidden.detach()",
"def reset_hidden(hidden, mask):\n if len(mask) != 0:\n hidden[:, mask, :] = 0\n \n return hidden",
"def detach_hidden(hidden: Any) -> Any:\n return apply_to_tensor(hidden, Tensor.detach)",
"def reset_hidden(self, hidden, reset_flags):\n # detach it from history (pytorch mechanics)\n if self.rnn_type in ['lstm', 'mylstm']:\n h = Variable(hidden[0].data)\n c = Variable(hidden[1].data)\n hidden = (h, c)\n for b, flag in enumerate(reset_flags):\n if flag.data[0] == 1: # data[0] access the data in Variable\n hidden[0][:, b, :].data.fill_(0)\n hidden[1][:, b, :].data.fill_(0)\n elif self.rnn_type == 'gru':\n hidden = Variable(hidden.data)\n for b, flag in enumerate(reset_flags):\n if flag.data[0] == 1: # data[0] access the data in Variable\n hidden[:, b, :].data.fill_(0)\n else:\n print(\"Not support this type yet.\")\n exit(0)\n return hidden",
"def unHide(self):\n self.visible = True",
"def _repackage_hidden(h: nd.NDArray):\n return h.detach()",
"def hidden(self, hidden):\n\n self._hidden = hidden",
"def reset_hidden(self, batch_size):\n\n hidden = {}\n hidden[\"h\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n hidden[\"c\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n return hidden",
"def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')",
"def hide(self):\n self.visible = False",
"def ensure_hidden(self):\n self.set_visible(False)",
"def clearState(self):\n self.physicalState = (None for unused in self.indVars)",
"def detach_hidden(h):\n if isinstance(h, torch.Tensor):\n return h.detach()\n return tuple(detach_hidden(v) for v in h)",
"def reset(self):\n\n # the 'cached' data to be displayed by the hex view\n self.data = None\n self.mask = None\n self.data_size = 0\n self.delta = None\n\n self.address = 0\n self.fade_address = 0\n\n # pinned memory / breakpoint selections\n self._pinned_selections = []",
"def hidden():\n return False",
"def setIsolateHidden( self, state ):\n self._isolatedHidden = state\n \n super(XNode, self).setVisible(self.isVisible())",
"def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()",
"def destroy(self):\r\n self.visible = False",
"def hide(self):\n\n if not 'd-none' in str(self.class_):\n self.old_class = self.class_\n self.class_ = 'd-none'\n\n self.viz = False\n\n return self",
"def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()",
"def hidden(self):\n return self._hidden",
"def detach_states(self):\n self._dynamic_embeddings.detach_states()",
"def hide(self):\n self.set_visible(False)",
"def test_set_hidden(self):\n self.test_object.set_hidden(False)\n self.assertFalse(self.test_object.get_hidden())",
"def hide(self):\n self._dev.hide()",
"def reset(self, batch_size: Optional[int] = 1):\n self.hidden = self.get_hidden(batch_size)",
"def reset(self, batch_size: Optional[int] = 1):\n self.hidden = self.get_hidden(batch_size)",
"def do_hf_unhide(self, arg):\n self.show_hidden_frames = True\n self.refresh_stack()",
"def off(self):\n if self._state or (settings.log_state_of_switched_off_managers and self._state is None):\n if self._hidden:\n self.log_state_change('H')\n else:\n self.log_state_change('-')\n self._state = False",
"def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True"
] | [
"0.81687796",
"0.7248448",
"0.69499934",
"0.6717769",
"0.669204",
"0.6651705",
"0.6403011",
"0.6300455",
"0.6255132",
"0.62323743",
"0.62123096",
"0.61785233",
"0.6158845",
"0.61502695",
"0.6142751",
"0.61369663",
"0.6118904",
"0.6113743",
"0.60545844",
"0.6038087",
"0.598468",
"0.59729093",
"0.5944118",
"0.59400946",
"0.59110117",
"0.5910982",
"0.5910982",
"0.5899966",
"0.58576834",
"0.5796504"
] | 0.8106021 | 1 |
Detach the hidden state, and optionally zero the hidden data | def detach_hidden(self, zero=False):
if zero:
self.hidden = self._make_hidden(self.batch_size)
else:
self.hidden = self.hidden.detach() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def detach_hidden(self, zero=False):\n if zero:\n self.hidden = self._make_hidden(self.batch_size)\n else:\n self.hidden[0].detach()",
"def reset_hidden(hidden, mask):\n if len(mask) != 0:\n hidden[:, mask, :] = 0\n \n return hidden",
"def detach_hidden(hidden: Any) -> Any:\n return apply_to_tensor(hidden, Tensor.detach)",
"def reset_hidden(self, hidden, reset_flags):\n # detach it from history (pytorch mechanics)\n if self.rnn_type in ['lstm', 'mylstm']:\n h = Variable(hidden[0].data)\n c = Variable(hidden[1].data)\n hidden = (h, c)\n for b, flag in enumerate(reset_flags):\n if flag.data[0] == 1: # data[0] access the data in Variable\n hidden[0][:, b, :].data.fill_(0)\n hidden[1][:, b, :].data.fill_(0)\n elif self.rnn_type == 'gru':\n hidden = Variable(hidden.data)\n for b, flag in enumerate(reset_flags):\n if flag.data[0] == 1: # data[0] access the data in Variable\n hidden[:, b, :].data.fill_(0)\n else:\n print(\"Not support this type yet.\")\n exit(0)\n return hidden",
"def unHide(self):\n self.visible = True",
"def _repackage_hidden(h: nd.NDArray):\n return h.detach()",
"def hidden(self, hidden):\n\n self._hidden = hidden",
"def reset_hidden(self, batch_size):\n\n hidden = {}\n hidden[\"h\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n hidden[\"c\"] = torch.Tensor(np.zeros((batch_size, self._hidden_size))).to(self._device)\n return hidden",
"def toggle_hidden(self):\n self.show_hidden = not self.show_hidden\n self.reload('.')",
"def hide(self):\n self.visible = False",
"def ensure_hidden(self):\n self.set_visible(False)",
"def clearState(self):\n self.physicalState = (None for unused in self.indVars)",
"def detach_hidden(h):\n if isinstance(h, torch.Tensor):\n return h.detach()\n return tuple(detach_hidden(v) for v in h)",
"def reset(self):\n\n # the 'cached' data to be displayed by the hex view\n self.data = None\n self.mask = None\n self.data_size = 0\n self.delta = None\n\n self.address = 0\n self.fade_address = 0\n\n # pinned memory / breakpoint selections\n self._pinned_selections = []",
"def hidden():\n return False",
"def setIsolateHidden( self, state ):\n self._isolatedHidden = state\n \n super(XNode, self).setVisible(self.isVisible())",
"def toggle_hidden(self):\n if self.hidden:\n self.show()\n else:\n self.hide()",
"def destroy(self):\r\n self.visible = False",
"def hide(self):\n\n if not 'd-none' in str(self.class_):\n self.old_class = self.class_\n self.class_ = 'd-none'\n\n self.viz = False\n\n return self",
"def toggle_hidden(self):\n AbstractChild.toggle_hidden(self)\n self.accFrame.update_values()\n self.botFrame.update_values()\n # On toggle hidden\n self.on_toggle_hidden()",
"def hidden(self):\n return self._hidden",
"def detach_states(self):\n self._dynamic_embeddings.detach_states()",
"def hide(self):\n self.set_visible(False)",
"def test_set_hidden(self):\n self.test_object.set_hidden(False)\n self.assertFalse(self.test_object.get_hidden())",
"def hide(self):\n self._dev.hide()",
"def reset(self, batch_size: Optional[int] = 1):\n self.hidden = self.get_hidden(batch_size)",
"def reset(self, batch_size: Optional[int] = 1):\n self.hidden = self.get_hidden(batch_size)",
"def do_hf_unhide(self, arg):\n self.show_hidden_frames = True\n self.refresh_stack()",
"def off(self):\n if self._state or (settings.log_state_of_switched_off_managers and self._state is None):\n if self._hidden:\n self.log_state_change('H')\n else:\n self.log_state_change('-')\n self._state = False",
"def clear(self):\n self.np.fill(OFF)\n self.np.show()\n return True"
] | [
"0.8106021",
"0.7248448",
"0.69499934",
"0.6717769",
"0.669204",
"0.6651705",
"0.6403011",
"0.6300455",
"0.6255132",
"0.62323743",
"0.62123096",
"0.61785233",
"0.6158845",
"0.61502695",
"0.6142751",
"0.61369663",
"0.6118904",
"0.6113743",
"0.60545844",
"0.6038087",
"0.598468",
"0.59729093",
"0.5944118",
"0.59400946",
"0.59110117",
"0.5910982",
"0.5910982",
"0.5899966",
"0.58576834",
"0.5796504"
] | 0.81687796 | 0 |
No. 1 tests collection for MedicinalProductInteraction. | def test_medicinalproductinteraction_1(base_settings):
filename = (
base_settings["unittest_data_dir"] / "medicinalproductinteraction-example.json"
)
inst = medicinalproductinteraction.MedicinalProductInteraction.parse_file(
filename, content_type="application/json", encoding="utf-8"
)
assert "MedicinalProductInteraction" == inst.resource_type
impl_medicinalproductinteraction_1(inst)
# testing reverse by generating data from itself and create again.
data = inst.dict()
assert "MedicinalProductInteraction" == data["resourceType"]
inst2 = medicinalproductinteraction.MedicinalProductInteraction(**data)
impl_medicinalproductinteraction_1(inst2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_visualize_equipment(self):\n pass",
"def test_intercommunalitys_get(self):\n pass",
"def test_intent_classifier_get_details_all(self):\n pass",
"def test_get_scenarios(self):\n pass",
"def test_intent_classifier_get_details(self):\n pass",
"def test_intent_classifier_vaporise(self):\n pass",
"def test_metrostations_get(self):\n pass",
"def test_intent_classifier_add_testing_samples(self):\n pass",
"def test_intent_classifier_get_testing_samples(self):\n pass",
"def test_summarize_recipe(self):\n pass",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_indicate(self):\n self.objective.Indicate()",
"def test_composition(self):",
"def test_intent_classifier_test(self):\n pass",
"def testBeliefs1sk(self):",
"def test_get_recipe_information(self):\n pass",
"def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")",
"def test_intent_classifier_curate(self):\n pass",
"def test_visualize_recipe_equipment_by_id(self):\n pass",
"def test_visualize_recipe_nutrition(self):\n pass",
"def test_actor_matches_activity(self):",
"def test_multi(self):\n self.assertEqual(6, foo.multi(2, 3))",
"def test(self):\n raise NotImplementedError",
"def test_creature(self):\n self.assertEqual(len(self.processor), 3)",
"def test_multi(self):\n self.assertEqual(6, multi(2, 3))",
"def test():\n\t\treturn [\"vice.multizone\",\n\t\t\t[\n\t\t\t\ttest_from_output(),\n\t\t\t\tmig_matrix_row.test(run = False),\n\t\t\t\tmig_matrix.test(run = False),\n\t\t\t\tmig_specs.test(run = False),\n\t\t\t\tzone_array.test(run = False),\n\t\t\t\t_multizone.test(run = False),\n\t\t\t\tsrc_test(run = False)\n\t\t\t]\n\t\t]",
"def test_get_collection(self):\n pass",
"def test_intent_classifier_create(self):\n pass",
"def _test(self):",
"def _test(self):"
] | [
"0.62932193",
"0.60196525",
"0.6018355",
"0.5950828",
"0.5924243",
"0.59123635",
"0.5907507",
"0.5899704",
"0.58986557",
"0.58589035",
"0.5851156",
"0.5851156",
"0.58460665",
"0.5829347",
"0.58013",
"0.57753193",
"0.5757767",
"0.5756155",
"0.5752222",
"0.57447743",
"0.57250094",
"0.5714998",
"0.57046133",
"0.56996673",
"0.56664824",
"0.56657875",
"0.5662509",
"0.5642316",
"0.5615535",
"0.5615535"
] | 0.7485463 | 0 |
Main inference function. Given data loaders, output the main attributes from the model. | def inference(self):
for partition, loader in self.loaders.items():
avg_loss, (y, y_hat), post, attentions, tags = self.eval_loader(
loader)
self.preds[partition] = {
'tag': tags,
'y': y,
'y_hat': y_hat,
# 'posteriors': post,
# 'attentions': attentions
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inference(model, data, diagnostics, seed, extra_fitting_args):\n pass",
"def inference(self, dataset, model_dir):\n raise NotImplementedError",
"def inference(self, data_loader):\n\n # Handling inference for Multiclass classification\n outputs_list = []\n with torch.no_grad():\n for bi, d in enumerate(data_loader):\n ids = d['ids']\n mask = d['mask']\n\n # send them to the cpu\n ids = ids.to(self.device, dtype=torch.long)\n mask = mask.to(self.device, dtype=torch.long)\n\n outputs = self.model(\n input_ids=ids,\n attention_mask=mask\n )\n outputs_list.extend(torch.softmax(outputs, dim=1).cpu().detach().numpy().tolist())\n\n print(\"This the output from the Multiclass classification model\", outputs_list[0])\n\n inferences = np.argmax(outputs_list, axis=1)\n inferences = [self.mapping[str(pred)] for pred in inferences]\n\n return inferences",
"def main(_):\n if not FLAGS.model_output_dir:\n raise ValueError(\n \"Undefined model output directory. Perhaps you forgot to set the --model_output_dir flag?\")\n \n if FLAGS.predict_input_file:\n decode()\n else:\n train()",
"def inference(self, inputs):\n # NOTE: This makes the assumption that your model expects text to be tokenized\n # with \"input_ids\" and \"token_type_ids\" - which is true for some popular transformer models, e.g. bert.\n # If your transformer model expects different tokenization, adapt this code to suit\n # its expected input format.\n input_ids = inputs[\"input_ids\"]\n input_ids = input_ids.to(self.device)\n\n coarse_result = self.model.generate(input_ids = input_ids, )\n coarse_result = coarse_result.to(\"cpu\")\n fined_result = self.tokenizer.decode(coarse_result[0].tolist()[inputs[\"original_length\"]+1:],\n skip_special_tokens = True)\n #logger.info(\"Model predicted: '%s'\", fined_result)\n\n return [fined_result]",
"def infer(trainer, data_dir, patch_size, output_dir=None, device='cpu'):\n\n if output_dir is not None and not osp.exists(output_dir):\n os.mkdir(output_dir)\n\n data_dir = Path(data_dir).expanduser()\n img_paths = list((data_dir / 'images').iterdir())\n\n print(f'Predicting {len(img_paths)} images from {data_dir} ...')\n predictions = [\n predict(trainer, img_path, patch_size, device=device)\n for img_path in tqdm(img_paths)\n ]\n\n if output_dir is not None:\n save_predictions(predictions, img_paths, output_dir)\n\n return predictions",
"def inference_context(model):\n training_mode = model.training\n model.eval()\n yield\n model.train(training_mode)",
"def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)",
"def run_infer(infer_model, model_dir, infer_sess):\n with infer_model.graph.as_default():\n loaded_infer_model, global_step = model_helper.create_or_load_model(\n model_dir, infer_model.model, infer_sess)\n \n output_tuple = loaded_infer_model.infer(infer_sess)\n return output_tuple",
"def infer(self, request, datastore=None):\n model = request.get(\"model\")\n if not model:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n \"Model is not provided for Inference Task\",\n )\n\n task = self._infers.get(model)\n if not task:\n raise MONAILabelException(\n MONAILabelError.INVALID_INPUT,\n f\"Inference Task is not Initialized. There is no model '{model}' available\",\n )\n\n request = copy.deepcopy(request)\n request[\"description\"] = task.description\n\n image_id = request[\"image\"]\n if isinstance(image_id, str):\n datastore = datastore if datastore else self.datastore()\n if os.path.exists(image_id):\n request[\"save_label\"] = False\n else:\n request[\"image\"] = datastore.get_image_uri(request[\"image\"])\n\n if os.path.isdir(request[\"image\"]):\n logger.info(\"Input is a Directory; Consider it as DICOM\")\n\n logger.debug(f\"Image => {request['image']}\")\n else:\n request[\"save_label\"] = False\n\n if self._infers_threadpool:\n\n def run_infer_in_thread(t, r):\n handle_torch_linalg_multithread(r)\n return t(r)\n\n f = self._infers_threadpool.submit(run_infer_in_thread, t=task, r=request)\n result_file_name, result_json = f.result(request.get(\"timeout\", settings.MONAI_LABEL_INFER_TIMEOUT))\n else:\n result_file_name, result_json = task(request)\n\n label_id = None\n if result_file_name and os.path.exists(result_file_name):\n tag = request.get(\"label_tag\", DefaultLabelTag.ORIGINAL)\n save_label = request.get(\"save_label\", False)\n if save_label:\n label_id = datastore.save_label(\n image_id, result_file_name, tag, {\"model\": model, \"params\": result_json}\n )\n else:\n label_id = result_file_name\n\n return {\"label\": label_id, \"tag\": DefaultLabelTag.ORIGINAL, \"file\": result_file_name, \"params\": result_json}",
"def inference(self, x, data):\n\n ## Global features concatenated\n if self.u_dim > 0:\n u = data.u.view(-1, self.u_dim)\n x = torch.cat((x, u), 1)\n\n ## Final MLP map\n \n # Edge level inference\n if 'edge' in self.task:\n x = self.forward_2pt(x, data.edge_index)\n\n # Node or graph level inference\n else:\n x = self.mlp_final(x)\n\n return x",
"def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n source_vocab_filepath = os.path.join(args.model, 'source.vocab')\n source_vocab = Vocab(vocab_filepath=source_vocab_filepath)\n target_vocab_filepath = os.path.join(args.model, 'target.vocab')\n target_vocab = Vocab(vocab_filepath=target_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['with_attention']:\n decoder = Attention(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n indexes = putils.index_dataset(\n args.data, source_vocab.item2idx, target_vocab.item2idx,\n dataset_params['is_character_based'], dataset_params['max_seq_len'],\n dataset_params['is_reversed'])\n if args.random > 0:\n random.shuffle(indexes)\n for seq_num in range(args.random):\n seq = indexes[seq_num]\n print('-'*80)\n print('>', ' '.join([source_vocab.idx2item[idx]\n for idx in seq[0]]))\n print('=', ' '.join([target_vocab.idx2item[idx]\n for idx in seq[1]]))\n # TODO: add support for OOV\n predicted_idx, _ = _decode(seq[0], encoder, decoder,\n checkpoint['with_attention'],\n dataset_params['max_seq_len'])\n print('<', ' '.join([target_vocab.idx2item[idx]\n for idx in predicted_idx]))\n else:\n _evaluate(indexes, encoder, 
decoder, target_vocab, checkpoint,\n dataset_params)",
"def eval_loader(self, loader):\r\n\t\treturn predict(model=self.model,\r\n\t\t\tpipeline=self.pipeline,\r\n\t\t\tdataloader=loader,\r\n\t\t\ttask=self.task,\r\n\t\t\tmode=\"eval\")",
"def inference(self):\n raise NotImplementedError",
"def run_inference(dataset, model, executor_):\n for batch in dataset:\n results = model.inference(batch)\n for stats in model.worker_pool.imap(get_stats_from_code, zip(results, batch, [executor_]*len(batch))):\n if stats is not None:\n yield stats\n return",
"def inference():\n\n sents = request.get_json(force=True)['sents']\n\n vecs = tokenize_inputs(sents)\n results = model(vecs)\n\n result = dict()\n result['pred'] = [str(sample.numpy()[0]) for sample in results]\n \n response = flask.Response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n\n print(result)\n\n return result",
"def do_inference(self, output_file = None):\n return",
"def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' % (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)",
"def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()",
"def inference(self, inputs):\n\n input_ids = torch.tensor([inputs[\"head_ids\"]], dtype=torch.long).to(self.device)\n attention_masks = torch.tensor([inputs[\"attention_masks\"]], dtype=torch.bool).to(self.device)\n \n # Handling inference for sequence_classification.\n with torch.no_grad():\n output = self.model(input_ids, attention_masks)\n predict_label = output[0].argmax(dim=2)\n predict_string = self.tokenizer.decode_sent(input_ids[0].detach().cpu().numpy(), predict_label[0].detach().cpu().numpy())\n\n logger.info(\"Model predicted: '%s'\", predict_string)\n return [{'predict': predict_string}]",
"def inference_preprocess(self):\n return",
"def inference():\n if request.method == \"POST\":\n data = request.json #\n src_img = np.array(data[\"src\"]).astype(np.uint8) # Parsing data\n ref_img = np.array(data[\"ref\"]).astype(np.uint8) #\n ref_label = int(data[\"ref_label\"]) #\n result = get_inference(src_img, ref_img, ref_label) # Calling helper function\n return jsonify({\"result\": result.tolist()}) # Returning results into json",
"def main():\n flags = PARSER.parse_args()\n\n if flags.to == 'savedmodel':\n to_savedmodel(input_shape=flags.input_shape,\n model_fn=unet_fn,\n src_dir=flags.checkpoint_dir,\n dst_dir='./saved_model',\n input_names=['IteratorGetNext'],\n output_names=['total_loss_ref'],\n use_amp=flags.use_amp,\n use_xla=flags.use_xla,\n compress=flags.compress)\n if flags.to == 'tensorrt':\n ds = Dataset(data_dir=flags.data_dir,\n batch_size=1,\n augment=False,\n gpu_id=0,\n num_gpus=1,\n seed=42)\n iterator = ds.test_fn(count=1).make_one_shot_iterator()\n features = iterator.get_next()\n\n sess = tf.Session()\n\n def input_data():\n return {'input_tensor:0': sess.run(features)}\n\n to_tensorrt(src_dir=flags.savedmodel_dir,\n dst_dir='./tf_trt_model',\n precision=flags.precision,\n feed_dict_fn=input_data,\n num_runs=1,\n output_tensor_names=['Softmax:0'],\n compress=flags.compress)\n if flags.to == 'onnx':\n to_onnx(src_dir=flags.savedmodel_dir,\n dst_dir='./onnx_model',\n compress=flags.compress)",
"def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n left_vocab_filepath = os.path.join(args.model, 'left.vocab')\n left_vocab = Vocab(vocab_filepath=left_vocab_filepath)\n right_vocab_filepath = os.path.join(args.model, 'right.vocab')\n right_vocab = Vocab(vocab_filepath=right_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n if checkpoint['encoder']['model_type'] == 'transformer':\n encoder = TEncoder(input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n dropout=checkpoint['encoder']['dropout'],\n num_attention_heads=checkpoint['encoder']['num_attention_heads'])\n else:\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['decoder']['model_type'] == 'transformer':\n decoder = TDecoder(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n dropout=checkpoint['decoder']['dropout'],\n num_attention_heads=checkpoint['decoder']['num_attention_heads'])\n elif checkpoint['decoder']['with_attention']:\n decoder = Attention(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n pairs = putils.convert_to_seq_pairs(args.data)\n indexed_pairs = putils.index_pairs(pairs, left_vocab.char2idx,\n right_vocab.char2idx)\n if dataset_params['reverse']:\n indexed_pairs = [(y, x) for x, y in 
indexed_pairs]\n source_vocab = right_vocab\n target_vocab = left_vocab\n else:\n source_vocab = left_vocab\n target_vocab = right_vocab\n if args.random > 0:\n random.shuffle(indexed_pairs)\n for seq_num in range(args.random):\n seq = indexed_pairs[seq_num]\n print('-'*80)\n input_str = ' '.join(\n ''.join([source_vocab.idx2char[idx] for idx in seq[0] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n gold_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in seq[1] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n predicted_idxx = decode(seq[0], args.itemize, encoder, decoder,\n dataset_params['max_seq_len'])\n pred_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in predicted_idxx\n if idx not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n print('>', input_str)\n print('=', gold_str)\n print('<', pred_str)\n else:\n _evaluate(indexed_pairs, args.itemize, encoder, decoder,\n target_vocab.idx2char, dataset_params['max_seq_len'])",
"def evaluate(self, data_loader, extra_output=None):\n raise NotImplementedError",
"def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = 
output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list",
"def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))",
"def main():\n os.makedirs(PATH)\n fetch_data()\n convert_to_json(model_list, 'models.json', is_model=True)\n convert_to_json(backend_list, 'backends.json')\n convert_to_json(type_list, 'types.json')\n convert_to_json(featurizer_list, 'featurizers.json')",
"def main():\n args, config = parse_args()\n\n \"\"\"\n Log on wandb for track of experiments\n \"\"\"\n wandb.init(project=\"adaptive-finetuning-resnet\", name=f'Inference_{config.VERSION}', config=config)\n\n \"\"\"\n Set config GPUs and torch cuda device\n \"\"\"\n config.GPUS = str(0)\n torch.cuda.set_device(0)\n\n \"\"\"\n Create the model, put it to GPU and then create dataloader\n \"\"\"\n model = eval(config.MODULE)(config=config.NETWORK)\n model = model.cuda()\n\n val_loader = make_dataloader(config, mode='val', distributed=False)\n\n \"\"\"\n Load the model with pretrained weights\n \"\"\"\n assert config.NETWORK.PRETRAINED_MODEL != '', \"For inference, there must be pre-trained weights\"\n\n pretrain_state_dict = torch.load(config.NETWORK.PRETRAINED_MODEL, map_location = lambda storage, loc: storage)['net_state_dict']\n smart_model_load(model, pretrain_state_dict, loading_method=config.NETWORK.PRETRAINED_LOADING_METHOD)\n\n \"\"\"\n Pass the model and val loader for validation\n \"\"\"\n print(\"Inference started!!\")\n val_accuracy = do_validation(config, model, val_loader)\n print(f\"Inference complete!!\\nAccuracy:{val_accuracy}\")\n\n wandb.log({'Accuracy': val_accuracy})",
"def main(args):\r\n\r\n # Logging info\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s - '\r\n '%(funcName)s: %(message)s',\r\n '%H:%M:%S')\r\n logger = logging.getLogger(__name__)\r\n logger.setLevel('INFO')\r\n stream = logging.StreamHandler()\r\n stream.setLevel('INFO')\r\n stream.setFormatter(formatter)\r\n logger.addHandler(stream)\r\n\r\n set_seed(args.seed)\r\n device = torch.device(\r\n 'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')\r\n model_name = f'{args.name}_lr{args.lr}_z{args.latent_dim}' \\\r\n + f'_h{args.hidden_dim}_p{args.p_dropout}'\r\n model_dir = os.path.join(args.results, model_name)\r\n logger.info(f'Directory for saving and loading models: {model_dir}')\r\n\r\n if not args.eval:\r\n # Model directory\r\n new_model_dir(model_dir, logger=logger)\r\n\r\n # Dataloaders\r\n train_loader, valid_loader = get_dataloaders(\r\n args.data, args.t_hours, args.n_bins,\r\n validation=True, dynamic=args.dynamic,\r\n batch_size=args.bs, logger=logger)\r\n logger.info(\r\n f'Train {args.model_type}-{args.t_hours} ' +\r\n f'with {len(train_loader.dataset)} samples')\r\n\r\n # Load model\r\n n_tokens = len(np.load(\r\n os.path.join(\r\n args.data, '_dicts', f'{args.t_hours}_{args.n_bins}.npy'),\r\n allow_pickle=True).item())\r\n model = init_model(\r\n args.model_type, n_tokens, args.latent_dim, args.hidden_dim,\r\n p_dropout=args.p_dropout, dt=args.dt,\r\n weighted=args.weighted, dynamic=args.dynamic)\r\n logger.info(f'#params in model: {get_n_param(model)}')\r\n\r\n # Optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\r\n loss_f = BCE()\r\n model = model.to(device)\r\n\r\n # Training\r\n trainer = Trainer(\r\n model, loss_f, optimizer,\r\n device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)\r\n trainer.train(\r\n train_loader, valid_loader,\r\n epochs=args.epochs, early_stopping=args.early_stopping)\r\n\r\n # Save model\r\n metadata = vars(args)\r\n metadata['n_tokens'] = n_tokens\r\n save_model(trainer.model, model_dir, metadata=metadata)\r\n\r\n if args.test:\r\n # Load model\r\n model = load_model(model_dir, is_gpu=args.cuda)\r\n metadata = load_metadata(model_dir)\r\n\r\n # Dataloader\r\n test_loader, _ = get_dataloaders(\r\n metadata['data'], metadata['t_hours'], metadata['n_bins'],\r\n validation=False, dynamic=metadata['dynamic'], batch_size=128,\r\n shuffle=False, logger=logger)\r\n\r\n # Evaluate\r\n loss_f = BCE()\r\n evaluator = Trainer(\r\n model, loss_f,\r\n device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)\r\n evaluator._valid_epoch(test_loader)"
] | [
"0.7079301",
"0.69063145",
"0.68889844",
"0.6414583",
"0.63948715",
"0.6255342",
"0.62492347",
"0.6212708",
"0.61944205",
"0.617807",
"0.6174323",
"0.6170213",
"0.6147475",
"0.6098715",
"0.609512",
"0.60736245",
"0.6059464",
"0.6036302",
"0.6023182",
"0.59940803",
"0.5947019",
"0.58842665",
"0.5878138",
"0.58762187",
"0.5847876",
"0.5842864",
"0.5830676",
"0.580859",
"0.5806355",
"0.577346"
] | 0.6913925 | 1 |
Returns the global maximum value and position. | def globalMaximum(self):
# The global maximum is at one peak's position
potential_max = list()
for func, pos, height, width in zip(self.peaks_function,
self.peaks_position,
self.peaks_height,
self.peaks_width):
potential_max.append((func(pos, pos, height, width), pos))
return max(potential_max) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_max_position(self):\n raise NotImplementedError()",
"def max_position(self):\n raise NotImplementedError",
"def max_positions(self):\n return self.args.max_positions",
"def get_max(self):\n return self.max[-1]",
"def max(self):\n assert self.__stack\n return self.__max_values[-1]",
"def get_max(self):\n\t\tif self.right:\n\t\t\treturn self.right.get_max()\n\t\treturn self.value",
"def _get_maximum(self):\n return self._maximum",
"def get_max_value(self):\n max_value = max(self.values)\n return max_value",
"def __get_max_pos(self):\n query = text('select max(song_position) as \"max_position\" from setlist where show_id = :id')\n query = query.bindparams(id=self.id)\n\n result = db.engine.execute(query)\n\n for row in result:\n max_position = row['max_position']\n if max_position is None:\n max_position = 0\n\n result.close()\n\n return max_position",
"def find_max(self):\n if self.right:\n return self.right.find_max()\n return self.data",
"def max_positions(self):\n return None",
"def find_max(self):\n\n if self.right:\n return self.right.find_max()\n\n return self.data",
"def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max",
"def max_positions(self):\n return self.student.max_positions() # also needed in validation runs.",
"def max_point(self):\n x = self.max(0).idxmax()\n y = self.loc[:, x].idxmax()\n return x, y",
"def _getMaxPosInDb(self):\n sql = (\"SELECT MAX(position) max_pos FROM einzelteile\"\n \" WHERE baugruppe = '%s' AND b_index = '%s'\"\n % (self._assembly.teilenummer, self._assembly.t_index))\n rset = sqlapi.RecordSet2(sql=sql)\n if len(rset) > 0 and rset[0][\"max_pos\"]:\n return int(rset[0][\"max_pos\"])\n else:\n return 0",
"def find_max(self):\n\n max_x = -10\n max_y = -10\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x > max_x:\n max_x = x\n if y > max_y:\n max_y = y\n return max_x, max_y",
"def x_max(self):\n return self.get_max_value(self.X_INDEX)",
"def return_the_maximum(self):\n\n return self.__max_stack[-1]",
"def maxim(self) -> (int, float('inf')):\n\t\treturn 2",
"def get_best_position(self):\n # Todo: implement\n best_value_global = -inf\n position = None\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n position = particle.best_position\n best_value_global = particle.best_value\n return position",
"def getImageMax(self):\n fname = '%s::%s'%(self.__class__.__name__, self.getImageMax.__name__)\n if (not self.lhaveImage):\n print(\"%s: DSM image not yet computed\"%fname)\n return None, None\n maxIndex = c_int(1)\n maxValue = c_float(1)\n ierr = c_int(1)\n self.lib.xcloc_getImageMax(maxIndex, maxValue, ierr)\n if (ierr.value != 0):\n print(\"%s: Failed to get max value and index of DSM image\"%fname)\n return None, None\n imax = maxIndex.value - 1 # Fortran to C\n vmax = maxValue.value\n return imax, vmax",
"def max(self):\n return self._max_coords",
"def _max_in_bounds(self, max):\n if max >= self.valmax:\n if not self.closedmax:\n return self.val[1]\n max = self.valmax\n\n if max <= self.val[0]:\n max = self.val[0]\n return self._stepped_value(max)",
"def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line",
"def native_max_value(self) -> float:\n return self._device.max_offset",
"def get_max(self):\n # 0(1)\n return self.max_stack.peek()\n\n # Don't need find_max we returned max_stack.peek()",
"def get_max(self):\n return self._max",
"def maxx(self):\n return self.__maxx",
"def max_position_limit(self):\n return self._read(MX_MAX_POSITION_LIMIT)"
] | [
"0.75708455",
"0.7472142",
"0.7386652",
"0.73621035",
"0.735912",
"0.7274368",
"0.72637445",
"0.7262161",
"0.7253182",
"0.72366637",
"0.72155946",
"0.71909815",
"0.7190395",
"0.71811473",
"0.71685374",
"0.7157625",
"0.71497184",
"0.7140186",
"0.7138031",
"0.7129402",
"0.7128304",
"0.71165997",
"0.7108778",
"0.7092591",
"0.7084194",
"0.7064608",
"0.7057191",
"0.70302945",
"0.70043707",
"0.69944465"
] | 0.81460285 | 0 |
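The globalMaximum document in the row above evaluates every peak at its own centre and keeps the best (value, position) pair. A minimal standalone sketch of the same idea, assuming a cone-shaped peak function and plain lists in place of the benchmark object's attributes (cone and global_maximum are illustrative names, not part of the dataset):

import math

def cone(individual, position, height, width):
    # Cone-shaped peak: height minus width times the distance to the peak centre.
    dist = math.sqrt(sum((i - p) ** 2 for i, p in zip(individual, position)))
    return height - width * dist

def global_maximum(peaks_function, peaks_position, peaks_height, peaks_width):
    # Evaluate each peak at its own centre and keep the best (value, position) pair.
    potential_max = []
    for func, pos, height, width in zip(peaks_function, peaks_position,
                                        peaks_height, peaks_width):
        potential_max.append((func(pos, pos, height, width), pos))
    return max(potential_max)

if __name__ == "__main__":
    funcs = [cone, cone]
    positions = [[0.0, 0.0], [5.0, 5.0]]
    heights = [50.0, 70.0]
    widths = [0.1, 0.2]
    print(global_maximum(funcs, positions, heights, widths))  # (70.0, [5.0, 5.0])

Because the pairs are ordinary tuples, max() compares the fitness values first, so the position only rides along as a payload.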
Returns the value and position of all visible maximums, sorted with the global maximum first. | def maximums(self):
# The maximums are at the peaks position but might be swallowed by
# other peaks
maximums = list()
for func, pos, height, width in zip(self.peaks_function,
self.peaks_position,
self.peaks_height,
self.peaks_width):
val = func(pos, pos, height, width)
if val >= self.__call__(pos, count=False)[0]:
maximums.append((val, pos))
return sorted(maximums, reverse=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def globalMaximum(self):\n # The global maximum is at one peak's position\n potential_max = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n potential_max.append((func(pos, pos, height, width), pos))\n return max(potential_max)",
"def _generate_value_maxes(self):\n\n value_maxes = {name: 0 for name in self.group_names}\n\n data_to_ax = self.get_transform_func('data_to_ax')\n\n if self.plot == 'violinplot':\n value_maxes = self._get_value_maxes_violin(value_maxes, data_to_ax)\n\n else:\n for child in self.ax.get_children():\n\n group_name, value_pos = self._get_value_pos(child, data_to_ax)\n\n if (value_pos is not None\n and value_pos > value_maxes[group_name]):\n value_maxes[group_name] = value_pos\n\n return value_maxes",
"def get_gridpoint_max(self):\n ind_array = np.indices(self.results_array.shape)\n maxes = []\n\n def get_max(x, y, z):\n \"\"\"\n Would be funnier if I knew a Max.\n \"\"\"\n if isinstance(self.results_array[x][y][z], tuple):\n num_zeros = self.tup_max_length - len(self.results_array[x][y][z])\n if num_zeros != 0:\n print('Number of zeros: ', num_zeros)\n hist_arr = np.array(self.results_array[x][y][z])\n maxes.append(max(hist_arr))\n\n vget_max = np.vectorize(get_max, otypes=[list])\n vget_max(ind_array[0], ind_array[1], ind_array[2])\n return maxes",
"def find_max(self):\n\n max_x = -10\n max_y = -10\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x > max_x:\n max_x = x\n if y > max_y:\n max_y = y\n return max_x, max_y",
"def maxs(self):\n return self._maxs",
"def max_positions(self):\n return (self.encoder.max_positions(), self.decoder.max_positions())",
"def max_positions(self):\n return self.encoder.max_positions()",
"def absmax(self, location = False):\n if not location:\n if self.is_empty():\n return 0\n else:\n return max(tuple(abs(i).max() for i in self.__m__.values()))\n\n mx = None\n block = None\n index = None\n for k,v in self.__m__.items():\n if mx is None:\n block = k\n index = numpy.unravel_index(numpy.argmax(abs(v)),v.shape)\n mx = abs(v[index])\n else:\n i = numpy.unravel_index(numpy.argmax(abs(v)),v.shape)\n if abs(v[i]) > mx:\n block = k\n index = i\n mx = abs(v[index])\n\n return mx, block, index",
"def max_positions(self):\n return self.args.max_positions",
"def get_local_maxes(self, use_full=False, strict=False, x_y=None):\n if x_y is None:\n if use_full:\n x, y = self.x_full, self.y_full\n y_offset = 0\n else:\n x, y = self.x, self.y\n y_offset = self.y_offset\n else:\n x, y = x_y\n y_offset = 0\n\n if strict:\n # take only those greater than both adjacent\n maxes = sps.argrelextrema(y, np.greater)[0]\n else:\n # take all greater/equal to both sides\n maxes = sps.argrelextrema(y, np.greater_equal)[0]\n\n # check that max_y values > 0\n maxes = maxes[y[maxes] > 0]\n\n # filter capped values on both sides\n maxes = maxes[y[maxes] != 5 - y_offset]\n\n max_x = x[maxes]\n max_y = y[maxes]\n\n return max_x, max_y",
"def max_positions(self):\n return self.student.max_positions() # also needed in validation runs.",
"def max_positions(self):\n return None",
"def get_max_and_min(self):\n max_x = float('-inf')\n min_x = float('inf')\n max_y = float('-inf')\n min_y = float('inf')\n max_z = float('-inf')\n min_z = float('inf')\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n counter = 0\n for src, node in self._graph.get_all_v().items():\n if node.location is not None:\n x = node.location.x\n y = node.location.y\n z = node.location.z\n counter += 1\n max_x = x if x > max_x else max_x\n min_x = x if x < min_x else min_x\n max_y = y if y > max_y else max_y\n min_y = y if y < min_y else min_y\n max_z = z if z > max_z else max_z\n min_z = z if z < min_z else min_z\n if counter > 4:\n ans = max_x, max_y, max_z, min_x, min_y, min_z\n return ans",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions())",
"def findMax(img):\n\td = minMaxLoc(img)\n\treturn {\"maxVal\":d[\"maxVal\"], \"maxLoc\":d[\"maxLoc\"]}",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions, self.embed_positions.max_positions())",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_target_positions\n return min(self.max_target_positions, self.embed_positions.max_positions)",
"def max_positions(self):\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)",
"def max_positions(self):\n if self.embed_positions is None:\n return self.max_source_positions\n return min(self.max_source_positions, self.embed_positions.max_positions)",
"def argmaxY( self ):\n max = -1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] > max: max = p[1]\n return max",
"def max_positions(self):\n return self.decoder.max_positions()",
"def max_position(self):\n raise NotImplementedError",
"def get_best_value(self):\n # Todo: implement\n best_value_global = -inf\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n best_value_global = particle.best_value\n return best_value_global # Remove this line",
"def find_max_coords(self):\n all_max_bound = []\n all_min_bound = []\n shape_dict = self.shape_dict\n for zone_id in shape_dict:\n zone_shape = shape_dict[zone_id]\n max_bound_zone = zone_shape.max_bound\n min_bound_zone = zone_shape.min_bound\n all_max_bound.append(max_bound_zone)\n all_min_bound.append(min_bound_zone)\n\n map_max_bound, unused_max = Utils.calculate_boundaries(all_max_bound)\n unused_min, map_min_bound = Utils.calculate_boundaries(all_min_bound)\n\n return (map_max_bound, map_min_bound)",
"def max(self):\n max_i = np.nanargmax(self.ys)\n return self.xs[max_i], self.ys[max_i]",
"def get_best_position(self):\n # Todo: implement\n best_value_global = -inf\n position = None\n for particle in self.particles:\n if particle.best_value >= best_value_global:\n position = particle.best_position\n best_value_global = particle.best_value\n return position",
"def max_positions(self):\r\n return (self.args.max_source_positions, self.args.max_target_positions)"
] | [
"0.6995808",
"0.68941957",
"0.66584855",
"0.6647283",
"0.6497538",
"0.6467697",
"0.63477576",
"0.6342275",
"0.6313204",
"0.6293297",
"0.6282581",
"0.6270059",
"0.626008",
"0.62317383",
"0.62317383",
"0.6225912",
"0.622096",
"0.6213579",
"0.6213579",
"0.6213579",
"0.6201832",
"0.62016225",
"0.6186891",
"0.6153251",
"0.6136166",
"0.6094971",
"0.6081573",
"0.60758084",
"0.60625595",
"0.60588974"
] | 0.7002531 | 0 |
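The maximums document above treats a peak as a visible maximum only when its value at its own centre is not exceeded by the overall landscape there, i.e. when it is not swallowed by a taller neighbouring peak. A self-contained sketch under the assumption that the landscape value at a point is simply the highest of all peak contributions (landscape and visible_maximums are illustrative names):

import math

def cone(individual, position, height, width):
    # Cone-shaped peak: height minus width times the distance to the peak centre.
    dist = math.sqrt(sum((i - p) ** 2 for i, p in zip(individual, position)))
    return height - width * dist

def landscape(individual, peaks):
    # Overall fitness at a point: the highest contribution over all peaks.
    return max(f(individual, p, h, w) for f, p, h, w in peaks)

def visible_maximums(peaks):
    # Keep only peaks whose own summit is at least as high as the landscape there.
    result = []
    for f, p, h, w in peaks:
        val = f(p, p, h, w)
        if val >= landscape(p, peaks):
            result.append((val, p))
    return sorted(result, reverse=True)

if __name__ == "__main__":
    peaks = [
        (cone, [0.0], 50.0, 0.5),   # swallowed: the second peak contributes 80 - 2.0*10 = 60 here
        (cone, [10.0], 80.0, 2.0),
    ]
    print(visible_maximums(peaks))  # [(80.0, [10.0])]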
Order the peaks to change position, height, width and number. | def changePeaks(self):
# Change the number of peaks
if self.minpeaks is not None and self.maxpeaks is not None:
npeaks = len(self.peaks_function)
u = self.random.random()
r = self.maxpeaks - self.minpeaks
if u < 0.5:
# Remove n peaks or less depending on the minimum number of peaks
u = self.random.random()
n = min(npeaks - self.minpeaks, int(round(r * u * self.number_severity)))
for i in range(n):
idx = self.random.randrange(len(self.peaks_function))
self.peaks_function.pop(idx)
self.peaks_position.pop(idx)
self.peaks_height.pop(idx)
self.peaks_width.pop(idx)
self.last_change_vector.pop(idx)
else:
# Add n peaks or less depending on the maximum number of peaks
u = self.random.random()
n = min(self.maxpeaks - npeaks, int(round(r * u * self.number_severity)))
for i in range(n):
self.peaks_function.append(self.random.choice(self.pfunc_pool))
self.peaks_position.append([self.random.uniform(self.min_coord, self.max_coord) for _ in range(self.dim)])
self.peaks_height.append(self.random.uniform(self.min_height, self.max_height))
self.peaks_width.append(self.random.uniform(self.min_width, self.max_width))
self.last_change_vector.append([self.random.random() - 0.5 for _ in range(self.dim)])
for i in range(len(self.peaks_function)):
# Change peak position
shift = [self.random.random() - 0.5 for _ in range(len(self.peaks_position[i]))]
shift_length = sum(s**2 for s in shift)
shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0
shift = [shift_length * (1.0 - self.lambda_) * s \
+ self.lambda_ * c for s, c in zip(shift, self.last_change_vector[i])]
shift_length = sum(s**2 for s in shift)
shift_length = self.move_severity / math.sqrt(shift_length) if shift_length > 0 else 0
shift = [s*shift_length for s in shift]
new_position = []
final_shift = []
for pp, s in zip(self.peaks_position[i], shift):
new_coord = pp + s
if new_coord < self.min_coord:
new_position.append(2.0 * self.min_coord - pp - s)
final_shift.append(-1.0 * s)
elif new_coord > self.max_coord:
new_position.append(2.0 * self.max_coord - pp - s)
final_shift.append(-1.0 * s)
else:
new_position.append(new_coord)
final_shift.append(s)
self.peaks_position[i] = new_position
self.last_change_vector[i] = final_shift
# Change peak height
change = self.random.gauss(0, 1) * self.height_severity
new_value = change + self.peaks_height[i]
if new_value < self.min_height:
self.peaks_height[i] = 2.0 * self.min_height - self.peaks_height[i] - change
elif new_value > self.max_height:
self.peaks_height[i] = 2.0 * self.max_height - self.peaks_height[i] - change
else:
self.peaks_height[i] = new_value
# Change peak width
change = self.random.gauss(0, 1) * self.width_severity
new_value = change + self.peaks_width[i]
if new_value < self.min_width:
self.peaks_width[i] = 2.0 * self.min_width - self.peaks_width[i] - change
elif new_value > self.max_width:
self.peaks_width[i] = 2.0 * self.max_width - self.peaks_width[i] - change
else:
self.peaks_width[i] = new_value
self._optimum = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _layout(self):\n top = 2.0\n y = 0.0\n x = 0.0\n maxend = 0.0\n for track in self._tracks:\n track.set_view(self.view.species, self.view.seqname, \n self.view.start, self.view.end)\n tracksize = track.get_size()\n y -= tracksize[1]\n track.set_pos(track.pos_offset[0] + x, track.pos_offset[1] + y)\n self.size = [self.view.end - self.view.start + 1, 0 - y]",
"def splitDetectorPeakInfo(self):\r\n\t\tsplit_raw_min = np.amin(self.splitData)\r\n\t\tsplit_min = split_raw_min - self.splitBaseline\r\n\t\t\t\t\r\n\t\tsplit_raw_max = np.amax(self.splitData)\r\n\t\tsplit_max = split_raw_max - self.splitBaseline\r\n\t\r\n\t\tself.splitMax = split_max\r\n\t\tself.splitMin = split_min",
"def positions(self, tileID, numSamples):",
"def detect_min_max(arr):\n\n max_value = max(np.absolute(np.reshape(arr, -1)))\n peaks_max = []\n peaks_min = []\n x_max = []\n y_max = []\n z_max = []\n x_min = []\n y_min = []\n z_min = []\n\n for j1 in range(10, arr.shape[0]-10):\n for j2 in range(10, arr.shape[1]-10):\n for j3 in range(10, arr.shape[2]-10):\n if (np.absolute(arr[j1, j2, j3]) > 0.3*max_value):\n\n aaaa = [\n arr[j1, j2, j3 + 1], arr[j1, j2 + 1, j3],\n arr[j1 + 1, j2, j3], arr[j1, j2, j3 - 1],\n arr[j1, j2 - 1, j3], arr[j1 - 1, j2, j3],\n arr[j1 + 1, j2 + 1, j3 + 1],\n arr[j1 - 1, j2 - 1, j3 - 1],\n arr[j1 - 1, j2 + 1, j3 + 1], arr[j1, j2 + 1, j3 + 1],\n arr[j1, j2 - 1, j3 - 1], arr[j1, j2 - 1, j3 + 1],\n arr[j1, j2 + 1, j3 - 1], arr[j1 + 1, j2, j3 + 1],\n arr[j1 - 1, j2, j3 - 1], arr[j1 - 1, j2, j3 + 1],\n arr[j1 + 1, j2, j3 - 1], arr[j1 + 1, j2 + 1, j3],\n arr[j1 - 1, j2 - 1, j3], arr[j1 + 1, j2 - 1, j3],\n arr[j1 - 1, j2 + 1, j3], arr\n [j1 + 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 + 1, j3 - 1], arr\n [j1 - 1, j2 - 1, j3 + 1], arr\n [j1 + 1, j2 - 1, j3 - 1], arr\n [j1 - 1, j2 + 1, j3 - 1]]\n bbbb = [\n arr[j1, j2, j3 + 9], arr[j1, j2 + 9, j3],\n arr[j1 + 9, j2, j3], arr[j1, j2, j3 - 9],\n arr[j1, j2 - 9, j3], arr[j1 - 9, j2, j3]]\n\n if ((arr[j1, j2, j3] > max(aaaa)) and (max(aaaa) > max(bbbb))):\n peaks_max = np.append(peaks_max, arr[j1, j2, j3])\n x_max = np.append(x_max, j1)\n y_max = np.append(y_max, j2)\n z_max = np.append(z_max, j3)\n\n if ((arr[j1, j2, j3] < min(aaaa)) and (min(aaaa) < min(bbbb))):\n peaks_min = np.append(peaks_min, arr[j1, j2, j3])\n x_min = np.append(x_min, j1)\n y_min = np.append(y_min, j2)\n z_min = np.append(z_min, j3)\n\n return peaks_min, np.vstack(\n (x_min, y_min, z_min)), peaks_max, np.vstack(\n (x_max, y_max, z_max))",
"def FindPeaks_graph(self):\n import string\n \n maxima = self['FP_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['FP_DETECT'].copy()\n \n id = self._getGraphId()\n root = 'FindPeaks_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NPEAKS']))\n text = 'NP=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindPeaks'] = epsname\n self['figcomms']['FindPeaks'] = text",
"def prepareMarkerSequence(self):\n # first prepare the markers for the first channel of the pulse\n # generaton\n markerSequence1shape1 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n markerSequence1shape2 = zeros(self.numberOfPoints(), dtype=numpy.int8)\n for marker in self.markersList1:\n markerSequence1shape1[:] += marker._shape1\n markerSequence1shape2[:] += marker._shape2\n\n # take care of marker ovelap\n for i in range(len(markerSequence1shape1)):\n if markerSequence1shape1[i] > 1:\n markerSequence1shape1[i] = 1\n if markerSequence1shape2[i] > 2:\n markerSequence1shape2[i] = 2\n\n self.markerArray1[:] = [sum(i)for i in zip(\n markerSequence1shape1[:], markerSequence1shape2[:])]\n\n # if there are 2 channels the second one is prepared here\n if self.markersChannels == 2:\n\n markerSequence2shape1 = zeros(\n self.numberOfPoints(), dtype=numpy.int)\n markerSequence2shape2 = zeros(\n self.numberOfPoints(), dtype=numpy.int)\n\n if self.markersList2 == ():\n self.markersList2 = self.markersList1\n for marker in self.markersList2:\n markerSequence2shape1[:] += marker._shape1\n markerSequence2shape2[:] += marker._shape2\n for i in range(len(markerSequence2shape1)):\n if markerSequence2shape1[i] > 1:\n markerSequence2shape1[i] = 1\n if markerSequence2shape2[i] > 2:\n markerSequence2shape2[i] = 2\n\n self.markerArray2[:] = [sum(i)for i in zip(\n markerSequence2shape1[:], markerSequence2shape2[:])]",
"def get_atom_pos(self, data):\n\n\n if 'neighborhood_size' in self.args:\n neighborhood_size = self.args['neighborhood_size']\n else:\n neighborhood_size = 30\n if 'threshold' in self.args:\n threshold = self.args['threshold']\n else:\n threshold = 30\n\n #Use filters to calculate peaks\n data_max = filters.maximum_filter(data, neighborhood_size)\n maxima = (data == data_max)\n data_min = filters.minimum_filter(data, neighborhood_size)\n diff = ((data_max - data_min) > threshold)\n maxima[diff == 0] = 0\n\n labeled, num_objects = ndimage.label(maxima)\n slices = ndimage.find_objects(labeled)\n x, y = [], []\n for dy,dx in slices:\n x_center = (dx.start + dx.stop - 1)/2\n x.append(x_center)\n y_center = (dy.start + dy.stop - 1)/2\n y.append(y_center)\n\n\n posiitons=[x,y]\n\n return positions",
"def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n = 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.",
"def scatteringPeakInfo(self):\r\n\t\tself.scatteringBaseline = (np.mean(self.scatData[0:10]))\r\n\t\tself.scatteringBaselineNoiseThresh = 3*np.std(self.scatData[0:10])\r\n\r\n\t\traw_max = np.amax(self.scatData)\r\n\t\tmax = raw_max - self.scatteringBaseline\r\n\t\t\r\n\t\tself.scatteringMaxPos = np.argmax(self.scatData)\r\n\t\tself.scatteringMax = max",
"def addHigherPiks(self, PiksList):\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1]\n for kol in range(position1, position2): ##for each column checking all pixels over finden group\n line=0\n while not((kol, line) in PiksList):\n if self.getPixel(kol,line)==0: ##if they are black add them to PiksList\n PiksList.append((kol,line))\n line+=1\n PiksList.sort()## at the end sort the PiksList with number of column\n return PiksList",
"def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)",
"def calc_scores(x, y, peaks, score_measure, n_peaks_influence):\n\n scores = []\n n_peaks_all = []\n\n for i, row in enumerate(peaks):\n n_peaks = len(row)\n if n_peaks == 0:\n score = 0\n elif score_measure == 0:\n score = 1\n elif score_measure == 1: # median height\n heights = [y[i, k] for k in row]\n score = np.median(heights)\n elif score_measure == 2: # mean height\n heights = [y[i, k] for k in row]\n score = np.mean(heights)\n elif score_measure == 3: # mean area\n score = simpson(y[i], x[i]) / n_peaks\n elif score_measure == 4: # mean area\n score = simpson(y[i], x[i])\n\n scores.append(score)\n n_peaks_all.append(n_peaks)\n\n if n_peaks == 0:\n scores_peaks = 0\n elif n_peaks_influence == 0:\n scores_peaks = scores\n elif n_peaks_influence == 1:\n scores_peaks = [n*score for n, score in zip(n_peaks_all, scores)]\n elif n_peaks_influence == 2:\n scores_peaks = [score**(n/50)\n for n, score in zip(n_peaks_all, scores)]\n\n bar4.update(bar4.value + 1)\n\n n_peaks_all = [n_peaks for scores_peaks, n_peaks in sorted(zip(scores_peaks, n_peaks_all))]\n n_peaks_all.reverse()\n\n return scores_peaks, scores, n_peaks_all",
"def maximums(self):\n # The maximums are at the peaks position but might be swallowed by \n # other peaks\n maximums = list()\n for func, pos, height, width in zip(self.peaks_function,\n self.peaks_position,\n self.peaks_height,\n self.peaks_width):\n val = func(pos, pos, height, width)\n if val >= self.__call__(pos, count=False)[0]:\n maximums.append((val, pos))\n return sorted(maximums, reverse=True)",
"def getPeakProperties(self):\n peaks = {}\n for pname in self.properties:\n peaks[pname] = self.peak_fitter.getPeakProperty(pname)\n\n # x,y,z corrections.\n if (pname == \"x\"):\n peaks[pname] -= float(self.peak_finder.margin)\n\n elif (pname == \"y\"):\n peaks[pname] -= float(self.peak_finder.margin)\n \n elif (pname == \"z\"):\n peaks[pname] = self.peak_fitter.rescaleZ(peaks[pname])\n\n return peaks",
"def define_number_positions(self):\n self.number_positions = np.array([\n [(\n int((j + 0.5) * SCREEN_WIDTH // 9),\n int((i + 0.5) * SCREEN_WIDTH // 9)\n ) for j in range(9)]\n for i in range(9)\n ])",
"def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. + self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)",
"def get_peaks(self):\n peaks = np.array([i for i in range(self.npks)\n if self.polar_angle[i] < self.polar_max])\n x, y, z = (np.rint(self.xp[peaks]).astype(np.int16),\n np.rint(self.yp[peaks]).astype(np.int16),\n np.rint(self.zp[peaks]).astype(np.int16))\n polar, azi = self.polar_angle[peaks], self.azimuthal_angle[peaks]\n intensity = self.intensity[peaks]\n if self.Umat is not None:\n H, K, L = self.get_hkls()\n H = np.array(H)[peaks]\n K = np.array(K)[peaks]\n L = np.array(L)[peaks]\n diffs = np.array([self.diff(i) for i in peaks])\n else:\n H = K = L = diffs = np.zeros(peaks.shape, dtype=float)\n return list(zip(peaks, x, y, z, polar, azi, intensity, H, K, L, diffs))",
"def peaks(self, threshold, plotit):\n\n raise (\"Error abstract class, peaks not implemented\")",
"def reshape(self, bottom, top):\n\t\tpass",
"def getAndSortFiducialPoints(self, center):\r\n # self.__registrationStatus.setText('Registration processing...')\r\n # pNode = self.parameterNode()\r\n # fixedAnnotationList = slicer.mrmlScene.GetNodeByID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if fixedAnnotationList != None:\r\n # fixedAnnotationList.RemoveAllChildrenNodes()\r\n markerCenters = center\r\n nbCenter = len(center)\r\n for k in range(nbCenter):\r\n point = [0]\r\n for i in range(nbCenter):\r\n U,V,W = 0,0,0\r\n for j in range(nbCenter):\r\n d = 0\r\n if i != j and markerCenters[i]!=(0,0,0):\r\n d2 = (markerCenters[i][0]-markerCenters[j][0])**2+(markerCenters[i][1]-markerCenters[j][1])**2+(markerCenters[i][2]-markerCenters[j][2])**2\r\n d = d2**0.5\r\n # print markerCenters[i],markerCenters[j]\r\n #print d\r\n if d >=45 and d<=53:\r\n U += 1\r\n elif d >53 and d<60:\r\n V +=1\r\n elif d >=70 and d<80:\r\n W +=1\r\n #print U,V,W\r\n if U+V+W>=3:\r\n #print markerCenters[i]\r\n point.extend([i])\r\n point.remove(0)\r\n minX = [999,999,999,999]\r\n maxX = [-999,-999,-999,-999]\r\n sorted = [[0,0,0] for l in range(4)]\r\n sortedConverted = [[0,0,0] for l in range(4)]\r\n for i in range(2):\r\n for k in point:\r\n if markerCenters[k][0]<= minX[0]:\r\n minX[0] = markerCenters[k][0]\r\n minX[1] = k\r\n elif markerCenters[k][0]<= minX[2]:\r\n minX[2] = markerCenters[k][0]\r\n minX[3] = k\r\n if markerCenters[k][0]>= maxX[0]:\r\n maxX[0] = markerCenters[k][0]\r\n maxX[1] = k\r\n elif markerCenters[k][0]>= maxX[2]:\r\n maxX[2] = markerCenters[k][0]\r\n maxX[3] = k\r\n if markerCenters[minX[1]][1] < markerCenters[minX[3]][1]:\r\n sorted[0] = minX[1]\r\n sorted[1] = minX[3]\r\n else:\r\n sorted[0] = minX[3]\r\n sorted[1] = minX[1]\r\n if markerCenters[maxX[1]][1]>markerCenters[maxX[3]][1]:\r\n sorted[2] = maxX[1]\r\n sorted[3] = maxX[3]\r\n else:\r\n sorted[2] = maxX[3]\r\n sorted[3] = maxX[1]\r\n sorted2 = [0,0,0,0]\r\n if 1:#self.horizontalTemplate.isChecked():\r\n sorted2[0]=sorted[2]\r\n sorted2[2]=sorted[0]\r\n sorted2[1]=sorted[3]\r\n sorted2[3]=sorted[1]\r\n else:\r\n sorted2[0]=sorted[3]\r\n sorted2[2]=sorted[1]\r\n sorted2[1]=sorted[0]\r\n sorted2[3]=sorted[2]\r\n # logic = slicer.modules.annotations.logic()\r\n # logic.SetActiveHierarchyNodeID(pNode.GetParameter('fixedLandmarksListID'))\r\n # if pNode.GetParameter(\"Template\")=='4points':\r\n # nbPoints=4\r\n # elif pNode.GetParameter(\"Template\")=='3pointsCorners':\r\n # nbPoints=3\r\n l = slicer.modules.annotations.logic()\r\n l.SetActiveHierarchyNodeID(slicer.util.getNode('Fiducial List_fixed').GetID())\r\n for k in range(4) :\r\n fiducial = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n fiducial.SetReferenceCount(fiducial.GetReferenceCount()-1)\r\n fiducial.SetFiducialCoordinates(markerCenters[sorted2[k]])\r\n fiducial.SetName(str(k))\r\n fiducial.Initialize(slicer.mrmlScene)\r\n\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeRed\")\r\n if sRed ==None :\r\n sRed = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode1\")\r\n # sRed.SetSliceVisible(1)\r\n m= sRed.GetSliceToRAS()\r\n m.SetElement(0,3,sortedConverted[3][0])\r\n m.SetElement(1,3,sortedConverted[3][1])\r\n m.SetElement(2,3,sortedConverted[3][2])\r\n sRed.Modified()\r\n return sorted2",
"def newPeaks(self, peaks, peaks_type):\n c_peaks = self.formatPeaks(peaks, peaks_type)\n self.clib.pfitNewPeaks(self.mfit,\n c_peaks,\n ctypes.c_char_p(peaks_type.encode()),\n c_peaks.shape[0])",
"def reshape(self, bottom, top):\r\n pass",
"def reshape(self,bottom,top):\n pass",
"def __init__(self, peaks, pki, parent):\n self.filters = list(peaks.keys())\n self.deblendedPeaks = peaks\n self.parent = parent\n for pki, peak in self.deblendedPeaks.items():\n peak.multiColorPeak = self\n\n # Fields common to the peak in all bands that will be set by the deblender\n # In the future this is likely to contain information about the probability of the peak\n # being a point source, color-information about templateFootprints, etc.\n self.pki = pki\n self.skip = False\n self.deblendedAsPsf = False\n self.x = self.deblendedPeaks[self.filters[0]].peak.getFx()\n self.y = self.deblendedPeaks[self.filters[0]].peak.getFy()",
"def relabel(peak_ids, oldparams, mask):\n spot_data = {}\n peak_num = 1\n for peak in peak_ids:\n #coords = np.where(mask == peak)\n paramsnew = oldparams[peak-1,:] # object 1 will be fitparams row 0\n # Rearrange params from fit function so coordinates lead.\n spot_data[peak_num] = paramsnew[[1,2,3,0,4,5,6]]\n peak_num = peak_num + 1\n return spot_data",
"def _preprocessing(self):\n if self.resize:\n self.click_list = self._remapping_coord(self.click_list,\n self.input_size,\n self.orig_size)\n clickers = self._get_clickers(self.click_list)\n clicks_list = clickers.get_clicks()\n clicks_lists = self._points_transform([clicks_list], self.image_width)\n points_nd = self._get_points_nd(clicks_lists, self.net_clicks_limit)\n return points_nd",
"def assignPositions(self):\n n = int(math.ceil(self.numAtoms**(1.0/3.0))) # Number of atoms in a direction\n particle = 0 # Particles placed so far\n \n for x in range(0, n):\n for y in range(0, n):\n for z in range(0, n):\n if (particle < self.numAtoms):\n self.atoms[particle].x = x * self.sigma\n self.atoms[particle].y = y * self.sigma \n self.atoms[particle].z = z * self.sigma\n particle += 1",
"def incandPeakInfo(self):\t\r\n\t\tself.incandBaseline = (np.mean(self.wideBandIncandData[0:10]))\r\n\t\t\t\t\r\n\t\traw_incand_max = np.amax(self.wideBandIncandData)\r\n\t\tincand_max = raw_incand_max - self.incandBaseline\r\n\t\tincand_max_index = np.argmax(self.wideBandIncandData)\r\n\t\t\r\n\t\tself.incandMax =incand_max\r\n\t\tself.incandMaxPos = incand_max_index",
"def setup(self):\r\n m = re.match(r'\\[([0-9]+),([0-9]+)]',\r\n self.value.strip().replace(' ', ''))\r\n if m:\r\n # Note: we subtract 15 to compensate for the size of the dot on the screen.\r\n # (is a 30x30 image--lms/static/green-pointer.png).\r\n (self.gx, self.gy) = [int(x) - 15 for x in m.groups()]\r\n else:\r\n (self.gx, self.gy) = (0, 0)",
"def updateArrays(self):\n for channelNumber in range(0, 8):\n self.channels[channelNumber][self.currentPosition]=self._voltage_get(channelNumber)#update next element in each array\n self.currentPosition+=1\n if self.currentPosition>=self.numberOfPoints:#reset position to beginning when we hit max number of points (like rolling oscilloscope)\n self.currentPosition=0\n self.cursorXS = self.getCurrentPositionArray()\n #could also set the next points to NaN's to make a gap!"
] | [
"0.5916728",
"0.56701946",
"0.5532572",
"0.54608786",
"0.5455434",
"0.5438626",
"0.5307399",
"0.52490765",
"0.5241294",
"0.5218205",
"0.52033854",
"0.5194276",
"0.5190472",
"0.5177038",
"0.517002",
"0.51685995",
"0.51646316",
"0.5159973",
"0.51592517",
"0.51470447",
"0.514527",
"0.5141938",
"0.51320904",
"0.5129863",
"0.51134866",
"0.50961083",
"0.5088",
"0.5084249",
"0.5081005",
"0.5072977"
] | 0.6554263 | 0 |
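The position-update step in the changePeaks document blends a fresh random direction with the previous shift vector (weighted by lambda_), rescales the result to move_severity, and reflects any coordinate that leaves the [min_coord, max_coord] domain. A sketch of just that step, restated as a standalone function over plain lists (move_peak is an illustrative name; the parameters mirror the attributes used above):

import math
import random

def move_peak(position, last_shift, move_severity, lambda_, min_coord, max_coord):
    # Blend a fresh random direction with the previous shift, then rescale to move_severity.
    shift = [random.random() - 0.5 for _ in position]
    length = math.sqrt(sum(s ** 2 for s in shift))
    scale = move_severity / length if length > 0 else 0
    shift = [scale * (1.0 - lambda_) * s + lambda_ * c for s, c in zip(shift, last_shift)]
    length = math.sqrt(sum(s ** 2 for s in shift))
    scale = move_severity / length if length > 0 else 0
    shift = [s * scale for s in shift]

    new_position, final_shift = [], []
    for p, s in zip(position, shift):
        coord = p + s
        if coord < min_coord:          # reflect at the lower bound
            new_position.append(2.0 * min_coord - p - s)
            final_shift.append(-s)
        elif coord > max_coord:        # reflect at the upper bound
            new_position.append(2.0 * max_coord - p - s)
            final_shift.append(-s)
        else:
            new_position.append(coord)
            final_shift.append(s)
    return new_position, final_shift

if __name__ == "__main__":
    pos, shift = [50.0, 50.0], [0.1, -0.1]
    for _ in range(3):
        pos, shift = move_peak(pos, shift, 1.0, 0.5, 0.0, 100.0)
        print(pos)

Feeding the returned final_shift back in as last_shift on the next call reproduces the correlated movement that lambda_ controls in the original method.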
Open the self.dev PyEZ Device instance. | def open(self):
# Move all of the connection arguments into connect_args
connect_args = {}
# check for mode
if self.get_option('port') is None:
if self.get_option('mode') == 'telnet':
connect_args['port'] = 23
elif self.get_option('mode') == 'serial':
connect_args['port'] = '/dev/ttyUSB0'
else:
connect_args['port'] = 830
else:
connect_args['port'] = self.get_option('port')
if (self.get_option('mode') == 'telnet' or
self.get_option('mode') == 'serial'):
if self.get_option('baud') is None:
# Default baud if serial or telnet mode
connect_args['baud'] = 9600
if self.get_option('attempts') is None:
# Default attempts if serial or telnet mode
connect_args['attempts'] = 10
connect_args['host'] = self.get_option('host')
# connect_args['port'] = self.get_option('port')
connect_args['user'] = self.get_option('remote_user')
connect_args['passwd'] = self.get_option('password')
connect_args['ssh_private_key_file'] = self.get_option('private_key_file')
connect_args['ssh_config'] = self.get_option('pyez_ssh_config')
connect_args['timeout'] = self.get_option('persistent_connect_timeout')
try:
log_connect_args = dict(connect_args)
log_connect_args["passwd"] = "NOT_LOGGING_PARAMETER"
self.queue_message("vvvv", "Creating device parameters: %s" % log_connect_args)
timeout = connect_args.pop("timeout")
self.dev = jnpr.junos.device.Device(**connect_args)
self.queue_message("vvvv", "Opening device.")
self.dev.open()
self.queue_message("vvvv", "Device opened.")
self.dev.timeout = self.get_option('persistent_command_timeout')
self.queue_message("vvvv", "Setting default device timeout to %d." % timeout)
# Exceptions raised by close() or open() are all sub-classes of
# ConnectError, so this should catch all connection-related exceptions
# raised from PyEZ.
except pyez_exception.ConnectError as ex:
raise AnsibleError("Unable to make a PyEZ connection: %s" % (str(ex))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def open_device_dialog(self):\n res, device = DeviceDialog.get_device(self.indexer)\n if res and device:\n self.serial = device.serial\n if self.serial:\n caps_str = None\n self.open_device(self.serial, caps_str)",
"def open(self) -> None:\n if not self.__opened:\n if self.path is None:\n self.path = HID.enumerate_devices(self.vendor_id)[0]\n self.device.open_path(self.path)\n self.device.set_nonblocking(True)\n self.__opened = True",
"def open_device(self):\n\t\t# open device\n\t\t# declare ctype variables\n\t\thdwf = c_int()\n\n\t\tprint \"\\nOpening device\"\n\t\tdwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))\n\n\t\tif hdwf.value == 0:\n\t\t\tprint \"failed to open device\"\n\t\t\tquit()\n\n\t\tself.interface_handler = hdwf\n\n\t\thzSysIn = c_double()\n\t\t#max_buffer_size_in = c_int()\n\n\t\tdwf.FDwfDigitalInInternalClockInfo(self.interface_handler, byref(hzSysIn))\n\t\t#dwf.FDwfDigitalInBufferSizeInfo(self.interface_handler, byref(max_buffer_size_in))\n\n\t\tself.internal_clock_freq = hzSysIn.value\n\n\t\t#print \"internal digital in frequency is \" + str(hzSysIn.value)\n\t\t#print \"digital in max buffer size: \" + str(max_buffer_size_in.value)",
"def open(self):\n self.device = ConnectHandler(\n device_type='vyos',\n host=self.hostname,\n username=self.username,\n password=self.password,\n timeout=self.timeout,\n port=self.port\n )",
"def open_device(dev: Device):\n try:\n dev.open()\n yield dev\n finally:\n dev.close()",
"def _open_device(self):\r\n self._lib = windll.LoadLibrary(\"lib\\\\ps2000a.dll\")\r\n c_handle = c_int16()\r\n with self._driver_lock:\r\n m = self._lib.ps2000aOpenUnit(byref(c_handle),None)\r\n if m == 286:\r\n m = self._lib.ps2000aChangePowerSource(c_handle,\r\n c_int32(m))\r\n check_result(m)\r\n self._handle = c_handle\r\n\r\n return True",
"def open(self):\n if dev[self.id] != FLI_INVALID_DEVICE:\n raise FliError(\"Device already opened\")\n dev[self.id] = FLIDEVICE_CAMERA\n\n # set default parameters\n self.setTemperature(CCD_TEMP)\n self.setHBin(1)\n self.setVBin(1)\n self.setExpTime(0)\n self.setFrame(0, 0, 1072, 1033)\n with self.lock:\n self.status = READY\n self.visibleExpArea = (24, 9, 1048, 1033)\n self.defaultExpArea = (0, 0, 1072, 1033)\n self.expArea = (0, 0, 1072, 1033)\n self.regions = ((0, 0, 0), (0, 0, 0))",
"def open(self):\n self.__port.open()",
"def open(self):\n try:\n self.handle = self.rm.get_instrument(self.visaName)\n self.handle.write('*RST') #reset device to default\n time.sleep(.5)\n self.handle.write(':FORM:DATA ASC') #return ASCII\n except Exception:\n print('Dvm34411.open() failed !')\n raise\n return True",
"def open(self):\n try:\n self._ser = serial.Serial(self.device, 9600, timeout=1)\n\t self.log.info(u\"= = > Virtual Amp opened({}).\".format(self.device))\n except:\n error = u\"Error while opening device : {}\".format(self.device)\n raise Mprsg6zException(error)",
"def performOpen(self, options={}):\n self._lib = CDLL('sc5520a_uhfs.dll')\n self._set_signatures()\n # The expected format for the serial number is: 10001A4C\n addr = self.getAddress()\n # handle = self._lib.sc5520a_uhfsOpenDevice(1, addr.encode('utf-8'),0)\n handle = c_void_p()\n status = self._lib.sc5520a_uhfsOpenDevice(1, addr.encode('utf-8'),0, byref(handle)) \n # raise TypeError(handle)\n # assert status == SCI_SUCCESS (SCI_SUCCESS = 0)\n\n # raise TypeError(handle)\n if not handle:\n msg = 'Failed to connect to the instrument with serial number: %s'\n raise RuntimeError(msg % addr)\n self._handle = handle\n self._addr = addr",
"def performOpen(self, options={}):\n self.switch = USB_Digital_Switch() # Create an instance of the switch class\n self.serial_number = str(self.getAddress())\n\n status = self.establish_connection() # Connect the switch (pass the serial number as an argument if required)\n if status > 0:\n resp = self.switch.Send_SCPI(\":MN?\", \"\") # Read model name\n self.model_number = str(resp[2])\n self.setModel(self.model_number)\n self.log(self.model_number, level = 30)",
"def open_device(self, device):\n raise NotImplementedError(\"implement in derived transport class\")",
"def open(self, device_id):\n self._js[device_id].open()",
"def acquire(self):\n clf = nfc.ContactlessFrontend()\n\n if clf.open('usb:{bus}:{dev}'.format(bus = self.usb_bus,\n dev = self.usb_dev)):\n print(\"dev {0} acquired successfully\".format(self.usb_target))\n self.hw_connected = True\n return True\n\n print(\"dev {0} not found\".format(self.usb_target))\n return False",
"def driver_open_display(self):\n raise Exception(\"missing implementation \")",
"def open(self):\n self._command = \"open\"",
"def open(self, reconfigure_fpga=False):\n\n if self._is_open:\n raise ValueError(\"OVDevice doubly opened\")\n\n # Open our connection to our FTDI device.\n rc = self.ftdi.open()\n if rc:\n error = IOError(\"couldn't open connection to our USB device!\")\n error.errno = rc\n raise error\n\n # Configure the FPGA, if necessary.\n self.configure_fpga(self.firmware.get_bitstream_file(), not reconfigure_fpga)\n\n # Start our background thread for comms.\n self._start_comms_thread()\n\n # Apply our default LED values.\n self._apply_default_leds()\n\n # Finally, mark ourselves as open.\n self._is_open = True",
"def open_device(self, serial: str, caps_str: str=None):\n self.close()\n self.serial = serial\n\n if self.device_list:\n for dev in self.device_list:\n if dev.serial == serial:\n self.setWindowTitle(\"TCam Capture - {}({})\".format(dev.model, serial))\n # update device menu so that mark on opened camera is drawn\n self.update_device_list(self.device_list)\n break\n\n self.view = TcamView(self.serial, self)\n self.view.set_settings(self.settings)\n self.view.register_device_lost(self.lost_device)\n # create preemptive pipline to make tcambin available\n # needed for properties\n self.view.create_pipeline(caps_str)\n self.view.image_saved.connect(self.saved_image)\n self.view.new_pixel_under_mouse.connect(self.new_pixel_under_mouse)\n self.view.current_fps.connect(self.current_fps)\n self.view.format_selected.connect(self.format_selected_callback)\n self.view.setSizePolicy(QtWidgets.QSizePolicy.Expanding,\n QtWidgets.QSizePolicy.Expanding)\n self.setCentralWidget(self.view)\n self.caps_desc = self.view.get_caps_desc()\n self.data.tcam = self.view.get_tcam()\n self.view.pause()\n\n self.props = QDockWidget(\"Properties\")\n self.props_widget = PropertyDialog(self.data, self.view, self.props)\n self.props.setWidget(self.props_widget)\n self.props.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.props.setFloating(False)\n self.addDockWidget(Qt.LeftDockWidgetArea, self.props)\n self.set_device_menus_enabled(True)\n self.props_action.setVisible(True)\n self.cache.last_serial = self.serial\n self.cache.save()\n self.play(caps_str)",
"def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: UDP port opening started...\" % \\\n self.__class__.__name__)\n self._udp_socket.bind(tuple(['',self._port]))\n logger.debug(\"%s: ...UDP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Instantiate router\n self._platform = router.HorizonRouteable()\n self._platform._version = self._version\n self._platform.message_routed = self.message_received\n def tmp():\n return self.__str__()\n self._platform.__str__ = tmp\n self._router = router.HorizonRouter(platform = self._platform, \n clients = [], \n send_all = self._send_all)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...UDP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"UDP Port open failed!\\n\" + str(ex))",
"def init_dev(dev, di_file=default_di_file):\n\n fx3.init_dev(dev,di_file)",
"def open_keyboard(self, instance):\n self.popup.open()",
"def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: Serial port opening started...\" % \\\n self.__class__.__name__)\n self._serial_port.open()\n logger.debug(\"%s: ...serial port opening complete.\" % \\\n self.__class__.__name__)\n \n # Open failed\n except serial.SerialException as ex:\n logger.error(\"%s: ...serial port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"Serial Port open failed!\\n\" + str(ex))",
"def openSensel():\n handle = None\n (error, device_list) = sensel.getDeviceList()\n if device_list.num_devices != 0:\n (error, handle) = sensel.openDeviceByID(device_list.devices[0].idx)\n return handle",
"def open_com_driver( self ):\r\n self.gui.print_info_string( \"Opening comm port\" )\r\n val = self.com_driver.open()\r\n if val:\r\n status = \"Open\"\r\n self.gui.print_info_string( \"Comm port open OK....\" )\r\n self.logger.info( \"open_driver, opened ok\" )\r\n msg = \"Open Comm Port OK\"\r\n self.gui.print_info_string( msg )\r\n\r\n else:\r\n self.gui.print_info_string( \"Comm port open NG\" )\r\n status = \"Open Failed\"\r\n self.logger.info( \"open failed, ignored\" )\r\n msg = \"Open Comm Port NG\"\r\n self.gui.print_info_string( msg )\r\n\r\n self.gui.set_open( status )\r\n\r\n return",
"def open_idf(self):\n\n self.save()\n\n filepath = self.idfname\n\n import os\n import platform\n import subprocess\n\n if platform.system() == \"Darwin\": # macOS\n subprocess.call((\"open\", filepath))\n elif platform.system() == \"Windows\": # Windows\n os.startfile(filepath)\n else: # linux variants\n subprocess.call((\"xdg-open\", filepath))",
"def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: UDP port opening started...\" % \\\n self.__class__.__name__)\n self._udp_socket.bind(('',self._port))\n self._socket = HorizonTransport_Socket(sock = self._udp_socket,\n host = self._addr[0],\n port = self._addr[1],\n name = \"%s:%d\" % self._addr,\n store_timeout = self.store_timeout,\n version = self.version)\n self._socket.opened = True\n logger.debug(\"%s: ...UDP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...UDP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"UDP Port open failed!\\n\" + str(ex))",
"def _doOpenTool(self):\n self._cmdOpenTool()",
"def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")",
"def open(self, deviceID):\r\n \r\n return self._performAction(deviceID, _API_DEVICE_ACTION_OPEN)"
] | [
"0.6904389",
"0.68133426",
"0.67640007",
"0.6750752",
"0.65204847",
"0.64496225",
"0.6357437",
"0.62946606",
"0.6099149",
"0.6057723",
"0.60555756",
"0.6051541",
"0.5988858",
"0.5986539",
"0.59722483",
"0.5957039",
"0.5942295",
"0.59388256",
"0.59096193",
"0.5903228",
"0.58085513",
"0.57926065",
"0.57832366",
"0.57796186",
"0.57650733",
"0.57585895",
"0.57290655",
"0.57230926",
"0.57192945",
"0.5697699"
] | 0.694745 | 0 |
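The open() document above builds the PyEZ connection arguments (host, port or mode-specific defaults, user, passwd, SSH key and config, timeouts) and then calls jnpr.junos.device.Device(**connect_args).open(). A minimal direct-usage sketch of the same PyEZ calls outside the Ansible connection plugin; the hostname and credentials are placeholders, while Device, open(), timeout, close() and ConnectError come straight from PyEZ as used above:

from jnpr.junos import Device
from jnpr.junos.exception import ConnectError

# Placeholder host and credentials -- substitute values for your own environment.
connect_args = {
    "host": "router.example.net",
    "port": 830,          # NETCONF default; the plugin above falls back to 23 (telnet) or /dev/ttyUSB0 (serial)
    "user": "admin",
    "passwd": "secret",
}

try:
    dev = Device(**connect_args)
    dev.open()
    dev.timeout = 30      # per-RPC timeout, analogous to persistent_command_timeout above
    print(dev.facts.get("hostname"))
    dev.close()
except ConnectError as err:
    print("Unable to make a PyEZ connection: %s" % err)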
Get the capabilities for the network API. | def get_capabilities(self):
return json.dumps({'network_api': 'pyez'}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get_capability(self):\n requests = self.__get_capability_request()\n exception = self.__get_capability_exception()\n layers = self.__get_capability_layer()\n \n capability = { \"requests\": requests,\n \"exception\" : exception,\n \"layers\" : layers}\n return capability",
"def capabilities(self):\n pass",
"def capabilities(self):\n return []",
"def capabilities(self):\n return None",
"def capability(self):\n code, data, capabilities = (\n self.__send_command(\"CAPABILITY\", withcontent=True))\n if code == \"OK\":\n return capabilities\n return None",
"def get_capabilities(self, method='get'):\n self.client.getcapabilities()\n\n self._has_capabilities = True",
"def capabilities(self) -> dto.Capabilities:\n raise NotImplementedError",
"def to_capabilities(self):",
"def capabilities(self):\n\n class Capabilities(ct.Structure):\n _fields_ = [(\"Size\", ct.c_ulong),\n (\"AcqModes\", ct.c_ulong),\n (\"ReadModes\", ct.c_ulong),\n (\"FTReadModes\", ct.c_ulong),\n (\"TriggerModes\", ct.c_ulong),\n (\"CameraType\", ct.c_ulong),\n (\"PixelModes\", ct.c_ulong),\n (\"SetFunctions\", ct.c_ulong),\n (\"GetFunctions\", ct.c_ulong),\n (\"Features\", ct.c_ulong),\n (\"PCICard\", ct.c_ulong),\n (\"EMGainCapability\", ct.c_ulong)]\n\n stru = Capabilities()\n stru.Size = ct.sizeof(stru)\n self.lib.GetCapabilities(ct.pointer(stru))\n\n return stru",
"def capabilities(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"capabilities\")",
"def get_capabilities(http_conn):\n parsed, conn = http_conn\n headers = {'Accept-Encoding': 'gzip'}\n conn.request('GET', parsed.path, '', headers)\n resp = conn.getresponse()\n body = resp.read()\n http_log((parsed.geturl(), 'GET',), {'headers': headers}, resp, body)\n if resp.status < 200 or resp.status >= 300:\n raise ClientException.from_response(\n resp, 'Capabilities GET failed', body)\n resp_headers = resp_header_dict(resp)\n return parse_api_response(resp_headers, body)",
"def get_capabilities(params,defaults):\n cap = CapabilitiesController (params,defaults)\n return cap.get_capabilities()",
"def get_capabilities(disk):\n\n #TODO\n return \"Unknown\"",
"def get_server_capabilities(self):\n capabilities = {}\n system = self._get_host_details()\n capabilities['server_model'] = system['Model']\n rom_firmware_version = (\n system['Oem']['Hp']['Bios']['Current']['VersionString'])\n capabilities['rom_firmware_version'] = rom_firmware_version\n capabilities.update(self._get_ilo_firmware_version())\n capabilities.update(self._get_number_of_gpu_devices_connected())\n if self._get_tpm_capability():\n capabilities['trusted_boot'] = 'true'\n\n if self._get_cpu_virtualization():\n capabilities['cpu_vt'] = 'true'\n if self._get_nvdimm_n_status():\n capabilities['nvdimm_n'] = 'true'\n try:\n self.get_secure_boot_mode()\n capabilities['secure_boot'] = 'true'\n except exception.IloCommandNotSupportedError:\n # If an error is raised dont populate the capability\n # secure_boot\n pass\n if self._is_sriov_enabled():\n capabilities['sriov_enabled'] = 'true'\n return capabilities",
"def get_capabilities(self):\n\n service = self.__get_service()\n capability = self.__get_capability()\n contents = {\"service\" : service, \"capability\" : capability}\n return contents, self.params['format']",
"def default_capabilities(self):",
"def test_get_capabilities():\n capabilties = (\n \"Capability Identity : Capa1\\r\\n State : Installed\\r\\n\"\n \"Capability Identity : Capa2\\r\\n State : Disabled\\r\\n\"\n )\n\n mock = MagicMock(return_value=capabilties)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n with patch.dict(dism.__grains__, {\"osversion\": 10}):\n out = dism.get_capabilities()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Capabilities\"]\n )\n assert out == [\"Capa1\", \"Capa2\"]",
"def test_get_hyperflex_capability_info_list(self):\n pass",
"def status(self):\n try:\n capabilities = []\n with manager.connect(host=netconf_server_ip,\n port=int(netconf_server_port),\n username= netconf_server_username,\n password=netconf_server_password,\n hostkey_verify=False) as m:\n\n for c in m.server_capabilities:\n capabilities.append(c)\n return capabilities\n\n except:\n return \"Can not establish connection with the server, something went wrong\"",
"def supported_capabilities(self) -> Optional['outputs.SupportedCapabilitiesResponse']:\n return pulumi.get(self, \"supported_capabilities\")",
"def remote_desired_capabilities(self):\n return dat2obj(pn_connection_remote_desired_capabilities(self._impl))",
"def list_caps():\n global _CAPABILITIES_MAP\n\n try:\n return tuple(sorted(_CAPABILITIES_MAP.keys()))\n\n except NameError:\n pass # We can remedy this.\n\n loop = get_loop()\n\n controller_connection = CioRoot(loop)\n\n _CAPABILITIES_MAP = {}\n\n for capability_id in controller_connection.init():\n _CAPABILITIES_MAP[capability_id] = {\n 'acquire': controller_connection.acquire,\n 'release': controller_connection.release,\n }\n\n return tuple(sorted(_CAPABILITIES_MAP.keys()))",
"def extended_capabilities(self):\n buf = (ctypes.c_uint8 * 32)()\n self._dll.JLINKARM_GetEmuCapsEx(buf, 32)\n return list(buf)",
"def test_available_capabilities():\n capabilties = (\n \"Capability Identity : Capa1\\r\\n State : Installed\\r\\n\"\n \"Capability Identity : Capa2\\r\\n State : Not Present\\r\\n\"\n )\n\n mock = MagicMock(return_value=capabilties)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n with patch.dict(dism.__grains__, {\"osversion\": 10}):\n out = dism.available_capabilities()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Capabilities\"]\n )\n assert out == [\"Capa2\"]",
"def getcapabilities(self):\n reader = WFSCapabilitiesReader(self.version, auth=self.auth)\n return openURL(\n reader.capabilities_url(self.url), timeout=self.timeout,\n headers=self.headers, auth=self.auth\n )",
"def get_caps(self):\n return ObjectCapabilities.get_capabilities(self)",
"def capabilities(self):\n return self._dll.JLINKARM_GetEmuCaps()",
"def test_wmts_get_capabilities(self):\n ref_hash = 'b49538ed143340f11230eac8b8f9ecca'\n req_url = r'http://localhost/reproject/test/wmts/wmts.cgi?Request=GetCapabilities'\n if DEBUG:\n print('\\nTesting WMTS GetCapablities')\n print('URL: ' + req_url)\n response = get_url(req_url)\n\n # Check if the response is valid XML\n try:\n XMLroot = ElementTree.XML(response.read())\n XMLdict = XmlDictConfig(XMLroot)\n xml_check = True\n except:\n xml_check = False\n self.assertTrue(xml_check, 'GetCapabilities response is not a valid XML file. URL: ' + req_url)\n\n refXMLtree = ElementTree.parse(os.path.join(os.getcwd(), 'mod_reproject_test_data/GetCapabilities.1.0.0.xml'))\n refXMLroot = refXMLtree.getroot()\n refXMLdict = XmlDictConfig(refXMLroot)\n\n check_result = check_dicts(XMLdict, refXMLdict)\n #check_result = check_tile_request(req_url, ref_hash)\n self.assertTrue(check_result, 'WTMTS Get GetCapabilities Request does not match what\\'s expected. URL: ' + req_url)",
"def wms_getcapabilities(self, response, params, permission):\n xml = response.text\n\n if response.status_code == requests.codes.ok:\n # parse capabilities XML\n ElementTree.register_namespace('', 'http://www.opengis.net/wms')\n ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')\n ElementTree.register_namespace('sld', 'http://www.opengis.net/sld')\n ElementTree.register_namespace(\n 'xlink', 'http://www.w3.org/1999/xlink'\n )\n root = ElementTree.fromstring(xml)\n\n # use default namespace for XML search\n # namespace dict\n ns = {'ns': 'http://www.opengis.net/wms'}\n # namespace prefix\n np = 'ns:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n root_layer = root.find('%sCapability/%sLayer' % (np, np), ns)\n if root_layer is not None:\n # remove broken info format 'application/vnd.ogc.gml/3.1.1'\n feature_info = root.find('.//%sGetFeatureInfo' % np, ns)\n if feature_info is not None:\n for format in feature_info.findall('%sFormat' % np, ns):\n if format.text == 'application/vnd.ogc.gml/3.1.1':\n feature_info.remove(format)\n\n # filter and update layers by permission\n permitted_layers = permission['public_layers']\n queryable_layers = permission['queryable_layers']\n for group in root_layer.findall('.//%sLayer/..' % np, ns):\n for layer in group.findall('%sLayer' % np, ns):\n layer_name = layer.find('%sName' % np, ns).text\n if layer_name not in permitted_layers:\n # remove not permitted layer\n group.remove(layer)\n else:\n # update queryable\n if layer_name in queryable_layers:\n layer.set('queryable', '1')\n else:\n layer.set('queryable', '0')\n\n # get permitted attributes for layer\n permitted_attributes = permission['layers'].get(\n layer_name, {}\n )\n\n # remove layer displayField if attribute not permitted\n # (for QGIS GetProjectSettings)\n display_field = layer.get('displayField')\n if (display_field and\n display_field not in permitted_attributes):\n layer.attrib.pop('displayField')\n\n # filter layer attributes by permission\n # (for QGIS GetProjectSettings)\n attributes = layer.find('%sAttributes' % np, ns)\n if attributes is not None:\n for attr in attributes.findall(\n '%sAttribute' % np, ns\n ):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n attributes.remove(attr)\n\n # update queryable for root layer\n if queryable_layers:\n root_layer.set('queryable', '1')\n else:\n root_layer.set('queryable', '0')\n\n # filter LayerDrawingOrder by permission\n # (for QGIS GetProjectSettings)\n layer_drawing_order = root.find(\n './/%sLayerDrawingOrder' % np, ns\n )\n if layer_drawing_order is not None:\n layers = layer_drawing_order.text.split(',')\n # remove not permitted layers\n layers = [\n l for l in layers if l in permitted_layers\n ]\n layer_drawing_order.text = ','.join(layers)\n\n # filter ComposerTemplates by permission\n # (for QGIS GetProjectSettings)\n templates = root.find(\n '%sCapability/%sComposerTemplates' % (np, np), ns\n )\n if templates is not None:\n permitted_templates = permission.get('print_templates', [])\n for template in templates.findall(\n '%sComposerTemplate' % np, ns\n ):\n template_name = template.get('name')\n if template_name not in permitted_templates:\n # remove not permitted print template\n templates.remove(template)\n\n if not templates.find('%sComposerTemplate' % np, ns):\n # remove ComposerTemplates if empty\n root.find('%sCapability' % np, ns).remove(templates)\n\n # write XML to string\n xml = ElementTree.tostring(\n root, encoding='utf-8', 
method='xml'\n )\n\n return Response(\n xml,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def get_capabilities(self):\n return Capabilities(javabridge.call(self.jobject, \"getCapabilities\", \"()Lweka/core/Capabilities;\"))"
] | [
"0.732674",
"0.7316955",
"0.7160758",
"0.70745724",
"0.7045996",
"0.6886132",
"0.6836141",
"0.68084925",
"0.67257965",
"0.6713764",
"0.6674197",
"0.66370296",
"0.6603681",
"0.6588467",
"0.6588442",
"0.65790033",
"0.6574301",
"0.6511568",
"0.649308",
"0.64927596",
"0.64711374",
"0.64301914",
"0.6420083",
"0.63507026",
"0.63186383",
"0.63137984",
"0.6298332",
"0.62912667",
"0.6289346",
"0.62513095"
] | 0.79206073 | 0 |
Get chassis inventory details from the device. | def get_chassis_inventory(self):
resp = self.dev.rpc.get_chassis_inventory()
return etree.tostring(resp) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_inventory(self):\n raise NotImplementedError(\"Subclasses define what returning the inventory entails\")",
"def list_chassis(self):\n return self.ironic_client.chassis.list()",
"def inventory(self):\n return self._inventory",
"def inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data",
"def get_inventory():\n return INVENTORY",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure and attributes\n # In real life, this code will be preceded by SNMP/other calls to the resource details and will not be static\n # run 'shellfoundry generate' in order to create classes that represent your data model\n\n '''\n resource = LanforgeResource.create_from_context(context)\n resource.vendor = 'specify the shell vendor'\n resource.model = 'specify the shell model'\n\n port1 = ResourcePort('Port 1')\n port1.ipv4_address = '192.168.10.7'\n resource.add_sub_resource('1', port1)\n\n return resource.create_autoload_details()\n '''\n return AutoLoadDetails([], [])",
"def getInventory(self):\n return str(self.inventory)",
"def get_inventory(self, node):",
"def get_equipment_from_inventory(self):\n return [x for x in self.inventory if x.is_equip()]",
"def test_get_chassis(self):\n resp = self.chassis_client.get_chassis(self.chassis.uuid)\n self.assertEqual(resp.status_code, 200)\n chassis = resp.entity\n self.assertEqual(chassis.description, self.chassis_description)\n self.assertEqual(chassis.extra, self.chassis_extra)",
"def list_inventory(self):\n\n print('Your inventory contains:')\n #i = 1\n #inv_dict = {}\n for item in self.bag_of_holding:\n if 'casted' not in item.name:\n try:\n print(item.name)\n except:\n pass\n\n #inv_dict[str(i)] = item\n #i += 1\n #return inv_dict",
"def list(self, filters=None):\n return self._list(\"/chassis\", filters=filters)",
"def show_inventory(table):\r\n if (table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in table:\r\n print('{}\\t{} (by:{})'.format(*row.values()))\r\n print('======================================')\r\n else:\r\n print ('Inventory is empty.\\n')\r\n # return None\r",
"def read_inventory_file():\n try:\n with open('inventory', 'r') as file:\n inventory = file.read()\n return inventory\n except OSError:\n pass",
"def InventoryDevices(self):\n self.logger.debug(\"Start Inventory...\")\n \n # Find our desired usb devices. These should be present in /dev somewhere.\n osDevices = os.listdir(\"/dev\")\n osDevices.sort()\n\n # Loop through all devices in /dev asking them what they are.\n for anOSDevice in osDevices:\n \n deviceName = \"/dev/\" + anOSDevice\n # We're making use of the unix command \"udevadm\". Read up on it!\n cmd = [\"udevadm\", \"info\", \"-q\", \"all\", \"-n\", deviceName]\n #print(cmd)\n pid=\"\"\n vid=\"\"\n uid=\"\"\n \n # Launch udevadm for the current device name.\n FNULL = open(os.devnull, 'w')\n proc = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=FNULL)\n while True:\n line = proc.stdout.readline()\n if len(line) != 0:\n #print(line.rstrip())\n # Parse out the pieces of the output lines looking for the relavent information.\n parts = re.split(\"[ ]\", line.__str__())\n #print(parts)\n if len(parts) > 1:\n kvParts = re.split(\"[=]\", parts[1].__str__())\n #print(kvParts)\n # We care about procuct id, vendor id and serial number.\n if (kvParts[0] == \"ID_VENDOR_ID\"):\n vid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_MODEL_ID\"):\n pid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_SERIAL\"):\n uid = kvParts[1][:-1]\n if (kvParts[0] == \"ID_SERIAL_SHORT\"):\n uid = kvParts[1][:-1]\n else:\n break\n\n # We found a device with a Product ID and Vendor ID. Is it one were expecting?\n if len(pid) > 0 and len(vid) > 0:\n self.logger.info( \"Checking if device with ProductID: \" + pid + \" and VendorID: \" + vid + \" on \" + deviceName + \" is needed...\") \n foundItem = next((x for x in self.expectedDevices if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and \n x.pid == pid and\n x.vid == vid and\n x.uid == uid and\n x.inventoried == False), None)\n \n if foundItem is not None:\n if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:\n if anOSDevice.startswith( 'tty') == True:\n # Device is a Serial USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n else:\n #Device is a plain USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n \n FNULL.close()\n\n\n # At this point, we may still not have all the found devices. So we'll fall back to using \"lsub\" to look for devices.\n # The reason they are not found is that some devices do not add an entry to /dev. However, lsusb does not give a\n # serial number\n cmd = [\"lsusb\"]\n # print(cmd)\n pid = \"\"\n vid = \"\"\n uid = \"\"\n\n # Launch udevadm for the current device name.\n FNULL = open(os.devnull, 'w')\n proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=FNULL)\n while True:\n line = proc.stdout.readline()\n if len(line) != 0:\n # print(line.rstrip())\n # Parse out the pieces of the output lines looking for the relavent information.\n parts = re.split(\"[ ]\", line.__str__())\n # print(parts)\n if len(parts) > 1:\n kvParts = re.split(\"[:]\", parts[5].__str__())\n # print(kvParts)\n # We care about procuct id, vendor id.\n vid = kvParts[0]\n pid = kvParts[1]\n\n # We found a device with a Product ID and Vendor ID. 
Is it one were expecting?\n if len(pid) > 0 and len(vid) > 0:\n self.logger.info(\n \"Checking if device with ProductID: \" + pid + \" and VendorID: \" + vid + \" is needed...\")\n foundItem = next((x for x in self.expectedDevices if\n isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) and\n x.pid == pid and\n x.vid == vid and\n x.uid == uid and\n x.inventoried == False), None)\n\n if foundItem is not None:\n if isinstance(foundItem, usb_serial_device.USBSerialDevice) == True:\n if anOSDevice.startswith('tty') == True:\n # Device is a Serial USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n else:\n # Device is a plain USB device.\n foundItem.devPath = deviceName\n foundItem.inventoried = True\n foundItem.checked = True\n\n\n else:\n break\n\n\n FNULL.close()\n\n # Here, we probe to see if any ethernet connected devices are up and listening for connections.\n while True:\n foundItem = next((x for x in self.expectedDevices if isinstance(x, (ethernet_device.EthernetDevice)) and \n x.inventoried == False and x.checked == False), None)\n if foundItem is not None:\n #socket.setdefaulttimeout(10.0)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(10.0)\n try:\n s.connect((foundItem.host, foundItem.port))\n foundItem.inventoried = True;\n except:\n foundItem.inventoried = False;\n # Okay to swallow!\n pass\n finally:\n s.close()\n foundItem.checked = True;\n else:\n break\n \n # Record what we found.\n self.logger.info(\"The following devices were inventoried:\")\n for x in self.expectedDevices:\n if x.inventoried == True:\n if isinstance(x, (usb_serial_device.USBSerialDevice, usb_device.USBDevice)) == True:\n self.logger.info(x.name + \" Device Node: \" + x.devPath)\n else:\n self.logger.info(x.name)\n self.foundDevices.append(x)",
"def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()",
"def ansible_inventory(self):\n path_inventory = u'%s/inventories/%s' % (self.ansible_path, self.environment)\n path_lib = u'%s/library/beehive/' % (self.ansible_path)\n runner = Runner(inventory=path_inventory, verbosity=self.verbosity, \n module=path_lib)\n res = runner.get_inventory()\n resp = []\n for k,v in res.items():\n resp.append({u'group':k, u'hosts':u', '.join(v)})\n self.logger.debug(u'Ansible inventory nodes: %s' % res)\n self.result(resp, headers=[u'group', u'hosts'])",
"def inventory(env):\n envs = environments()\n check_env(env, envs)\n\n headers = [] # a list of fact descriptions to go\n # in the table header\n fact_names = [] # a list of inventory fact names\n fact_data = {} # a multidimensional dict for node and\n # fact data\n\n # load the list of items/facts we want in our inventory\n try:\n inv_facts = app.config['INVENTORY_FACTS']\n except KeyError:\n inv_facts = [('Hostname', 'fqdn'),\n ('IP Address', 'ipaddress'),\n ('OS', 'lsbdistdescription'),\n ('Architecture', 'hardwaremodel'),\n ('Kernel Version', 'kernelrelease')]\n\n # generate a list of descriptions and a list of fact names\n # from the list of tuples inv_facts.\n for desc, name in inv_facts:\n headers.append(desc)\n fact_names.append(name)\n\n query = AndOperator()\n fact_query = OrOperator()\n fact_query.add([EqualsOperator(\"name\", name) for name in fact_names])\n\n if env != '*':\n query.add(EqualsOperator(\"environment\", env))\n\n query.add(fact_query)\n\n # get all the facts from PuppetDB\n facts = puppetdb.facts(query=query)\n\n for fact in facts:\n if fact.node not in fact_data:\n fact_data[fact.node] = {}\n\n fact_data[fact.node][fact.name] = fact.value\n\n return Response(stream_with_context(\n stream_template(\n 'inventory.html',\n headers=headers,\n fact_names=fact_names,\n fact_data=fact_data,\n envs=envs,\n current_env=env\n )))",
"def inventory(env):\n envs = environments()\n check_env(env, envs)\n headers, fact_names = inventory_facts()\n\n return render_template(\n 'inventory.html',\n envs=envs,\n current_env=env,\n fact_headers=headers)",
"def show_inventory(table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title by: Artist\\n')\r\n for cd in table:\r\n print(cd)\r\n\r\n print('======================================')",
"def inventory(self, time: int) -> Inventory:\n self.refreshDroneStatus(time)\n return self.__inventory",
"def print_inventory(self):\n\t\tfor item, amount in self.inventoryDictionary.items():\n\t\t\tprint (\"Item: \" + item.name + \" Quantity: \" + str(amount))\n\t\t\tprint (item.description + \"\\n\")\n\n\t\tprint(\"Currently equipped: \")\n\t\tprint(\"Main Hand: \" + self.equippedMainHand.name)\n\t\tprint(\"Armor: \" + self.equippedArmor.name)",
"def get_inventory(self, context):\n with LoggingSessionContext(context) as logger, LogCommand(\n logger, \"get_inventory\"\n ):\n api = CloudShellSessionContext(context).get_api()\n\n resource_config = FirewallResourceConfig.from_context(\n self.SHELL_NAME, context, api, self.SUPPORTED_OS\n )\n\n cli_configurator = CheckpointCliConfigurator(\n self._cli, resource_config, logger\n )\n enable_disable_snmp_flow = CheckpointEnableDisableSnmpFlow(\n cli_configurator, logger\n )\n snmp_configurator = EnableDisableSnmpConfigurator(\n enable_disable_snmp_flow, resource_config, logger\n )\n\n resource_model = FirewallResourceModel.from_resource_config(resource_config)\n\n autoload_operations = CheckpointSnmpAutoloadFlow(logger, snmp_configurator)\n logger.info(\"Autoload started\")\n response = autoload_operations.discover(self.SUPPORTED_OS, resource_model)\n logger.info(\"Autoload completed\")\n return response",
"def get_item_inventory(self, item):\n return [item_data for item_data in self.inventory if item_data['item_name'] == item]",
"def Chassis(self):\n return self._Chassis",
"def get_inventory(self):\n from noc.inv.models.object import Object\n\n return list(Object.objects.filter(data__management__managed_object=self.id))",
"def get_armor_equipped(self):\n\t\treturn self.equippedArmor",
"def display_inventory(self) -> None:\n\n print(\"Your current inventory includes:\\n\" + \" | \".join(self.player.inventory))",
"def print_inventory(self):\r\n for item in self._inventory:\r\n print(item, '\\n')",
"def get_inventory(self, context):\n # See below some example code demonstrating how to return the resource structure\n # and attributes. In real life, of course, if the actual values are not static,\n # this code would be preceded by some SNMP/other calls to get the actual resource information\n '''\n # Add sub resources details\n sub_resources = [ AutoLoadResource(model ='Generic Chassis',name= 'Chassis 1', relative_address='1'),\n AutoLoadResource(model='Generic Module',name= 'Module 1',relative_address= '1/1'),\n AutoLoadResource(model='Generic Port',name= 'Port 1', relative_address='1/1/1'),\n AutoLoadResource(model='Generic Port', name='Port 2', relative_address='1/1/2'),\n AutoLoadResource(model='Generic Power Port', name='Power Port', relative_address='1/PP1')]\n\n\n attributes = [ AutoLoadAttribute(relative_address='', attribute_name='Location', attribute_value='Santa Clara Lab'),\n AutoLoadAttribute('', 'Model', 'Catalyst 3850'),\n AutoLoadAttribute('', 'Vendor', 'Cisco'),\n AutoLoadAttribute('1', 'Serial Number', 'JAE053002JD'),\n AutoLoadAttribute('1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/1', 'Model', 'WS-X4233-GB-EJ'),\n AutoLoadAttribute('1/1', 'Serial Number', 'RVE056702UD'),\n AutoLoadAttribute('1/1/1', 'MAC Address', 'fe80::e10c:f055:f7f1:bb7t16'),\n AutoLoadAttribute('1/1/1', 'IPv4 Address', '192.168.10.7'),\n AutoLoadAttribute('1/1/2', 'MAC Address', 'te67::e40c:g755:f55y:gh7w36'),\n AutoLoadAttribute('1/1/2', 'IPv4 Address', '192.168.10.9'),\n AutoLoadAttribute('1/PP1', 'Model', 'WS-X4232-GB-RJ'),\n AutoLoadAttribute('1/PP1', 'Port Description', 'Power'),\n AutoLoadAttribute('1/PP1', 'Serial Number', 'RVE056702UD')]\n\n return AutoLoadDetails(sub_resources,attributes)\n '''\n\n self._log(context, 'Begin autoload')\n resources = []\n attributes = []\n\n\n attributes.append(AutoLoadAttribute('', 'replication_address', self.get_replication_address(context)))\n attributes.append(AutoLoadAttribute('', 'connection_key', self.get_connection_key(context)))\n\n networks = self._get_newtork_interfaces(context)\n self._log(context, 'got networks')\n\n controllers = self._get_controllers(context)\n self._log(context, 'got controllers')\n ports = self._get_ports(context)\n\n model = None\n for controller in controllers:\n self._log(context, 'Processing ctrlt: ' + controller['name'] + ':' + controller['model'])\n resources.append(AutoLoadResource(model='Generic Storage Controller', name=controller['name'],\n relative_address=controller['name']))\n if model is None:\n model = controller['model']\n\n attributes.append(AutoLoadAttribute('', 'Model', model))\n\n for network in networks:\n self._log(context, 'Processing netwk: ' + network['name'] + ':' + str(network['address']))\n net_name = network['name']\n controller = net_name.split('.')[0]\n if 'vir0' in controller or 'vir1' in controller:\n attributes.append(AutoLoadAttribute('',str(controller + '_address'), str(network['address'])))\n continue\n if 'vir' in controller:\n continue\n if 'management' not in network['services']:\n continue\n resources.append(AutoLoadResource(model='Storage Network Port', name=net_name,\n relative_address=controller.upper() + '/' + str(network['address'])))\n\n for port in ports:\n if port['iqn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='iSCSI Storage Port', name=port['name'],\n relative_address=controller + '/' + port['portal']))\n attributes.append(AutoLoadAttribute(controller + '/' + port['portal'], 'iqn', 
port['iqn']))\n elif port['wwn'] is not None:\n port_name = port['name']\n controller = port_name.split('.')[0]\n resources.append(AutoLoadResource(model='FC Storage Port', name=port['name'],\n relative_address=controller + '/' + port['name'].split('.')[1]))\n attributes.append(AutoLoadAttribute(controller + '/' + port['name'].split('.')[1], 'wwn', port['wwn']))\n\n return AutoLoadDetails(resources, attributes)"
] | [
"0.65856266",
"0.6535421",
"0.64895856",
"0.6437434",
"0.6413848",
"0.62109286",
"0.6195449",
"0.6156714",
"0.615364",
"0.6138173",
"0.5979466",
"0.58476555",
"0.58304507",
"0.5812715",
"0.57982266",
"0.5759065",
"0.57481444",
"0.574362",
"0.57305074",
"0.57303625",
"0.5718042",
"0.5715193",
"0.5698755",
"0.5685146",
"0.56757826",
"0.5666543",
"0.56563544",
"0.5636462",
"0.56249696",
"0.558639"
] | 0.82395315 | 0 |
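A minimal standalone sketch of calling the same chassis-inventory RPC through junos-eznc (PyEZ) directly, outside the Ansible connection plugin shown in the record above; the host, user, and password are placeholders, not values from this dataset.

from jnpr.junos import Device
from lxml import etree

# Hypothetical device details; any reachable Junos device with NETCONF enabled works.
with Device(host="192.0.2.1", user="admin", password="secret") as dev:
    # Same RPC as the document above; PyEZ returns an lxml element.
    inventory = dev.rpc.get_chassis_inventory()
    print(etree.tostring(inventory, pretty_print=True).decode())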
Get the RE (routing engine) name from the device. | def get_re_name(self):
return self.dev.re_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def name(self):\n return self.device.device_data[self.device_id]['name']",
"def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def name(self):\n return self._device.name",
"def name(self) -> str:\n return self.camera_info[\"device_name\"]",
"def name(self):\n return self._device.device_data[self._uuid]['name']",
"def name(self):\n return self.device.name()",
"def name(self):\n return self._device.description_pretty()",
"def get_device_name(self):\n name = self._device[\"name\"]\n if not name or name == \"--\":\n name = self._mac\n\n return name",
"def name(self) -> str:\n return self._device.name or self._device.mac",
"def name(self):\n if self._connection.location_names:\n return '{} {} {}'.format(self._device.location2, self._device.location, self._device.name)\n else:\n return self._device.name",
"def get_name() -> str:",
"def getDeviceName(self):\n name = str(nvmlDeviceGetName(self.handle))\n return name",
"def get_device_name(self, device: str) -> str | None:\n raise NotImplementedError()",
"async def async_get_device_name(self, device):\n if device not in self.last_results:\n return None\n return self.last_results[device].name",
"def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname",
"def get_device_name(self, device):\n with self.lock:\n # If not initialised and not already scanned and not found.\n if device not in self.hostname_cache:\n self.get_ddwrt_data()\n\n return self.hostname_cache.get(device, False)",
"def get_name():",
"def device_get_name(pnd):\n return _nfc.device_get_name(pnd)",
"def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")",
"def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")",
"def get_device_name(self, device):\n if not self.last_results:\n return None\n for client in self.last_results:\n if client.mac == device:\n return client.name\n return None",
"def get_char_name(self):\n return self._character_device_path.split('/')[-1]",
"def name(self):\n return self._meural_device[\"alias\"]",
"def get_name(self):\n\t\treturn call_sdk_function('PrlUsrInfo_GetName', self.handle)",
"def get_name() -> str:\n pass",
"def get_device_name(self, identity, device):\n device_info = self._get_device(identity, device)\n return device_info.get('Name', identity)",
"def get_name(self):\n return self.card_name",
"def get_device_name(self, device):\n if not self.last_results:\n return None\n for client in self.last_results:\n if client[\"mac\"] == device:\n return client[\"hostname\"]\n return None"
] | [
"0.7746113",
"0.7704492",
"0.7679899",
"0.7679899",
"0.7679899",
"0.76730424",
"0.763995",
"0.7550984",
"0.73723626",
"0.7287189",
"0.72534335",
"0.72094625",
"0.719057",
"0.7172307",
"0.7141149",
"0.7137775",
"0.7114999",
"0.7103347",
"0.6995812",
"0.69910777",
"0.69866925",
"0.69866925",
"0.69783556",
"0.6965052",
"0.6958779",
"0.6945414",
"0.6918946",
"0.6889757",
"0.6872416",
"0.6856664"
] | 0.81063586 | 0 |
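The positive document above is a one-line read of dev.re_name from the junos-eznc Device object; a hedged alternative is to read device facts instead. Fact keys vary by platform and junos-eznc version, so the keys below are only illustrative.

from jnpr.junos import Device

with Device(host="192.0.2.1", user="admin", password="secret") as dev:
    # Facts are gathered on access; 'hostname' and 'model' are common keys,
    # while RE-specific keys depend on the platform.
    print(dev.facts.get("hostname"), dev.facts.get("model"))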
Send the set chassis cluster enable RPC to the device. | def set_chassis_cluster_enable(self, cluster_id, node_id):
return self.dev.rpc.set_chassis_cluster_enable(
cluster_id=cluster_id, node=node_id,
reboot=True, normalize=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def enable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(True)",
"def enable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.ARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False",
"def set_chassis_cluster_disable(self):\n return self.dev.rpc.set_chassis_cluster_disable(\n reboot=True, normalize=True)",
"def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)",
"def cmd_enable(self, app_name=None):\n rc = self.socket_command_with_project('enable', app_name)\n return rc",
"def enable(self, enable):\n\n self._enable = enable",
"def bdev_nvme_enable_controller(client, name, cntlid):\n\n params = {'name': name}\n\n if cntlid is not None:\n params['cntlid'] = cntlid\n\n return client.call('bdev_nvme_enable_controller', params)",
"async def _cmdf_chenable(self, substr, msg, privilege_level):\n enabled_str = None\n if utils.str_says_true(substr) or (len(substr) == 0):\n self._ch_msg_isenabled = True\n enabled_str = \"enabled.\"\n else:\n self._ch_msg_isenabled = False\n enabled_str = \"disabled.\"\n self._save_settings()\n\n buf = \"In-channel greetings is now \" + enabled_str\n await self._client.send_msg(msg, buf)\n return",
"def enable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/enable\")\n\t\treturn bool(response.json[\"success\"])",
"def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)",
"def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})",
"def enable():\n request = dict(id='gbn')\n _gbn_enable(request)",
"def set_enable(self, enable):\n\n with AutoUpdater._lock:\n if isinstance(enable, Bus):\n AutoUpdater.remove_link(self._enable)\n AutoUpdater.add_link(\n enable,\n self._enable)\n else:\n raise ValueError(\n \"ERROR: Invalid Enable input. Enable must be a \"\n \"1-bit Bus or a Connector.\")",
"def _set_enable(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enable\", rest_name=\"enable\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Represents whether the user account is enabled\\n(default=true)', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='username-enable', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enable must be of a type compatible with username-enable\"\"\",\n 'defined-type': \"brocade-aaa:username-enable\",\n 'generated-type': \"\"\"YANGDynClass(base=YANGBool, default=YANGBool(\"true\"), is_leaf=True, yang_name=\"enable\", rest_name=\"enable\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Represents whether the user account is enabled\\n(default=true)', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='username-enable', is_config=True)\"\"\",\n })\n\n self.__enable = t\n if hasattr(self, '_set'):\n self._set()",
"def enable(self, *args, **kwargs):\n pass",
"def setChairmanOverride(self, channel, isEnabled, unitCode=0):\n resp = self.XAPCommand('CHAIRO', channel, (1 if isEnabled else 0), unitCode=unitCode)\n return bool(int(resp))",
"def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)",
"async def enable(self) -> None:\n try:\n await self.adguard.request(\n \"parental/enable\", method=\"POST\", data=\"sensitivity=TEEN\"\n )\n except AdGuardHomeError as exception:\n raise AdGuardHomeError(\n \"Enabling AdGuard Home parental control failed\"\n ) from exception",
"def enable_server(self, server):\n log.info(\"Enabling %s in netscaler\", server)\n return self.post(\"server?action=enable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def _set_enable(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=enable.enable, is_container='container', presence=False, yang_name=\"enable\", rest_name=\"enable\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Config Openflow Version', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"enable must be of a type compatible with container\"\"\",\n 'defined-type': \"container\",\n 'generated-type': \"\"\"YANGDynClass(base=enable.enable, is_container='container', presence=False, yang_name=\"enable\", rest_name=\"enable\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Config Openflow Version', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-openflow', defining_module='brocade-openflow', yang_type='container', is_config=True)\"\"\",\n })\n\n self.__enable = t\n if hasattr(self, '_set'):\n self._set()",
"def set_enable(self, pwd, type='secret'):\n\n if type == 'secret':\n cmd = 'enable secret %s' %(pwd)\n else:\n cmd = 'enable password %s' %(pwd)\n\n output = self.iosapi.bcp_send_config_command(self.iosapi.netmiko_session, cmd)\n self.iosapi.bcp_log(\"info\", \"(%s) set_enable : Attempting to set enable\" %(__name__))\n return(output)",
"def enable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.enable\", {})",
"async def async_turn_on(self, **kwargs: Any) -> None:\n await self._on_off_cluster_handler.turn_on()\n self.async_write_ha_state()",
"async def enable(self, ctx):\n await self.config.guild(ctx.guild).auto.set(True)\n await ctx.send(_(\"Automatic voicechannel creation enabled.\"))",
"def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = ENABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)",
"def enable(ctx):\n\n fc_group_cfg = {}\n fc_group_cfg['FLEX_COUNTER_STATUS'] = ENABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", ACL, fc_group_cfg)",
"def enable_health(self, enable_health):\n\n self._enable_health = enable_health",
"def enable(self):\n self._enabled = True",
"def enable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'enable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)",
"def enable(self):\n return self._packet.get('enable', False)\n\n # TODO: TCONT and GEM lists"
] | [
"0.64136285",
"0.63151765",
"0.6265535",
"0.62011963",
"0.5983005",
"0.5968258",
"0.591594",
"0.58644897",
"0.5831258",
"0.58006275",
"0.5789824",
"0.5748024",
"0.5725565",
"0.5712674",
"0.5682932",
"0.5681965",
"0.5678034",
"0.5669555",
"0.5667108",
"0.5664479",
"0.5662961",
"0.5614734",
"0.55643445",
"0.555434",
"0.5549915",
"0.5501807",
"0.54985917",
"0.5497459",
"0.5493028",
"0.5481944"
] | 0.7949058 | 0 |
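A sketch of invoking the same RPC without the Ansible plumbing; note that the call reboots the node, so the NETCONF session is expected to drop. The cluster_id/node values and credentials are placeholders.

from jnpr.junos import Device

with Device(host="192.0.2.1", user="admin", password="secret") as dev:
    # Equivalent to the CLI "set chassis cluster cluster-id 1 node 0 reboot".
    dev.rpc.set_chassis_cluster_enable(cluster_id="1", node="0",
                                       reboot=True, normalize=True)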
Send the set chassis cluster disable RPC to the device. | def set_chassis_cluster_disable(self):
return self.dev.rpc.set_chassis_cluster_disable(
reboot=True, normalize=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_chassis_cluster_enable(self, cluster_id, node_id):\n return self.dev.rpc.set_chassis_cluster_enable(\n cluster_id=cluster_id, node=node_id,\n reboot=True, normalize=True)",
"def do(self):\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = \"\"\n property_val = this_server.read_property(\"SdpMasterFQDN\")[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(\n const.CMD_Disable, None, self.disable_cmd_ended_cb\n )\n self.logger.debug(const.STR_DISABLE_CMS_SUCCESS)\n this_server.write_attr(\n \"activityMessage\", const.STR_DISABLE_CMS_SUCCESS, False\n )\n\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f\"{const.ERR_DISABLE_CMD_FAIL}{dev_failed}\"\n tango.Except.re_throw_exception(\n dev_failed,\n const.ERR_INVOKING_CMD,\n log_msg,\n \"SdpMasterLeafNode.DisableCommand()\",\n tango.ErrSeverity.ERR,\n )",
"def disable(self):\n try:\n self.bus.open(self.BUS_NUMBER)\n self.write(AntennaDeployerCommand.DISARM_ANTS, 0x00)\n self.bus.close()\n return True\n except:\n return False",
"async def disable(self, **kwargs) -> None: # pylint: disable=unused-argument\r\n await self.set_ena(False)",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)",
"def bdev_nvme_disable_controller(client, name, cntlid):\n\n params = {'name': name}\n\n if cntlid is not None:\n params['cntlid'] = cntlid\n\n return client.call('bdev_nvme_disable_controller', params)",
"def disable():\n request = dict(id='gbn')\n _gbn_disable(request)",
"def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})",
"def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})",
"async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"async def async_turn_off(self, **kwargs: Any) -> None:\n result = await self._on_off_cluster_handler.off()\n if result[1] is not Status.SUCCESS:\n return\n self._state = False\n self.async_write_ha_state()",
"def disable_server(self, server):\n log.info(\"Disabling %s in netscaler\", server)\n return self.post(\"server?action=disable\", {\"server\": {\"name\": server}}, content_type=self.content_type(\"server\"))",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)",
"async def async_turn_off(self, **kwargs: Any) -> None:\n await self._on_off_cluster_handler.turn_off()\n self.async_write_ha_state()",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", PORT_BUFFER_DROP, port_info)",
"def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc",
"def disable(self):\n if not self.labExperiment:\n super().disable()\n else:\n self.zero()\n self.connection.query('close_dm')\n print(\"'BM1k' is now disbaled\")",
"def set_all_ports_admin_disabled(self):\n pass",
"def deactivate_cluster_parcel(self, cluster_name, product, version):\n return self._post(endpoint=('{}/clusters/{}/parcels/products/{}/'\n 'versions/{}/commands/deactivate').format(self.api_version,\n cluster_name,\n product,\n version)).json()",
"def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])",
"def disable(self):",
"async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()",
"def port_disable(self, port_num: int) -> None:\n raise NotImplementedError",
"def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Database.disable\", {})",
"def maintainance(self, on_off, instance_type):\n print((\"enabling\" if on_off else \"disabling\") + \" Maintainer mode\")\n tries = 60\n while True:\n reply = self.send_request(\n instance_type,\n requests.put,\n \"/_admin/cluster/maintenance\",\n '\"on\"' if on_off else '\"off\"',\n )\n if len(reply) > 0:\n print(\"Reply: \" + str(reply[0].text))\n if reply[0].status_code == 200:\n return\n print(f\"Reply status code is {reply[0].status_code}. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n else:\n print(\"Reply is empty. Sleeping for 3 s.\")\n time.sleep(3)\n tries -= 1\n if tries <= 0:\n action = \"enable\" if on_off else \"disable\"\n raise Exception(f\"Couldn't {action} maintainance mode!\")",
"def disable_cable_ports(cid):\n\n SQL.execute('''\n SELECT \n cpid,\n guid,\n port,\n hca\n FROM \n cable_ports \n WHERE\n cid = ?\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall(): \n if row['hca']:\n vlog(1, 'ignoring request to disable HCA p%s.' % (row['cpid']))\n continue\n\n if not DISABLE_PORT_STATE_CHANGE: \n ib_mgt.disable_port(int(row['guid']), int(row['port']))\n\n SQL.execute('''\n UPDATE\n cables \n SET\n online = 0\n WHERE\n cid = ?\n ;''', (\n cid,\n ));",
"def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)",
"def _nixie_disable():\n # type: () -> None\n GPIO.output(NIXIE_nOE, GPIO.HIGH)",
"def disable(self):\n self.error_code = 'DISABLED'\n self.running = False",
"def disable(ctx):\n\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = DISABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", PG_DROP, port_info)"
] | [
"0.6368087",
"0.63586265",
"0.5982028",
"0.59609485",
"0.5881299",
"0.5879154",
"0.585808",
"0.58371866",
"0.58231795",
"0.5772776",
"0.5765652",
"0.57600576",
"0.57569",
"0.57289124",
"0.57201433",
"0.5612662",
"0.55817485",
"0.5572804",
"0.55690765",
"0.55593354",
"0.55563074",
"0.5532201",
"0.55176526",
"0.5511893",
"0.55006033",
"0.5485225",
"0.5483774",
"0.5465402",
"0.5462",
"0.5460829"
] | 0.8469492 | 0 |
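The disable counterpart follows the same pattern; again the device reboots, so the session closes once the RPC is accepted. Connection details are placeholders.

from jnpr.junos import Device

with Device(host="192.0.2.1", user="admin", password="secret") as dev:
    # Equivalent to the CLI "set chassis cluster disable reboot".
    dev.rpc.set_chassis_cluster_disable(reboot=True, normalize=True)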
Invoke JSNAPy over the persistent connection. | def invoke_jsnapy(self, data, action):
try:
self.queue_message("vvvv", "Creating jnpr.jsnapy.SnapAdmin instance.")
jsa = jnpr.jsnapy.SnapAdmin()
self.queue_message("vvvv", 'Executing %s action.' % action)
if action == 'check':
responses = jsa.check(data=data,
dev=self.dev,
pre_file='PRE',
post_file='POST')
elif action == 'snapcheck':
responses = jsa.snapcheck(data=data,
dev=self.dev,
file_name='PRE')
elif action == 'snap_pre':
responses = jsa.snap(data=data,
dev=self.dev,
file_name='PRE')
elif action == 'snap_post':
responses = jsa.snap(data=data,
dev=self.dev,
file_name='POST')
else:
raise AnsibleError("Unexpected action: %s." % (action))
self.queue_message("vvvv", 'The %s action executed successfully' % action)
except (pyez_exception.RpcError, pyez_exception.ConnectError) as ex:
raise AnsibleError("Error communicating with the device: %s" % str(ex))
results = {}
if isinstance(responses, list) and len(responses) == 1:
if action in ('snapcheck', 'check'):
for response in responses:
results['device'] = response.device
results['router'] = response.device
results['final_result'] = response.result
results['total_passed'] = response.no_passed
results['total_failed'] = response.no_failed
results['test_results'] = response.test_results
total_tests = int(response.no_passed) + int(response.no_failed)
results['total_tests'] = total_tests
pass_percentage = 0
if total_tests > 0:
pass_percentage = ((int(response.no_passed) * 100) //
total_tests)
results['passPercentage'] = pass_percentage
results['pass_percentage'] = pass_percentage
if results['final_result'] == 'Failed':
results['msg'] = 'Test Failed: Passed %s, Failed %s' % \
(results['total_passed'],
results['total_failed'])
else:
results['msg'] = 'Test Passed: Passed %s, Failed %s' % \
(results['total_passed'],
results['total_failed'])
elif action in ('snap_pre', 'snap_post'):
results['msg'] = "The %s action successfully executed." % (action)
else:
raise AnsibleError("Unexpected JSNAPy responses. Type: %s."
"Responses: %s" %
(type(responses), str(responses)))
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connect_to_master():",
"def connect():",
"def _connect_to_rdr_replica(self):\n self.gcp_env.activate_sql_proxy(replica=True)\n self.db_conn = self.gcp_env.make_mysqldb_connection()",
"def connect(dbapi_connection, connection_record):\n connection_record.info['pid'] = os.getpid()",
"def run(self):\n self.connect()",
"def exec_connect(provider):\n provider._query_provider._loaded = True\n provider._query_provider._connected = True\n provider.connect()",
"def maintainConnection():\n return RoboCaller().call(\"maintainConnection\", \"void\")",
"def __init__(self):\n self.try_to_connect()",
"def snap(target=None):\n global SESSION\n logger.debug('snap : {}'.format(target))\n if (SESSION and target and SESSION.modelview and SESSION.modelview.ready):\n SESSION.modelview.init(target)\n\n elif not SESSION:\n start(target)",
"def connect_subproc():\n return factory.connect_subproc([sys.executable, \"-u\", SERVER_FILE, \"-q\", \"-m\", \"stdio\"], \n SlaveService)",
"def do_connect(self, line):\n print \"\\n connecting to MongoDB backend ...\\n\"",
"def _connect(self):\n self.queue_message(\"log\", \"ssh connection done, starting junos-eznc\")\n self.open()\n if not self.dev.connected:\n return 1, b\"\", b\"not connected\"\n\n self._connected = True\n\n super(Connection, self)._connect()\n\n self._sub_plugin = {\"name\": \"pyez\", \"obj\": self.dev}\n self.queue_message(\n \"vvvv\",\n \"created pyez connection type\"\n )\n return (\n 0,\n to_bytes(self.dev._conn.session_id, errors=\"surrogate_or_strict\"),\n b\"\",\n )",
"def onSlave(self):",
"def connect():\n if not is_notebook():\n print('Python session is not running in a Notebook Kernel')\n return\n\n global _comm\n\n kernel = get_ipython().kernel\n kernel.comm_manager.register_target('tdb', handle_comm_opened)\n # initiate connection to frontend.\n _comm = Comm(target_name='tdb', data={})\n # bind recv handler\n _comm.on_msg(None)",
"async def _connect(self):\n pass",
"def connect(self, *args, **kwargs):",
"def _connect_async(self):\n self._pgconn = libpq.PQconnectStart(ascii_to_bytes(self.dsn))\n if not self._pgconn:\n raise exceptions.OperationalError('PQconnectStart() failed')\n elif libpq.PQstatus(self._pgconn) == libpq.CONNECTION_BAD:\n raise self._create_exception()\n\n libpq.PQsetNoticeProcessor(\n self._pgconn, self._notice_callback, ffi.NULL)",
"def first_connect(self, dbapi_connection, connection_record):",
"def connect_syndicate( username=CONFIG.SYNDICATE_OPENCLOUD_USER, password=CONFIG.SYNDICATE_OPENCLOUD_PASSWORD, user_pkey_pem=CONFIG.SYNDICATE_OPENCLOUD_PKEY ):\n debug = True \n if hasattr(CONFIG, \"DEBUG\"):\n debug = CONFIG.DEBUG\n \n client = syntool.Client( username, CONFIG.SYNDICATE_SMI_URL,\n password=password,\n user_pkey_pem=user_pkey_pem,\n debug=debug )\n\n return client",
"def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])",
"def connect():\n\n crate = get_crate()\n crate.mch_comms.ipmitool_shell_connect()",
"def connect(self):\n\t\tpass",
"def connect(self) -> None:",
"def snap_run(args):\n\n ## TODO: link snap num to host name\n\n logger.debug(\"ETCD config file: \"+args.etcd_config_file)\n etcd_params = read_yaml(args.etcd_config_file)\n logger.debug(\"CORR config file: \"+args.corr_config_file)\n logger.debug(\"HOST SNAP: \"+args.host_snap)\n logger.debug(\"SNAP NUMBER: \"+str(args.snap_num))\n\n delay_params = read_yaml(args.delay_config_file)\n delays = (np.asarray(delay_params['cal_solutions']['delays']).ravel())[(args.snap_num-1)*6:(args.snap_num)*6]\n ants = np.asarray(delay_params['cal_solutions']['antenna_order'])\n \n logger.info(\"snap.py.snap_run() creatting process to handle snap: {}\".format(args.host_snap))\n my_snap = dsaX_snap.dsaX_snap(args.host_snap,args.corr_config_file,number=args.snap_num)\n\n etcd_host, etcd_port = parse_endpoint(etcd_params['endpoints'])\n logger.info(\"snap.py.snap_run() etcd host={}, etcd port={}\".format(etcd_host, etcd_port))\n etcd = etcd3.client(host=etcd_host, port=etcd_port)\n watch_ids = []\n keym = '/mon/snap/' + str(args.snap_num) + '/armed_mjd'\n keym3 = '/mon/snap/' + str(args.snap_num) + '/delays'\n keym4 = '/mon/snap/' + str(args.snap_num) + '/antenna_order'\n keym2 = '/mon/snap/' + str(args.snap_num)\n\n \n # add watch on cmd for snapnum\n cmd = etcd_params['snap_command'] + str(args.snap_num)\n logger.info('snap.py.snap_run() watch cmd= {}'.format(cmd))\n\n\n watch_id = etcd.add_watch_callback(cmd, process_command(my_snap,etcd,keym,keym2,delays,ants,keym3,keym4))\n watch_ids.append(watch_id)\n\n # add watch on cmd for snap 0\n cmd = etcd_params['snap_command'] + str(0)\n logger.info('snap.py.snap_run() watch cmd= {}'.format(cmd))\n watch_id = etcd.add_watch_callback(cmd, process_command(my_snap,etcd,keym,keym2,delays,ants,keym3,keym4))\n watch_ids.append(watch_id)\n\n \n # main loop\n while True:\n \n #md = my_snap.get_monitor_data()\n #if md!=-1:\n # etcd.put(key, md)\n sleep(10)",
"def mock_conn(self, VERBOSE):\n return psycopg2.connect(**self.postgresql.dsn())",
"def connect(self, ac_name = None):\n\t\twebnotes.conn = webnotes.db.Database(user = self.get_db_name(), \\\n\t\t\tpassword = getattr(conf,'db_password', ''))",
"def _start(self):\n \n self.logger.msg1(\"Starting Phenoscoring - \" + self.config.action)\n return self.dbpath, self.config",
"def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise",
"def __init__(self, jsock):\n self.jsock = jsock",
"def connect(self, *args, **kw):\n\n return self.get_pool(*args, **kw).connect()"
] | [
"0.5772906",
"0.5252777",
"0.5099669",
"0.50921255",
"0.4938454",
"0.49262697",
"0.4921192",
"0.4911013",
"0.48786032",
"0.48148036",
"0.47887895",
"0.47836024",
"0.47610974",
"0.47570044",
"0.47287473",
"0.47155914",
"0.46973544",
"0.46935",
"0.46934238",
"0.46922463",
"0.4690862",
"0.46784154",
"0.46654204",
"0.46584153",
"0.4655413",
"0.46519113",
"0.46511889",
"0.46504992",
"0.46149957",
"0.460497"
] | 0.52720755 | 1 |
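A hedged sketch of driving JSNAPy's SnapAdmin directly, mirroring the 'snapcheck' branch of the document above; the inline YAML and the referenced test file are hypothetical, and the attributes printed are the same ones the document reads (device, result, no_passed, no_failed).

from jnpr.junos import Device
from jnpr.jsnapy import SnapAdmin

# Hypothetical JSNAPy config; 'tests/test_version.yml' must exist and
# contain the actual test definitions.
TEST_CONFIG = """
tests:
  - tests/test_version.yml
"""

with Device(host="192.0.2.1", user="admin", password="secret") as dev:
    jsa = SnapAdmin()
    # Snapshot and evaluate in one step, as in the 'snapcheck' action above.
    for response in jsa.snapcheck(data=TEST_CONFIG, dev=dev, file_name="PRE"):
        print(response.device, response.result,
              response.no_passed, response.no_failed)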
Roll back the device configuration to the specified rollback id. Assumes the configuration is already opened. Does NOT commit the configuration. | def rollback_configuration(self, id):
if self.dev is None or self.config is None:
raise AnsibleError('The device or configuration is not open.')
if id == 'rescue':
self.queue_message("log", "Rolling back to the rescue configuration.")
try:
self.config.rescue(action='reload')
self.queue_message("log", "Rescue configuration loaded.")
except (self.pyez_exception.RpcError,
self.pyez_exception.ConnectError) as ex:
                raise AnsibleError('Unable to load the rescue configuration: '
'%s' % (str(ex)))
elif id >= 0 and id <= 49:
self.queue_message("log", "Loading rollback %d configuration.", id)
try:
self.config.rollback(rb_id=id)
self.queue_message("log", "Rollback %d configuration loaded.", id)
except (self.pyez_exception.RpcError,
self.pyez_exception.ConnectError) as ex:
                raise AnsibleError('Unable to load the rollback %d '
                                   'configuration: %s' % (id, str(ex)))
else:
            raise AnsibleError('Unrecognized rollback configuration value: %s'
% (id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def savepoint_rollback(self, id):\n self.execute(\"ROLLBACK TO SAVEPOINT {}\".format(id))",
"def rollback_workflow(self, execution_id):\n raise NotImplementedError",
"def rollback(self):\n if self._transaction is None:\n raise TransactionNotStartedError(\"Cannot call rollback without a transaction\")\n else:\n def _resetTxn(result):\n self._transaction = None\n d = self._config.rollback(self._transaction)\n d.addCallback(_resetTxn)\n return d",
"def rollback(commit_id):\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n maintenance_up()\n checkout_latest()\n git_reset(commit_id)\n gzip_assets()\n deploy_to_s3()\n maintenance_down()",
"def rollback(self, target_revision_id):\n url = DeckhandClient.get_path(\n DeckhandPaths.ROLLBACK\n ).format(target_revision_id)\n\n response = self._post_request(url)\n self._handle_bad_response(response)",
"def undo_configure(self, sub_array_id: int):\n subarray_name = self._tel.tm.subarray(sub_array_id)\n subarray = con_config.get_device_proxy(subarray_name)\n self._log(f\"commanding {subarray_name} with End command\")\n subarray.command_inout(\"End\")",
"def rollback(self):\n\n if not self.is_active:\n return\n\n if self.is_context_active:\n raise states.RolledBack(self)\n else:\n self.__do_rollback()\n self._cleanup()",
"def rollback(self):\n self._rollback = True",
"def rollback(self):\n raise NotImplementedError",
"def rollback(self, rollback_to):\n raise NotImplementedError",
"def rollback(self):\n try:\n if self._cur_batch:\n self._cur_batch.rollback()\n except ValueError:\n # ignore \"Batch must be in progress to rollback\" error\n pass\n self._cur_batch = None\n self._num_mutations = 0",
"def restore_config(self):\n self._clear_previous_windows_assigment()\n self._restart_i3_config()",
"def rollback(self):\n pass",
"def _do_rollback(self):\n self.backend.rollback()",
"def rollback(self):\n self.conn.rollback()",
"def cancel_execution_with_rollback(self, execution_id: str):\n execution_url = self.get_execution_url(execution_id)\n try:\n self.logger.info(\"Canceling SSM execution: {}\".format(execution_url))\n self.ssm_client.stop_automation_execution(AutomationExecutionId=execution_id, Type='Cancel')\n self.wait_for_execution_completion(execution_id)\n rollback_execution_id = self.get_step_output(execution_id, constants.rollback_step_name,\n constants.rollback_execution_id_output_name)\n if rollback_execution_id:\n rollback_execution_url = self.get_execution_url(rollback_execution_id)\n self.logger.info(f\"Waiting [RollbackExecution] completed SSM execution: {rollback_execution_url}\")\n self.wait_for_execution_completion(rollback_execution_id)\n except ClientError as e:\n self.logger.error(\"Failed to cancel SSM execution [%s] due to: %s\", execution_url, e.response)\n raise e",
"def rollback(self, project_id, transaction):\n request_pb = _datastore_pb2.RollbackRequest(\n project_id=project_id, transaction=transaction\n )\n # Response is empty (i.e. no fields) but we return it anyway.\n return _rpc(\n self.client._http,\n project_id,\n \"rollback\",\n self.client._base_url,\n self.client._client_info,\n request_pb,\n _datastore_pb2.RollbackResponse,\n )",
"def undo_abort(self, sub_array_id: int):\n subarray_name = self._tel.tm.subarray(sub_array_id)\n subarray = con_config.get_device_proxy(subarray_name)\n self._log(f\"commanding {subarray_name} with Restart command\")\n subarray.command_inout(\"Restart\")",
"def RollbackVdisk(self, data, vdiskid, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks/\"+vdiskid+\"/rollback\"\n return self.client.post(uri, data, headers, query_params, content_type)",
"def remove_trap_config(self, context, storage_id, trap_config):\n # Currently not implemented\n pass",
"def rollback(self):\n self._connection.execute_nonquery(\"sql\", \"ROLLBACK\", True)",
"def rollback(self, dry_run=False, force=False):\n eh = SimpleErrorHandler()\n\n out = self._client.execute('rollback', n=dry_run, f=force, eh=eh)\n\n return bool(eh)",
"def rollback(self) -> None:\n with self.lock:\n self.wait(self._rollback_gen())",
"def restore_running_config(device, path, file, timeout=60):\n try:\n device.execute(\n \"copy {path}{file} running-config replace\".format(path=path, file=file),\n timeout=timeout\n )\n except SubCommandFailure as e:\n raise SubCommandFailure(\n \"Could not replace saved configuration on \"\n \"device {device}\\nError: {e}\".format(device=device.name, e=str(e))\n )",
"def rollback(self):\n self.db.rollback()",
"async def rollback(self):\n if not self._active:\n raise ProfileSessionInactiveError()\n await self._teardown(commit=False)\n self._active = False",
"def rollback(self):\r\n self.db.rollback()",
"def rollback_one(self, migration, force=False):\n logger.info(\"Rolling back %s\", migration.id)\n self.ensure_internal_schema_updated()\n migration.process_steps(self, \"rollback\", force=force)\n self.log_migration(migration, \"rollback\")\n with self.transaction():\n self.unmark_one(migration, log=False)",
"def rollback(self) -> None:\n if self._transaction is None:\n pass\n else:\n self._transaction.rollback(_to_root=True)",
"def restore_config(self, config):\n for src in config:\n # Remove .prev\n dst, _ = splitext(src)\n LOGGER.debug('Restoring %s from %s', dst, src)\n shutil.move(src, dst)"
] | [
"0.6042772",
"0.5595296",
"0.55862564",
"0.55803925",
"0.5574582",
"0.5410788",
"0.5168469",
"0.51220083",
"0.50194913",
"0.49942717",
"0.49847025",
"0.49685273",
"0.49579942",
"0.49227747",
"0.48944387",
"0.4886762",
"0.48730212",
"0.48716104",
"0.48346812",
"0.4829784",
"0.48218244",
"0.4820792",
"0.48133853",
"0.4802752",
"0.47855988",
"0.4781371",
"0.47775742",
"0.47560626",
"0.47336185",
"0.47217634"
] | 0.8159079 | 0 |
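A standalone sketch of the same rollback flow with PyEZ's Config utility: load a rollback into the candidate configuration, inspect the diff, and discard it without committing. The rb_id, host, and credentials are placeholders.

from jnpr.junos import Device
from jnpr.junos.utils.config import Config

with Device(host="192.0.2.1", user="admin", password="secret") as dev:
    cu = Config(dev)
    cu.rollback(rb_id=1)   # load rollback 1 into the candidate (no commit)
    print(cu.diff())       # show what would change; None if identical
    cu.rollback()          # rollback 0: discard the loaded changes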
Check the candidate configuration. Assumes the configuration is already opened. Performs the equivalent of a "commit check", but does NOT commit the configuration. | def check_configuration(self):
try:
self.config.commit_check()
self.queue_message("log", "Configuration checked.")
except (self.pyez_exception.RpcError,
self.pyez_exception.ConnectError) as ex:
            raise AnsibleError('Failure checking the configuration: %s' %
(str(ex))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_configuration(self):\n self.ensure_one()\n getattr(self, '%s_check_configuration' % self.provider, lambda: None)()",
"def check_config(cfg):",
"def check_config():\n\n if not config_instance:\n LOG.error(\"Failed to load the config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"CONFIG_VERSION\"):\n LOG.warning( \"The config file does not specify CONFIG_VERSION! I will \"\n \"try to continue anyway, but this field is recommended to allow \"\n \"some internal tests to work. I will assume the value '(1,0)'!\" )\n config_instance.CONFIG_VERSION = (1, 0)\n\n major, minor = config_instance.CONFIG_VERSION\n expected_major, expected_minor = EXPECTED_CONFIG_VERSION\n\n if major < expected_major:\n LOG.critical(\"The config system has undergone a major change! \"\n \"I cannot continue without an upgrade!\")\n sys.exit(9)\n\n if minor < expected_minor:\n LOG.warning(\"The config system has undergone a minor change! \"\n \"It should work, but you still should review the docs!\")\n\n if major == expected_major and minor == expected_minor:\n LOG.debug( \"Config version OK!\" )\n\n if not hasattr(config_instance, \"GENERATORS\"):\n LOG.critical(\"Variable 'GENERATORS' not found in config!\")\n sys.exit(9)\n\n if not hasattr(config_instance, \"TARGETS\"):\n LOG.critical(\"Variable 'TARGETS' not found in config!\")\n sys.exit(9)",
"def check_config(config):\n pass",
"def check_configs(self):\n\n pass",
"def _check_config(self):",
"def check_config(self):\n try:\n config_metadata = self.dbc.get_metadata(\"config.txt\")\n except rest.ErrorResponse:\n print str(datetime.datetime.now()) \\\n + \": No config.txt in Dropbox directory. Exiting.\"\n sys.exit()\n if config_metadata[\"modified\"] != self.config_date:\n print str(datetime.datetime.now()) + \": Config changed\"\n self.config_date = config_metadata[\"modified\"]\n try:\n self.dbc.get_file(\"config.txt\")\n except rest.ErrorResponse as e:\n print str(datetime.datetime.now()) + e.reason\n return False\n self.config.reload(self.local_directory + \"/\" + \"config.txt\")\n return True\n return False",
"def check_configuration(self):\n\n return bool(os.path.isfile(self.config_path) and\n self.validate_configuration_file())",
"def check_configuration(self, configuration):\n super(Hipchap, self).check_configuration(configuration)",
"def verifyConfiguration(self):\n logEvent = \"%sverify\" % self._loggingPrefix\n self._eventLogger.eventBegin(logEvent)\n\n FaultCohesive.verifyConfiguration(self)\n Integrator.verifyConfiguration(self)\n ModuleFaultCohesiveKin.verifyConfiguration(self, self.mesh())\n\n for eqsrc in self.eqsrcs.components():\n eqsrc.verifyConfiguration()\n \n self._eventLogger.eventEnd(logEvent)\n return",
"def checkconfig(self): \n validconfig = {\n 'loglevel': lambda s: s in self.loglevels,\n 'logfilelevel': lambda s: s in self.loglevels,\n 'nodes': lambda s: isinstance(s, list),\n 'pynodes': lambda s: isinstance(s, list)\n }\n alive = True\n for key in self.config: \n if (key in validconfig and \n not validconfig[key](self.config[key])):\n logging.critical(\"Invalid configuration option {}: {}\".format(\n key, self.config[key]))\n alive = False\n return alive",
"def check_config(outconfig):\n self.log.info(\"Checking if all the necessary files exist.\")\n\n # Perform necessary checks\n\n log.info(\"All necessary files exist for {} configuration.\".format(outconfig[\"Flavor\"]))\n\n return",
"def check_config(self):\n # Check if tool is at all included in workflow\n if \"external\" not in self.config[\"tools\"][\"dna\"]:\n return # External not run, don't check configuration # pragma: no cover",
"def check_configuration(self, configuration):\n super(Pixiv_bot, self).check_configuration(configuration)",
"def __check_config(self):\n if not os.path.exists(self.__config_path):\n return False\n else:\n return True",
"def commit_check(ctx):\n result = ctx.run(f\"{VENV_PREFIX} cz check --rev-range master..\", warn=True)\n if result.exited == 3: # NO_COMMIT_FOUND\n exit(0)\n else:\n exit(result.exited)",
"def check_config(self):\n # Check if tool is at all included in workflow\n if self.__class__.name not in self.config[\"tools\"][\"rna\"]:\n return # STAR not run, don't check configuration # pragma: no cover\n\n # Check required configuration settings present\n self.parent.ensure_w_config(\n config_keys=(\"step_config\", \"ngs_mapping\", \"star\", \"path_index\"),\n msg=\"Path to STAR index is required\",\n )\n self.parent.ensure_w_config(\n config_keys=(\"static_data_config\", \"reference\"),\n msg=\"No reference genome FASTA file given\",\n )\n\n # Check validity of the STAR index\n full_path = self.config[\"star\"][\"path_index\"]\n # a lot of files should be in this dir, justtest these\n for indfile in (\"Genome\", \"SA\", \"SAindex\"):\n expected_path = os.path.join(full_path, indfile)\n if not os.path.exists(expected_path): # pragma: no cover\n tpl = \"Expected STAR index file {expected_path} does not exist!\".format(\n expected_path=expected_path\n )\n raise InvalidConfiguration(tpl)",
"def check_config(self):\n # Check if tool is at all included in workflow\n if \"gatk_post_bam\" not in (self.config[\"postprocessing\"] or []): # pylint: disable=C0325\n return # GATK BAM postprocessing not enabled, skip\n\n # Check required configuration settings present\n self.parent.ensure_w_config(\n config_keys=(\"step_config\", \"ngs_mapping\", \"gatk_post_bam\", \"paths_known_sites\"),\n msg=\"Known sites list cannot be empty for GATK BAM postprocessing\",\n )\n self.parent.ensure_w_config(\n config_keys=(\"static_data_config\", \"reference\", \"path\"),\n msg=\"Path to reference FASTA required for GATK BAM postprocessing\",\n )",
"def check(self):\r\n self._check_object(self._config.name)",
"def check_cableconfig(self):\n for circuit in self.circuits:\n circuit.build_cableconfig()\n if not self.circuits[0].cableconfig.equals(circuit.cableconfig):\n return False\n return True",
"def antenny_config_check(self):\n return self.antenny_config.check()",
"def check_config(self):\n # Check if tool is at all included in workflow\n if self.__class__.name not in self.config[\"tools\"][\"dna\"]:\n return # BWA not run, don't check configuration # pragma: no cover\n\n # Check required configuration settings present\n self.parent.ensure_w_config(\n config_keys=(\"step_config\", \"ngs_mapping\", \"bwa\", \"path_index\"),\n msg=\"Path to BWA index is required\",\n )\n\n # Check that the path to the BWA index is valid.\n for ext in (\".amb\", \".ann\", \".bwt\", \".pac\", \".sa\"):\n expected_path = self.config[\"bwa\"][\"path_index\"] + ext\n if not os.path.exists(expected_path): # pragma: no cover\n tpl = \"Expected BWA input path {expected_path} does not exist!\".format(\n expected_path=expected_path\n )\n raise InvalidConfiguration(tpl)",
"def check(self):\r\n PreparationAction.check(self)\r\n if self._config.has_key('release'):\r\n self._check_object(str(self._config['release']))\r\n else:\r\n raise Exception(\"'release' property is not defined for %s\" % self._config.name)\r\n\r\n for task in self.__get_tasks():\r\n self._check_object(\"Task %s\" % task)\r\n for folder in self.__get_folders():\r\n self._check_object(\"Folder %s\" % folder)\r\n \r\n for project in self.__get_subbaselines():\r\n self._check_object(project)\r\n\r\n if (not os.path.exists(self._config['dir'])):\r\n os.makedirs(self._config['dir'])\r\n \r\n # checking if the purpose exists\r\n if self._config.has_key('purpose'):\r\n session = self.get_session()\r\n purposes = session.purposes()\r\n if purposes.has_key(str(self._config['purpose'])):\r\n _logger.info(\"Checking purpose '%s'...Ok\" % (self._config['purpose']))\r\n else:\r\n _logger.info(\"Checking purpose '%s'...Not Found!\" % (self._config['purpose']))\r\n raise Exception(\"Could not find purpose %s in the database.\" % self._config['purpose'])\r\n \r\n role = session.role\r\n co_role = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n _logger.info(\"Try to switch user to role: %s\" % co_role)\r\n session.role = co_role\r\n session.role = role",
"def post_config_checks(self):\n\n\t\tif self.host is not None:\n\t\t\tself.tell(\"Doing post-config checks\")\n\n\t\tself.do_checklist([])",
"def check_config(self):\n cfgs = self.__get() \n \n for option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n \n if not cfgs.has_key(option):\n self.log.warn(\"Parameter '%s' is missing in '%s', using default('%s')\" % \\\n (option, self.config_file, _default))\n _file = _default\n else:\n _file = cfgs[option]\n Config.FILE_OPTIONS[option] = _file\n\n if not os.path.exists(_file) and not os.path.isfile(_file):\n self.log.error(\"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing file '%s')\" % \\\n (option, _file))\n\n\n for option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n \n if not cfgs.has_key(option):\n self.log.warn(\"Parameter '%s' is missing in '%s', using default('%s')\" % \\\n (option, self.config_file, _default))\n _dir = _default\n else:\n _dir = cfgs[option]\n Config.PATH_OPTIONS[option] = _dir\n\n if not os.path.exists(_dir) and not os.path.isdir(_dir):\n self.log.error(\"Paramenter '%s' points to non-existing directory '%s')\" % \\\n (option, _dir))\n raise ConfigError('File Error', \"Paramenter '%s' points to non-existing directory '%s')\" % \\\n (option, _dir))\n\n \n Config.DB_SFT_OPTIONS['sqlalchemy_sft.url'] = cfgs['sqlalchemy_sft.url']\n Config.DB_NAGIOS_OPTIONS['sqlalchemy_nagios.url'] = cfgs['sqlalchemy_nagios.url']\n\n self.log.debug(\"Configuration successfully checked\")",
"def validate_config(self, changed):\n logger.debug(\"[%s] Validating config (Legacy path)\", self.name)\n if not self.to_validate(changed):\n return\n # Validate (Legacy Path)\n from noc.cm.engine import Engine\n\n engine = Engine(self)\n try:\n engine.check()\n except: # noqa\n logger.error(\"Failed to validate config for %s\", self.name)\n error_report()",
"def config_in_use(self) -> bool:\n config = {}\n config_files = find_config_files([self.config_dir])\n for config_file in config_files:\n with open(config_file) as stream:\n config.update(yaml.safe_load(stream))\n\n if not config:\n return False\n\n print(config.get(CONFIG_LOCK))\n return config.get(CONFIG_LOCK, True)",
"def is_valid(self) -> bool:\n if not hasattr(self, \"_check_v{}\".format(self.version)):\n self.console.error(\"Unknown configuration version\")\n return False\n\n return getattr(self, \"_check_v{}\".format(self.version))(start_here=True)",
"def check_configs():\n configs = [os.path.join(CUCKOO_ROOT, \"conf\", \"cuckoo.conf\"),\n os.path.join(CUCKOO_ROOT, \"conf\", \"reporting.conf\")]\n\n for config in configs:\n if not os.path.exists(config):\n raise CuckooStartupError(\"Config file does not exist at path: %s\" % config)\n\n return True",
"def check(self) -> None:\n # check existence\n self.check_key_exists()\n\n # validate training config\n TrainConfigValidator(self.config[\"TRAIN_CONFIG\"], log=False).check()\n # if different training policy at prune is not specified\n if \"TRAIN_CONFIG_AT_PRUNE\" not in self.config:\n self.config[\"TRAIN_CONFIG_AT_PRUNE\"] = self.config[\"TRAIN_CONFIG\"]\n TrainConfigValidator(self.config[\"TRAIN_CONFIG_AT_PRUNE\"], log=False).check()\n\n # validate prune config\n self.check_prune_methods()\n\n # if SEED is not specified, set it same as training config's SEED\n if \"SEED\" not in self.config:\n self.config[\"SEED\"] = self.config[\"TRAIN_CONFIG\"][\"SEED\"]\n\n assert 0 < self.config[\"N_PRUNING_ITER\"]\n assert isinstance(self.config[\"N_PRUNING_ITER\"], int)"
] | [
"0.66688347",
"0.6636353",
"0.66157126",
"0.65205115",
"0.6457699",
"0.63909507",
"0.63631624",
"0.62421334",
"0.6215764",
"0.59953415",
"0.59659874",
"0.5956047",
"0.59328073",
"0.5863985",
"0.5859425",
"0.5851761",
"0.58481836",
"0.58206636",
"0.5782035",
"0.5770281",
"0.5767882",
"0.5720205",
"0.57153636",
"0.56810397",
"0.56673586",
"0.56496894",
"0.5647616",
"0.5640384",
"0.56328297",
"0.5604496"
] | 0.7320109 | 0 |
The possible initial states warped walkers may assume. | def initial_states(self):
return self._initial_states | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_initial_states(self):\n raise NotImplementedError()",
"def initial_state(self):\n return 0",
"def iter_initial_states(self):\n from six.moves import filter\n return filter(lambda s:s.is_initial, self.iter_states())",
"def states_initial(self):\n return self.states(\"Initial = YES\")",
"def initial_states(self):\n return list(self.iter_initial_states())",
"def initial_state(self) -> Tuple[ODState, int]:\n return self.initial, self.initial.construction_cost + len(self.initial.agents)",
"def get_initial_states(self):\n return product(*[phi.automaton().states.initial for phi in self])",
"def initial_state(self):\n\n return WorldState([[-1, -1], [1, 1], -1])",
"def reset(self):\n self.goal = random.randint(0, len(self.homes) - 1)\n\n curr_location = (random.randint(0, self.size-1), random.randint(0, self.size-1))\n bad_starts = self.homes + [self.store]\n\n while curr_location in bad_starts:\n curr_location = (random.randint(0, self.size - 1), random.randint(0, self.size - 1))\n\n self.curr_state = self.encode(*curr_location, 0, self.goal)\n return self.curr_state",
"def null_heuristic(state, problem=None):\r\n return 0",
"def preferred_init_points(self):\n if self._initial_state is None:\n return None\n else:\n # If an initial state was set by the user, then we want to make sure that the VQE does\n # not start from a random point. Thus, we return an all-zero initial point for the\n # optimizer which is used (unless it gets overwritten by a higher-priority setting at\n # runtime of the VQE).\n # However, in order to determine the correct length, we must build the QuantumCircuit\n # first, because otherwise the operators may not be set yet.\n self._build()\n return np.zeros(self.reps * len(self.operators), dtype=float)",
"def initialstate(self):\n return self.problem.initialstate",
"def initial_conditions(self):\n return self._initial_conditions",
"def __init__(self, initial, goals, allowed):\n self.initial = initial # initial state\n self.goals = goals # list of goals that can be achieved\n self.allowed = allowed # the states we can move into ",
"def nullHeuristic(state, problem=None):\r\n return 0",
"def null_heuristic(state, problem=None):\n return 0",
"def initial_state(self):\n return None",
"def has_initial_states(self):\n return len(self.initial_states()) > 0",
"def initialize_state(self):\n accepted = False\n while not accepted:\n self.state = self.net.sample(self.evidence)\n accepted = self.net.log_probability(self.state) != utils.LOG_PROB_0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n return 0",
"def nullHeuristic(state, problem=None):\n\treturn 0",
"def nullHeuristic(state, problem=None):\r\n\treturn 0"
] | [
"0.6536528",
"0.63338894",
"0.6325197",
"0.6295865",
"0.61272234",
"0.6122794",
"0.608243",
"0.6061773",
"0.60178685",
"0.59641576",
"0.5951884",
"0.59460944",
"0.59215546",
"0.5918401",
"0.5916066",
"0.5902502",
"0.589962",
"0.58667886",
"0.5859815",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5859748",
"0.5840453",
"0.5838741"
] | 0.64018655 | 1 |
The indices of the atom positions in the state considered the ligand. | def ligand_idxs(self):
return self._ligand_idxs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])",
"def getLandmarkindices(self):\n return self.subsetnodes_indices",
"def getLandmarkindices(self):\n return self.subsetindices",
"def extract_ligand_indexes(traj, ligand):\n ligand_indexes = []\n for residue in traj.topology.residues:\n if residue.name == ligand:\n for atom in residue.atoms:\n if atom.element != \"H\":\n ligand_indexes.append(atom.index)\n if len(ligand_indexes) == 0:\n raise CustomError(\"Choosed Ligan %s does not apper in the trajectory\" % ligand)\n return ligand_indexes",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def agent_locs_idx(self):\n return tuple(self.agent_locs.T)",
"def labeled_indices(self):\n return self._labeled_indices",
"def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j",
"def get_indices(self):\r\n return self._indices",
"def getIndices(self):\r\n return self._indices",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def indices(self):\n return self.index.indices",
"def receptor_idxs(self):\n\n return self._receptor_idxs",
"def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices",
"def getAtomIndices( structure, resname ):\n atom_indices_ligand = []\n topology = structure.topology\n for atom in topology.atoms():\n if str(resname) in atom.residue.name:\n atom_indices_ligand.append(atom.index)\n\n return atom_indices_ligand",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def indices(self):\n return range(len(self))",
"def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices",
"def _get_identical_ligand_indices(\n ligand: oechem.OEMolBase, smiles_iterable: Iterable[str]\n ) -> List[int]:\n from ..modeling.OEModeling import read_smiles, are_identical_molecules\n\n identical_ligand_indices = []\n for i, complex_ligand in enumerate(smiles_iterable):\n if are_identical_molecules(ligand, read_smiles(complex_ligand)):\n identical_ligand_indices.append(i)\n\n return identical_ligand_indices",
"def indices(self, position=None):\n \n raise NotImplementedError()",
"def _code_indices(self) -> Tuple[int, ...]:\n return tuple(idx for idx, seg in enumerate(self.segments) if seg.is_code)",
"def get_34index_list(self):\n msk = self.load_mask()\n return [i for (i,v) in enumerate(msk) if v==1]",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def get_raster_ids(self):\n return numpy.array(range(self._lo_atom, self._lo_atom + self._n_atoms))",
"def positions(self, searchstr: str):\n indices = []\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index >= 0:\n indices.append(index)\n return indices",
"def getAngleIndices(self):\n coord_types, atom_indices = self.force_field.getInternalCoordinateDefinitions()\n angle_indices = np.where((coord_types == 'A') | (coord_types == 'D') | (coord_types == 'I'))[0]\n return angle_indices",
"def mainIndices(self):\n return self.i1, self.i2",
"def get_bit_positions(bit_mask):\n\tbit_positions = []\n\t# find bit positions of enabled bits in mask\n\tfor i in range(16):\n\t\tif (bit_mask & (1 << i)) != 0:\n\t\t\tbit_positions.append(i)\n\treturn bit_positions",
"def get_index_array(self):\n return self.region_pairs",
"def getFeaturesIndices(self, tag, history, in_data=True):\n indices = super().getFeaturesIndices(tag, history, in_data)\n word = history.getWord()\n position = history.getIndex()\n for suffix in self.data.getSuffixesForWord(word):\n self.__checkFeatureIndex__(self.__f101__((suffix, tag)), indices)\n for prefix in self.data.getPrefixesForWord(word):\n self.__checkFeatureIndex__(self.__f102__((prefix, tag)), indices)\n self.__checkFeatureIndex__(self.__f105__(tag), indices)\n self.__checkFeatureIndex__(self.__fNum__(word), indices)\n self.__checkFeatureIndex__(self.__fCap__(word, position), indices)\n return indices"
] | [
"0.7631998",
"0.7395858",
"0.7285203",
"0.7201684",
"0.69681764",
"0.6946488",
"0.6833295",
"0.6828469",
"0.6820649",
"0.67353374",
"0.66252863",
"0.6587609",
"0.6586959",
"0.65467286",
"0.653346",
"0.6422198",
"0.64164",
"0.6409819",
"0.6397331",
"0.6265197",
"0.62486005",
"0.6248363",
"0.62173486",
"0.62009984",
"0.6193911",
"0.61936754",
"0.617632",
"0.6145114",
"0.6131555",
"0.6125162"
] | 0.8726919 | 0 |
The indices of the atom positions in the state considered the receptor. | def receptor_idxs(self):
return self._receptor_idxs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])",
"def get_indices(self):\r\n return self._indices",
"def getIndices(self):\r\n return self._indices",
"def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def agent_locs_idx(self):\n return tuple(self.agent_locs.T)",
"def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j",
"def ligand_idxs(self):\n return self._ligand_idxs",
"def indices(self):\n return self.index.indices",
"def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices",
"def binding_site_idxs(self):\n\n return self._receptor_idxs",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def indices(self):\n return range(len(self))",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def mainIndices(self):\n return self.i1, self.i2",
"def indices(self, position=None):\n \n raise NotImplementedError()",
"def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()",
"def run_idxs(self):\n return list(range(len(self._h5[RUNS])))",
"def min_indices(self):\n return {term.minterm_index for term in self.iter_minterms()}",
"def position(self) -> np.ndarray:\n return self._state[0:2]",
"def get_final_pruned_indices(self):\n return self.final_pruned_indices",
"def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]",
"def get_rep_mol_indexes():\n f = open(FILE_WITH_REP_MOL_IDXS, \"r\")\n rd = csv.reader(f)\n mols = rd.next()\n f.close()\n mol_idxs = [int(i) - 1 for i in mols]\n os.unlink(FILE_WITH_REP_MOL_IDXS)\n return mol_idxs",
"def get_positions(self):\n return self.positions",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping",
"def getLandmarkindices(self):\n return self.subsetnodes_indices",
"def get_agent_indices(array):\t\n\tagent_indices = np.argwhere(array != 0)\n\treturn agent_indices",
"def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]",
"def get_active_register_indices(self):\n assert self.sketch.ndim == 1, 'Currently only support 1-dimensional sketch.'\n return np.flatnonzero(self.sketch)"
] | [
"0.82382464",
"0.727947",
"0.7218753",
"0.7151456",
"0.7083873",
"0.6969181",
"0.6967562",
"0.68910456",
"0.6889587",
"0.6860909",
"0.6848452",
"0.6824074",
"0.67979336",
"0.6595159",
"0.65589255",
"0.6538948",
"0.65247905",
"0.65097815",
"0.6502098",
"0.6494767",
"0.64834356",
"0.6455549",
"0.6431634",
"0.63775545",
"0.636381",
"0.634277",
"0.63383853",
"0.63295925",
"0.63215244",
"0.62749076"
] | 0.8276405 | 0 |
The method that must be implemented in nonabstract subclasses. Should decide if a walker should be warped or not and what its progress is regardless. | def _progress(self, walker):
raise NotImplementedError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def walk(self):\r\n return not not self.prototype.walk",
"def _warp(self, walker):\n\n\n # choose a state randomly from the set of initial states\n target_idx = np.random.choice(range(len(self.initial_states)), 1,\n p=self.initial_weights/np.sum(self.initial_weights))[0]\n\n warped_state = self.initial_states[target_idx]\n\n # set the initial state into a new walker object with the same weight\n warped_walker = type(walker)(state=warped_state, weight=walker.weight)\n\n # the data for the warp\n warp_data = {'target_idx' : np.array([target_idx]),\n 'weight' : np.array([walker.weight])}\n\n return warped_walker, warp_data",
"def run_shrink_pass(self, sp):\n if isinstance(sp, str):\n sp = self.shrink_pass(sp)\n\n self.debug(\"Shrink Pass %s\" % (sp.name,))\n\n initial_shrinks = self.shrinks\n initial_calls = self.calls\n size = len(self.shrink_target.buffer)\n try:\n sp.pass_function(self)\n finally:\n calls = self.calls - initial_calls\n shrinks = self.shrinks - initial_shrinks\n deletions = size - len(self.shrink_target.buffer)\n\n sp.calls += calls\n sp.shrinks += shrinks\n sp.deletions += deletions\n sp.runs += 1\n self.debug(\"Shrink Pass %s completed.\" % (sp.name,))\n\n # Complex state machine alert! A pass run can either succeed (we made\n # at least one shrink) or fail (we didn't). This changes the pass's\n # current classification according to the following possible\n # transitions:\n #\n # CANDIDATE -------> HOPEFUL\n # | ^\n # | |\n # v v\n # AVOID ---------> DUBIOUS\n #\n # From best to worst we want to run HOPEFUL, CANDIDATE, DUBIOUS, AVOID.\n # We will try any one of them if we have to but we want to prioritise.\n #\n # When a run succeeds, a pass will follow an arrow to a better class.\n # When it fails, it will follow an arrow to a worse one.\n # If no such arrow is available, it stays where it is.\n #\n # We also have the classification SPECIAL for passes that do not get\n # run as part of the normal process.\n previous = sp.classification\n\n # If the pass didn't actually do anything we don't reclassify it. This\n # is for things like remove_discarded which often are inapplicable.\n if calls > 0 and sp.classification != PassClassification.SPECIAL:\n if shrinks == 0:\n if sp.successes > 0:\n sp.classification = PassClassification.DUBIOUS\n else:\n sp.classification = PassClassification.AVOID\n else:\n sp.successes += 1\n if sp.classification == PassClassification.AVOID:\n sp.classification = PassClassification.DUBIOUS\n else:\n sp.classification = PassClassification.HOPEFUL\n if previous != sp.classification:\n self.debug(\n \"Reclassified %s from %s to %s\"\n % (sp.name, previous.name, sp.classification.name)\n )",
"def walk(self):\n pass",
"def _on_walk(self):\n pass",
"def _wander_strategy(self):\n\n result = 'moveForward'\n\n if self.last_status == 'Fail' or self.last_action == 'Action.drop':\n result = 'turnLeft'\n\n return result",
"def walk(self):\r\n return not not self.model.prototype.walk",
"def __post_init__(self):\n if self.steering == Direction.FWD:\n raise ValueError(\"Steering can't be FORWARD.\")",
"def walker():\n from .Walker import Walker\n\n return Walker",
"def pruning_routine(self):\n pass",
"def move_landscape(self):\n return not isinstance(self, Water)",
"def isFallthrough(self) -> bool:\n ...",
"def warp_walkers(self, walkers, cycle):\n\n new_walkers = []\n\n # sporadic, zero or many records per call\n warp_data = []\n bc_data = []\n\n # continual, one record per call\n progress_data = defaultdict(list)\n\n # calculate progress data\n all_progress_data = [self._progress(w) for w in walkers]\n\n for walker_idx, walker in enumerate(walkers):\n\n # unpack progress data\n to_warp, walker_progress_data = all_progress_data[walker_idx]\n\n # add that to the progress data record\n for key, value in walker_progress_data.items():\n progress_data[key].append(value)\n\n # if the walker is meets the requirements for warping warp\n # it\n if to_warp:\n # warp the walker\n warped_walker, walker_warp_data = self._warp(walker)\n\n # add the walker idx to the walker warp record\n walker_warp_data['walker_idx'] = np.array([walker_idx])\n\n # save warped_walker in the list of new walkers to return\n new_walkers.append(warped_walker)\n\n # save the instruction record of the walker\n warp_data.append(walker_warp_data)\n\n logging.info('WARP EVENT observed at {}'.format(cycle))\n logging.info('Warped Walker Weight = {}'.format(\n walker_warp_data['weight']))\n\n # no warping so just return the original walker\n else:\n new_walkers.append(walker)\n\n # consolidate the progress data to an array of a single\n # feature vectors for the cycle\n for key, value in progress_data.items():\n progress_data[key] = value\n\n # if the boundary conditions need to be updated given the\n # cycle and state from warping perform that now and return any\n # record data for that\n bc_data = self._update_bc(new_walkers, warp_data, progress_data, cycle)\n\n return new_walkers, warp_data, bc_data, progress_data",
"def walk(self):\n # === Choose direction ===\n # Increase probability of movement relative to map dimensions\n v_move = self.width / self.height\n h_move = self.height / self.width\n north, south, east, west = v_move, v_move, h_move, h_move\n\n # Weight the random walk against the edges\n if self.drunkard_x < self.width * 0.25: # far left side of map\n east += self.weighted_toward_center\n elif self.drunkard_x > self.width * 0.75: # far right side of map\n west += self.weighted_toward_center\n if self.drunkard_y < self.height * 0.25: # top of the map\n south += self.weighted_toward_center\n elif self.drunkard_y > self.height * 0.75: # bottom of the map\n north += self.weighted_toward_center\n\n # Weight in favor of the previous direction\n if self._prev_direction == \"north\":\n north += self.weighted_toward_prev_direction\n if self._prev_direction == \"south\":\n south += self.weighted_toward_prev_direction\n if self._prev_direction == \"east\":\n east += self.weighted_toward_prev_direction\n if self._prev_direction == \"west\":\n west += self.weighted_toward_prev_direction\n\n weights = [south, north, east, west]\n moves = {\"south\": (0, 1), \"north\": (0, -1), \"east\": (1, 0), \"west\": (-1, 0)}\n\n direction = choices(list(moves.keys()), weights)[0]\n dx, dy = moves[direction]\n\n # === Walk ===\n # check collision at edges\n if (1 < self.drunkard_x + dx < self.width - 1) and (1 < self.drunkard_y + dy < self.height - 1):\n self.drunkard_x += dx\n self.drunkard_y += dy\n if self.tiles[self.drunkard_x][self.drunkard_y]:\n self.tiles[self.drunkard_x][self.drunkard_y].carve()\n self._tiles_filled += 1\n self._prev_direction = direction",
"def _is_wiz(agent: Agent):\n return agent.agent_id == 'Wizard'",
"def test_wip(self):\n self.assertTrue(not return_true())",
"def _prior_teach(self):\n pass",
"def hasFallthrough(self) -> bool:\n ...",
"def decide(self):\n self.maybe_shoot()\n next(self.move_cycle)\n\n pass",
"def passes(self) -> bool:\n ...",
"def run_and_propagate(self):\n pass",
"def ismoving(self):\n return not self.get_par(\"done_moving\")",
"def shrink(self):\n # We assume that if an all-zero block of bytes is an interesting\n # example then we're not going to do better than that.\n # This might not technically be true: e.g. for integers() | booleans()\n # the simplest example is actually [1, 0]. Missing this case is fairly\n # harmless and this allows us to make various simplifying assumptions\n # about the structure of the data (principally that we're never\n # operating on a block of all zero bytes so can use non-zeroness as a\n # signpost of complexity).\n if not any(self.shrink_target.buffer) or self.incorporate_new_buffer(\n hbytes(len(self.shrink_target.buffer))\n ):\n return\n\n try:\n self.greedy_shrink()\n finally:\n if self.__engine.report_debug_info:\n\n def s(n):\n return \"s\" if n != 1 else \"\"\n\n total_deleted = self.initial_size - len(self.shrink_target.buffer)\n\n self.debug(\"---------------------\")\n self.debug(\"Shrink pass profiling\")\n self.debug(\"---------------------\")\n self.debug(\"\")\n calls = self.__engine.call_count - self.initial_calls\n self.debug(\n (\n \"Shrinking made a total of %d call%s \"\n \"of which %d shrank. This deleted %d byte%s out of %d.\"\n )\n % (\n calls,\n s(calls),\n self.shrinks,\n total_deleted,\n s(total_deleted),\n self.initial_size,\n )\n )\n for useful in [True, False]:\n self.debug(\"\")\n if useful:\n self.debug(\"Useful passes:\")\n else:\n self.debug(\"Useless passes:\")\n self.debug(\"\")\n for p in sorted(\n self.passes,\n key=lambda t: (-t.calls, -t.runs, t.deletions, t.shrinks),\n ):\n if p.calls == 0:\n continue\n if (p.shrinks != 0) != useful:\n continue\n\n self.debug(\n (\n \" * %s ran %d time%s, making %d call%s of which \"\n \"%d shrank, deleting %d byte%s.\"\n )\n % (\n p.name,\n p.runs,\n s(p.runs),\n p.calls,\n s(p.calls),\n p.shrinks,\n p.deletions,\n s(p.deletions),\n )\n )\n self.debug(\"\")",
"def run(self):\n # Check for a condition mismatch.\n if (self.sense and (not self.steallock)) \\\n or ((not self.sense) and self.steallock):\n return 0\n\n # Execute the child actions.\n return super(FilterStealLock, self).run()",
"def should_drive(self):\n\t\tif not self.moving:\n\t\t\tnew_direction = self.find_direction()\n\t\t\tif self.orderQueue.has_order_in_floor_and_direction(self.direction, self.currentFloor) or self.orderQueue.has_order_in_floor_and_direction(ORDERDIR.IN, self.currentFloor):\n\t\t\t\tself.orderQueue.delete_order_in_floor(self.direction, self.currentFloor)\n\t\t\t\tself.open_door()\n\t\t\telif new_direction != self.direction and self.orderQueue.has_order_in_floor_and_direction(not self.direction, self.currentFloor):\n\t\t\t\tself.orderQueue.delete_order_in_floor(not self.direction, self.currentFloor)\n\t\t\t\tself.open_door()\n\t\t\telif self.orderQueue.has_orders() and not self.moving and self.doorTimer.is_finished:\n\t\t\t\tself.drive()\n\t\t\tself.update_and_send_elevator_info()",
"def think(self):\n pass",
"def _progress(self, walker):\n\n min_distance = self._calc_min_distance(walker)\n\n # test to see if the ligand is unbound\n unbound = False\n if min_distance >= self._cutoff_distance:\n unbound = True\n\n progress_data = {'min_distances' : min_distance}\n\n return unbound, progress_data",
"def greedy(self) -> Action:\n return NotImplemented",
"def _perturbInPlaceHard(self):\n die",
"def __attack(self, target):\n attack_difference = (Warrior.attack(self, target))\n if attack_difference > 5:\n print(\"Second attack with ANGRY!\")\n Warrior.attack(self, target)\n return None"
] | [
"0.5694468",
"0.5546899",
"0.5413223",
"0.5394204",
"0.53752905",
"0.52860034",
"0.52287936",
"0.5194105",
"0.5181029",
"0.51800656",
"0.5109089",
"0.50865793",
"0.5069367",
"0.5034896",
"0.49629974",
"0.49596345",
"0.49489784",
"0.4874057",
"0.48161283",
"0.48133498",
"0.48097235",
"0.4807598",
"0.4775281",
"0.4746886",
"0.47466895",
"0.47242638",
"0.4712722",
"0.47105417",
"0.47069517",
"0.4704785"
] | 0.5744697 | 0 |
Perform the warping of a walker. Chooses an initial state to replace the walker's state with according to it's given weight. Returns a walker of the same type and weight. | def _warp(self, walker):
# choose a state randomly from the set of initial states
target_idx = np.random.choice(range(len(self.initial_states)), 1,
p=self.initial_weights/np.sum(self.initial_weights))[0]
warped_state = self.initial_states[target_idx]
# set the initial state into a new walker object with the same weight
warped_walker = type(walker)(state=warped_state, weight=walker.weight)
# the data for the warp
warp_data = {'target_idx' : np.array([target_idx]),
'weight' : np.array([walker.weight])}
return warped_walker, warp_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build(self, w=1):\n states = self.a.states.values()\n reachedStates = set()\n remainingStates = set()\n\n _open = Heap()\n\n for s in states:\n if s.acceptingFunction is not None:\n s.h = 0\n _open.push(0, s)\n reachedStates.add(s)\n else:\n remainingStates.add(s)\n\n if not reachedStates:\n Color.RED.print(\"No states reached!\")\n Color.RED.print(\"States were not reached: %s\" % remainingStates)\n raise Exception(\"Query has no goal, please review it!\")\n\n while _open:\n (h, s) = _open.pop()\n\n for cS in s._prev():\n if h+w < cS.h:\n reachedStates.add(cS)\n _open.push(h+w, cS)\n\n cS.h = h+w\n if cS in remainingStates:\n remainingStates.remove(cS)\n\n if remainingStates:\n Color.RED.print(\"Some states were not reached: %s\" % remainingStates)\n Color.YELLOW.print(\"States reached: %s\" % reachedStates)\n raise Exception(\"Query is probably unfinished, please review it\")\n\n reached = [(s.h, s) for s in reachedStates]\n reached.sort()\n\n if __debug__:\n Color.GREEN.print(\"Heuristic:\")\n for (h, s) in reached:\n Color.GREEN.print(\" * %d: %s\" % (h, s))\n\n for s in states:\n s.prepare()\n\n return self.a",
"def warp_walkers(self, walkers, cycle):\n\n new_walkers = []\n\n # sporadic, zero or many records per call\n warp_data = []\n bc_data = []\n\n # continual, one record per call\n progress_data = defaultdict(list)\n\n # calculate progress data\n all_progress_data = [self._progress(w) for w in walkers]\n\n for walker_idx, walker in enumerate(walkers):\n\n # unpack progress data\n to_warp, walker_progress_data = all_progress_data[walker_idx]\n\n # add that to the progress data record\n for key, value in walker_progress_data.items():\n progress_data[key].append(value)\n\n # if the walker is meets the requirements for warping warp\n # it\n if to_warp:\n # warp the walker\n warped_walker, walker_warp_data = self._warp(walker)\n\n # add the walker idx to the walker warp record\n walker_warp_data['walker_idx'] = np.array([walker_idx])\n\n # save warped_walker in the list of new walkers to return\n new_walkers.append(warped_walker)\n\n # save the instruction record of the walker\n warp_data.append(walker_warp_data)\n\n logging.info('WARP EVENT observed at {}'.format(cycle))\n logging.info('Warped Walker Weight = {}'.format(\n walker_warp_data['weight']))\n\n # no warping so just return the original walker\n else:\n new_walkers.append(walker)\n\n # consolidate the progress data to an array of a single\n # feature vectors for the cycle\n for key, value in progress_data.items():\n progress_data[key] = value\n\n # if the boundary conditions need to be updated given the\n # cycle and state from warping perform that now and return any\n # record data for that\n bc_data = self._update_bc(new_walkers, warp_data, progress_data, cycle)\n\n return new_walkers, warp_data, bc_data, progress_data",
"def weight_setup(self, weighting):\n if weighting == \"overlap\":\n self.weights = overlap_generator(overlap, self.graph)\n elif weighting == \"unit\":\n self.weights = overlap_generator(unit, self.graph)\n elif weighting == \"min_norm\":\n self.weights = overlap_generator(min_norm, self.graph)\n else:\n self.weights = overlap_generator(normalized_overlap, self.graph)",
"def optimise(w, w_delta):\n return w.assign(w - w_delta)",
"def init_weights(self):\n # Initialize weights\n self.apply(self._init_weights)\n # Tie weights if needed\n self.tie_weights()",
"def set_weight_decay(self, wd=U.DEFAULT_WD):\n self._recompile(wd=wd)\n return",
"def calculate_prep_weight(weight, size):\n r = find_recovery_on_size(size)\n return weight / r",
"def _update_weight(self, time):\r\n # Until the relative time window, return original weights.\r\n if time < self.window - 1:\r\n return self.weights\r\n\r\n # Set the current predicted relatives value.\r\n current_prediction = self._calculate_predicted_relatives(time)\r\n\r\n # Set the deviation from the mean of current prediction.\r\n predicted_deviation = current_prediction - np.ones(self.number_of_assets) * np.mean(\r\n current_prediction)\r\n\r\n # Calculate alpha, the lagrangian multiplier.\r\n norm2 = np.linalg.norm(predicted_deviation, ord=1) ** 2\r\n\r\n # If norm2 is zero, return previous weights.\r\n if norm2 == 0:\r\n return self.weights\r\n alpha = np.minimum(0, (current_prediction * self.weights - self.epsilon) / norm2)\r\n\r\n # Update new weights.\r\n new_weights = self.weights - alpha * predicted_deviation\r\n\r\n # Project to simplex domain.\r\n new_weights = self._simplex_projection(new_weights)\r\n\r\n return new_weights",
"def _determine_new_weight(self, weight, input, currentNeuron, bmu):\n return weight \\\n + (self.neighborhood.fn(currentNeuron, bmu) \\\n * self.learning_rate * (input - weight))",
"def switch_weighting(\n self, weighting=Union[tuple, Iterable[Union[FS, bwp.DatapackageBase]]]\n ) -> None:\n self._switch(\n obj=weighting,\n label=\"weighting\",\n matrix=\"weighting_matrix\",\n func=self.load_weighting_data,\n )",
"def moveWalker(self, walker):\n if walker not in self.walkers:\n raise ValueError(\"No such Walker exist in our space!\")\n Delta_x, Delta_y = walker.take_step()\n # moving the walker to new position (class)\n self.walkers[walker] = self.walkers[walker].move(Delta_x, Delta_y)",
"def weight_wrtg(self, wrtg):\n # Clear caches because weights are going to change.\n # TODO: it might be possible to not clear the caches\n # if the weight doesn't change, and re-use previous decoding.\n wrtg.ClearCaches()\n for p in wrtg.P:\n rule = p.rhs.rule\n assert isinstance(rule.features, list)\n rule.weight = self.weight_rule(rule)",
"def __init__(self, source, dest, weight_func):\r\n self.source = source\r\n self.dest = dest\r\n self.weight = weight_func()\r\n self.old_weight = 0.0",
"def change_weight(self, new_weight):\r\n self.old_weight = self.weight\r\n self.weight = new_weight",
"def weight_rotate(weight):\n weight = weight.permute(1, 2, 3, 0)\n return weight",
"def walk(self):\n # === Choose direction ===\n # Increase probability of movement relative to map dimensions\n v_move = self.width / self.height\n h_move = self.height / self.width\n north, south, east, west = v_move, v_move, h_move, h_move\n\n # Weight the random walk against the edges\n if self.drunkard_x < self.width * 0.25: # far left side of map\n east += self.weighted_toward_center\n elif self.drunkard_x > self.width * 0.75: # far right side of map\n west += self.weighted_toward_center\n if self.drunkard_y < self.height * 0.25: # top of the map\n south += self.weighted_toward_center\n elif self.drunkard_y > self.height * 0.75: # bottom of the map\n north += self.weighted_toward_center\n\n # Weight in favor of the previous direction\n if self._prev_direction == \"north\":\n north += self.weighted_toward_prev_direction\n if self._prev_direction == \"south\":\n south += self.weighted_toward_prev_direction\n if self._prev_direction == \"east\":\n east += self.weighted_toward_prev_direction\n if self._prev_direction == \"west\":\n west += self.weighted_toward_prev_direction\n\n weights = [south, north, east, west]\n moves = {\"south\": (0, 1), \"north\": (0, -1), \"east\": (1, 0), \"west\": (-1, 0)}\n\n direction = choices(list(moves.keys()), weights)[0]\n dx, dy = moves[direction]\n\n # === Walk ===\n # check collision at edges\n if (1 < self.drunkard_x + dx < self.width - 1) and (1 < self.drunkard_y + dy < self.height - 1):\n self.drunkard_x += dx\n self.drunkard_y += dy\n if self.tiles[self.drunkard_x][self.drunkard_y]:\n self.tiles[self.drunkard_x][self.drunkard_y].carve()\n self._tiles_filled += 1\n self._prev_direction = direction",
"def simulate_random_walk_unweighted (G, damping, max_jumps):\n\n results = []\n nodes = [] # keep nodes\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n\n j = 0\n while (j < max_jumps):\n previous_node = current_node\n jump_decision = random.uniform(0, 1)\n\n if jump_decision < damping or G.out_degree(current_node) == 0:\n # make a jump\n current_node = random.randrange(N)\n while not G.has_node(current_node):\n current_node = random.randrange(N)\n\n j += 1\n try:\n distance = nx.shortest_path_length(G, previous_node, current_node)\n results.append(distance)\n nodes.append(previous_node) # keep nodes\n except nx.NetworkXNoPath: continue\n\n else:\n # move to neighbor node\n incident = G.out_edges([current_node], data = False)\n current_node = random.choice(incident)[1]\n\n return results, nodes #, current_node (keep nodes)",
"def init_weight(self):\r\n xavier_init(self.output_proj, distribution='uniform', bias=0.)",
"def ApplyWeights(frame):\n if \"Wpol\" not in frame and \"Wunpol\" not in frame:\n return\n\n if frame[\"T\"].weighted:\n return frame\n ValidateMaps(frame)\n\n tmap = frame.pop(\"T\")\n\n if \"Wpol\" in frame:\n wmap = frame[\"Wpol\"]\n qmap = frame.pop(\"Q\")\n umap = frame.pop(\"U\")\n maps.apply_weights(tmap, qmap, umap, wmap)\n else:\n wmap = frame[\"Wunpol\"]\n maps.apply_weights_t(tmap, wmap)\n\n frame[\"T\"] = tmap\n if \"Wpol\" in frame:\n frame[\"Q\"] = qmap\n frame[\"U\"] = umap\n\n return frame",
"def weighted_random_item(items, weight):\n if not items:\n return None\n\n weight_sum = sum(weight(item) for item in items)\n if weight_sum <= 0:\n return None\n\n choice = random.random() * weight_sum\n for item in items:\n choice -= weight(item)\n if choice < 0:\n return item, weight(item) / weight_sum\n return items[-1], -1 # floating-point rounding error",
"def weight(self) -> None:\n assert hasattr(self, \"characterized_inventory\"), \"Must do lcia first\"\n if not hasattr(self, \"weighting_value\"):\n self.load_weighting_data()\n self.weighting_calculation()",
"def set_weight(self, weight):\n self.weight = weight # overwrite the existing weight with the input weight value",
"def get_weight(self, start_direction, current_weight, **kwargs):\n return self.weights.get(start_direction, self.default_weight)",
"def test_feeding_weight_carn(self):\n original = self.carn.weight\n self.carn.fitness = 1\n herb = [Herbivore(age=90) for _ in range(50)]\n self.carn.feeding(herb)\n nt.assert_greater(self.carn.weight, original)",
"def __add_delayed(self, name, weight, weightUp, weightDown, shift):\n if isinstance(dask_awkward.type(weight), awkward.types.OptionType):\n # TODO what to do with option-type? is it representative of unknown weight\n # and we default to one or is it an invalid weight and we should never use this\n # event in the first place (0) ?\n weight = dask_awkward.fill_none(weight, 1.0)\n if self._weight is None:\n self._weight = weight\n else:\n self._weight = self._weight * weight\n if self._storeIndividual:\n self._weights[name] = weight\n self.__add_variation(name, weight, weightUp, weightDown, shift)\n if isinstance(self._weightStats, coffea.processor.dict_accumulator):\n self._weightStats = {}\n self._weightStats[name] = {\n \"sumw\": dask_awkward.to_dask_array(weight).sum(),\n \"sumw2\": dask_awkward.to_dask_array(weight**2).sum(),\n \"minw\": dask_awkward.to_dask_array(weight).min(),\n \"maxw\": dask_awkward.to_dask_array(weight).max(),\n }",
"def _weight_boosting_random_state(name: str):\n return hp.randint(name, 5)",
"def test_feeding_weight(self):\n original = 20\n self.herb.weight = 20\n self.herb.feeding(10)\n nt.assert_greater(self.herb.weight, original)",
"def selection_wheel(self, weighted_population):\n weight_total = sum((item[1] for item in weighted_population))\n n = random.uniform(0, weight_total)\n for item, weight in weighted_population:\n if n < weight:\n return item\n n = n - weight\n return item",
"def sampleWeight(self):\r\n x=random.random()\r\n i = 0\r\n n = len(self.weights)-1\r\n cummulativeWeight = 0\r\n #Distribute the exploration weight evenly among all the actions that have been\r\n #taken up to this point in time by any of the users\r\n if len(self.sampledActions) == 0:\r\n explorationWeight = 0\r\n else:\r\n explorationWeight = self.explorationFund / len(self.sampledActions)\r\n #Compute the normalization factor. If no action has been sampled by this user yet,\r\n #then each action k has weight eta*pi_k, where pi_k is the weight of k in the\r\n #prior distribution. Then, the normalization factor is the sum(eta*pi_k) for all k,\r\n #which is equal to eta*sum(pi_k), which is just eta, since the sum of the previous\r\n #weights has to add up to 1.\r\n #If one or more actions have been already sampled, the normalization factor is the\r\n #sum of 1) the weights already in self.weights, 2) the exploration fund, and 3) the\r\n #weights of the actions that are not yet in self.weights. Each one of these actions\r\n #has weight eta*pi_k (because it hasn't been sampled yet), so the total weight of the\r\n #mass of actions not yet in self.weights is eta*(1-sum(pi_l)), where the sum is over all\r\n #the weights already in self.weights\r\n if n < 0:\r\n normalizationFactor = self.priorBelief\r\n else:\r\n normalizationFactor = sum(self.weights) + self.explorationFund + \\\r\n self.priorBelief*(1-self.priorTopicDistr.cummulative[n])\r\n #Keep getting the next weight until the combined mass of the weights is less than the\r\n #random number x\r\n while True:\r\n w = self.__getitem__(i)\r\n if i in self.sampledActions:\r\n w += explorationWeight\r\n cummulativeWeight += w\r\n if x <= cummulativeWeight/normalizationFactor:\r\n if i not in self.sampledActions:\r\n self.sampledActions.append(i)\r\n return w\r\n i += 1",
"def weights_clipping(self):\n max_weigth = np.amax(np.abs(self._weights))\n self._weights = self._clipping*self._weights/max_weigth"
] | [
"0.5691519",
"0.5282371",
"0.523244",
"0.52236164",
"0.5166298",
"0.5165015",
"0.5163046",
"0.51418316",
"0.5122877",
"0.50620407",
"0.5039045",
"0.5032708",
"0.5029594",
"0.5018036",
"0.50139284",
"0.497852",
"0.49433792",
"0.49433264",
"0.4927654",
"0.49240887",
"0.48710394",
"0.48648825",
"0.48634824",
"0.4849562",
"0.48489943",
"0.48475194",
"0.48406518",
"0.48163027",
"0.48150995",
"0.48078227"
] | 0.79330695 | 0 |
Test the progress of all the walkers, warp if required, and update the boundary conditions. Arguments | def warp_walkers(self, walkers, cycle):
new_walkers = []
# sporadic, zero or many records per call
warp_data = []
bc_data = []
# continual, one record per call
progress_data = defaultdict(list)
# calculate progress data
all_progress_data = [self._progress(w) for w in walkers]
for walker_idx, walker in enumerate(walkers):
# unpack progress data
to_warp, walker_progress_data = all_progress_data[walker_idx]
# add that to the progress data record
for key, value in walker_progress_data.items():
progress_data[key].append(value)
# if the walker is meets the requirements for warping warp
# it
if to_warp:
# warp the walker
warped_walker, walker_warp_data = self._warp(walker)
# add the walker idx to the walker warp record
walker_warp_data['walker_idx'] = np.array([walker_idx])
# save warped_walker in the list of new walkers to return
new_walkers.append(warped_walker)
# save the instruction record of the walker
warp_data.append(walker_warp_data)
logging.info('WARP EVENT observed at {}'.format(cycle))
logging.info('Warped Walker Weight = {}'.format(
walker_warp_data['weight']))
# no warping so just return the original walker
else:
new_walkers.append(walker)
# consolidate the progress data to an array of a single
# feature vectors for the cycle
for key, value in progress_data.items():
progress_data[key] = value
# if the boundary conditions need to be updated given the
# cycle and state from warping perform that now and return any
# record data for that
bc_data = self._update_bc(new_walkers, warp_data, progress_data, cycle)
return new_walkers, warp_data, bc_data, progress_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_bc(self, new_walkers, warp_data, progress_data, cycle):\n\n # Only report a record on\n # the first cycle which gives the distance at which walkers\n # are warped\n if cycle == 0:\n return [{'boundary_distance' : np.array([self._cutoff_distance]),},]\n else:\n return []",
"def _update_bc(self, new_walkers, warp_data, progress_data, cycle):\n\n # do nothing by default\n return []",
"def Advance():\n warp.step()",
"def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return",
"def computeForces(self, neighbors=[]): #computing forces to drive the agents and avoid collisions \n if not self.atGoal:\n if self.entry_state % 2 == 0 and len(self.entrancex) > 0 and self.id != 4 : #checks if assigned curve is entry and switches to state 1 to follow entry bezier curve\n time2=0.5 # time used to calculate driving force \n self.local_goal = [self.entrancex[0], self.entrancey[0]] #assigning waypoint as goal\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2))) #calculating direction vector\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez) #driving force\n self.entrancex = np.delete(self.entrancex,0) #eliminating the used waypoints from the list \n self.entrancey = np.delete(self.entrancey,0) #eliminating the used waypoints from the list \n \n elif self.force_state == 1 and (abs(self.pos[0] - self.goal[0]) >400 or abs(self.pos[1] - self.goal[1]) >400): #checks if force-based navigation is assigned, switches to state 2\n self.F = (self.gvel-self.vel)/self.ksi #driving force\n for neighbor in neighbors:\n if neighbor.id != self.id: #and not neighbor.atGoal: \n distSq = (neighbor.pos-self.pos).dot(neighbor.pos-self.pos)\n #print(distSq, self.dhorSq)\n if distSq < self.dhorSq: # neighbor is inside the sensing radius\n tau = self.ttc(neighbor)\n #print(tau, self.timehor)\n if tau < self.timehor: # will the two agents collide in less than timehor?\n dir = self.pos + self.vel*tau - neighbor.pos - neighbor.vel*tau \n length = sqrt(dir.dot(dir))\n if length > 0:\n dir = dir/length # the direction of the force\n mag = (self.timehor - tau)/(tau + 1e-6) # the magnitude of the force\n self.F += mag*dir # add the force\n \n else: #state 3 - following the exit bezier curve\n time2=0.5 # time used to calculate driving force\n self.local_goal = [self.exitx[0], self.exity[0]]\n if abs(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos)))) >10: #to reach first point of exit curve from agents previous state position\n self.F = ((self.local_goal - self.pos)/(sqrt((self.local_goal - self.pos).dot((self.local_goal - self.pos) )))*self.prefspeed)/self.ksi\n else:\n self.rel_posi = self.local_goal - self.pos #calculating relative position between agents\n self.n_bez = (self.rel_posi + (self.prefspeed*time2))/(abs(self.rel_posi + (self.prefspeed*time2)))\n self.F = ((max(self.timehor - time2/100, 0)/time2)*self.n_bez)\n #print(self.pos, self.local_goal)\n if len(self.exitx) > 1 :\n self.exitx = np.delete(self.exitx,0)\n self.exity = np.delete(self.exity,0)",
"def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500",
"def run_step(self, debug=False):\n\n\n # is there an obstacle in front of us?\n hazard_detected = False\n\n # retrieve relevant elements for safe navigation, i.e.: traffic lights\n # and other vehicles\n actor_list = self._world.get_actors() # type: ActorList\n vehicle_list = actor_list.filter(\"*vehicle*\") # type: List[Actor]\n pedestrians_list = actor_list.filter(\"*walker.pedestrian*\")\n lights_list = actor_list.filter(\"*traffic_light*\") # type: List[carla.TrafficLight]\n\n if not self.drawn_lights and debug:\n for light in lights_list:\n self._world.debug.draw_box(\n carla.BoundingBox(light.trigger_volume.location + light.get_transform().location,\n light.trigger_volume.extent * 2),\n carla.Rotation(0, 0, 0), 0.05, carla.Color(255, 128, 0, 0), 0)\n self.drawn_lights = True\n\n # check possible obstacles\n vehicle_state, vehicle = self._is_vehicle_hazard(vehicle_list)\n if vehicle_state:\n if debug:\n print('!!! VEHICLE BLOCKING AHEAD [{}])'.format(vehicle.id))\n\n self._state = AgentState.BLOCKED_BY_VEHICLE\n hazard_detected = True\n\n # Check for pedestrians\n pedestrian_state, pedestrian = self._is_pedestrian_hazard(pedestrians_list)\n if pedestrian_state:\n if debug:\n print('!!! PEDESTRIAN BLOCKING AHEAD [{}])'.format(pedestrian.id))\n\n self._state = AgentState.BLOCKED_BY_VEHICLE\n hazard_detected = True\n\n # check for the state of the traffic lights\n light_state, traffic_light = self._is_light_red(lights_list)\n if light_state:\n if debug:\n print('=== RED LIGHT AHEAD [{}])'.format(traffic_light.id))\n\n self._state = AgentState.BLOCKED_RED_LIGHT\n hazard_detected = True\n\n new_target_speed = self._update_target_speed(hazard_detected, debug)\n\n # if hazard_detected:\n # control = self.emergency_stop()\n # else:\n # self._state = AgentState.NAVIGATING\n # self.braking_intial_speed = None\n # # standard local planner behavior\n # control = self._local_planner.run_step(debug=debug)\n # if self.stopping_for_traffic_light:\n # control.steer = 0.0\n\n self._state = AgentState.NAVIGATING\n self.braking_intial_speed = None\n # standard local planner behavior\n control = self._local_planner.run_step(debug=debug)\n if self.stopping_for_traffic_light:\n control.steer = 0.0\n # Prevent from steering randomly when stopped\n if math.fabs(get_speed(self._vehicle)) < 0.1:\n control.steer = 0\n\n return control",
"def main_loop(self):\n if self.points is None:\n return\n points = self.points\n transformed_lidar_poses = self.transform_lidar_into_map_coords(points) \n self.get_right_and_left_lanelet() \n if self.left_lanelet is not None: \n filtered_poses_left = self.filter_lidar_poses(self.left_lanelet, transformed_lidar_poses) \n if len(filtered_poses_left) > 0: \n dist = self.calc_dist(filtered_poses_left) \n if dist > 0 and dist < self.max_dist_lidar: \n self.obstacle_on_left_lane_pub.publish(dist) \n else:\n self.obstacle_on_left_lane_pub.publish(np.inf) \n # if there are no points on left_lane, checks successor and predecessor\n else:\n filtered_poses_left = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(self.left_lanelet.successor[0]), transformed_lidar_poses)\n\n if len(filtered_poses_left) > 0: \n dist = self.calc_dist(filtered_poses_left) \n if dist > 0 and dist < self.max_dist_lidar: \n self.obstacle_on_left_lane_pub.publish(dist) \n else:\n self.obstacle_on_left_lane_pub.publish(np.inf)\n else:\n filtered_poses_left = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(self.left_lanelet.predecessor[0]), transformed_lidar_poses)\n\n if len(filtered_poses_left) > 0: \n dist = self.calc_dist(filtered_poses_left) \n if dist > 0 and dist < self.max_dist_lidar: \n self.obstacle_on_left_lane_pub.publish(dist) \n else:\n self.obstacle_on_left_lane_pub.publish(np.inf)\n else:\n self.obstacle_on_right_lane_pub.publish(np.inf) \n \n if self.right_lanelet is not None: \n filtered_poses_right = self.filter_lidar_poses(self.right_lanelet, transformed_lidar_poses)\n if len(filtered_poses_right) > 0: \n dist = self.calc_dist(filtered_poses_right) \n if dist > 0 and dist < self.max_dist_lidar: \n self.obstacle_on_right_lane_pub.publish(dist) \n else:\n self.obstacle_on_right_lane_pub.publish(np.inf)\n # if there are no points on right_lane, checks successor and predecessor \n else:\n filtered_poses_right = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(self.right_lanelet.successor[0]), transformed_lidar_poses)\n\n if len(filtered_poses_right) > 0: \n dist = self.calc_dist(filtered_poses_right) \n if dist > 0 and dist < self.max_dist_lidar: \n self.obstacle_on_right_lane_pub.publish(dist) \n else:\n self.obstacle_on_right_lane_pub.publish(np.inf)\n else:\n filtered_poses_right = self.filter_lidar_poses(self.scenario.lanelet_network.find_lanelet_by_id(self.right_lanelet.predecessor[0]), transformed_lidar_poses)\n\n if len(filtered_poses_right) > 0: \n dist = self.calc_dist(filtered_poses_right) \n if dist > 0 and dist < self.max_dist_lidar: \n self.obstacle_on_right_lane_pub.publish(dist) \n else:\n self.obstacle_on_right_lane_pub.publish(np.inf) \n else:\n self.obstacle_on_right_lane_pub.publish(np.inf)",
"def _do_walk(self):\r\n rospy.loginfo(\"Started walking thread\")\r\n wb_walkerfunc=self.wb_walkerfunc\r\n \r\n # Global walk loop\r\n n=50\r\n print \"Thread rate\", 1.0/(self._cycle_period/(2.0*n))\r\n r=rospy.Rate(1.0/(self._cycle_period/(2.0*n)))\r\n p=True\r\n i=0\r\n self.current_velocity=[0,0,0]\r\n while not rospy.is_shutdown() and (self.walking or i<n or self.is_displacing()):\r\n if not self.walking:\r\n self.desired_velocity=[0,0,0]\r\n #if not self.is_displacing() and i==0: # Do not move if nothing to do and already at 0\r\n # self.update_current_velocity(self.desired_velocity, n)\r\n # r.sleep()\r\n # continue\r\n x=float(i)/n \r\n qd_curr=wb_walkerfunc.get(p, x, self.current_velocity)\r\n self.update_current_velocity(self.desired_velocity, n)\r\n self.robotis_mini_ci.set_qd(qd_curr)\r\n i+=1\r\n if i>n:\r\n i=0\r\n p=not p\r\n r.sleep()\r\n rospy.loginfo(\"Finished walking thread\")\r\n \r\n self._th_walk=None",
"def _step(self, a):\n # potential_old = self.potential\n obs, rew, done, info = super()._step(a)\n # state = self.robot.calc_state()\n # alive = float(self.robot.alive_bonus(state[0]+self.robot.initial_z, self.robot.body_rpy[1]))\n # alive *= 0.05\n # cost = 0.01 * -np.square(a).sum()\n # progress = float(self.potential - potential_old)\n # # print (\"Rewarsd\", alive, progress)\n # rew = alive + progress + cost\n # # if self.robot.body_xyz[0] > 5:\n # # rew = 1.0\n # # else:\n # # rew = 0.0\n # # print (\"ROBOT: \", self.robot.body_xyz[2] < 0.3)\n # # if done:\n # # print (\"DONE\")\n return obs, rew, done, info",
"def run(self):\n for direction in self.directions:\n rotation = direction[0]\n steps = direction[1]\n\n self.make_rotation(rotation)\n hq_found = self.travel(steps)\n\n if hq_found:\n return (abs(self.new_loc[0] + self.new_loc[1]))",
"def update(self):\r\n \r\n # Some state information for debugging purposes; generally not necessary to print.\r\n #print \"Walking:\", self.state['walking'], \" Running:\", self.state['running'], \" Jumping:\", self.state['jumping']\r\n \r\n # The action currently in process has reached its last sprite image and is about to loop.\r\n # Certain actions require an interruption. Those interruptions are placed here.\r\n if self.state['lastisdone']:\r\n # Landing\r\n if self.state['last'] == 'LAND':\r\n # Landing should only loop once, then transition to standing\r\n self.stand()\r\n # Make sure to check whether the character should be walking\r\n if self.state['ldown'] or self.state['rdown']: self.walk()\r\n \r\n # Jumping\r\n elif self.state['last'] == 'JUMP':\r\n # Jumping should loop on the final frame until a predetermined height is reached\r\n if self.y > self.state['floorheightnow'] - self.sprite_height - self.jump_height:\r\n self.actions['JUMP'].curr_step = self.actions['JUMP'].step_duration * (self.actions['JUMP'].num_steps - 1)\r\n # Once the height is reached, transition to falling\r\n else:\r\n self.nojump()\r\n self.fall()\r\n \r\n # Falling\r\n elif self.state['last'] == 'FALL':\r\n # Falling should loop until one of two things happens:\r\n # If the floor has not yet been reached,\r\n groundtest = self.terrain.groundtest(self.pos())\r\n if not groundtest[0]:\r\n # Increase the fall counter\r\n self.state['fallcount'] += 1\r\n # Check to see whether a predetermined number of loops has been reached\r\n # If it has, transition to falling fast\r\n if self.state['fallcount'] > 100:\r\n self.state['fallcount'] = 0\r\n self.nofall()\r\n self.fallfast()\r\n # If the floor has been reached or surpassed, move to floor height and transition to landing\r\n else:\r\n self.y = groundtest[1][1] - self.sprite_height\r\n self.nofall()\r\n self.land()\r\n \r\n # Falling Fast\r\n elif self.state['last'] == 'FALL_FAST':\r\n # If the floor has been reached or surpassed, move to floor height and transition to landing\r\n groundtest = self.terrain.groundtest(self.pos)\r\n if groundtest[0]:\r\n self.y = groundtest[1]-self.sprite_height\r\n self.nofallfast()\r\n self.land()\r\n\r\n # Always reset the 'lastisdone' flag\r\n self.state['lastisdone'] = False\r\n \r\n \r\n \r\n \r\n \r\n \r\n # Default to standing, unless something below changes the state\r\n self.state['action'] = 'STAND'\r\n \r\n # Walking\r\n if self.state['walking']:\r\n # If either shift key is held, set to running\r\n mods = pygame.key.get_mods()\r\n if mods & pygame.KMOD_LSHIFT or mods & pygame.KMOD_RSHIFT:\r\n self.nowalk()\r\n self.run()\r\n # Otherwise, move forward a preset distance and render the WALK action sprite\r\n else:\r\n self.state['action'] = 'WALK'\r\n self.move((self.walk_step if not self.state['left'] else -self.walk_step, 0))\r\n groundtest = self.terrain.groundtest(self.pos())\r\n if groundtest[0]:\r\n self.y = groundtest[1][1]-self.sprite_height\r\n \r\n # Running\r\n if self.state['running']:\r\n # Move forward a preset distance and render the RUN action sprite\r\n self.state['action'] = 'RUN'\r\n self.move(((self.walk_step if not self.state['left'] else -self.walk_step)*self.fast_factor, 0))\r\n groundtest = self.terrain.groundtest(self.pos)\r\n if groundtest[0]:\r\n self.y = groundtest[1]-self.sprite_height\r\n mods = pygame.key.get_mods()\r\n # If shift is no longer being held, make sure the next frame walking happens\r\n if not (mods & pygame.KMOD_LSHIFT or mods & pygame.KMOD_RSHIFT):\r\n self.norun()\r\n 
self.walk()\r\n \r\n # Jumping\r\n if self.state['jumping']: \r\n # Move up a preset distance and render the JUMP action sprite\r\n self.state['action'] = 'JUMP'\r\n self.move((0,-self.fall_step))\r\n \r\n # Falling\r\n # This action currently happens only as the result of a jump action\r\n if self.state['falling']:\r\n # Move down a preset distance and render the FALL action sprite\r\n self.state['action'] = 'FALL'\r\n self.move((0,self.fall_step))\r\n \r\n # Falling Fast\r\n # This action is intended to happen only when the character has been falling for a while\r\n if self.state['fallingfast']:\r\n # Move down a little farther than in a standard falling action and render the FALL_FAST action sprite\r\n self.state['action'] = 'FALL_FAST'\r\n self.move((0,self.fall_step*self.fast_factor))\r\n \r\n # Landing\r\n # This action should only happen after falling\r\n if self.state['landing']:\r\n self.state['action'] = 'LAND'\r\n \r\n if self.actions[self.state['action']].curr_step/self.actions[self.state['action']].step_duration == self.actions[self.state['action']].num_steps - 1:\r\n self.state['lastisdone'] = True\r\n \r\n if self.state['action'] != self.state['last']:\r\n self.actions[self.state['last']].reset()\r\n \r\n self.state['last'] = self.state['action']",
"def _progress(self, walker):\n\n # first recenter the ligand and the receptor in the walker\n box_lengths, box_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n grouped_walker_pos = group_pair(walker.state['positions'], box_lengths,\n self.binding_site_idxs, self.ligand_idxs)\n\n # center the positions around the center of the binding site\n centered_walker_pos = center_around(grouped_walker_pos, self.binding_site_idxs)\n\n # superimpose the walker state positions over the native state\n # matching the binding site indices only\n sup_walker_pos, _, _ = superimpose(self.native_state['positions'], centered_walker_pos,\n idxs=self.binding_site_idxs)\n\n # calculate the rmsd of the walker ligand (superimposed\n # according to the binding sites) to the native state ligand\n native_rmsd = calc_rmsd(self.native_state['positions'], sup_walker_pos,\n idxs=self.ligand_idxs)\n\n # test to see if the ligand is re-bound\n rebound = False\n if native_rmsd <= self.cutoff_rmsd:\n rebound = True\n\n progress_data = {'native_rmsd' : native_rmsd}\n\n return rebound, progress_data",
"def compute(self, i):\n self.path_is = get_path_type(self.section)\n self.msg, self.section, self.point_b = generate_path(i, self)\n if self.path_is == 'straight':\n rospy.loginfo(\"waiting 6 seconds to lower the bar...\")\n # publish lowring the bar command\n rospy.sleep(6)\n elif self.path_is == 'turn':\n rospy.loginfo(\"waiting 2 second and turn...\")\n # publish raising the bar command\n rospy.sleep(.2)\n\n # enable the tracker\n # rospy.sleep(1)\n self.publish()\n while True:\n distance = distance_to_goal(\n self.x_current, self.y_current, self.section[-1][0], self.section[-1][1]) \n if self.path_finished and distance < .2:\n # disable the tracker\n # rospy.sleep(1)\n break\n self.path_finished = False",
"def __init__(self, \n nd = 2, \n goal = np.array([1.0,1.0]),\n state_bound = [[0,1],[0,1]],\n nA = 4,\n action_list = [[0,1],[0,-1],[1,0],[-1,0]],\n<<<<<<< HEAD:archive-code/puddleworld.py\n ngrid = [10.0,10.0],\n maxStep = 40):\n ngrid = [40, 40]\n x_vec = np.linspace(0,1,ngrid[0])\n y_vec = np.linspace(0,1,ngrid[1])\n for x in x_vec:\n for y in y_vec:\n if ~self.inPuddle([x,y]):\n puddle.append([x,y])\n # puddle is a closed loop \n outpuddlepts = np.asarray(puddle)\n \"\"\"\n\n\n # Horizontal wing of puddle consists of \n # 1) rectangle area xch1<= x <=xc2 && ych1-radius <= y <=ych2+radius\n # (xchi,ychi) is the center points (h ==> horizantal)\n # x, y = state[0], state[1]\n xch1, ych1 = 0.3, 0.7\n xch2, ych2 = 0.65, ych1\n radius = 0.1\n\n\n #Vertical wing of puddle consists of \n # 1) rectangle area xcv1-radius<= x <=xcv2+radius && ycv1 <= y <= ycv2\n # where (xcvi,ycvi) is the center points (v ==> vertical)\n xcv1 = 0.45; ycv1=0.4;\n xcv2 = xcv1; ycv2 = 0.8;\n\n # % 2) two half-circle at end edges of rectangle\n \n # POINTS ON HORIZANTAL LINES OF PUDDLE BOUNDARY\n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n puddle.append([xcv1-radius,ych1-radius])\n \n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1-radius])\n \n for x in np.arange(xch1,xcv1-radius,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n \n puddle.append([xcv1-radius,ych1+radius])\n\n\n for x in np.arange(xcv1+radius,xch2,self.meshsize[0]/2):\n puddle.append([x,ych1+radius])\n\n # POINTS ON VERTICAL LINES OF PUDDLE BOUNDARY\n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1-radius,y])\n \n for y in np.arange(ycv1,ych1-radius,self.meshsize[1]/2):\n puddle.append([xcv1+radius,y])\n \"\"\"\n for y in np.arrange():\n puddle.append([])\n \n for y in np.arrange():\n puddle.append([])\n \"\"\"\n\n # HALF CIRCLES\n ngridTheta = 10\n thetaVec = np.linspace(0,pi,ngridTheta)\n\n for t in thetaVec:\n puddle.append([xch1+radius*np.cos(pi/2+t),ych1+radius*np.sin(pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xch2+radius*np.cos(-pi/2+t),ych2+radius*np.sin(-pi/2+t)])\n\n for t in thetaVec:\n puddle.append([xcv1+radius*np.cos(pi+t),ycv1+radius*np.sin(pi+t)])\n\n for t in thetaVec:\n puddle.append([xcv2+radius*np.cos(t),ycv2+radius*np.sin(t)])\n\n \n outpuddlepts = np.asarray(puddle)\n return outpuddlepts",
"def step():\n x_rand = sample()\n x_nearest = new_nearest_neighbour(x_rand)\n x_new = steer(x_nearest, x_rand)\n if obstacle_free(x_nearest, x_new):\n X_near = new_neighbourhood(x_new)\n x_min = x_nearest\n c_min = x_nearest.cost + x_nearest.dist_to(x_new)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_near.cost + x_near.dist_to(x_new) < c_min):\n x_min = x_near\n c_min = (x_near.cost + x_near.dist_to(x_new) < c_min)\n x_new_node = add_node(x_new, x_min, True)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_new_node.cost + x_near.dist_to(x_new) < x_near.cost):\n x_near.change_parent(x_new_node)\n # Here I check for goal paths and draw the circle\n updated = False\n if shared.root_path:\n updated = goal_path_resolve(shared.root_path[0])\n updated = updated or goal_path_resolve(shared.nodes[-1])\n if updated:\n diameter = shared.root_path_length\n center = ((shared.root_path[0].x + shared.root_path[-1].x) / 2,\n (shared.root_path[0].y + shared.root_path[-1].y) / 2)\n if shared.region:\n shared.region.remove_from_batch()\n shared.region = ellipse.Ellipse(center[0], center[1], diameter)\n shared.region.add_to_batch()",
"def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()",
"async def send_drones_to_extractor(self):\n if self.vespene < 100 and not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n for extractor in self.gas_buildings:\n drones_needed_to_fill_extractor = extractor.ideal_harvesters - extractor.assigned_harvesters\n if drones_needed_to_fill_extractor > 0:\n for drone in self.workers.closer_than(10, extractor).take(drones_needed_to_fill_extractor):\n self.do(drone.gather(extractor))",
"def step(self):\n if self.model.schedule.steps < self.model.residential_steps:\n residential_move = True\n else:\n residential_move = False\n\n\n if residential_move:\n # only step the agents if the number considered is not exhausted\n if self.model.total_considered < self.model.residential_moves_per_step:\n # move residential\n U_res = self.get_res_satisfaction(self.pos)\n self.model.res_satisfaction.append(U_res)\n\n # print(\"U_res\",U_res)\n if U_res < self.T:\n\n # todo: implement different move schemes, for now only random\n # find all empty places\n # rank them\n # take one with boltzmann probability.\n self.evaluate_move(U_res, school=False)\n\n else:\n self.model.res_happy += 1\n\n self.model.total_considered += 1\n #print(\"considered\",self.model.total_considered)\n\n\n else:\n if self.model.total_considered < self.model.school_moves_per_step:\n # school moves\n # satisfaction in current school\n U = self.get_school_satisfaction(self.school, self.dist_to_school)\n self.model.satisfaction.append(U)\n\n # If unhappy, compared to threshold move:\n if U < self.T:\n #print('unhappy')\n self.evaluate_move(U, school=True)\n\n else:\n self.model.happy += 1\n if self.model.total_considered>0:\n self.model.percent_happy = np.ma(self.model.happy/self.model.total_considered)",
"def walk(self):\n # === Choose direction ===\n # Increase probability of movement relative to map dimensions\n v_move = self.width / self.height\n h_move = self.height / self.width\n north, south, east, west = v_move, v_move, h_move, h_move\n\n # Weight the random walk against the edges\n if self.drunkard_x < self.width * 0.25: # far left side of map\n east += self.weighted_toward_center\n elif self.drunkard_x > self.width * 0.75: # far right side of map\n west += self.weighted_toward_center\n if self.drunkard_y < self.height * 0.25: # top of the map\n south += self.weighted_toward_center\n elif self.drunkard_y > self.height * 0.75: # bottom of the map\n north += self.weighted_toward_center\n\n # Weight in favor of the previous direction\n if self._prev_direction == \"north\":\n north += self.weighted_toward_prev_direction\n if self._prev_direction == \"south\":\n south += self.weighted_toward_prev_direction\n if self._prev_direction == \"east\":\n east += self.weighted_toward_prev_direction\n if self._prev_direction == \"west\":\n west += self.weighted_toward_prev_direction\n\n weights = [south, north, east, west]\n moves = {\"south\": (0, 1), \"north\": (0, -1), \"east\": (1, 0), \"west\": (-1, 0)}\n\n direction = choices(list(moves.keys()), weights)[0]\n dx, dy = moves[direction]\n\n # === Walk ===\n # check collision at edges\n if (1 < self.drunkard_x + dx < self.width - 1) and (1 < self.drunkard_y + dy < self.height - 1):\n self.drunkard_x += dx\n self.drunkard_y += dy\n if self.tiles[self.drunkard_x][self.drunkard_y]:\n self.tiles[self.drunkard_x][self.drunkard_y].carve()\n self._tiles_filled += 1\n self._prev_direction = direction",
"def solve_environment(self):\n \n #The first problem formulation\n #K kinds of towers\n #See more details about problem formulation in the writeup \n \n #Get a full matrix of the concatenated coverage matrices for \n #each tower type. THis new matrix has dimensions:\n #(Ntowers) x (sum(potential sites)), where the sum o=is over all tower types\n coverage = np.hstack(i for i in self.coverage_matrices)\n print coverage\n print coverage.shape \n \n #Diagonal matrix of the values of each target\n #(for the scenarios where we don't care about maximizing covered value,\n #target_values is just all ones, so this is just the identity matrix)\n V = np.diag(self.target_values)\n \n #If doing scenario where we want to fortify weakest link, only makes\n #sense if all targets are equal value:\n if self.objective_type == 'min_entries':\n V = np.eye(len(self.target_values))\n\n #Get the matrix of coverage values / expected value saved:\n C = np.dot(V,coverage)\n print 'V', V\n print 'coverage', coverage\n print 'C', C\n \n \n #Since not gauranteed to reach global optimum on any particular initialization,\n #run a few times and take the best result.\n #Just define \"best result\" as the result which had the most overall \n #\"converged\" x, combined over all tower kinds. \n# for j in xrange(self.N_random_starts_max):\n \n \n a = 2. #1.\n tau = 1e-4\n N = sum(i for i in self.N_tower_sites)\n w = np.zeros(N)\n ones = np.ones(N)\n p = 1. #the exponents power when doing he exponent method:\n \n for i in xrange(self.N_reweighting_iterations_max):\n #The concatenated vector of occupancies: Concatenated over all\n #of the kinds of towers.\n x = cvx.Variable(N)\n \n #Different objective functions depending on which optimization problem.\n #These are defined in the scenarios in the main function.\n if self.objective_type == 'min_entries':\n operation = cvx.min_entries\n elif self.objective_type == 'sum_entries':\n operation = cvx.sum_entries\n else:\n raise Exception('must specify valid objective_type')\n \n #Objective function includes penalty term for non-binary x values\n if self.penalty_type == 'reweighted_L1':\n #objective = cvx.Maximize(t - x.T*w)\n objective = cvx.Maximize(operation(C*x - x.T*w))\n\n\n #Main constraints on 0<=x<=1\n constraints = [0<=x, x<=1]\n \n \n #And then for each kind of tower, append the constraint that there\n #be exactly N_i towers, or <= quota (depending on constraint type)\n if self.constraints__type == 'fixed_N_towers' or self.constraints__type == 'tower_quotas':\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n print before_sum\n print before_sum + self.N_tower_sites[tk]\n if self.constraints__type == 'fixed_N_towers':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )==self.N_towers[tk])\n elif self.constraints__type == 'tower_quotas':\n constraints.append(cvx.sum_entries(\n x[before_sum : before_sum + self.N_tower_sites[tk]]\n )<=self.budget__tower_quotas[tk])\n print x[before_sum : before_sum + self.N_tower_sites[tk]]\n \n elif self.constraints__type == 'total_cost':\n costs = np.hstack([np.repeat(self.budget__tower_unit_costs[tk],self.N_tower_sites[tk]) for tk in xrange(self.N_tower_kinds)])\n constraints.append(cvx.sum_entries(costs * x) <= self.budget__total_cost) \n \n \n \n\n\n \n \n print 'penalty_type', self.penalty_type\n print 'objective_type', self.objective_type\n print 'constraints__type', self.constraints__type\n print 'budget__tower_quotas', 
self.budget__tower_quotas\n print 'operation', operation\n print 'objective', objective\n print 'constraints', constraints\n cvx.Problem(objective, constraints).solve(verbose=self.VERBOSE)\n x = np.array(x.value).flatten()\n print 'x', x\n w = a/(tau+np.abs(x))\n p += 1.\n plt.figure(figsize=(5,5))\n plt.plot(x,marker='o')\n plt.savefig('histrograms_{}.png'.format(i))\n print \n \n \n \n \n #From the solution x, get the coordinates of those tower sites where we\n #really do want to place a tower\n #use = np.isclose(x,1.)\n for tk in xrange(self.N_tower_kinds):\n before_sum = np.concatenate(([0],np.cumsum(self.N_tower_sites)))[tk]\n y = x[before_sum : before_sum + self.N_tower_sites[tk]]\n inds = np.argsort(y)\n s = y[inds]\n use = np.where(s>.5)[0]\n print inds\n print s\n print use \n if self.constraints__type == 'fixed_N_towers':\n if len(use) != self.N_towers[tk]:\n print 'Solution did not converge properly. Choosing the K best towers.'\n print self.N_towers[tk], len(use)\n # use = use[-self.N_towers[tk]:]\n use = inds[-self.N_towers[tk]:]\n elif self.constraints__type == 'tower_quotas':\n pass #Just use the towers thresholded at > .5\n print use\n \n \n self.coordinates__solved_towers.append([self.coordinates__tower_sites[tk][mm] for mm in inds[use]])",
"def propagation(self,map):\n near_cells = self.get_near(map)\n \n #fire spreading\n burnable = [] #list of burnable cells\n for cell in near_cells:\n if(cell.nat != 0 and cell.state == 0): #conditions to burn a cell\n burnable.append(cell)\n \n if(self.nat == 2): #spread faster if it's a forest\n n = rdm.randint(0,(self.state*2)) #n: number of cells to burn, n < 9\n if n>8: n=8\n else: n = rdm.randint(0,self.state)\n \n if map.wind_active: \n for i in range(n):\n \n #creating the list in which the choice is made (changing probability according to the wind direction)\n indexes=[]\n for ce in burnable:\n \n if map.wind==0:\n if ce.y > self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y == self.y:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==4:\n if ce.y < self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y== self.y: \n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==2:\n if ce.x > self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==6:\n if ce.x < self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest \n elif map.wind==1:\n if ce.y >= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce)) \n\n elif map.wind==3:\n if ce.y <= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce)) \n \n elif map.wind==5:\n if ce.y <= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce))\n \n elif map.wind==7:\n if ce.y >= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce))\n \n \n if len(indexes)>0:\n r = rdm.choice(indexes) #choose randoly the cell, among the availables, with weight\n cell = near_cells[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n\n\n #without the wind active\n else:\n if n>=len(burnable): #if n is greater than the number of burnable cells, they are all burned\n for cell in burnable:\n cell.state = 1\n map.burn_list.append(cell) #add cell to burn_list\n else: \n for i in range(n):\n r = rdm.randint(0,len(burnable)-1) 
#choose randoly the cell, among the availables\n cell = burnable[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n \n #fire intensity growing \n if(self.nat == 3): #burn faster if it's a house\n self.state += 2\n else:\n self.state += 1\n \n if(self.state > 5): #if it's burned\n self.charred = True\n self.state = 1\n map.burn_list.remove(self) #burned cells are removed form the burn_list",
"def run():\n\n # Build list of stations\n stations = build_station_list()\n \n # Update latest level data for all stations\n update_water_levels(stations)\n \n # Stations at which the current relative level is over 0.8\n z= stations_level_over_threshold(stations, 0.8)\n for a in z:\n print(a[0],a[1])\n print(\".\") \n print(\".\")",
"def do_move(self, world, friendly_units, enemy_units):\r\n # Fly away to freedom, daring fireflies\r\n # Build thou nests\r\n # Grow, become stronger\r\n # Take over the world\r\n\r\n start = time.clock()\r\n \r\n # ---- MAP ANALYSIS AND PLANNING\r\n if (not self.done_init):\r\n f_spawn, e_spawn, self.spawn_distance = get_spawns(world)\r\n self.wall_set = get_walls(world)\r\n self.planned_nest_set, self.nest_completion_set = tile_nests(world, self.wall_set)\r\n self.done_init = True\r\n\r\n # Objectives\r\n self.nest_completion_set = {x for x in self.nest_completion_set if world.get_tile_at(x).is_neutral()}\r\n potential_map = MapUtils.get_potential_map(world, self.nest_completion_set, friendly_units, enemy_units)\r\n\r\n # Update cached variables\r\n active_agents = {x.uuid for x in friendly_units}\r\n self.uuid_task_map = {k:v for k,v in self.uuid_task_map.items() if k in active_agents}\r\n \r\n # Statistics\r\n neg_strength = sum((x.health for x in enemy_units))\r\n pos_strength = sum((x.health for x in friendly_units))\r\n \r\n neg_density = neg_strength / len(enemy_units)\r\n pos_density = pos_strength / len(friendly_units)\r\n \r\n fraction_explored = 1 - (len(world.get_neutral_tiles()) / (world.get_width() * world.get_height()))\r\n \r\n # Lookups\r\n pos_enemy_lookup = world.get_position_to_enemy_dict()\r\n \r\n idle_units = list(unit for unit in friendly_units if not unit.uuid in self.uuid_task_map or self.uuid_task_map[unit.uuid].complete)\r\n\r\n # Print turn statistics\r\n print(\"f:{} e:{} - {}/{} idle\".format(pos_strength, neg_strength, len(idle_units), len(friendly_units)))\r\n\r\n # ---- UNIT LOOP\r\n for unit in friendly_units:\r\n\r\n # Ensure that we don't time out\r\n current = time.clock()\r\n if (current - start > 0.55):\r\n break\r\n\r\n idle = not unit in self.uuid_task_map\r\n curr_task = None\r\n current_move = None\r\n\r\n # ---- GLOBAL STRATEGY\r\n # Units are dispatched to achieve global objectives, but may\r\n # divert from their course and achieve local goals along the way.\r\n if (idle):\r\n curr_task = self.get_task_for_unit(world, unit, pos_density, neg_density)\r\n if (curr_task):\r\n self.uuid_task_map[unit.uuid] = curr_task\r\n else:\r\n curr_task = self.uuid_task_map[unit.uuid]\r\n \r\n if (curr_task):\r\n current_move = curr_task.get_next_move()\r\n\r\n # ---- LOCAL STRATEGY\r\n # In addition to top-down control based on game state, units can\r\n # make local decisions about attacking, defending and resting\r\n # based on enemies and a potential map. 
\r\n\r\n neighbours = list(world.get_neighbours(unit.position).values())\r\n local_potential = potential_map.get(unit.position, 1)\r\n\r\n # Some probability of simply staying put and powering up\r\n if (fraction_explored > 0.9 and random.random() < 0.15):\r\n current_move = None\r\n if curr_task: curr_task.complete # Set the current task to complete so that unit is reassigned\r\n\r\n # Probability based local behavior\r\n for n in neighbours:\r\n delta = local_potential - potential_map.get(n, 1)\r\n if (delta > 1 or (delta > 0 and random.random() < delta)):\r\n current_move = n\r\n if curr_task: curr_task.complete # Set the current task to complete so that unit is reassigned\r\n\r\n # Priority #1 trading with enemies, then other moves\r\n if (not current_move in pos_enemy_lookup):\r\n adjacent_enemies = [x for x in neighbours if x in pos_enemy_lookup]\r\n if (adjacent_enemies):\r\n current_move = max(adjacent_enemies, key=lambda x: pos_enemy_lookup[x].health)\r\n if curr_task: curr_task.complete # Set the current task to complete so that unit is reassigned\r\n\r\n # ---- PERFORM ACTION\r\n # Perform the chosen move itself\r\n if (current_move):\r\n try:\r\n world.move(unit, current_move)\r\n except Exception as e:\r\n print('An exception occurred: {}'.format(e))\r\n \r\n self.turn += 1",
"def _step(self, a):\n # potential_old = self.potential\n obs, rew, done, info = super()._step(a)\n # state = self.robot.calc_state()\n # alive = float(self.robot.alive_bonus(state[0]+self.robot.initial_z, self.robot.body_rpy[1]))\n # alive *= 0.01\n\n # cost = 0.001 * -np.square(a).sum()\n\n # progress = float(self.potential - potential_old)\n # print (\"Rewarsd\", alive, progress)\n # rew = alive + progress + cost\n # if self.robot.body_xyz[0] > 5:\n # rew = 1.0\n # else:\n # rew = 0.0\n return obs, rew, done, info",
"def _step(self, a):\n obs, rew, done, info = super()._step(a)\n # if self.robot.body_xyz[0] > self.threshold:\n # rew = 1.0\n # self.threshold += 1\n # else:\n # rew = 0.0\n # self.steps += 1\n # if self.steps > self.max_episode_steps:\n # done = True\n return obs, rew, done, info",
"def step(self):\n if self.store_paths:\n leapfrog_steps = self._max_leapfrog_steps\n else:\n leapfrog_steps = torch.ceil(self._max_leapfrog_steps * torch.rand(1)).int()\n self.potential_ = self.get_potential()\n self.metric_ = self.get_metric()\n self.momentum = self.resample_momenta()\n self.hamiltonian_ = self.get_hamiltonian()\n old_hamiltonian = self.hamiltonian_\n if self.shadow:\n if self.max_shadow is not None:\n old_shadow = torch.max(self.shadow_.clone() + self.max_shadow, old_hamiltonian)\n else:\n old_shadow = self.shadow_.clone()\n rejected = False\n for step in range(leapfrog_steps):\n if (self._integrator == 'RMHMC') and (self.lbfgs == False):\n self.momentum, rejected = self.implicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.parameters, rejected = self.implicit_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n elif self.lbfgs == True:\n self.momentum, rejected = self.lbfgs_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.parameters, rejected = self.lbfgs_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n self.momentum = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if rejected:\n break\n else:\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n self.parameters = self.explicit_drift()\n self.parameters = self.parameters.detach().requires_grad_(True)\n self.momentum, rejected = self.explicit_half_kick()\n self.parameters = self.parameters.detach().requires_grad_(True)\n if self.store_paths:\n self.paths.append(self.parameters.detach())\n new_hamiltonian = self.get_hamiltonian()\n ratio = old_hamiltonian - new_hamiltonian\n self.hamiltonian_error.append(ratio.detach().unsqueeze(0))\n if self.shadow:\n if self.max_shadow is not None:\n new_shadow = torch.max(self.get_shadow() + self.max_shadow, new_hamiltonian)\n else:\n new_shadow = self.get_shadow()\n shadow_error = old_shadow - new_shadow\n newratio = ratio + shadow_error\n self.shadow_hamiltonian_error.append(newratio.detach().unsqueeze(0))\n ratio = newratio\n\n uniform_rand = torch.rand(1)\n if uniform_rand >= torch.exp(ratio):\n # Reject sample\n rejected = True\n\n if rejected:\n if (len(self.momenta) > 10) and (self.momenta[-1] == self.momenta[-10]).sum().item():\n self.degenerate = True\n self.rejected += 1\n self.momentum = self.momenta[-1]\n self.parameters = self.samples[-1].clone().detach().requires_grad_(True)\n if self.shadow:\n radon_nikodym = torch.exp(old_shadow).unsqueeze(0)\n \n if self.verbose:\n print(\"(Rejected)\", int(self.acceptance_rate() * 100), \"%; Log-ratio: \",\n ratio.detach())\n else:\n self.accepted += 1\n if self.shadow:\n radon_nikodym = torch.exp(new_shadow).unsqueeze(0)\n if self.verbose:\n print(\"(Accepted)\", int(self.acceptance_rate() * 100), \"%; Log-ratio: \",\n ratio.detach())\n self.samples.append(self.parameters.detach())\n self.momenta.append(self.momentum)\n self.hamiltonians.append(self.hamiltonian_.detach())\n self.rands_.append(uniform_rand)\n self.shadows.append(self.shadow_.detach())\n if self.shadow:\n self.radon_nikodym.append(radon_nikodym.detach())\n return None",
"def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)",
"def run(self, num_iterations, debug=False):\n for cur_iteration in range(num_iterations):\n if debug:\n print 'At beginning of iteration:'\n self.check_fields()\n\n for cur_fluid in self.fluid_list:\n cur_fluid.move() # Move all jumpers\n if debug:\n print 'After move'\n self.check_fields()\n\n for cur_fluid in self.fluid_list:\n cur_fluid.move_bcs() # Must move before applying BC\n if debug:\n print 'After move bcs'\n self.check_fields()\n\n # Update forces here as appropriate\n for cur_fluid in self.fluid_list:\n cur_fluid.update_hydro() # Update the hydrodynamic variables\n if debug:\n print 'After updating hydro'\n self.check_fields()\n\n # Reset the total body force and add to it as appropriate\n self.Gx[...] = 0\n self.Gy[...] = 0\n for d in self.additional_forces:\n kernel = d[0]\n arguments = d[1]\n kernel(*arguments).wait()\n if self.poisson_force_active:\n self.screened_poisson_kernel()\n if debug:\n print 'After updating supplementary forces'\n self.check_fields()\n\n # Update other forces...includes pourous effects & must be run last\n for cur_fluid in self.fluid_list:\n cur_fluid.update_forces()\n if debug:\n print 'After updating internal forces'\n self.check_fields()\n\n # After updating forces, update the bary_velocity\n self.update_bary_velocity()\n if debug:\n print 'After updating bary-velocity'\n self.check_fields()\n\n for cur_fluid in self.fluid_list:\n cur_fluid.update_feq() # Update the equilibrium fields\n if debug:\n print 'After updating feq'\n self.check_fields()\n\n for cur_fluid in self.fluid_list:\n cur_fluid.collide_particles() # Relax the nonequilibrium fields.\n if debug:\n print 'After colliding particles'\n self.check_fields()\n\n # Loop over any additional collisions that are required (i.e. mass gain/loss)\n for d in self.additional_collisions:\n kernel = d[0]\n arguments = d[1]\n kernel(*arguments).wait()",
"def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()"
] | [
"0.60423297",
"0.5936672",
"0.5913109",
"0.5713147",
"0.57058764",
"0.56307214",
"0.56240726",
"0.5593375",
"0.55728346",
"0.5571248",
"0.556696",
"0.55633765",
"0.55494684",
"0.55380464",
"0.5494036",
"0.5490094",
"0.5489748",
"0.5474049",
"0.5456332",
"0.5423952",
"0.5392916",
"0.5370085",
"0.5359284",
"0.53391176",
"0.53354895",
"0.53315556",
"0.53202844",
"0.53154236",
"0.5315315",
"0.5301988"
] | 0.65448344 | 0 |
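Several of the negative samples in the row above (for instance the rebinding _progress method) collapse a walker's state to one scalar by superimposing the binding site onto a native structure and then taking an RMSD over the ligand atoms only. As a point of reference, the short sketch below shows just that last step, an RMSD restricted to a subset of atom indices, in plain numpy; the superposition step is omitted and the array shapes are assumptions rather than anything taken from the dataset rows.

import numpy as np

def calc_rmsd(ref_pos, pos, idxs):
    # ref_pos, pos: (n_atoms, 3) coordinate arrays already aligned in the same frame
    # idxs: integer indices of the atoms the RMSD is restricted to (e.g. the ligand)
    diff = pos[idxs] - ref_pos[idxs]
    return np.sqrt((diff ** 2).sum() / len(idxs))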
Tests whether a warping record generated by this class is discontinuous or not. | def warping_discontinuity(cls, warping_record):
# if it is Ellipsis then all possible values are discontinuous
if cls.DISCONTINUITY_TARGET_IDXS is Ellipsis:
return True
# if it is None then all possible values are continuous
elif cls.DISCONTINUITY_TARGET_IDXS is None:
return False
# otherwise it will have a tuple of indices for the
# target_idxs that are discontinuous targets
elif warping_record[2] in cls.DISCONTINUITY_TARGET_IDXS:
return True
# otherwise it wasn't a discontinuous target
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isDisturbance(self):\n return False",
"def is_disarming(self):\n return self == ArmingState.DISARMING",
"def is_inequality(self):\n return False",
"def is_inequality(self): \n return False",
"def violated(self) -> bool:\n ...",
"def isDisturbance(self):\n return True",
"def is_degenerated(self):\n for interval in self.intervals:\n if not Interval.is_degenerated(interval):\n return False\n return True",
"def is_inequality(self):\n return True",
"def is_inequality(self):\n return True",
"def isFallthrough(self) -> bool:\n ...",
"def is_declined(self):\n return self.get_data(\"state\") == self.STATE_DECLINED",
"def hasFallthrough(self) -> bool:\n ...",
"def is_water(self):\n return False",
"def is_water(self):\n return False",
"def __bool__(self):\n return not self.undefine",
"def is_degenerated(interval):\n return interval.right == interval.left",
"def is_recording(self) -> bool:\n return self.elastic_span.transaction.is_sampled and not self.elastic_span.ended_time",
"def _is_sporadic_records(self, run_record_key):\n\n # assume it is continual and check if it is in the sporadic groups\n if run_record_key in SPORADIC_RECORDS:\n return True\n else:\n return False",
"def invariant(self):\n\t\treturn (self.demand.popId != self.dstPopId)",
"def inferrable(self) -> bool:\n return self._strategy.inferrable",
"def is_over(self):\n return self.is_dead",
"def is_over(self):\n return self.is_dead",
"def is_over(self):\n return self.is_dead",
"def degenerate(self):\n return self.radius == 0.0",
"def deceived(self, a):\n try:\n n = self.control[a.name][0]\n if hardrule:\n return self.nsucc(a) > self.succ(a) + epsilonD and \\\n (n > 5) and ((self.world.round - n) > 5) \n else:\n return (n > 5) and ( (a.successRate > self.succ(a) + epsilonD) or \\\n (a.successRate < epsilonD))\n except KeyError:\n raise AssertionError, str(a) + \" is not a non-MI!\"",
"def _nonforce_drop(self) -> bool:\n if self.closed:\n return True\n if self.zero_failures():\n return False\n return random.random() < self.failurerate",
"def _departure_on_duty(self) -> bool:\n return self._get_departure_shift().is_on_duty()",
"def is_unhappy(self):\n #checked!#\n ###your code here###\n same=0\n for i in self.home.neighbors:\n if i.occupant!=None:\n if i.occupant.group==self.group:\n same+=1\n happniess=float(same)/len(self.home.neighbors)\n if happniess<self.happiness_threshold:\n return True\n else:\n return False",
"def is_artificial(self):\n\t\treturn 0",
"def halt(population, generation_count):\n return generation_count > DEFAULT_MAX_GENERATION or population[0].fitness == 0"
] | [
"0.6019516",
"0.58300686",
"0.5823266",
"0.58221227",
"0.5780994",
"0.57764226",
"0.56322896",
"0.562625",
"0.562625",
"0.5590142",
"0.557394",
"0.55690134",
"0.5565335",
"0.5565335",
"0.55490774",
"0.55443126",
"0.55276537",
"0.5511996",
"0.54868835",
"0.5474168",
"0.5464244",
"0.5464244",
"0.5464244",
"0.5453336",
"0.54527587",
"0.54475087",
"0.5435391",
"0.54280525",
"0.54261583",
"0.54230076"
] | 0.7393263 | 0 |
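The warping_discontinuity document above dispatches entirely on the class attribute DISCONTINUITY_TARGET_IDXS: Ellipsis means every warp breaks trajectory continuity, None means none do, and a tuple restricts discontinuity to particular target indices. The self-contained sketch below exercises that three-way dispatch; the class names, the record layout (target index in position 2), and the example values are illustrative assumptions, not part of the dataset row.

class UnbindingBC:
    # Ellipsis: every warping event restarts the walker, so all warps are discontinuous
    DISCONTINUITY_TARGET_IDXS = Ellipsis

class NoWarpBC:
    # None: warps never break continuity
    DISCONTINUITY_TARGET_IDXS = None

class MultiTargetBC:
    # tuple: only warps to these target indices are discontinuous
    DISCONTINUITY_TARGET_IDXS = (0, 2)

def warping_discontinuity(cls, warping_record):
    if cls.DISCONTINUITY_TARGET_IDXS is Ellipsis:
        return True
    elif cls.DISCONTINUITY_TARGET_IDXS is None:
        return False
    elif warping_record[2] in cls.DISCONTINUITY_TARGET_IDXS:
        return True
    else:
        return False

record = (17, 0.003, 0)   # assumed layout: (cycle_idx, weight, target_idx)
assert warping_discontinuity(UnbindingBC, record) is True
assert warping_discontinuity(NoWarpBC, record) is False
assert warping_discontinuity(MultiTargetBC, (17, 0.003, 1)) is False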
The cutoff RMSD for considering a walker bound. | def cutoff_rmsd(self):
return self._cutoff_rmsd | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cutoff_distance(self):\n return self._cutoff_distance",
"def getCutoffDistance(self):\n return self.cutoffDistance",
"def _find_cutoff(self):\n cutoff = 1\n while ((self.linear_rstar_unnorm(cutoff) -\n self.turing_rstar_unnorm(cutoff))**2\n > self.approx_turing_variance(cutoff)):\n cutoff += 1\n return cutoff",
"def _standardize_cutoff(cutoff):\n cutoff = np.asarray(cutoff)\n cutoff[0] = max(0., cutoff[0])\n cutoff[1] = min(1., cutoff[1])\n cutoff[0] = np.min([cutoff[0], 0.09])\n cutoff[1] = np.max([cutoff[1], 0.91])\n return cutoff",
"def get_cutoff_dim(self):\n return self.circuit._trunc",
"def cutoff(self, state, depth):\n abstract",
"def get_tolerance(self):\n return self.tolerance",
"def score_cutoff_(self):\n return self.predictor.score_cutoff_fun(self.scores_,\n self.score_coefficient)",
"def calc_tolerance(wt):\n return 1 - wt",
"def cutoff(self, *args, **kwargs) -> Any:\n pass",
"def Tolerance(self):\n\t\treturn self._get_attribute('tolerance')",
"def lower_bound(self) -> float:\n ...",
"def LimitTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_LimitTolerance(self, *args)",
"def _calculate_spatial_frequency_cutoff(self):\n \n # unused \n self.spatial_cutoff = (self.unit_actuators / self.telescope_diameter)/2",
"def limdrift_cutoff(g, tau, cutoff=1):\n return mc.limdrift(g, cutoff) * tau",
"def margulis_bound(self, normalized=False):\n if normalized:\n return 5 * np.sqrt(2) / 8\n else:\n return 5 * np.sqrt(2)",
"def tolerance(self):\n return self.params['tolerance']",
"def _compute_cutoffs(self):\n self._cutoffidx=np.zeros(self.nsamples,dtype=np.int)\n # Find the inlfection point\n # TODO: check robustness of this method against fluctuations in the data\n self.samplesdatadiff=np.diff(self.samplesdata,axis=0)\n flex=np.argmax(self.samplesdatadiff,axis=0)\n # if the detected cycles is the last one, then the flex has not yet been reached, warn.\n for i,f in enumerate(flex):\n #self._message(\"(%s) Preanalysis - detection of inflection point.\"%(self.samples[i])) \n if f==(self.nvalues-1):\n self._cutoffidx[i]=f\n self._message(\"Warning: (%s) Inflection point not detected. Using all fluorescent values available (%d cycles).\"%(self.samples[i],f)) \n elif f<10:\n self._message(\"Warning: (%s) Early inflection point (cycle %d).\"%(self.samples[i],f))\n else: \n self._cutoffidx[i]=np.minimum(f+2,self.nvalues)\n #self._message(\"(%s) Inflection point found at cycle %d).\"%(self.samples[i],f)) ",
"def trim(self, edge_ic_cutoff=0.4):\n pwm = self.pwm[:]\n while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff:\n pwm = pwm[1:]\n self.pwm = self.pwm[1:]\n self.pfm = self.pfm[1:]\n while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff:\n pwm = pwm[:-1]\n self.pwm = self.pwm[:-1]\n self.pfm = self.pfm[:-1]\n \n self.consensus = None \n self.min_score = None\n self.max_score = None\n self.wiggled_pwm = None\n \n return self",
"def ramp_kernel_real(cutoff, length):\n pos = np.arange(-length, length, 1)\n return cutoff ** 2.0 * (2.0 * np.sinc(2 * pos * cutoff) - np.sinc(pos * cutoff) ** 2.0)",
"def find_cutoff(self, roi_results):\n int_ravel = roi_results[~np.isnan(roi_results[:, 3]), 3]\n mean = 0\n std = 0\n\n for _ in range(10):\n # for 10 times, fit norm to intensity and throw away outliers\n mean, std = norm.fit(int_ravel)\n int_ravel = int_ravel[int_ravel < mean + std * self.threshold_sigma]\n\n return mean + self.threshold_sigma * std",
"def tolerance(self) -> float:\n return self._tolerance",
"def calculate_optimal_dmstep(self, acceptedSNR= 95):\n\n if not self.useSNR:\n return 1.205e-7 * self.tsamp * (self.freq ** 3) / self.bandwidth\n \n x, y = self._calculate_snr_spread()\n return fabs(self.centerDm - x[np.max(np.where(y > np.max(y) * float(acceptedSNR) / 100.0 ))])",
"def getLowerFrequencyBound(self) -> int:\n return self.lower_frequency_bound",
"def tolerance(self):\n return self._tolerance",
"def _get_observation_lower_bound(self):\n lower_bound = -self._get_observation_upper_bound()\n lower_bound[-7] = 0.0\n lower_bound[-2:] = [self.min_speed, self.min_side_speed]\n return lower_bound",
"def upper_bound(self) -> float:\n ...",
"def getMagBoundary(self):\n\n # Get the boundary of magnitude based on the filter\n lowMagnitude = nan\n highMagnitude = nan\n if (self.filter == self.FilterU):\n lowMagnitude = 7.94\n highMagnitude = 14.80\n\n elif (self.filter == self.FilterG):\n lowMagnitude = 9.74\n highMagnitude = 16.17\n\n elif (self.filter == self.FilterR):\n lowMagnitude = 9.56\n highMagnitude = 15.73\n\n elif (self.filter == self.FilterI):\n lowMagnitude = 9.22\n highMagnitude = 15.26\n\n elif (self.filter == self.FilterZ):\n lowMagnitude = 8.83\n highMagnitude = 14.68\n \n elif (self.filter == self.FilterY):\n lowMagnitude = 8.02\n highMagnitude = 13.76\n\n return lowMagnitude, highMagnitude",
"def ideal_thickness(self, opt_freq=160e9):\n return (1/np.sqrt(self.dielectric)*3e8/(4*opt_freq))",
"def get_lift(self):\n return 0.0"
] | [
"0.68994784",
"0.684816",
"0.6659511",
"0.63361603",
"0.6224314",
"0.610096",
"0.60006905",
"0.5984938",
"0.5863935",
"0.5832403",
"0.5760526",
"0.5737055",
"0.5589224",
"0.55820787",
"0.5528107",
"0.5526099",
"0.5503479",
"0.5474288",
"0.5471942",
"0.54236096",
"0.5419457",
"0.54047346",
"0.53930485",
"0.53815424",
"0.5350512",
"0.5332484",
"0.531575",
"0.5308684",
"0.5306326",
"0.5294618"
] | 0.7921369 | 0 |
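The cutoff_rmsd document above is a bare property getter. In context such a getter is normally backed by a constructor argument that is validated once and then exposed read-only; the sketch below shows that pattern. The class name, default value, and validation are assumptions for illustration, not taken from the dataset row.

class RebindingBC:
    def __init__(self, cutoff_rmsd=0.2):
        # validate once at construction; the property then exposes it read-only
        if cutoff_rmsd <= 0:
            raise ValueError("cutoff_rmsd must be a positive distance")
        self._cutoff_rmsd = cutoff_rmsd

    @property
    def cutoff_rmsd(self):
        """The cutoff RMSD for considering a walker bound."""
        return self._cutoff_rmsd

bc = RebindingBC(cutoff_rmsd=0.2)
print(bc.cutoff_rmsd)   # 0.2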
The indices of the atom positions in the state considered the binding site. | def binding_site_idxs(self):
return self._receptor_idxs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def atom_idxs(self):\n\n return np.array([atom.atom_idxs for atom in self])",
"def agent_locs_idx(self):\n return tuple(self.agent_locs.T)",
"def get_indices(self):\r\n return self._indices",
"def getIndices(self):\r\n return self._indices",
"def indices(self):\n i, j, _edge = self.indicesAndEdge()\n return i, j",
"def indices(self) -> np.ndarray:\n return self.impl.indices",
"def indices(self):\n return self.index.indices",
"def indices(self):\n return range(len(self))",
"def ligand_idxs(self):\n return self._ligand_idxs",
"def mainIndices(self):\n return self.i1, self.i2",
"def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]",
"def master_ndindex(self): # itermaster_indices(self):\n return itertools_product(\n *[range(*r) for r in self.location]\n ) # TODO check",
"def vir_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==0:\n indices.append(index)\n return indices",
"def getLandmarkindices(self):\n return self.subsetnodes_indices",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def getGlobalIdxVals( self, i : int ):\n return range(self._layout.starts[i],self._layout.ends[i])",
"def indices(self, position=None):\n \n raise NotImplementedError()",
"def receptor_idxs(self):\n\n return self._receptor_idxs",
"def indices(self):\n return tuple([slice(*r) for r in self.location])",
"def indices(self):\n _indices = []\n for h in self.miller.indices():\n _indices.append(self.indices_hkl(*h)[0])\n return _indices",
"def occ_indices(self):\n indices = []\n for index,item in enumerate(self):\n if item==1:\n indices.append(index)\n return indices",
"def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping",
"def _dofidxs(self):\n return [const['dofidxs'] for i, const in self._constraints_df.iterrows()]",
"def position(self) -> np.ndarray:\n return self._state[0:2]",
"def get_final_pruned_indices(self):\n return self.final_pruned_indices",
"def _pair_indices(self):\n indices_src = []\n indices_dst = []\n for i in range(self.walk_len):\n for j in range(max(i - self.l, 0), i):\n indices_src.append(i)\n indices_dst.append(j)\n for j in range(i + 1, min(i + self.r + 1, self.walk_len)):\n indices_src.append(i)\n indices_dst.append(j)\n return indices_src, indices_dst",
"def getLandmarkindices(self):\n return self.subsetindices",
"def getCoordinates(self):\n return list(self.gridVars.keys())",
"def get_position(self):\n return [self._row, self._column]",
"def get_indices(self):\n selection_model = self.selectionModel()\n return selection_model.selectedRows()"
] | [
"0.7762307",
"0.73607725",
"0.7259929",
"0.7114606",
"0.7097203",
"0.7028722",
"0.6852481",
"0.6815367",
"0.67249656",
"0.6657523",
"0.6605119",
"0.6554097",
"0.65420175",
"0.65259284",
"0.6498936",
"0.64765036",
"0.6446694",
"0.6387674",
"0.63860303",
"0.63356364",
"0.6294078",
"0.6279186",
"0.6264533",
"0.6247691",
"0.6237873",
"0.6209156",
"0.6200978",
"0.61969554",
"0.6171539",
"0.6162364"
] | 0.7760267 | 1 |
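The binding_site_idxs document above simply exposes a stored index array. A common way such index arrays are produced in the first place is to select atoms from an mdtraj topology and keep the protein atoms within some cutoff of the ligand in a reference frame; the sketch below shows that, with the input file name, the residue name "LIG", and the 0.8 nm cutoff all being illustrative assumptions.

import numpy as np
import mdtraj as mdj

ref = mdj.load('complex.pdb')                      # assumed reference structure
ligand_idxs = ref.topology.select('resname LIG')   # assumed ligand residue name
protein_idxs = ref.topology.select('protein')

# binding site: protein atoms within 0.8 nm of any ligand atom in the reference frame
neighbors = mdj.compute_neighbors(ref, 0.8, ligand_idxs, haystack_indices=protein_idxs)
binding_site_idxs = np.array(sorted(neighbors[0]))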
Min-min distance for a walker. | def _calc_min_distance(self, walker):
cell_lengths, cell_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])
t2 = time.time()
# make a traj out of it so we can calculate distances through
# the periodic boundary conditions
walker_traj = mdj.Trajectory(walker.state['positions'],
topology=self._mdj_top,
unitcell_lengths=cell_lengths,
unitcell_angles=cell_angles)
t3 = time.time()
# calculate the distances through periodic boundary conditions
        # and get the minimum distance
min_distance = np.min(mdj.compute_distances(walker_traj,
it.product(self.ligand_idxs,
self.receptor_idxs),
periodic=self._periodic)
)
t4 = time.time()
logging.info("Make a traj: {0}; Calc dists: {1}".format(t3-t2,t4-t3))
return min_distance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_min_distance():\n return np.argmin(d)",
"def _minimum_distance(self,arg):\n return min([abs(arg-e) for e in self if not e is arg])",
"def min_distance(distance, spt_set, self_nodes):\n minimum = sys.maxsize\n minimum_node = None\n for curr_node in self_nodes.values():\n if distance[curr_node.id] < minimum and not spt_set[curr_node.id]:\n minimum = distance[curr_node.id]\n minimum_node = curr_node\n return minimum_node",
"def minimum_distance(self, state, *args, **kwargs):\n raise NotImplementedError",
"def get_min_distance(self):\n return round(min(self.combined_euclidian_distance))",
"def get_min_distance(self, node):\r\n if self.have_min_distance(node):\r\n return self.table[node][\"dist\"]\r\n return None",
"def min_distance(self, target):\n difference = self.pivot - target\n return max(math.sqrt(np.dot(difference, difference)) - self.radius, 0)",
"def find_min(self):\n return min(self.nodes, key=int)",
"def find_min(self):\n return self.min",
"def find_min(self):\n return self.min",
"def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode",
"def smallest (self):\n return self.pointers[0].smallest()",
"def potential_min(self):\n\n return self._args.min",
"def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)",
"def get_min_distance(distances, unvisited_nodes):\n min_value = None\n node = None\n for city, distance in distances.items():\n if city not in unvisited_nodes:\n continue\n if min_value is None:\n node = city\n min_value = distance\n elif distance < min_value:\n node = city\n min_value = distance\n return node",
"def getMinNode(self):\n currentNode = self.openList[0]\n for node in self.openList:\n if node.g + node.h < currentNode.g + currentNode.h:\n currentNode = node\n return currentNode",
"def min(self):\n return min(self)",
"def min(self):\n return self._min(self.root)",
"def min(self):\n return self.__min",
"def min(self):\n node = self\n while node.left:\n node = node.left\n return node",
"def min(self) -> float:\n return stats.min(self)",
"def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX",
"def findMin(self):\n curr = self\n while curr.hasLeftChild():\n curr = curr.leftChild\n return curr",
"def get_min(self):\n\t\tif self.left:\n\t\t\treturn self.left.get_min()\n\t\treturn self.value",
"def find_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current",
"def min(self):\n return self._min_coords",
"def peek_min(self):\n if self.root:\n return self.root.min().value\n raise ValueError(\"cannot perform peek_min on an empty tree\")",
"def min(self):\n return self._min",
"def min(self):\n return self._min",
"def find_min(self):\n\n if self.left:\n return self.left.find_min()\n\n return self.data"
] | [
"0.73179567",
"0.72875845",
"0.72847605",
"0.71689516",
"0.7112868",
"0.6957201",
"0.68769455",
"0.6720647",
"0.67204887",
"0.67204887",
"0.6690567",
"0.6615009",
"0.6576083",
"0.65604377",
"0.6559518",
"0.65405184",
"0.649975",
"0.64913887",
"0.6486688",
"0.6473945",
"0.6455698",
"0.6436357",
"0.64296347",
"0.6419847",
"0.6419589",
"0.6374782",
"0.636303",
"0.6361566",
"0.6361566",
"0.63474524"
] | 0.7929464 | 0 |
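The _calc_min_distance document above builds a one-frame mdtraj Trajectory so that compute_distances can respect periodic boundary conditions, then takes the minimum over all ligand-receptor atom pairs. For a non-periodic box the same "min-min" distance reduces to a few lines of numpy, as in the sketch below; the array shapes are assumptions and periodic imaging is deliberately left out.

import numpy as np

def min_min_distance(positions, ligand_idxs, receptor_idxs):
    # positions: (n_atoms, 3); ligand_idxs / receptor_idxs: integer index arrays
    lig = positions[ligand_idxs]               # (n_lig, 3)
    rec = positions[receptor_idxs]             # (n_rec, 3)
    diffs = lig[:, None, :] - rec[None, :, :]  # (n_lig, n_rec, 3) via broadcasting
    dists = np.sqrt((diffs ** 2).sum(axis=-1))
    return dists.min()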
Calculate whether a walker has unbound and also provide a dictionary for a single walker in the progress records. | def _progress(self, walker):
min_distance = self._calc_min_distance(walker)
# test to see if the ligand is unbound
unbound = False
if min_distance >= self._cutoff_distance:
unbound = True
progress_data = {'min_distances' : min_distance}
return unbound, progress_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _progress(self, walker):\n\n # first recenter the ligand and the receptor in the walker\n box_lengths, box_angles = box_vectors_to_lengths_angles(walker.state['box_vectors'])\n grouped_walker_pos = group_pair(walker.state['positions'], box_lengths,\n self.binding_site_idxs, self.ligand_idxs)\n\n # center the positions around the center of the binding site\n centered_walker_pos = center_around(grouped_walker_pos, self.binding_site_idxs)\n\n # superimpose the walker state positions over the native state\n # matching the binding site indices only\n sup_walker_pos, _, _ = superimpose(self.native_state['positions'], centered_walker_pos,\n idxs=self.binding_site_idxs)\n\n # calculate the rmsd of the walker ligand (superimposed\n # according to the binding sites) to the native state ligand\n native_rmsd = calc_rmsd(self.native_state['positions'], sup_walker_pos,\n idxs=self.ligand_idxs)\n\n # test to see if the ligand is re-bound\n rebound = False\n if native_rmsd <= self.cutoff_rmsd:\n rebound = True\n\n progress_data = {'native_rmsd' : native_rmsd}\n\n return rebound, progress_data",
"def continueCheck(building, lift):\n\n continue_lift = False\n # if passengers remain in any of the dictionaries, both same length.\n for i in range(0,len(building.up_dictionary),1):\n passengers = building.up_dictionary[i]\n if passengers:\n continue_lift = True\n else:\n continue\n passengers = building.down_dictionary[i]\n if passengers:\n continue_lift = True\n else:\n continue\n # any undelivered passengers, continue\n if (len(lift.current_passengers) > 0):\n continue_lift = True\n\n return continue_lift",
"def _analyze(self):\r\n if self.value is None or self.value == self.previous:\r\n pass\r\n elif self._operation == \"add\":\r\n self._additions = self.value\r\n elif self._operation == \"remove\":\r\n self._removals = self.value\r\n elif self.previous is None:\r\n self._assignments = self.value\r\n else:\r\n # partial update time\r\n self._additions = (self.value - self.previous) or None\r\n self._removals = (self.previous - self.value) or None\r\n self._analyzed = True",
"def _rec_only_updated(cls, rec):\n return rec.get('uplinked', None) \\\n and not rec.get('queued', None) \\\n and not rec.get('announced', None) \\\n and not rec.get('blocked', None) \\\n and not rec.get('finished', None) \\\n and not rec.get('aborted', None)",
"def _progress(self, walker):\n\n raise NotImplementedError",
"def unify_walk(d1, d2, U):\r\n for (k1, v1) in d1.items():\r\n if d2.has_key(k1):\r\n U = unify_walk(v1, d2[k1], U)\r\n if U is False:\r\n return False\r\n return U",
"def check_for_dict(check):",
"def check_dict_alg(dic, validator, entry_list, messages, whole_validator, current_elem):\n for node in validator:\n new_list = dc(entry_list)\n node_value = validator[node]\n if node != 'isReference':\n if not ('isReference' in node_value and len(entry_list) == 0):\n if is_operator(node):\n handle_operator(\n node, dic, validator, new_list, messages, whole_validator, current_elem\n )\n elif is_leaf(node_value):\n new_list.append(node)\n check_leaf(node_value, dic, new_list, messages, current_elem)\n else:\n new_list.append(node)\n check_dict_alg(\n dic, node_value, new_list, messages, whole_validator, current_elem\n )",
"def computed(cls, pdb_object):\n return cls.name in pdb_object.completed_steps",
"def check(self):\n # check forward\n self._check_impl(self.key_to_stat_fwd, \"forward\")\n\n # check backward\n self._check_impl(self.key_to_stat_bwd, \"backward\")",
"def SplitBuildStatsByPass(\n self, expectation: BaseExpectation\n ) -> Dict[str, Tuple['StepBuildStatsMap', 'StepBuildStatsMap',\n 'StepBuildStatsMap']]:\n retval = {}\n for builder_name, step_map in self.items():\n fully_passed = StepBuildStatsMap()\n never_passed = StepBuildStatsMap()\n partially_passed = StepBuildStatsMap()\n\n for step_name, stats in step_map.items():\n if stats.NeverNeededExpectation(expectation):\n assert step_name not in fully_passed\n fully_passed[step_name] = stats\n elif stats.AlwaysNeededExpectation(expectation):\n assert step_name not in never_passed\n never_passed[step_name] = stats\n else:\n assert step_name not in partially_passed\n partially_passed[step_name] = stats\n retval[builder_name] = (fully_passed, never_passed, partially_passed)\n return retval",
"def _is_this_healthy_rDNA(self):\n if self.length < 3000:\n return 0\n mapping_state = []\n for item in self.sam_summary:\n if item[1] != '0':\n mapping_state.append(1)\n else:\n mapping_state.append(0)\n threshold = 0.8\n if sum(mapping_state)/len(mapping_state) > threshold:\n return 1\n else:\n for i in range(1, len(mapping_state) - 50):\n if sum(mapping_state[i:])/len(mapping_state[i:]) > threshold or \\\n sum(mapping_state[:-i])/len(mapping_state[:-i]) > threshold:\n healthy = 2\n return 0",
"def _get_attr_w_warn_on_none(\n self,\n mapper: Mapper[Any],\n state: InstanceState[Any],\n dict_: _InstanceDict,\n column: ColumnElement[Any],\n ) -> Callable[[], Any]:\n\n # in this callable, we're trying to thread the needle through\n # a wide variety of scenarios, including:\n #\n # * the object hasn't been flushed yet and there's no value for\n # the attribute as of yet\n #\n # * the object hasn't been flushed yet but it has a user-defined\n # value\n #\n # * the object has a value but it's expired and not locally present\n #\n # * the object has a value but it's expired and not locally present,\n # and the object is also detached\n #\n # * The object hadn't been flushed yet, there was no value, but\n # later, the object has been expired and detached, and *now*\n # they're trying to evaluate it\n #\n # * the object had a value, but it was changed to a new value, and\n # then expired\n #\n # * the object had a value, but it was changed to a new value, and\n # then expired, then the object was detached\n #\n # * the object has a user-set value, but it's None and we don't do\n # the comparison correctly for that so warn\n #\n\n prop = mapper.get_property_by_column(column)\n\n # by invoking this method, InstanceState will track the last known\n # value for this key each time the attribute is to be expired.\n # this feature was added explicitly for use in this method.\n state._track_last_known_value(prop.key)\n\n lkv_fixed = state._last_known_values\n\n def _go() -> Any:\n assert lkv_fixed is not None\n last_known = to_return = lkv_fixed[prop.key]\n existing_is_available = (\n last_known is not LoaderCallableStatus.NO_VALUE\n )\n\n # we support that the value may have changed. so here we\n # try to get the most recent value including re-fetching.\n # only if we can't get a value now due to detachment do we return\n # the last known value\n current_value = mapper._get_state_attr_by_column(\n state,\n dict_,\n column,\n passive=PassiveFlag.PASSIVE_OFF\n if state.persistent\n else PassiveFlag.PASSIVE_NO_FETCH ^ PassiveFlag.INIT_OK,\n )\n\n if current_value is LoaderCallableStatus.NEVER_SET:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object \"\n \"%s; no value has been set for this column\"\n % (column, state_str(state))\n )\n elif current_value is LoaderCallableStatus.PASSIVE_NO_RESULT:\n if not existing_is_available:\n raise sa_exc.InvalidRequestError(\n \"Can't resolve value for column %s on object \"\n \"%s; the object is detached and the value was \"\n \"expired\" % (column, state_str(state))\n )\n else:\n to_return = current_value\n if to_return is None:\n util.warn(\n \"Got None for value of column %s; this is unsupported \"\n \"for a relationship comparison and will not \"\n \"currently produce an IS comparison \"\n \"(but may in a future release)\" % column\n )\n return to_return\n\n return _go",
"def findSteps(wires):\n allDicts = list(map(lambda w: dictCoordsFor(w), wires))\n baseDict = allDicts[0]\n for i in range(1, len(allDicts)):\n otherDict = allDicts[i]\n for k in list(baseDict.keys()):\n if not k in otherDict:\n del baseDict[k]\n \n keys = list(baseDict.keys())\n sums = list(map(lambda c: sumSteps(allDicts, c), keys))\n return min(sums)",
"def step_reaches(step_thr):\n\n return lambda step, curr_obj, curr_optimized_obj: step>=step_thr",
"def filter_passing_hits(self):\n self.create_fasta()\n self.blastn_commandline()\n\n hits = {}\n result_handle = open(generate_path(\"tmp/validate.xml\"))\n for record in NCBIXML.parse(result_handle):\n for entry in record.alignments:\n hit = entry.hit_def\n seqlen = entry.length\n hsp = entry.hsps[0]\n percent_ident = (float(hsp.positives) / float(seqlen)) * 100\n\n if 90 <= percent_ident <= 100:\n if hit in hits:\n if percent_ident > hits[hit]:\n hits[hit] = percent_ident\n else:\n hits[hit] = percent_ident\n del result_handle\n self.seqdata.hits = hits",
"def has_dict(self, dict_in_pointer):\n if type(dict_in_pointer)!=dict:\n return None\n start = self.head\n while start:\n if dict_in_pointer==start.getMember():\n return start\n start = start.getLink()\n return None",
"def __call__(self, trainer):\n observation = trainer.observation\n summary = self._summary\n key = self._key\n if key in observation:\n summary.add({key: observation[key]})\n\n if not self._interval_trigger(trainer):\n return False\n\n if self._max_trigger(trainer):\n return True\n\n stats = summary.compute_mean()\n value = float(stats[key]) # copy to CPU\n self._init_summary()\n\n if not self._best_value or self._compare(self._best_value, value):\n self._best_value = value\n self._waited = 0\n return False\n elif self._waited >= self._patience:\n return True\n else:\n self._waited += 1\n if self._waited >= self._patience:\n return True\n else:\n return False",
"def been_there(state, check_dict, check):\r\n \r\n key = str(state)\r\n if key in check_dict:\r\n return True\r\n else:\r\n if check:\r\n check_dict[key] = True\r\n return False",
"def _is_populated(state: _qutip.Qobj, internal: str, n: int, tol: float) -> bool:\n return _np.abs(_state.element(state, f\"{internal}{n}\")) > tol",
"def passengersRemaining():\n passengers_remaining = 0\n # loop through both dictionaries and count all people\n for i in range(0,len(building.up_dictionary),1):\n passengers = building.up_dictionary[i]\n if passengers:\n passengers_remaining = passengers_remaining + len(passengers)\n else:\n continue\n for i in range(0,len(building.down_dictionary),1):\n passengers = building.down_dictionary[i]\n if passengers:\n passengers_remaining = passengers_remaining + len(passengers)\n else:\n continue\n\n return passengers_remaining",
"def check_attr(chain):\n attrs = {}\n if chain.climbSet:\n attrs[\"climbSet\"] = True\n attrs[\"climbers\"] = [int(i) for i in chain.climbers]\n attrs[\"locks\"] = chain.locks\n attrs[\"TotBandEnergy\"] = chain.TotBandEnergy\n\n return attrs",
"def is_property_available(self, name):\n if name in self.properties and not (isinstance(self.properties[name], dict)\n and '__deferred' in self.properties[name]):\n return True\n return False",
"def complete(t):\n def cmplt(t):\n \"\"\"\n Helper func that is called recursively\n Calculates the height and checks for binary validity\n Pre-conditions:\n t: The tree to be checked\n Returns\n (Boolean, Int) Tuple representing the validity and height of the tree\n \"\"\"\n # Hit bottom of tree\n if t is None:\n return (True, 0)\n # Compare lengths of deeper segments\n else:\n bin1, ldepth = cmplt(t.left)\n bin2, rdepth = cmplt(t.right)\n if ldepth == rdepth and bin1 and bin2:\n return (True, rdepth + 1)\n else:\n return (False, rdepth + 1)\n\n # Run helper on tree, check for height\n bal, h = cmplt(t)\n if bal and h > 0:\n return True\n return False",
"def _get_simulation_reward_with_done(self, info: dict) -> Tuple[float, bool]:\n return 0.0, False",
"def _check_for_completion(self, node):\n dis=0\n for i in range(node.state.size):\n dis+=(node.state[i]-self.goal.state[i])**2\n\n dis=np.sqrt(dis)\n if(dis<=self.step_size):\n return True\n else: return False",
"def is_queued(self, key: Hashable) -> bool:\n entry = self.key_to_defer.get(key)\n if not entry:\n # No entry so nothing is waiting.\n return False\n\n # There are waiting deferreds only in the OrderedDict of deferreds is\n # non-empty.\n return bool(entry.deferreds)",
"def _checkIfAllCalculated(self, statistic, items):\n\n if statistic.final_json:\n collected_items = simplejson.loads(statistic.final_json)\n if len(collected_items.keys()) >= len(items):\n properties = {\n 'final_json': None,\n }\n statistic = self.updateEntityProperties(statistic, properties,\n store=False)\n\n return statistic",
"def check_status(self) -> Mapping[str, bool]:\n ups_stat = {}\n for name in self.ups_names:\n ups_stat[name] = self.check_ups(name)\n return ups_stat",
"def alive(self):\n\t\treturn any( (ind for ind in self.members if ind.current_hp > 0) )"
] | [
"0.5411492",
"0.49769568",
"0.47918302",
"0.47679746",
"0.47668457",
"0.4766094",
"0.47169706",
"0.4641848",
"0.45923612",
"0.4586116",
"0.45711255",
"0.45584255",
"0.45256615",
"0.4499298",
"0.44648027",
"0.44494456",
"0.44364905",
"0.44351688",
"0.44245186",
"0.4407154",
"0.44006914",
"0.439853",
"0.43962076",
"0.4393041",
"0.43730646",
"0.43679097",
"0.43662098",
"0.43638492",
"0.43622836",
"0.43368575"
] | 0.6793257 | 0 |
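A minimal sketch of how a boundary condition like the _progress method above could be exercised on its own. The class name UnbindingBC and the dictionary-based walkers are hypothetical stand-ins for the real simulation objects, and the distance calculation is stubbed out so that only the unbinding check itself runs:

class UnbindingBC:
    def __init__(self, cutoff_distance):
        self._cutoff_distance = cutoff_distance

    def _calc_min_distance(self, walker):
        # Hypothetical stand-in for the real ligand/receptor distance computation.
        return walker["min_distance"]

    def _progress(self, walker):
        min_distance = self._calc_min_distance(walker)
        # The walker counts as unbound once it reaches the cutoff distance.
        unbound = min_distance >= self._cutoff_distance
        progress_data = {"min_distances": min_distance}
        return unbound, progress_data

bc = UnbindingBC(cutoff_distance=1.0)
print(bc._progress({"min_distance": 1.3}))  # (True, {'min_distances': 1.3})
print(bc._progress({"min_distance": 0.4}))  # (False, {'min_distances': 0.4})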
Add a CORS header if the TabPy object has the attribute _cors_origin and _cors_origin is not an empty string. | def _add_CORS_header(self):
origin = self.tabpy.get_access_control_allow_origin()
if len(origin) > 0:
self.set_header("Access-Control-Allow-Origin", origin)
logger.debug("Access-Control-Allow-Origin:{}".format(origin))
headers = self.tabpy.get_access_control_allow_headers()
if len(headers) > 0:
self.set_header("Access-Control-Allow-Headers",headers)
logger.debug("Access-Control-Allow-Headers:{}".format(headers))
methods = self.tabpy.get_access_control_allow_methods()
if len(methods) > 0:
self.set_header("Access-Control-Allow-Methods",methods)
logger.debug("Access-Control-Allow-Methods:{}".format(methods)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_cors_header(resp):\n resp.headers['X-Content-Type-Options'] = os.environ.get(\"X_CONTENT_TYPE_OPTIONS\")\n resp.headers['Access-Control-Allow-Origin'] = os.environ.get(\"ACCESS_CONTROL_ALLOW_ORIGIN\")\n resp.headers['Access-Control-Allow-Headers'] = os.environ.get(\"ACCESS_CONTROL_ALLOW_HEADERS\")\n return resp",
"def _has_cors_header(self):\n return \"Access-Control-Request-Method\" in self.headers or \"Access-Control-Request-Headers\" in self.headers or \"Origin\" in self.headers",
"def enable_cors_after_request_hook():\n\tadd_cors_headers()",
"def add_cors_headers_to_response(response, request):\n opt_method_list = ','.join(request.allowed_method_list + ['OPTIONS'])\n response['Allow'] = opt_method_list\n response['Access-Control-Allow-Methods'] = opt_method_list\n response['Access-Control-Allow-Origin'] = request.META.get('Origin', '*')\n response['Access-Control-Allow-Headers'] = 'Authorization'\n response['Access-Control-Allow-Credentials'] = 'true'",
"def add_cors_headers(self, response):\n # TODO in production we need to use only our domain\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add('Access-Control-Allow-Headers', 'Content-Type')\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')\n\n return response",
"def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')",
"def _check_cors_headers(self, res):\r\n self.assertEqual(res.headers['access-control-allow-origin'], '*')\r\n self.assertEqual(\r\n res.headers['access-control-allow-headers'], 'X-Requested-With')",
"def add_cors_headers(resp):\n if 'headers' not in resp:\n resp['headers'] = dict()\n resp['headers']['Access-Control-Allow-Origin'] = '*',\n resp['headers']['Access-Control-Allow-Headers'] = 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token',\n resp['headers']['Access-Control-Allow-Credentials'] = True,\n return resp",
"def enable_cors():\r\n response.headers['Access-Control-Allow-Origin'] = '*'\r\n response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\r\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'",
"def enable_cors():\r\n response.headers['Access-Control-Allow-Origin'] = '*'\r\n response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\r\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'",
"def enable_cors():\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'",
"def cors_allow_any(request, response):\n origin = request.META.get('HTTP_ORIGIN')\n if not origin:\n return response\n\n # From the CORS spec: The string \"*\" cannot be used for a resource that supports credentials.\n response['Access-Control-Allow-Origin'] = origin\n patch_vary_headers(response, ['Origin'])\n response['Access-Control-Allow-Credentials'] = 'true'\n\n if request.method == 'OPTIONS':\n if 'HTTP_ACCESS_CONTROL_REQUEST_HEADERS' in request.META:\n response['Access-Control-Allow-Headers'] \\\n = request.META['HTTP_ACCESS_CONTROL_REQUEST_HEADERS']\n response['Access-Control-Allow-Methods'] = 'GET, POST, OPTIONS'\n\n return response",
"def allow_cors(response):\n response.headers['Access-Control-Allow-Origin'] = '*'\n return response",
"def cors(self) -> Optional[pulumi.Input['CorsRulesArgs']]:\n return pulumi.get(self, \"cors\")",
"def add_header(response):\n response.headers['Access-Control-Allow-Origin'] = '*'\n response.headers['Access-Control-Allow-Credentials'] = True\n response.headers['Access-Control-Allow-Methods'] = \"GET,POST,OPTIONS\"\n response.headers['Access-Control-Allow-Headers'] = \"Origin, Content-Type, Accept\"\n return response",
"def _send_cors_headers(self):\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.send_header(\"Access-Control-Allow-Methods\", \"GET,POST,OPTIONS\")\n self.send_header(\"Access-Control-Allow-Headers\", \"x-api-key,Content-Type\")",
"def set_allow_origin(resp):\n\th = resp.headers\n\tif request.method != 'OPTIONS' and 'Origin' in request.headers: # Allow crossdomain for other HTTP Verbs\n\t\th['Access-Control-Allow-Origin'] = request.headers['Origin']\n\t\t\n\th['Access-Control-Allow-Credentials'] = 'true'\n\treturn resp",
"def add_header(response):\n response.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return response",
"def http_header_access_control_allow_origin():\n return 'Access-Control-Allow-Origin'",
"def set_allow_origin(resp):\r\n\r\n h = resp.headers\r\n\r\n # Allow crossdomain for other HTTP Verbs\r\n if request.method != 'OPTIONS' and 'Origin' in request.headers:\r\n h['Access-Control-Allow-Origin'] = request.headers['Origin']\r\n\r\n\r\n return resp",
"def coors_handle(response: dict) -> dict:\n response.headers.add('Access-Control-Allow-Origin', '*')\n response.headers.add(\n 'Access-Control-Allow-Headers',\n 'Content-Type,Authorization')\n response.headers.add(\n 'Access-Control-Allow-Methods',\n 'GET,PUT,POST,DELETE, PATCH')\n return response",
"def enable_cors():\n response.headers['Access-Control-Allow-Origin'] = 'localhost:3000'\n response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'\n response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'",
"def enableCors():\n # bottle.response.set_header('Access-Control-Allow-Credentials', 'true')\n bottle.response.set_header('Access-Control-Max-Age:', '3600')\n bottle.response.set_header('Access-Control-Allow-Origin', '*')\n bottle.response.set_header('Access-Control-Allow-Methods',\n 'PUT, GET, POST, DELETE, OPTIONS')\n bottle.response.set_header('Access-Control-Allow-Headers',\n 'Origin, Accept, Content-Type, X-Requested-With,'\n ' X-CSRF-Token, X-Auth-Token')",
"def preflight_checks_cors(self, method, rule):\n if options.http_cors and 'Access-Control-Request-Method' in self.request.headers:\n origin = self.request.headers.get('Origin')\n if origin:\n allowed_methods =list(rule.target.str_allowed_methods) + ['OPTIONS']\n self.set_header('Access-Control-Allow-Methods', '.'.join(allowed_methods))\n\n req_method = self.request.headers.get('Access-Control-Request-Method')\n req_headers = self.request.headers.get('Access-Control-Request-Headers')\n\n method_target = rule.target.get_method_target(HttpMethod[req_method])\n if req_method in allowed_methods and method_target:\n if '*' in method_target.origins:\n self.set_header('Access-Control-Allow-Origin', '*')\n elif origin in method_target.origins:\n self.add_header('Vary', 'Origin')\n self.set_header('Access-Control-Allow-Origin', origin)\n if '*' in method_target.allowed_headers:\n self.set_header('Access-Control-Allow-Headers', req_headers)\n else:\n self.set_header('Access-Contorl-Allow-Headers', '.'.join(method_target.allowed_headers))\n\n if method_target.exposed_headers:\n self.set_header('Access-Control-Expose-Headers', '.'.join(method_target.exposed_headers))\n if method_target.allow_credentials is not None:\n self.set_header('Access-Control-Allow-Credentials', method_target.allow_credentials)\n\n self.set_header('Access-Control-Max-Age', method_target.max_age)\n else:\n self.set_header('Access-Control-Max-Age', 86400)\n self.set_header('Access-Control-Allow-Origin', '*')\n self.set_header('Access-Control-Allow-Headers', req_headers)\n else:\n self.set_header('Allow', '.'.join(rule.target.str_methods + ['OPTIONS']))\n return False\n return True",
"def cors(self) -> pulumi.Output[Optional['outputs.CorsRulesResponse']]:\n return pulumi.get(self, \"cors\")",
"def set_cors(self, allow_origins):\n\n self._api_manager.set_cors(allow_origins)",
"def set_default_headers(self):\n\n # Setting CORS-issue related\n self.set_header('Access-Control-Allow-Origin', '*')\n\n # Only authorized headers should proceed\n self.set_header('Access-Control-Allow-Headers',\n 'x-requested-with, Authorization, Content-type')\n\n # And only allowed methods\n self.set_header('Access-Control-Allow-Methods',\n 'POST, GET, OPTIONS, PATCH, DELETE, PUT')",
"def set_default_headers(self, *args, **kwargs):\n super().set_default_headers()\n self.set_header(\"Access-Control-Allow-Origin\", '*')\n self.set_header(\n \"Access-Control-Allow-Headers\",\n \"Content-Type, crsf-header, file-length, file-token, x-csrftoken, \"\n )\n self.set_header(\"Access-Control-Allow-Methods\", \"GET, POST, OPTIONS\")",
"def set_default_headers(self):\n self.set_header(\"Access-Control-Allow-Origin\", \"*\")\n self.set_header(\"Access-Control-Allow-Headers\", \"x-requested-with\")\n self.set_header('Access-Control-Allow-Methods', 'GET, POST, PUT, DELETE, OPTIONS')",
"def add_access_control_headers(self):\n self.add_header('Access-Control-Allow-Origin', self.params['allowed_access_control_origins'])\n self.add_header('Access-Control-Allow-Methods', self.params['allowed_access_control_methods'])\n self.add_header('Access-Control-Allow-Headers', self.params['allowed_access_control_headers'])"
] | [
"0.7479771",
"0.7410093",
"0.7130474",
"0.7120577",
"0.7087482",
"0.6989684",
"0.6989684",
"0.6969113",
"0.69539034",
"0.69539034",
"0.6934905",
"0.6931153",
"0.6773808",
"0.67104447",
"0.67014056",
"0.6693697",
"0.6672938",
"0.6642145",
"0.6585849",
"0.65656114",
"0.6508159",
"0.64727557",
"0.64630824",
"0.6427392",
"0.63866955",
"0.6384501",
"0.63660145",
"0.6365106",
"0.63536716",
"0.63176274"
] | 0.8239014 | 0 |
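The handler above only emits each Access-Control-Allow-* header when the corresponding configured value is non-empty. Here is a hedged, framework-free sketch of that pattern, using a plain dictionary instead of a Tornado handler (the header values are illustrative):

def build_cors_headers(origin="", headers="", methods=""):
    """Return only the CORS headers whose configured value is non-empty."""
    cors = {}
    if len(origin) > 0:
        cors["Access-Control-Allow-Origin"] = origin
    if len(headers) > 0:
        cors["Access-Control-Allow-Headers"] = headers
    if len(methods) > 0:
        cors["Access-Control-Allow-Methods"] = methods
    return cors

print(build_cors_headers(origin="*", methods="GET,POST,OPTIONS"))
# {'Access-Control-Allow-Origin': '*', 'Access-Control-Allow-Methods': 'GET,POST,OPTIONS'}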
Asserts that the two tensors have NaNs in the same locations, and that the non-NaN elements are all close. | def _assert_allclose_with_nans(self, tensor1, tensor2):
# Check nans are in the same place
self.assertFalse(
torch.any( # True if there's any mismatch
torch.logical_xor( # True where either tensor1 or tensor 2 has nans, but not both (mismatch)
torch.isnan(tensor1), # True where tensor1 has nans
torch.isnan(tensor2), # True where tensor2 has nans
)
),
msg="Nans occur in different places.",
)
valid_mask = torch.logical_not(torch.isnan(tensor1))
self.assertTrue(
torch.allclose(tensor1[valid_mask], tensor2[valid_mask]),
msg="Non-nan values don't match.",
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sa_allclose(self, a, b):\n for name in a.dtype.names:\n print(name, a[name], b[name])\n nan_mask_a = np.isnan(a[name])\n nan_mask_b = np.isnan(b[name])\n self.assertTrue(np.allclose(nan_mask_a, nan_mask_b))\n self.assertTrue(np.allclose(\n a[name][~nan_mask_a],\n b[name][~nan_mask_b]\n ))\n return True",
"def assert_allclose_na(a, b):\n if _is_na(a) and _is_na(b):\n pass\n else:\n npt.assert_allclose(a, b)",
"def equal_with_nan(a1, a2):\n a1, a2 = np.asarray(a1), np.asarray(a2)\n a1nan, a2nan = np.isnan(a1), np.isnan(a2)\n nan_sameness = a1nan == a2nan\n value_sameness = (a1 == a2)\n # If they are actually the same, they should be value same xor nansame.\n flags = value_sameness ^ nan_sameness\n return flags",
"def assert_no_nans(x):\n assert not torch.isnan(x).any()",
"def _autocheck_nan(self):\n # assert np.isnan(self.W).any() == False, \"W matrix should not contain NaN values.\"\n assert np.isnan(self.Win).any() == False, \"Win matrix should not contain NaN values.\"\n if self.Wfb is not None:\n assert np.isnan(self.Wfb).any() == False, \"Wfb matrix should not contain NaN values.\"",
"def assertAllNan(self, a):\n is_nan = np.isnan(self._GetNdArray(a))\n all_true = np.ones_like(is_nan, dtype=np.bool)\n self.assertAllEqual(all_true, is_nan)",
"def allclose(tensor1: Tensor, tensor2: Tensor) ->bool:\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2\n return torch.allclose(tensor1, tensor2)",
"def has_nans(tensor, verbose=True):\n tensor_numpy = tensor.data.cpu().numpy().flatten()\n where_nan = np.argwhere(tensor_numpy != tensor_numpy)\n\n nan_count = len(where_nan)\n nan = nan_count != 0\n\n if verbose and nan:\n print(f\"Encountered {nan_count} NaNs\")\n\n return nan",
"def array_equal_nan(a1, a2):\n try:\n a1, a2 = np.asarray(a1), np.asarray(a2)\n except Exception:\n return False\n if a1.shape != a2.shape:\n return False\n # Handling NaN values\n a1nan, a2nan = np.isnan(a1), np.isnan(a2)\n # NaN's occur at different locations\n if not (a1nan == a2nan).all():\n return False\n # Shapes of a1, a2 and masks are guaranteed to be consistent by this point\n return bool(np.asarray(a1[~a1nan] == a2[~a1nan]).all())",
"def testExpectedNaNOpOutputs(self):\n check_numerics_callback.enable_check_numerics()\n\n # Empty input tensor\n x = constant_op.constant(1, dtype=dtypes.float32, shape=[0, 1, 1, 1])\n scale = constant_op.constant([1], dtype=dtypes.float32)\n offset = constant_op.constant([1], dtype=dtypes.float32)\n\n # Calling fused_batch_norm with an empty input should output a NaN in the\n # latter four outputs without triggering the check_numerics callback\n batch_norm_res = gen_nn_ops._fused_batch_norm(\n x=x, scale=scale, offset=offset, mean=[], variance=[])\n\n _, batch_mean, batch_variance, _, _ = self.evaluate(batch_norm_res)\n\n self.assertTrue(np.isnan(batch_mean.squeeze()))\n self.assertTrue(np.isnan(batch_variance.squeeze()))",
"def _raise_assert_on_np_is_close_all(self, np0, np1):\r\n\r\n return self.assertTrue(np.isclose(np0, np1).all())",
"def _point_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n return np.allclose(a._Point__loc, b._Point__loc,\n rtol=rtol, atol=atol)",
"def near(a,b):\n return torch.allclose(a,b, rtol=1e-03, atol=1e-05)",
"def remove_nans(a, b):\n a = np.asarray(a)\n b = np.asarray(b)\n\n mask = ~np.isnan(a) & ~np.isnan(b)\n a = a[mask]\n b = b[mask]\n\n return a, b",
"def gdx_val_equal(val1,val2,gdxf):\n if gdx_isnan(val1, gdxf) and gdx_isnan(val2, gdxf):\n return True\n return val1 == val2",
"def assert_table_equal(t1, t2, check_meta=False, rtol=1.0e-15, atol=1.0e-300):\n assert_equal(len(t1), len(t2))\n assert_equal(t1.colnames, t2.colnames)\n if check_meta:\n assert_equal(t1.meta, t2.meta)\n for name in t1.colnames:\n if len(t1) != 0:\n assert_equal(t1[name].dtype.kind, t2[name].dtype.kind)\n if not isinstance(t1[name], MaskedColumn):\n for i, el in enumerate(t1[name]):\n try:\n if not isinstance(el, str) and np.isnan(el):\n assert_true(\n not isinstance(t2[name][i], str) and np.isnan(t2[name][i])\n )\n elif isinstance(el, str):\n assert_equal(el, t2[name][i])\n else:\n assert_almost_equal(el, t2[name][i], rtol=rtol, atol=atol)\n except (TypeError, NotImplementedError):\n pass # ignore for now",
"def test_canonicalize_nan(self):\r\n sio = StringIO()\r\n handler = logging.StreamHandler(sio)\r\n handler.setLevel(logging.ERROR)\r\n logging.getLogger('theano.gof.opt').addHandler(handler)\r\n try:\r\n x = vector()\r\n f = theano.function([x], x + numpy.nan)\r\n finally:\r\n logging.getLogger('theano.gof.opt').removeHandler(handler)\r\n # Ideally this test would only catch the maxed out equilibrium\r\n # optimizer error message, but to be safe in case this message\r\n # is modified in the future, we assert that there is no error\r\n # at all.\r\n assert not sio.getvalue()",
"def _assert_all_close_according_to_type(self, a, b):\n if a.dtype == np.float32:\n np.testing.assert_allclose(a, b, rtol=1e-6, atol=1e-6)\n elif a.dtype == np.float64:\n np.testing.assert_allclose(a, b, rtol=1e-15, atol=1e-15)\n else:\n assert False",
"def na_cmp():\n return lambda x, y: bool(pd.isna(x.magnitude)) & bool(pd.isna(y.magnitude))",
"def test_nan_in_bbox():\n\n data1 = np.ones((101, 101))\n data2 = data1.copy()\n data1[33, 33] = np.nan\n data1[67, 67] = np.inf\n data1[33, 67] = -np.inf\n data1[22, 22] = np.nan\n data1[22, 23] = np.inf\n error = data1.copy()\n\n aper1 = CircularAperture((50, 50), r=20.0)\n aper2 = CircularAperture((5, 5), r=20.0)\n\n tbl1 = aperture_photometry(data1, aper1, error=error)\n tbl2 = aperture_photometry(data2, aper1, error=error)\n assert_allclose(tbl1['aperture_sum'], tbl2['aperture_sum'])\n assert_allclose(tbl1['aperture_sum_err'], tbl2['aperture_sum_err'])\n\n tbl3 = aperture_photometry(data1, aper2, error=error)\n tbl4 = aperture_photometry(data2, aper2, error=error)\n assert_allclose(tbl3['aperture_sum'], tbl4['aperture_sum'])\n assert_allclose(tbl3['aperture_sum_err'], tbl4['aperture_sum_err'])",
"def test_nan_check(self):\n values_with_nans = np.array([1, 2, 3, np.nan, np.nan])\n\n with LogCapture(\"puma\") as log:\n _ = hist_w_unc(values_with_nans, bins=4)\n log.check(\n (\n \"puma\",\n \"WARNING\",\n \"Histogram values contain 2 nan values!\",\n )\n )",
"def test_atan2_special_case_one_nan(dt):\n q = get_queue_or_skip()\n skip_if_dtype_not_supported(dt, q)\n\n x1 = dpt.asarray([dpt.nan, dpt.nan, 1], dtype=dt)\n x2 = dpt.asarray([dpt.nan, 1, dpt.nan], dtype=dt)\n\n y = dpt.atan2(x1, x2)\n assert dpt.all(dpt.isnan(y))",
"def check_equal(tensor_1, tensor_2):\n return tf.reduce_max(tf.abs(tensor_1 - tensor_2)).numpy() < 1e-6",
"def testCalcCorrNaN(self):\n self.assertEqual(INF, calc_corr(2.050000, INF, EXP_KBT))",
"def test_interpolation_random_array_and_nan(self):\n\n # Define pixel centers along each direction\n x = numpy.arange(20) * 1.0\n y = numpy.arange(25) * 1.0\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define arbitrary values for each x, y pair\n numpy.random.seed(17)\n A = numpy.random.random((len(x), len(y))) * 10\n\n # Create islands of NaN\n A[5, 13] = numpy.nan\n A[6, 14] = A[6, 18] = numpy.nan\n A[7, 14:18] = numpy.nan\n A[8, 13:18] = numpy.nan\n A[9, 12:19] = numpy.nan\n A[10, 14:17] = numpy.nan\n A[11, 15] = numpy.nan\n\n A[15, 5:6] = numpy.nan\n\n # Creat interpolation points\n xis = numpy.linspace(x[0], x[-1], 39) # Hit all mid points\n etas = numpy.linspace(y[0], y[-1], 73) # Hit thirds\n points = combine_coordinates(xis, etas)\n\n for mode in ['linear', 'constant']:\n vals = interpolate2d(x, y, A, points, mode=mode)\n\n # Calculate reference result with expected NaNs and compare\n i = j = 0\n for k, (xi, eta) in enumerate(points):\n\n # Find indices of nearest higher value in x and y\n i = numpy.searchsorted(x, xi)\n j = numpy.searchsorted(y, eta)\n\n if i > 0 and j > 0:\n\n # Get four neigbours\n A00 = A[i - 1, j - 1]\n A01 = A[i - 1, j]\n A10 = A[i, j - 1]\n A11 = A[i, j]\n\n if numpy.allclose(xi, x[i]):\n alpha = 1.0\n else:\n alpha = 0.5\n\n if numpy.allclose(eta, y[j]):\n beta = 1.0\n else:\n beta = eta - y[j - 1]\n\n if mode == 'linear':\n if numpy.any(numpy.isnan([A00, A01, A10, A11])):\n ref = numpy.nan\n else:\n ref = (A00 * (1 - alpha) * (1 - beta) +\n A01 * (1 - alpha) * beta +\n A10 * alpha * (1 - beta) +\n A11 * alpha * beta)\n elif mode == 'constant':\n assert alpha >= 0.5 # Only case in this test\n\n if beta < 0.5:\n ref = A10\n else:\n ref = A11\n else:\n msg = 'Unknown mode: %s' % mode\n raise Exception(msg)\n\n #print i, j, xi, eta, alpha, beta, vals[k], ref\n assert nanallclose(vals[k], ref, rtol=1e-12, atol=1e-12)",
"def pd_val_equal(val1, val2):\n return pd_isnan(val1) and pd_isnan(val2) or val1 == val2",
"def _check_compatible_fill_values(self, other: \"FlattenedStorage\"):\n for k in set(self._fill_values).intersection(other._fill_values):\n if np.isnan(self._fill_values[k]) and np.isnan(other._fill_values[k]):\n continue\n else:\n if self._fill_values[k] != other._fill_values[k]:\n raise ValueError(\n \"Fill values for arrays in storages don't match, can't perform requested operation\"\n )",
"def test_nan_expected(metric_class, nan_strategy, value, expected):\n metric = metric_class(nan_strategy=nan_strategy)\n metric.update(value.clone())\n out = metric.compute()\n assert np.allclose(out, expected, equal_nan=True)",
"def test_nan():\n assert 'invalid' == classify_triangle(1,2,float('nan'))",
"def assert_no_error(self): \r\n Nx = self['Nx']\r\n Nt = self.m.Nt\r\n L, T = self.problem['L T'.split()]\r\n L = L/2 # only half the domain used (symmetry)\r\n x = np.linspace(0, L, Nx+1) # Mesh points in space \r\n t = np.linspace(0, T, Nt+1) # Mesh points in time\r\n \r\n for n in range(len(t)):\r\n u_e = self.problem.u_exact(x, t[n])\r\n diff = np.abs(self.f.u[n,:] - u_e).max()\r\n print 'diff:', diff\r\n tol = 1E-13\r\n assert diff < tol"
] | [
"0.7419661",
"0.7288233",
"0.7000732",
"0.6927218",
"0.6881153",
"0.6624811",
"0.66094536",
"0.64761245",
"0.64403725",
"0.64227897",
"0.6385241",
"0.6251182",
"0.6232587",
"0.6197191",
"0.619503",
"0.61666614",
"0.61274195",
"0.6125417",
"0.612099",
"0.6098439",
"0.60977125",
"0.6067147",
"0.6024517",
"0.6012723",
"0.5995762",
"0.599079",
"0.594048",
"0.59350395",
"0.5915928",
"0.5894657"
] | 0.8152861 | 0 |
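The same NaN-aware comparison as the assertion above, rewritten as a standalone boolean helper so it can be tried outside a test case. This sketch assumes PyTorch is installed, and the tensors below are made-up examples:

import torch

def allclose_with_nans(tensor1, tensor2):
    # NaNs must occur in exactly the same positions in both tensors...
    same_nan_pattern = torch.equal(torch.isnan(tensor1), torch.isnan(tensor2))
    # ...and the remaining (non-NaN) values must be close.
    valid_mask = ~torch.isnan(tensor1)
    return same_nan_pattern and torch.allclose(tensor1[valid_mask], tensor2[valid_mask])

a = torch.tensor([1.0, float("nan"), 3.0])
b = torch.tensor([1.0, float("nan"), 3.0 + 1e-9])
print(allclose_with_nans(a, b))                              # True
print(allclose_with_nans(a, torch.tensor([1.0, 2.0, 3.0])))  # False: NaN pattern differs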
This endpoint saves stars on both employees (from and to). | def give_star_to(request, from_employee_id, to_employee_id):
if from_employee_id == to_employee_id:
content = {'detail': config.USER_UNABLE_TO_GIVE_STARS_ITSELF}
return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
elif request.method == 'POST':
# Set values from request.data from POST
text = (request.data['text'] if 'text' in request.data.keys() else None)
from_user = get_object_or_404(Employee, pk=from_employee_id)
to_user = get_object_or_404(Employee, pk=to_employee_id)
category = get_object_or_404(Category, pk=request.data['category'])
keyword = get_object_or_404(Keyword, pk=request.data['keyword'])
if from_user.is_blocked:
content = {'detail': config.USER_BLOCKED_TO_GIVE_STARS}
return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
elif to_user.is_blocked:
content = {'detail': config.USER_BLOCKED_TO_RECEIVED_STARS}
return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
# Create data object to save
data = {"category": category.id,
"keyword": keyword.id,
"text": text,
"from_user": from_user.id,
"to_user": to_user.id}
# Validate serializer with data provided.
serializer = StarInputSerializer(data=data)
if serializer.is_valid():
# Save recommendation
serializer.save()
# Add 1 to employee given points
from_user.add_stars_given(1)
from_user.save()
current_level = to_user.level
# Add points to to_user according category weight
if from_user.position:
weight = from_user.position.weight
else:
weight = 1
to_user.add_stars(weight)
message = config.RECOMMENDATION_MESSAGE % (weight, from_user.first_name, from_user.last_name)
send_push_notification(to_user, message)
to_user.evaluate_level()
to_user.save()
# Add activity log if user level up
if to_user.level != current_level:
message = config.LEVEL_UP_TEXT % (to_user.first_name, to_user.last_name, to_user.level)
activity = Activity.objects.create(text=message, to_user=to_user)
send_push_notification(to_user, message)
activity.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def give_star_to_many(request, from_employee_id):\n if request.method == 'POST':\n serializer_bulk = StarBulkSerializer(data=request.data)\n errors = []\n stars_added = 0\n if serializer_bulk.is_valid():\n text = (request.data['text'] if 'text' in request.data.keys() else None)\n from_user = get_object_or_404(Employee, pk=from_employee_id)\n category = get_object_or_404(Category, pk=request.data['category'])\n keyword = get_object_or_404(Keyword, pk=request.data['keyword'])\n\n # Create data object to save\n data = {\"category\": category.id,\n \"keyword\": keyword.id,\n \"text\": text,\n \"from_user\": from_user.id}\n\n for user_pk in request.data['to_users']:\n data.update({\"to_user\": int(user_pk)})\n serializer = StarSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n stars_added += 1\n\n # Add points\n to_user = get_object_or_404(Employee, pk=user_pk)\n from_user.add_stars_given(1)\n from_user.save()\n\n current_level = to_user.level\n\n # Add points to to_user according category weight\n if from_user.position:\n weight = from_user.position.weight\n else:\n weight = 1\n\n to_user.add_stars(weight)\n message = config.RECOMMENDATION_MESSAGE % (weight, from_user.first_name, from_user.last_name)\n send_push_notification(to_user, message)\n to_user.evaluate_level()\n to_user.save()\n\n # Add activity log if user level up\n if to_user.level != current_level:\n message = config.LEVEL_UP_TEXT % (to_user.first_name, to_user.last_name, to_user.level)\n activity = Activity.objects.create(text=message, to_user=to_user)\n activity.save()\n\n else:\n errors.append(serializer.errors)\n else:\n errors.append(serializer_bulk.errors)\n\n if len(errors) == 0:\n content = {'detail': config.SUCCESSFULLY_STARS_ADDED}\n return Response(content, status=status.HTTP_201_CREATED)\n else:\n stars_results = {\"stars_added\": stars_added}\n detail = {'detail': errors}\n content = stars_results.copy()\n content.update(detail)\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)",
"def set_stars():\n prod_id = int(request.vars.prod_id)\n logger.info(\"changing stars on prod_id {%s}\" %prod_id)\n rating = int(request.vars.rating)\n logger.info(\"auth.user from api: %s\"%auth.user.email )\n db.stars.update_or_insert(\n (db.stars.prod_id == prod_id) & (db.stars.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n rating = rating\n )\n new_avg = calc_avg_rating(prod_id)\n return response.json(dict(new_avg=new_avg))",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def give_badge_to(request, badge_id, to_employee_id, from_employee_id):\n if to_employee_id == from_employee_id:\n content = {'detail': config.USER_UNABLE_TO_GIVE_BADGES_ITSELF}\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)\n elif request.method == 'POST':\n badge = get_object_or_404(Badge, pk=badge_id)\n to_employee = get_object_or_404(Employee, pk=to_employee_id)\n from_employee = get_object_or_404(Employee, pk=from_employee_id)\n try:\n employee_badge = EmployeeBadge.objects.create(to_user=to_employee, assigned_by=from_employee, badge=badge)\n except Exception as e:\n print(e)\n content = {'detail': config.BADGE_UNIQUE_CONSTRAINT_FAILED}\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)\n serializer = EmployeeBadgeSerializer(employee_badge)\n return Response(serializer.data, status=status.HTTP_201_CREATED)",
"def post_rating():\n\n id = request.args.get('id')\n\n rating = request.args.get('rating')\n\n record = mod.provide_rating(id, int(rating))\n\n return jsonify(record)",
"def post(self, request, slug):\n rating = request.data.get(\"rate\", {})\n serializer = self.serializer_class(data=rating)\n serializer.is_valid(raise_exception=True)\n rating = serializer.data.get('rating')\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise NotFound(\"An article with this slug does not exist\")\n\n ratings = Ratings.objects.filter(rater=request.user.profile,\n article=article).first()\n if not ratings:\n ratings = Ratings(\n article=article,\n rater=request.user.profile,\n stars=rating)\n ratings.save()\n avg = Ratings.objects.filter(\n article=article).aggregate(Avg('stars'))\n return Response({\n \"avg\": avg\n }, status=status.HTTP_201_CREATED)\n\n if ratings.counter >= 5:\n raise PermissionDenied(\n \"You are not allowed to rate this article more than 5 times.\"\n )\n\n ratings.counter += 1\n ratings.stars = rating\n ratings.save()\n avg = Ratings.objects.filter(article=article).aggregate(Avg('stars'))\n return Response({\"avg\": avg}, status=status.HTTP_201_CREATED)",
"def post(self):\n args = UpdateLikeList.post_parser.parse_args()\n user_name = args.get('user_name')\n restaurant_name = args.get('restaurant_name')\n #rating = args.get('rating')\n newlike = {\n 'user_name':args.get('user_name'),\n 'restaurant_name':args.get('restaurant_name')\n #'rating':args.get('rating')\n }\n conn = db.create_connection(db.connection_config_dict)\n cursor = conn.cursor()\n\n # To get user's user_id\n user_id = []\n sql_1 = 'SELECT user_id FROM User WHERE user_name = \"{user_name}\"'.format(user_name=user_name)\n print(sql_1)\n cursor.execute(sql_1)\n for u in cursor:\n user_id.append(u)\n print(user_id) \n\n # To get restaurant's restaurant_id\n restaurant_id = []\n sql_2 = 'SELECT restaurant_id FROM Restaurant WHERE name = \"{restaurant_name}\"'.format(restaurant_name=restaurant_name)\n print(sql_2)\n cursor.execute(sql_2)\n for u in cursor:\n restaurant_id.append(u)\n print(restaurant_id)\n\n # Insert new restaurant into LikeList table\n # neo4j may need insert data here\n # user id is user_id[0][0], restaurant id is restaurant_id[0][0].\n sql_3 = \"INSERT INTO LikeList (user_id, restaurant_id) VALUES ({user_id}, {restaurant_id});\".format(user_id=user_id[0][0], restaurant_id=restaurant_id[0][0])\n print(sql_3)\n cursor.execute(sql_3)\n\n conn.commit()\n return newlike, 201",
"def _rate_exploration(self, exp_id, num_ratings, rating):\n # Each user id needs to be unique since each user can only give an\n # exploration one rating.\n user_ids = ['user%d' % i for i in range(num_ratings)]\n for user_id in user_ids:\n rating_services.assign_rating_to_exploration(\n user_id, exp_id, rating)",
"def add_rating():\n try:\n payload = request.json\n # payload = change_case(payload, \"lower\")\n for required_key in rating_schema:\n if required_key not in payload.keys():\n return jsonify({\"message\": f\"Missing {required_key} parameter\"}), 400\n \n db_rating = db.ratings.find_one(\n {\n \"user\": payload[\"user\"],\n \"business\": payload[\"business\"],\n }\n )\n if db_rating is not None:\n db.ratings.update_one({\n '_id': db_rating['_id']\n },{\n '$set': {\n 'rating': payload['rating'],\n 'comment': payload['comment']\n }\n }, upsert=False)\n else:\n db.ratings.insert_one(payload)\n \n\n # Now get updated business details\n business_id = payload[\"business\"]\n business = db.dukaans.find_one({\"_id\": ObjectId(business_id)})\n if business is None:\n return jsonify({\"success\": False, \"message\": \"Business not found.\"}), 404\n\n if len(business.get(\"categories\", [])) > 0:\n business[\"categories\"] = [\n db.categories.find_one({\"_id\": ObjectId(_id)})[\"name\"]\n for _id in business[\"categories\"]\n ]\n ratings = list(db.ratings.find({\"business\": str(business_id)}))\n if len(ratings) > 0:\n ratings_sum = sum([r[\"rating\"] for r in ratings])\n business[\"avg_rating\"] = round(float(ratings_sum) / float(len(ratings)), 1)\n else:\n business[\"avg_rating\"] = 0.0\n\n for rating in ratings:\n rating[\"user_name\"] = db.users.find_one({\"_id\": ObjectId(rating[\"user\"])})[\n \"name\"\n ]\n\n business[\"ratings\"] = ratings\n business[\"total_ratings\"] = len(ratings)\n return jsonify({\"success\": True, \"business\": clean_dict_helper(business)}), 201\n except Exception as err:\n print(\"Error: \", str(err))\n print(sys.exc_info()[-1].tb_lineno)",
"def post(self, request, format=None):\n serializer = IdeasPostSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n average_score = calculate_average_score(serializer.validated_data)\n serializer.save(average_score=average_score, author=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)",
"def put(first_name,last_name,name,note):\n repository = NotationRepository()\n notation = repository.update(first_name = first_name, last_name = last_name, name = name, note = note)\n return jsonify({\"notation\": notation.json})",
"def post(self):\r\n\t\tdataResponse = defaultResponse\r\n\t\tq = \"\"\"\tUPDATE scooters\r\n\t\t\t\tSET is_reserved = false\r\n\t\t\t\tWHERE id={id.int}\r\n\t\t\t\tAND is_reserved = true\r\n\t\t\t\tRETURNING true;\r\n\t\t\t\t\"\"\"\r\n\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\tif payload:\r\n\t\t\t# End reservation success code\r\n\t\t\tdataResponse = getQueryResponse(payload, newQuery, queryType='update')\r\n\r\n\t\tif dataResponse[0]:\r\n\t\t\tq = \"\"\"SELECT ST_Distance(location, ST_MakePoint({endlng.float}, {endlat.float})) FROM scooters WHERE id = {id.int}\"\"\"\r\n\t\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\t\tif payload:\r\n\t\t\t\t# Charge customer and update scooter location.\r\n\t\t\t\tdistanceTraveled = getQueryResponse(payload, newQuery, queryType='query')[0]\r\n\t\t\t\tdistanceTraveled = distanceTraveled[0]\r\n\t\t\t\twhile type(distanceTraveled) is tuple:\r\n\t\t\t\t\tdistanceTraveled = distanceTraveled[0]\r\n\t\t\t\tdistanceTraveled = roundup(distanceTraveled) if distanceTraveled > 0 else 1 # Min distance traveled is always 1.\r\n\t\t\t\tpricePerMeter = 1.0 # Ideally, this value is should not be hard coded\r\n\t\t\t\tfareCost = pricePerMeter * distanceTraveled\r\n\r\n\t\t\t\tq = \"\"\"UPDATE users\r\n\t\t\t\t\tSET (last_fare, fares, scooter_ids, distances_traveled) = ({fareCost}::real, array_append(fares, {fareCost}::real),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(scooter_ids, {id.int}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(distances_traveled, {distanceTraveled}::bigint))\r\n\t\t\t\t\tWHERE id={userid.int};\r\n\r\n\t\t\t\t\tUPDATE scooters\r\n\t\t\t\t\tSET (lon, lat, distances_traveled, rider_ids, location) = ({endlng.float}, {endlat.float},\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(distances_traveled, {distanceTraveled}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tarray_append(rider_ids, {userid.int}::bigint),\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tST_POINT({endlng.float}, {endlat.float}))\r\n\t\t\t\t\tWHERE id = {id.int};\r\n\r\n\t\t\t\t\t\"\"\"\r\n\t\t\t\tq = q.replace('{fareCost}', str(fareCost)).replace('{distanceTraveled}', str(distanceTraveled)) # Partial format subtitution\r\n\r\n\t\t\t\tpayload, newQuery = Validator.validatePayload(q, request)\r\n\t\t\t\tif payload:\r\n\t\t\t\t\t_ = getQueryResponse(payload, newQuery, queryType='update')\r\n\r\n\t\treturn dataResponse",
"def update_exam():\n try:\n data = request.get_json()\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n\n if examiner:\n if not data.get('exam_id'):\n return jsonify({'message':'No exam_id included in payload'}), 400\n\n exam_id = data['exam_id']\n exam = Exam.query.get(exam_id)\n \n if exam is None:\n return jsonify({'message':'Exam with id {} not found'.format(exam_id)}), 404\n \n if exam.start_date > datetime.utcnow():\n if data.get('exam_name'):\n exam.exam_name = data['exam_name'] \n if data.get('subject_id'):\n exam.subject_id = data['subject_id']\n if data.get('start_date'):\n start_date = parse_datetime(data['start_date'])\n if start_date < datetime.utcnow():\n raise Exception('Exam start_date has passed')\n exam.start_date = start_date\n if data.get('end_date'):\n end_date = parse_datetime(data['end_date'])\n if end_date < datetime.utcnow():\n raise Exception('Exam end_date has passed')\n exam.end_date = end_date\n if data.get('duration'):\n exam.duration = parse_datetime(data['duration']).time()\n if data.get('document_link'):\n exam.document_link = data['document_link']\n\n if exam.start_date > exam.end_date:\n raise Exception('Exam end_date precedes Exam start_date.')\n\n db.session.commit()\n\n return jsonify(exam.to_dict()), 200\n\n raise Exception('Cannot update an Exam that has already started.')\n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except exc.SQLAlchemyError as e:\n db.session.rollback()\n return jsonify({ 'message': e.args }), 500\n except Exception as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 400",
"def add_rating(self):\n ratings = Comment.query.filter(\n Comment.user_to_id == self.user_to_id).all()\n rate = 0\n tot = 0\n ave = 0\n for r in ratings:\n if r.rating:\n tot += 1\n rate += r.rating\n ave = rate / tot\n user = User.query.get_or_404(self.user_to_id)\n user.rating = ave\n db.session.add(user)\n db.session.commit()\n return user",
"def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)",
"def save(self, *args, **kwargs):\n self.item.rates_total += 1\n self.item.average_rate += (self.item.average_rate + self.rate) / self.item.rates_total\n self.item.save()\n super(Rate, self).save(*args, **kwargs)",
"def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created",
"def put(self, employee_id):\n\n employee = EmployeeModel.find_by_id(employee_id)\n if employee is None:\n return {'message': \"There is no employee with this ID, or your access_token is invalid.\"}, 404\n else:\n \"\"\" check if employee entered the building today\"\"\"\n if WorkdayModel.find_latest_workday(employee.id):\n \"\"\"checking if employee already entered building today\"\"\"\n last_workday = WorkdayModel.find_latest_workday(employee.id)\n\n if last_workday.time_in.day == datetime.today().day:\n last_workday.time_out = datetime.today()\n # calculate hours_worked| .time converts to H:M\n duration = last_workday.time_out - last_workday.time_in\n # duration is a datetime.timedelta\n duration = (datetime.min + duration).time()\n last_workday.hours_worked = duration\n try:\n last_workday.save_to_db()\n except:\n return {'message': 'An error occurred updating worked hours'}, 500\n\n return last_workday.json()\n\n return {'message': 'First use of card, or employee did not start work today'}, 200",
"def addstar(starname):\n try:\n Star.create(name=starname)\n except IntegrityError:\n print(('Star {0} already in database. Record not created, but can be updated.'.format(starname)))",
"def add_heart_rate(email, heart_rate, time):\n # Get the first user where _id=email\n user = models.User.objects.raw({\"_id\": email}).first()\n # Append the heart_rate to the user's list of heart rates\n user.heart_rate.append(heart_rate)\n # append the current time to the user's list of heart rate times\n user.heart_rate_times.append(time)\n user.save() # save the user to the database",
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def create_rating(input_user_id, input_rating, input_movie_id):\n \n rating = Rating(user_id=input_user_id, rating=input_rating, movie_id=input_movie_id)\n \n db.session.add(rating)\n db.session.commit()\n\n return rating",
"def update_attendance_rate(self):\n session_avg_rate = self.session_set\\\n .filter(attendance_rate__isnull=False)\\\n .aggregate(Avg('attendance_rate'))\n self.attendance_rate = session_avg_rate['attendance_rate__avg']\n self.save()",
"def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)",
"def make_hourly(self,rate,name):\n id = self.find_employee_id(name)\n if id in self.clsf:\n self.emp_dict[id][5] = \"1\"\n print(\"{}{}\".format(name,\" was successfully changed to be an hourly employee\"))\n self.emp_dict[id][8] = rate\n self.classification()\n return self.emp_dict\n else:\n print(\"Error- employee not found\")\n self.employee_data()",
"def put(self, uuid: str):\n try:\n employee = self.service.update_employee(\n self.schema, uuid, request.json\n )\n except ValidationError as error:\n return error.messages, 400\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200",
"def update_rating(user_id, item_id):\n\n rating_type = request.get_json().get('rating_type')\n if rating_type is None or rating_type < 1 or rating_type > 3:\n return jsonify({'message': 'Error: rating type should be between 1 and 3'}), 400\n\n con = connect()\n cursor = con.cursor()\n query = f\"\"\"\n START TRANSACTION;\n\n UPDATE counts_by_rating_type\n SET count = count - 1\n WHERE counts_by_rating_type.user_id = {user_id}\n AND counts_by_rating_type.rating_type IN (\n SELECT rating_type\n FROM ratings\n WHERE user_id = {user_id} AND item_id = {item_id}\n );\n\n UPDATE counts_by_rating_type\n SET count = count + 1\n WHERE user_id = {user_id} AND rating_type = {rating_type};\n\n INSERT INTO ratings (user_id, item_id, rating_type)\n VALUES ({user_id}, {item_id}, {rating_type})\n ON DUPLICATE KEY UPDATE\n rating_type = {rating_type};\n \n COMMIT;\n \"\"\"\n cursor.execute(query)\n cursor.close()\n return jsonify({'message': 'ok'}), 200",
"def update(self, request, pk=None, **kwargs):\n rate_update = self.get_object()\n serializer = self.serializer_class(\n rate_update, data=request.data, partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(serializer.data, status=status.HTTP_200_OK)",
"def post(self, request, *args, **kwargs):\n rating = request.data['rating']\n if rating < 0 or rating > 5:\n return Response({'detail': 'Invalid rating!'}, status.HTTP_400_BAD_REQUEST)\n\n data = {\n 'igdb': request.data['igdb'],\n 'name': request.data['name'],\n 'slug': request.data['slug'],\n 'cover_id': request.data['cover_id'],\n 'backdrop_id': request.data['backdrop_id']\n }\n game, _ = Game.objects.get_or_create(**data)\n user = CustomUser.objects.get(id=request.user.id)\n\n r, _ = Ratings.objects.get_or_create(game=game, user=user)\n r.rating = rating\n r.save()\n\n serializer = RatingSerializer(r).data\n\n return Response(serializer)"
] | [
"0.6868924",
"0.6250418",
"0.55197275",
"0.5386975",
"0.53556263",
"0.5274113",
"0.5269144",
"0.51558644",
"0.5138154",
"0.5062679",
"0.5037192",
"0.4971181",
"0.49530414",
"0.49388695",
"0.4928753",
"0.4862501",
"0.48402056",
"0.48165706",
"0.48164162",
"0.48102924",
"0.48074448",
"0.48010492",
"0.47725058",
"0.47708565",
"0.4751689",
"0.47420892",
"0.47399032",
"0.47219878",
"0.46702096",
"0.46583247"
] | 0.7238564 | 0 |
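Stripped of the Django/DRF plumbing, the core rule in the endpoint above is that the receiver gains stars equal to the giver's position weight (defaulting to 1) while the giver's stars-given counter goes up by one. A hedged sketch of just that rule, using plain dictionaries with illustrative names and numbers in place of the Employee model:

def stars_to_award(position_weight=None):
    # Falls back to a weight of 1 when the giver has no position, as in the view above.
    return position_weight if position_weight else 1

def apply_recommendation(giver, receiver):
    giver["stars_given"] = giver.get("stars_given", 0) + 1
    receiver["stars"] = receiver.get("stars", 0) + stars_to_award(giver.get("weight"))
    return receiver["stars"]

giver = {"name": "Ana", "weight": 3}      # hypothetical employees
receiver = {"name": "Luis"}
print(apply_recommendation(giver, receiver))  # 3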
This endpoint saves stars on many employees. | def give_star_to_many(request, from_employee_id):
if request.method == 'POST':
serializer_bulk = StarBulkSerializer(data=request.data)
errors = []
stars_added = 0
if serializer_bulk.is_valid():
text = (request.data['text'] if 'text' in request.data.keys() else None)
from_user = get_object_or_404(Employee, pk=from_employee_id)
category = get_object_or_404(Category, pk=request.data['category'])
keyword = get_object_or_404(Keyword, pk=request.data['keyword'])
# Create data object to save
data = {"category": category.id,
"keyword": keyword.id,
"text": text,
"from_user": from_user.id}
for user_pk in request.data['to_users']:
data.update({"to_user": int(user_pk)})
serializer = StarSerializer(data=data)
if serializer.is_valid():
serializer.save()
stars_added += 1
# Add points
to_user = get_object_or_404(Employee, pk=user_pk)
from_user.add_stars_given(1)
from_user.save()
current_level = to_user.level
# Add points to to_user according category weight
if from_user.position:
weight = from_user.position.weight
else:
weight = 1
to_user.add_stars(weight)
message = config.RECOMMENDATION_MESSAGE % (weight, from_user.first_name, from_user.last_name)
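                    # Notify the recipient that from_user has awarded them stars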
send_push_notification(to_user, message)
to_user.evaluate_level()
to_user.save()
# Add activity log if user level up
if to_user.level != current_level:
message = config.LEVEL_UP_TEXT % (to_user.first_name, to_user.last_name, to_user.level)
activity = Activity.objects.create(text=message, to_user=to_user)
activity.save()
else:
errors.append(serializer.errors)
else:
errors.append(serializer_bulk.errors)
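        # Report full success only when every star was saved; otherwise return the partial count along with the collected errors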
if len(errors) == 0:
content = {'detail': config.SUCCESSFULLY_STARS_ADDED}
return Response(content, status=status.HTTP_201_CREATED)
else:
stars_results = {"stars_added": stars_added}
detail = {'detail': errors}
content = stars_results.copy()
content.update(detail)
return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_stars():\n prod_id = int(request.vars.prod_id)\n logger.info(\"changing stars on prod_id {%s}\" %prod_id)\n rating = int(request.vars.rating)\n logger.info(\"auth.user from api: %s\"%auth.user.email )\n db.stars.update_or_insert(\n (db.stars.prod_id == prod_id) & (db.stars.user_email == auth.user.email),\n prod_id = prod_id,\n user_email = auth.user.email,\n rating = rating\n )\n new_avg = calc_avg_rating(prod_id)\n return response.json(dict(new_avg=new_avg))",
"def give_star_to(request, from_employee_id, to_employee_id):\n if from_employee_id == to_employee_id:\n content = {'detail': config.USER_UNABLE_TO_GIVE_STARS_ITSELF}\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)\n elif request.method == 'POST':\n # Set values from request.data from POST\n text = (request.data['text'] if 'text' in request.data.keys() else None)\n from_user = get_object_or_404(Employee, pk=from_employee_id)\n to_user = get_object_or_404(Employee, pk=to_employee_id)\n category = get_object_or_404(Category, pk=request.data['category'])\n keyword = get_object_or_404(Keyword, pk=request.data['keyword'])\n\n if from_user.is_blocked:\n content = {'detail': config.USER_BLOCKED_TO_GIVE_STARS}\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)\n elif to_user.is_blocked:\n content = {'detail': config.USER_BLOCKED_TO_RECEIVED_STARS}\n return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)\n\n # Create data object to save\n data = {\"category\": category.id,\n \"keyword\": keyword.id,\n \"text\": text,\n \"from_user\": from_user.id,\n \"to_user\": to_user.id}\n\n # Validate serializer with data provided.\n serializer = StarInputSerializer(data=data)\n if serializer.is_valid():\n # Save recommendation\n serializer.save()\n\n # Add 1 to employee given points\n from_user.add_stars_given(1)\n from_user.save()\n\n current_level = to_user.level\n\n # Add points to to_user according category weight\n if from_user.position:\n weight = from_user.position.weight\n else:\n weight = 1\n to_user.add_stars(weight)\n message = config.RECOMMENDATION_MESSAGE % (weight, from_user.first_name, from_user.last_name)\n send_push_notification(to_user, message)\n to_user.evaluate_level()\n to_user.save()\n\n # Add activity log if user level up\n if to_user.level != current_level:\n message = config.LEVEL_UP_TEXT % (to_user.first_name, to_user.last_name, to_user.level)\n activity = Activity.objects.create(text=message, to_user=to_user)\n send_push_notification(to_user, message)\n activity.save()\n\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def post_rating():\n\n id = request.args.get('id')\n\n rating = request.args.get('rating')\n\n record = mod.provide_rating(id, int(rating))\n\n return jsonify(record)",
"def post(self, request, slug):\n rating = request.data.get(\"rate\", {})\n serializer = self.serializer_class(data=rating)\n serializer.is_valid(raise_exception=True)\n rating = serializer.data.get('rating')\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n raise NotFound(\"An article with this slug does not exist\")\n\n ratings = Ratings.objects.filter(rater=request.user.profile,\n article=article).first()\n if not ratings:\n ratings = Ratings(\n article=article,\n rater=request.user.profile,\n stars=rating)\n ratings.save()\n avg = Ratings.objects.filter(\n article=article).aggregate(Avg('stars'))\n return Response({\n \"avg\": avg\n }, status=status.HTTP_201_CREATED)\n\n if ratings.counter >= 5:\n raise PermissionDenied(\n \"You are not allowed to rate this article more than 5 times.\"\n )\n\n ratings.counter += 1\n ratings.stars = rating\n ratings.save()\n avg = Ratings.objects.filter(article=article).aggregate(Avg('stars'))\n return Response({\"avg\": avg}, status=status.HTTP_201_CREATED)",
"def _rate_exploration(self, exp_id, num_ratings, rating):\n # Each user id needs to be unique since each user can only give an\n # exploration one rating.\n user_ids = ['user%d' % i for i in range(num_ratings)]\n for user_id in user_ids:\n rating_services.assign_rating_to_exploration(\n user_id, exp_id, rating)",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def post(self, request, format=None):\n serializer = IdeasPostSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n average_score = calculate_average_score(serializer.validated_data)\n serializer.save(average_score=average_score, author=request.user)\n return Response(serializer.data, status=status.HTTP_201_CREATED)",
"def add_rating():\n try:\n payload = request.json\n # payload = change_case(payload, \"lower\")\n for required_key in rating_schema:\n if required_key not in payload.keys():\n return jsonify({\"message\": f\"Missing {required_key} parameter\"}), 400\n \n db_rating = db.ratings.find_one(\n {\n \"user\": payload[\"user\"],\n \"business\": payload[\"business\"],\n }\n )\n if db_rating is not None:\n db.ratings.update_one({\n '_id': db_rating['_id']\n },{\n '$set': {\n 'rating': payload['rating'],\n 'comment': payload['comment']\n }\n }, upsert=False)\n else:\n db.ratings.insert_one(payload)\n \n\n # Now get updated business details\n business_id = payload[\"business\"]\n business = db.dukaans.find_one({\"_id\": ObjectId(business_id)})\n if business is None:\n return jsonify({\"success\": False, \"message\": \"Business not found.\"}), 404\n\n if len(business.get(\"categories\", [])) > 0:\n business[\"categories\"] = [\n db.categories.find_one({\"_id\": ObjectId(_id)})[\"name\"]\n for _id in business[\"categories\"]\n ]\n ratings = list(db.ratings.find({\"business\": str(business_id)}))\n if len(ratings) > 0:\n ratings_sum = sum([r[\"rating\"] for r in ratings])\n business[\"avg_rating\"] = round(float(ratings_sum) / float(len(ratings)), 1)\n else:\n business[\"avg_rating\"] = 0.0\n\n for rating in ratings:\n rating[\"user_name\"] = db.users.find_one({\"_id\": ObjectId(rating[\"user\"])})[\n \"name\"\n ]\n\n business[\"ratings\"] = ratings\n business[\"total_ratings\"] = len(ratings)\n return jsonify({\"success\": True, \"business\": clean_dict_helper(business)}), 201\n except Exception as err:\n print(\"Error: \", str(err))\n print(sys.exc_info()[-1].tb_lineno)",
"def addstar(starname):\n try:\n Star.create(name=starname)\n except IntegrityError:\n print(('Star {0} already in database. Record not created, but can be updated.'.format(starname)))",
"def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created",
"def post(self):\n args = UpdateLikeList.post_parser.parse_args()\n user_name = args.get('user_name')\n restaurant_name = args.get('restaurant_name')\n #rating = args.get('rating')\n newlike = {\n 'user_name':args.get('user_name'),\n 'restaurant_name':args.get('restaurant_name')\n #'rating':args.get('rating')\n }\n conn = db.create_connection(db.connection_config_dict)\n cursor = conn.cursor()\n\n # To get user's user_id\n user_id = []\n sql_1 = 'SELECT user_id FROM User WHERE user_name = \"{user_name}\"'.format(user_name=user_name)\n print(sql_1)\n cursor.execute(sql_1)\n for u in cursor:\n user_id.append(u)\n print(user_id) \n\n # To get restaurant's restaurant_id\n restaurant_id = []\n sql_2 = 'SELECT restaurant_id FROM Restaurant WHERE name = \"{restaurant_name}\"'.format(restaurant_name=restaurant_name)\n print(sql_2)\n cursor.execute(sql_2)\n for u in cursor:\n restaurant_id.append(u)\n print(restaurant_id)\n\n # Insert new restaurant into LikeList table\n # neo4j may need insert data here\n # user id is user_id[0][0], restaurant id is restaurant_id[0][0].\n sql_3 = \"INSERT INTO LikeList (user_id, restaurant_id) VALUES ({user_id}, {restaurant_id});\".format(user_id=user_id[0][0], restaurant_id=restaurant_id[0][0])\n print(sql_3)\n cursor.execute(sql_3)\n\n conn.commit()\n return newlike, 201",
"def post(self, request, *args, **kwargs):\n rating = request.data['rating']\n if rating < 0 or rating > 5:\n return Response({'detail': 'Invalid rating!'}, status.HTTP_400_BAD_REQUEST)\n\n data = {\n 'igdb': request.data['igdb'],\n 'name': request.data['name'],\n 'slug': request.data['slug'],\n 'cover_id': request.data['cover_id'],\n 'backdrop_id': request.data['backdrop_id']\n }\n game, _ = Game.objects.get_or_create(**data)\n user = CustomUser.objects.get(id=request.user.id)\n\n r, _ = Ratings.objects.get_or_create(game=game, user=user)\n r.rating = rating\n r.save()\n\n serializer = RatingSerializer(r).data\n\n return Response(serializer)",
"def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )",
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)",
"def save(self, *args, **kwargs):\n self.item.rates_total += 1\n self.item.average_rate += (self.item.average_rate + self.rate) / self.item.rates_total\n self.item.save()\n super(Rate, self).save(*args, **kwargs)",
"def populate_employees():\n employees = get_employees()\n\n db.session.bulk_save_objects(employees)\n db.session.commit()",
"def create_rating(input_user_id, input_rating, input_movie_id):\n \n rating = Rating(user_id=input_user_id, rating=input_rating, movie_id=input_movie_id)\n \n db.session.add(rating)\n db.session.commit()\n\n return rating",
"def update_rating(user_id, item_id):\n\n rating_type = request.get_json().get('rating_type')\n if rating_type is None or rating_type < 1 or rating_type > 3:\n return jsonify({'message': 'Error: rating type should be between 1 and 3'}), 400\n\n con = connect()\n cursor = con.cursor()\n query = f\"\"\"\n START TRANSACTION;\n\n UPDATE counts_by_rating_type\n SET count = count - 1\n WHERE counts_by_rating_type.user_id = {user_id}\n AND counts_by_rating_type.rating_type IN (\n SELECT rating_type\n FROM ratings\n WHERE user_id = {user_id} AND item_id = {item_id}\n );\n\n UPDATE counts_by_rating_type\n SET count = count + 1\n WHERE user_id = {user_id} AND rating_type = {rating_type};\n\n INSERT INTO ratings (user_id, item_id, rating_type)\n VALUES ({user_id}, {item_id}, {rating_type})\n ON DUPLICATE KEY UPDATE\n rating_type = {rating_type};\n \n COMMIT;\n \"\"\"\n cursor.execute(query)\n cursor.close()\n return jsonify({'message': 'ok'}), 200",
"def add_employee(schema, employee_json):\n employee = schema.load(employee_json, session=db.session)\n db.session.add(employee)\n db.session.commit()\n return employee",
"def updateUserRating(definition, increase):\n user = mongo.db.users.find_one({\"_id\": definition[\"submitted_by\"]})\n mongo.db.users.update_one(\n {\"_id\": user[\"_id\"]},\n {\"$inc\": {\"total_rating\": increase}})",
"def employees(self, employees: object):\n\n self._employees = employees",
"def employees_json_id(request, employee_id):\n curent_employee = Employee.objects.get(pk=int(employee_id))\n if curent_employee.is_manager:\n employee_list = Employee.objects.filter(manager=curent_employee)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n manager_dict['photo'] = employee.photo.url if employee.photo else ''\n employees.append(manager_dict)\n data = {\"employees\": employees}\n else:\n return JsonResponse(status=400, data={\"error\": \"Employee with id={} not is_manager\".format(int(employee_id))})\n return JsonResponse(data=data, content_type='application/json', safe=False)",
"def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise",
"def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise",
"def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()",
"def _create_post_requests(self, response):\n meta = response.meta.copy()\n meta['_current_star'] = {}\n asin = meta['product_id']\n\n for star in self.buyer_reviews_stars:\n args = {\n 'asin': asin, 'filterByStar': star,\n 'filterByKeyword': '', 'formatType': 'all_formats',\n 'pageNumber': '1', 'pageSize': '10', 'sortBy': 'helpful',\n 'reftag': 'cm_cr_pr_viewopt_sr', 'reviewerType': 'all_reviews',\n 'scope': 'reviewsAjax0',\n }\n meta['_current_star'] = star\n yield FormRequest(\n url=self.REVIEW_URL_1.format(domain=self.allowed_domains[0]),\n formdata=args, meta=meta,\n callback=self._get_rating_by_star_by_individual_request,\n dont_filter=True\n )",
"def update_girl(self, hash, new_rate):\n image = self._db.girls.find_one({'_id': hash})\n total_average = self.average(image['rating'], new_rate, image['count'])\n\n self._db.girls.find_one_and_update(\n {'_id': hash}, {'$inc': {'count': 1},\n '$set': {'rating': total_average}},\n return_document=pymongo.ReturnDocument.AFTER)",
"def test_post_rating(self):\n\n rating_data = {\n 'user': self.user.pk,\n 'book': self.book.pk,\n 'rating': random.randint(1, 5)\n }\n\n self.client.post(self.list_url, data=rating_data)\n\n self.book.refresh_from_db()\n\n assert self.book.ratings.count() == 1\n self.assertAlmostEqual(self.book.average_rating, rating_data['rating'])"
] | [
"0.65493494",
"0.6255241",
"0.5915746",
"0.58588463",
"0.581408",
"0.5757517",
"0.5629571",
"0.55980355",
"0.5534477",
"0.5484442",
"0.5417349",
"0.5381393",
"0.53757095",
"0.5244011",
"0.5242649",
"0.5189538",
"0.51865286",
"0.5157941",
"0.5154189",
"0.5085337",
"0.5067989",
"0.50623727",
"0.5054041",
"0.5029103",
"0.5015467",
"0.501355",
"0.50076544",
"0.49839061",
"0.49809343",
"0.49780223"
] | 0.66047436 | 0 |
Returns the list of stars received by an employee | def stars_employee_list(request, employee_id):
if request.method == 'GET':
employee = get_object_or_404(Employee, pk=employee_id)
employee_stars = Star.objects.filter(to_user=employee)
paginator = PageNumberPagination()
results = paginator.paginate_queryset(employee_stars, request)
serializer = StarSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a",
"def read_stars(self):\n if self.hip_stars: return\n all_stars = list(hipparcos.stars())\n self.hip_stars = [None]*(max(s[0] for s in all_stars)+1)\n for s in all_stars: self.hip_stars[s[0]] = s",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_category(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'category__pk',\n 'category__name').annotate(num_stars=Count('category')).order_by('-num_stars', 'category__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeCategoriesSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def list(show=0):\n global stars_\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n if show == 0:\n i=0\n for s in stars_:\n i=i+1\n print i,s[0],s[1],s[2],s[3]\n else:\n if show > 0 and show <= len(stars_):\n s = stars_[show-1]\n print show,s[0],s[1],s[2],s[3]\n else:\n print \"Bad star index\"",
"def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))",
"def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_top_employee_lists(request, top_number, kind, id):\n try:\n if request.method == 'GET':\n if kind == 'category':\n top_list = Star.objects.filter(category__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level'\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n elif kind == 'keyword':\n top_list = Star.objects.filter(keyword__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n else:\n return Response(status=status.HTTP_412_PRECONDITION_FAILED)\n serializer = StarTopEmployeeLists(top_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n raise APIException(e)",
"def stars_employee_list_group_by_category_detail(request, employee_id, category_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n category = get_object_or_404(Category, pk=category_id)\n stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def stars_keyword_list_detail(request, keyword_id):\n if request.method == 'GET':\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(keyword=keyword).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarTopEmployeeLists(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]",
"def onlyYoungDisk(self):\n names = self.getArray('name')\n\n stars = []\n\n yng = starTables.youngStarNames()\n paum = starTables.Paumard2006()\n\n for name in yng:\n # Find the star in our star lists\n try:\n idx = names.index(name)\n star = self.stars[idx]\n\n if (star.r2d >= 0.8):\n stars.append(star)\n\n # Make sure this star is in the Paumard star list\n idx = paum.ourName.index(name)\n except ValueError as e:\n # Couldn't find the star in our lists\n continue\n\n # Set the starset's star list\n self.stars = stars\n\n print(('Found %d young stars.' % len(stars)))",
"def list(self, request):\n queryset = Students.objects.filter(average_rating=5.0)\n students = normalize_students(queryset)\n return Response(students)",
"def getUserAverageStars(user, auth):\n url = 'https://api.github.com/users/{}/repos'.format(user)\n r = requests.get(url=url, auth=auth)\n stars = [rep['stargazers_count'] for rep in r.json()]\n return mean(stars)",
"def __str__(self):\n return \"{} - {}\".format(self.stars, self.user.username)",
"def reviewers(self, stars=None):\n all_reviewers = []\n number_of_pages = self._number_of_review_pages(stars) + 1\n for page_num in xrange(1, number_of_pages):\n all_reviewers.extend(self._star_reviewers(stars, page_num))\n return all_reviewers",
"def _star_reviewers(self, star_num, page_num):\n one_star_url = self._star_reviews_url(star_num, page_num)\n req = Request(one_star_url, headers=self.firefox)\n content = urlopen(req).read()\n return self._parse_reviewers(content)",
"def find_rating():\n print(\"***** Finding Star/Rating *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(\"This business is rated \" + str(\n business_object['stars']) + \" stars with \" + str(\n business_object['review_count']) + \" reviews.\\n\")\n\n print_business(business_object)",
"def ratings_usuarios(username, ratings):\n return list(filter(lambda x: x.username == username, ratings))",
"def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked",
"def fleets(self):\n\t\treturn [fleet for fleet in self.galaxy.fleets.values() if 'ouid' in fleet.data and fleet.ouid == self.star_id]",
"def print_stars():\n for i in range(2):\n for j in range(35):\n print(\"*\", end = '')\n print('')",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def find_stars():\n\theap = []\n\tsys.stdin.readline()\n\tfor star_data in sys.stdin:\n\t\tstar_data = star_data.split(',')\n\t\tif len(star_data) > 18:\n\t\t\tcoordinates = (float(star_data[X_INDEX]), float(star_data[Y_INDEX]), \n\t\t\t\t\t\t float(star_data[Z_INDEX]))\n\t\t\tdistance = cartesian_distance(*coordinates)\n\t\t\tmanage_heap(heap, coordinates, distance)\n\tprint heap\n\treturn [star[1] for star in heap]",
"def values(self):\n if '%' in self.starid:\n query = \"\"\"SELECT * from ngc2236 where starid like '%s'\"\"\" % self.starid\n else:\n query = \"\"\"SELECT * from ngc2236 where starid = '%s'\"\"\" % self.starid\n result = self.wifsip.query(query)\n values = [r for r in result[0]]\n if '%' in self.starid:\n self.starid=values[0]\n return values",
"def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()",
"def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees"
] | [
"0.65949583",
"0.59830517",
"0.5790625",
"0.5790625",
"0.5762904",
"0.57204705",
"0.57030344",
"0.5624437",
"0.56211567",
"0.56128275",
"0.5539449",
"0.54814416",
"0.54640126",
"0.54514897",
"0.5410562",
"0.5351911",
"0.5337319",
"0.5314365",
"0.52982193",
"0.52729607",
"0.52343714",
"0.52325255",
"0.5217665",
"0.5206219",
"0.5166658",
"0.514215",
"0.5133586",
"0.5112351",
"0.51098096",
"0.5066797"
] | 0.6991311 | 0 |
Returns the list of stars received by an employee, grouped by category | def stars_employee_list_group_by_category(request, employee_id):
if request.method == 'GET':
employee = get_object_or_404(Employee, pk=employee_id)
employee_stars = Star.objects.filter(to_user=employee).values(
'category__pk',
'category__name').annotate(num_stars=Count('category')).order_by('-num_stars', 'category__name')
paginator = PageNumberPagination()
result = paginator.paginate_queryset(employee_stars, request)
serializer = StarEmployeeCategoriesSerializer(result, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stars_employee_list_group_by_category_detail(request, employee_id, category_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n category = get_object_or_404(Category, pk=category_id)\n stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a",
"def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings",
"def fetch_per_category(n, path=os.path.join('data', 'yelp_academic_dataset_review.json.zip')):\n\n subsample = []\n counts = {}\n\n # Read zipped JSON\n with zipfile.ZipFile(path, 'r') as z:\n for filename in z.namelist():\n with z.open(filename) as f:\n\n # Iterate over the reviews\n for line in f:\n review = json.loads(line.decode('utf-8'))\n\n # Collect records and update the count\n if review['stars'] not in counts:\n subsample.append(review)\n counts[review['stars']] = 1\n elif counts[review['stars']] < n:\n subsample.append(json.loads(line.decode('utf-8')))\n counts[review['stars']] += 1\n\n # Break when n records are gathered for all star ratings\n if all(c == n for c in counts.values()) == n:\n break\n\n return subsample",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def processed_stars(test=False,\n categories=('books', 'dvd', 'electronics', 'kitchen')):\n\n if isinstance(categories, str):\n categories = [categories]\n\n # loop over each category and extract features and labels per line\n # append these to the final\n labeled_features = []\n for category in categories:\n # open the relevant file, either train or test\n file = f'./processed_stars/{category}/'\n if not test:\n file += 'train'\n elif test:\n file += 'test'\n with open(file, encoding='utf-8') as f:\n raw = f.read()\n # one document per line, so split into lines\n reviews = raw.split('\\n')\n # extract features and their counts for each line\n features = [{ftr[0].strip(): int(ftr[1])\n for ftr in re.findall(r'(.*?(?<!#label#)):(\\d)', line)}\n for line in reviews]\n # extract all labels\n labels = re.findall(r'#label#:(\\d+.\\d+)', raw)\n # zip the features list and labels into tuples and add to final list\n labeled_features += [(f_set, float(label))\n for f_set, label in zip(features, labels)]\n\n return labeled_features",
"def stars_top_employee_lists(request, top_number, kind, id):\n try:\n if request.method == 'GET':\n if kind == 'category':\n top_list = Star.objects.filter(category__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level'\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n elif kind == 'keyword':\n top_list = Star.objects.filter(keyword__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n else:\n return Response(status=status.HTTP_412_PRECONDITION_FAILED)\n serializer = StarTopEmployeeLists(top_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n raise APIException(e)",
"def get_user_interests_with_categories(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n cursor.execute(\"SELECT tag, category FROM \" + ENV_DB +\r\n \".UserTags WHERE username='\" + self.user.username + \"'\")\r\n data = cursor.fetchall()\r\n database.close()\r\n return list((i[0], i[1]) for i in data)",
"def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))",
"def avg_by_category(self, start_date, end_date):\n data = self.by_date(start_date, end_date)\n\n return data.values('category__name').annotate(avg_value=models.Avg('value')).order_by('category')",
"def get_ratings(self):\n df = pd.read_csv(IoManager.CARD_RATINGS_FILE_PATH)\n df = IoManager.scale_ratings(df)\n df = IoManager.normalize_ratings_per_archetype(df)\n df = self.add_ratings_sum(df)\n # print(df[[\"name\", \"monogreen\", \"simic_ramp\", \"general\"]].tail(60))\n # print(df[[\"name\", \"general\"]].sort_values(ascending=False, by=\"general\").head(50))\n return df",
"def get_female_diversity_movies_lst():\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT movies.movie_id, movies.title as title, COUNT(IF(profile.gender='1',1,NULL)) / NULLIF(COUNT(IF(profile.gender='1',1,NULL))+COUNT(IF(profile.gender='2',1,NULL)), 0) as ratio\"\n + \" FROM movie_crew, profile, movies WHERE profile.profile_id = movie_crew.profile_id AND movie_crew.movie_id = movies.movie_id \"\n + \" GROUP BY movies.movie_id HAVING ratio >= 0 \")\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst",
"def ratings_usuarios(username, ratings):\n return list(filter(lambda x: x.username == username, ratings))",
"def get_users_movies(myRatings):\n #return [x[1] for x in myRatings]\n return list(myRatings.map(lambda x: x[1]).collect())",
"def list(self, request):\n queryset = Students.objects.filter(average_rating=5.0)\n students = normalize_students(queryset)\n return Response(students)",
"def students_data():\n\n return [\n {'name': 'Alexey', 'rate': 2, 'course': 'Python'},\n {'name': 'Vali', 'rate': 5, 'course': 'Java'},\n {'name': 'Olga', 'rate': 4, 'course': 'Python'},\n {'name': 'Frank', 'rate': 5, 'course': 'Python'},\n {'name': 'Masha', 'rate': 3, 'course': 'Java'},\n {'name': 'Vasily', 'rate': 2, 'course': 'Java'},\n {'name': 'Daria', 'rate': 3, 'course': 'Python'},\n {'name': 'Nickname', 'rate': 4, 'course': 'Python'},\n {'name': 'Fort', 'rate': 3, 'course': 'Java'},\n {'name': 'Lama', 'rate': 4, 'course': 'Java'},\n {'name': 'Pop', 'rate': 2, 'course': 'Python'},\n {'name': 'Sort', 'rate': 3, 'course': 'Python'},\n {'name': 'Elya', 'rate': 5, 'course': 'Java'},\n {'name': 'Tolik', 'rate': 4, 'course': 'Python'},\n ]",
"def get_aggregations(self):\n return []",
"def get_average(values_per_category):\n sum = 0\n num_categories = 0\n for category in values_per_category:\n num_categories += 1\n sum += values_per_category[category]\n return sum / num_categories",
"def get_mean_for_user(df,genres, userID):\n #PROFIL UŻYTWKONIKA#\n\n\n mean_for_user = {}\n for genre in genres:\n mean_for_user[genre] = df[(df['userID'] == userID ) & (df['genre'] == genre)]['rating'].mean()\n change_nan(mean_for_user)\n return mean_for_user",
"def get_reviews(df, col, stars):\n log.info('Number of reviews to extract: {}'.format(stars))\n log.info(\n 'Number of available reviews: {}'.format(df[col].value_counts()))\n if [x for x in df[col].value_counts() if x < min(stars.values())]:\n raise Exception(\"To many review chosen from dataset\")\n idxs = []\n for star, n_rev in stars.iteritems():\n idxs += random.sample(df[df[col] == star].index, n_rev)\n return idxs",
"def avg_by_day(self, start_date, end_date, category, user):\n data = self.by_date(start_date, end_date)\n return data.values('record_date').annotate(avg_value=models.Avg('value')).order_by('record_date') \\\n .filter(category__name=category, user__name=user)",
"def all_categories_for_phrase(db, phrase, access_codes):\n ratings = [0, 0, 0]\n for access_code in access_codes:\n category_index = annotator_category_for_phrase(db, phrase, access_code)\n ratings[category_index] += 1\n return ratings",
"def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked",
"def get_used():\r\n sql = text('''\r\n SELECT category.* FROM category, app\r\n WHERE app.category_id=category.id GROUP BY category.id\r\n ''')\r\n results = db.engine.execute(sql)\r\n categories = []\r\n for row in results:\r\n category = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n description=row.description)\r\n categories.append(category)\r\n return categories",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def all_prods(request):\n products = Product.objects.all()\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': products,\n 'stars': stars\n }\n return render(request, \"products.html\", context)",
"def getAllJudgeRatings(self):\n\n judgesExcelLogger.info(\"getAllJudgeRatings: Attempting to get ratings from all judges \"\n \"for set '%s'\", self.setName)\n try:\n for judgeName in self.judgeNames:\n self.judgeToRating[judgeName] = self.getRatingsFromJudge(judgeName)\n except:\n judgesExcelLogger.warning(\"getAllJudgeRatings: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))"
] | [
"0.7007525",
"0.59851885",
"0.5556762",
"0.55023956",
"0.5407123",
"0.5271068",
"0.5266676",
"0.5217127",
"0.51259863",
"0.5058241",
"0.5042192",
"0.49784303",
"0.49537513",
"0.49003536",
"0.48288986",
"0.4767216",
"0.47545198",
"0.4754141",
"0.47451153",
"0.4737546",
"0.4733727",
"0.4725858",
"0.46692902",
"0.46635798",
"0.46582162",
"0.46521872",
"0.4646946",
"0.4646946",
"0.46348158",
"0.46263307"
] | 0.7557061 | 0 |
Returns the detailed list of stars received by an employee, filtered by a given category | def stars_employee_list_group_by_category_detail(request, employee_id, category_id):
if request.method == 'GET':
employee = get_object_or_404(Employee, pk=employee_id)
category = get_object_or_404(Category, pk=category_id)
stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')
paginator = PageNumberPagination()
results = paginator.paginate_queryset(stars, request)
serializer = StarSmallSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stars_employee_list_group_by_category(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'category__pk',\n 'category__name').annotate(num_stars=Count('category')).order_by('-num_stars', 'category__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeCategoriesSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_top_employee_lists(request, top_number, kind, id):\n try:\n if request.method == 'GET':\n if kind == 'category':\n top_list = Star.objects.filter(category__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level'\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n elif kind == 'keyword':\n top_list = Star.objects.filter(keyword__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n else:\n return Response(status=status.HTTP_412_PRECONDITION_FAILED)\n serializer = StarTopEmployeeLists(top_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n raise APIException(e)",
"def stars_keyword_list_detail(request, keyword_id):\n if request.method == 'GET':\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(keyword=keyword).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarTopEmployeeLists(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def fetch_per_category(n, path=os.path.join('data', 'yelp_academic_dataset_review.json.zip')):\n\n subsample = []\n counts = {}\n\n # Read zipped JSON\n with zipfile.ZipFile(path, 'r') as z:\n for filename in z.namelist():\n with z.open(filename) as f:\n\n # Iterate over the reviews\n for line in f:\n review = json.loads(line.decode('utf-8'))\n\n # Collect records and update the count\n if review['stars'] not in counts:\n subsample.append(review)\n counts[review['stars']] = 1\n elif counts[review['stars']] < n:\n subsample.append(json.loads(line.decode('utf-8')))\n counts[review['stars']] += 1\n\n # Break when n records are gathered for all star ratings\n if all(c == n for c in counts.values()) == n:\n break\n\n return subsample",
"def list(self, request):\n queryset = Students.objects.filter(average_rating=5.0)\n students = normalize_students(queryset)\n return Response(students)",
"def getCategory():",
"def _get_rating(snippet_html, category):\n attr = 'rating-container-{category}'.format(category=category)\n ratings_table_html = snippet_html.find('td', 'listingratings')\n category_html = ratings_table_html.find('div', attr)\n return int(list(list(category_html.children)[1])[0])",
"def all_prods(request):\n products = Product.objects.all()\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': products,\n 'stars': stars\n }\n return render(request, \"products.html\", context)",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a",
"def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings",
"def apparel(request):\n results = Product.objects.filter(category__icontains='A')\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': results,\n 'stars': stars\n }\n if not results:\n messages.error(request, \"No apparel as of yet, that will change soon!\")\n return redirect(reverse('products'))\n else:\n return render(request, \"products.html\", context)",
"def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]",
"def CategoryScore(Category):\r\n \r\n Category = pd.read_excel('OutdoorScores.xlsx', Category , \r\n usecols=[0,1,2,3,4])\r\n ResultCategory = Category.sort_values(['Score','Golds','Hits'],\r\n ascending=[False,False,False],na_position='last')\r\n ResultCategory = ResultCategory.reset_index(drop=True)\r\n N=0\r\n for i in range(100):\r\n N += 1\r\n if pd.isnull(Category.loc[N,'Name']) == True: \r\n # looks at row N, column 'Name'\r\n break\r\n return ResultCategory[0:N] # if the cell is NaN, stops at row N\r",
"def cat_details(cat_id, shelter_id):\n\n shelter = petfinder.shelter_data_map(shelter_id)\n shelter = list(shelter.values())\n cat = petfinder.cat_data_map(cat_id)\n cat = list(cat.values())\n\n return render_template('more_details.html',\n shelter=shelter,\n cat=cat)\n\n #if user selects <3 to favorite a cat then redirct to the login page",
"def avg_by_category(self, start_date, end_date):\n data = self.by_date(start_date, end_date)\n\n return data.values('category__name').annotate(avg_value=models.Avg('value')).order_by('category')",
"def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")",
"def classification(self):\n for i in self.emp_id:\n if self.emp_dict[i][5] == \"1\":\n self.clsf[i] = \"Hourly\"\n elif self.emp_dict[i][5] == \"2\":\n self.clsf[i] = \"Salaried\"\n elif self.emp_dict[i][5] == \"3\":\n self.clsf[i] = \"Commissioned\"\n else:\n self.clsf[i] = \"Error\"\n\n #print(self.clsf)\n return self.clsf",
"def single_prod(request, pk):\n product = get_object_or_404(Product, pk=pk)\n stars = Product.objects.filter(id=pk).annotate(\n avg_review=Avg('productreview__rating')\n )\n context = {\n 'product': product,\n 'stars': stars,\n }\n return render(request, 'aproduct.html', context)",
"def find_some_item_from_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n print('{:10s}{:10s}{:10s}'.format('일련번호', '평균', 'Grade'))\n print(target_list[['average', 'grade']].to_string(header=False, col_space=10))",
"def employee_list_group_by_badges_detail(request, badge_id):\n if request.method == 'GET':\n badge = get_object_or_404(Badge, pk=badge_id)\n employee_list = EmployeeBadge.objects.filter(badge=badge).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_list, request)\n serializer = EmployeeGroupedListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def extended_rating(self, soup):\n logging.info('Getting hotel extended rating.')\n extended_rating = {}\n if soup.select_one('div.v2_review-scores__body.v2_review-scores__body--compared_to_average') is None:\n logging.error('Cant get extended rating.')\n extended_rating = {}\n else:\n for rating in soup.select_one(\n 'div.v2_review-scores__body.v2_review-scores__body--compared_to_average').findAll(\n 'li', {\"class\": \"v2_review-scores__subscore\"}):\n rating_name = rating.find(\"div\", {\"class\": \"c-score-bar\"}).contents[0].text.strip()\n rating_score = rating.find(\"div\", {\"class\": \"c-score-bar\"}).contents[1].text\n extended_rating[rating_name] = rating_score\n return extended_rating",
"def find_rating():\n print(\"***** Finding Star/Rating *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(\"This business is rated \" + str(\n business_object['stars']) + \" stars with \" + str(\n business_object['review_count']) + \" reviews.\\n\")\n\n print_business(business_object)",
"def processed_stars(test=False,\n categories=('books', 'dvd', 'electronics', 'kitchen')):\n\n if isinstance(categories, str):\n categories = [categories]\n\n # loop over each category and extract features and labels per line\n # append these to the final\n labeled_features = []\n for category in categories:\n # open the relevant file, either train or test\n file = f'./processed_stars/{category}/'\n if not test:\n file += 'train'\n elif test:\n file += 'test'\n with open(file, encoding='utf-8') as f:\n raw = f.read()\n # one document per line, so split into lines\n reviews = raw.split('\\n')\n # extract features and their counts for each line\n features = [{ftr[0].strip(): int(ftr[1])\n for ftr in re.findall(r'(.*?(?<!#label#)):(\\d)', line)}\n for line in reviews]\n # extract all labels\n labels = re.findall(r'#label#:(\\d+.\\d+)', raw)\n # zip the features list and labels into tuples and add to final list\n labeled_features += [(f_set, float(label))\n for f_set, label in zip(features, labels)]\n\n return labeled_features",
"def stars(self, magnitude=20):\n # Get the stars that are visible within this chart.\n thestars = []\n for s in self.hip_stars:\n if not s: continue\n hip_id, mag, ra, dec, bv = s\n if mag>magnitude: continue\n if dec<min(self.inner_dec, self.outer_dec): continue\n if dec>max(self.inner_dec, self.outer_dec): continue\n thestars.append(s)\n # This should sort them by increasing magnitude (brightest first).\n thestars.sort(key=lambda a:a[1])\n if not thestars: return\n # Set the least bright magnitude.\n self.dimmest_mag = math.floor(thestars[-1][1])\n # Create the star group.\n star_g = self.make_element(self.centered, 'g', (\n 'stroke', 'none'), ('fill', 'black'), (\n 'clip-path', 'url(#innerClipPath)'))\n for hip_id, mag, ra, dec, bv in thestars:\n x, y = self.radec2xy(ra, dec)\n self.make_element(star_g, 'circle', (\n 'cx', x), ('cy', y), ('r', self.starsize(hip_id)))",
"def avg_by_day(self, start_date, end_date, category, user):\n data = self.by_date(start_date, end_date)\n return data.values('record_date').annotate(avg_value=models.Avg('value')).order_by('record_date') \\\n .filter(category__name=category, user__name=user)"
] | [
"0.7767644",
"0.627831",
"0.6173061",
"0.61503166",
"0.6051116",
"0.5625467",
"0.5144692",
"0.51389676",
"0.5044438",
"0.50162166",
"0.4995553",
"0.49473953",
"0.49473953",
"0.4908418",
"0.48810714",
"0.4879202",
"0.48721278",
"0.48388222",
"0.4828112",
"0.48069254",
"0.48045638",
"0.47802263",
"0.47699466",
"0.47595552",
"0.47509733",
"0.47156978",
"0.47095805",
"0.47068506",
"0.4705255",
"0.4694144"
] | 0.7837158 | 0 |
Returns stars list detail from employee divided by keyword | def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):
if request.method == 'GET':
employee = get_object_or_404(Employee, pk=employee_id)
keyword = get_object_or_404(Keyword, pk=keyword_id)
stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')
paginator = PageNumberPagination()
results = paginator.paginate_queryset(stars, request)
serializer = StarSmallSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_keyword_list_detail(request, keyword_id):\n if request.method == 'GET':\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(keyword=keyword).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarTopEmployeeLists(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_keyword_list(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n search_term = request.GET.get('search')\n star_list = Star.objects.filter(\n Q(keyword__name__icontains=search_term)).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n else:\n star_list = Star.objects.all().values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(star_list, request)\n serializer = StarKeywordList(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_category_detail(request, employee_id, category_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n category = get_object_or_404(Category, pk=category_id)\n stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_category(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'category__pk',\n 'category__name').annotate(num_stars=Count('category')).order_by('-num_stars', 'category__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeCategoriesSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def find_some_item_from_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n print('{:10s}{:10s}{:10s}'.format('일련번호', '평균', 'Grade'))\n print(target_list[['average', 'grade']].to_string(header=False, col_space=10))",
"def stars_top_employee_lists(request, top_number, kind, id):\n try:\n if request.method == 'GET':\n if kind == 'category':\n top_list = Star.objects.filter(category__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level'\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n elif kind == 'keyword':\n top_list = Star.objects.filter(keyword__id=id).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]\n else:\n return Response(status=status.HTTP_412_PRECONDITION_FAILED)\n serializer = StarTopEmployeeLists(top_list, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Exception as e:\n raise APIException(e)",
"def find_rating():\n print(\"***** Finding Star/Rating *****\")\n while (True):\n print()\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(\"This business is rated \" + str(\n business_object['stars']) + \" stars with \" + str(\n business_object['review_count']) + \" reviews.\\n\")\n\n print_business(business_object)",
"def DisplayIdea(keyword):\n logger.info('Found Keyword with text [%s]' % keyword['KEYWORD_TEXT'])\n logger.info(' Keyword Idea search volume: %s' % keyword['SEARCH_VOLUME'])\n logger.info(' Keyword Idea average CPC: %s' % keyword['AVERAGE_CPC'])\n logger.info(' Keyword Idea categories: %s' % keyword['CATEGORY_PRODUCTS_AND_SERVICES'])",
"def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a",
"def extract_review_rating(soup):\r\n notes = (\"One\", \"Two\", \"Three\", \"Four\", \"Five\" )\r\n review_rating = \"None\"\r\n section = soup.find(\"div\", attrs={\"class\": \"col-sm-6 product_main\"})\r\n for n in notes:\r\n note = \"star-rating \" + n\r\n if section.find(\"p\", attrs={\"class\": note}):\r\n review_rating = n \r\n return review_rating",
"def results():\n\n queryName = request.form['query']\n queryStars = request.form['stars']\n \n datasource = DataSource()\n listOfRestaurantNames = datasource.searchRestaurantsByNameAndMinimumStars(queryName, queryStars)\n restaurants = datasource.generateRestaurantObjects(listOfRestaurantNames[:15])\n\n return render_template('results.html', restaurants=restaurants)",
"def lookup_employee():\n unique_names = get_unique_employees()\n while True:\n if len(unique_names) > 1:\n print('Entries found by {} and {}.'.format(\n ', '.join(unique_names[:-1]),\n unique_names[-1]))\n elif len(unique_names) == 1:\n print('Entries found by {}.'.format(unique_names[0]))\n\n search_query = input('Show entries by: ')\n if validate_lookup_employee_format(search_query):\n break\n print('** Please enter a name of alphabetic characters and spaces **')\n return Entry.select().where(Entry.employee_name == search_query)",
"def search_keyword(self,keyword):\n for entry in self.available_fields_list:\n for x in entry:\n if keyword in x:\n print(entry)\n break\n return",
"def search_mApe_title (title,format):\n\n mape_main_url = 'https://www.mightyape.co.nz/'\n # Defining the url paths for search types\n mape_mv_category_url = 'movies-tv/movies/all?q='+parse.quote_plus(title)+\"+\"\n mape_mv_format_search_url = 'movieformat~'+format\n\n # This is the final url string\n\n searchUrl = mape_main_url+mape_mv_category_url+mape_mv_format_search_url\n #'https://www.mightyape.co.nz/movies-tv/movies/all?sort=2&q=movieformat~blu-ray'\n\n # Using a dictionary to store data, as contains list with objects\n mape_list = {}\n\n page = requests.get(searchUrl)\n tree = html.fromstring(page.content)\n\n data = tree.xpath(\n '//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]/div[@class=\"title\"]/a') # <--- WORKS\n\n data_alt = tree.xpath('//div[@class=\"product-list gallery-view\"]/div[@class=\"product\"]')\n\n print('Getting results from url:', searchUrl)\n print('Number of objects=', len(data_alt))\n count = 1\n\n for item in data_alt:\n simple_item = item.xpath('div[@class=\"title\"]/a')\n title = simple_item[0].text\n link = simple_item[0].get('href')\n format = item.xpath('div[@class=\"format\"]/text()')\n rating = item.xpath('div[@class=\"customer-rating\"]/span/span[@class=\"average\"]/text()')\n base_price = item.xpath('div[@class=\"price\"]/s/text()')\n hot_price = item.xpath('div[@class=\"price\"]/span[@class=\"price hot\"]/text()')\n normal_price = item.xpath('div[@class=\"price\"]/span[@class=\"price\"]/text()')\n if len(rating) > 0:\n # temp_mv = Movie_object(title,format[0],rating[0].strip(), mape_main_url + link,normal_price, base_price, hot_price)\n print(title, format[0], rating[0].strip(), mape_main_url + link, normal_price, base_price, hot_price)\n # mape_list[title] = temp_mv\n else:\n print(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n # temp_mv = Movie_object(title, format[0], 'n/a', mape_main_url + link, normal_price, base_price, hot_price)\n # mape_list[title] = temp_mv\n\n count += 1\n\n return mape_list",
"def employee_list_group_by_badges(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n search_term = request.GET.get('search')\n badge_list = EmployeeBadge.objects.filter(\n Q(badge__name__icontains=search_term)).values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n else:\n badge_list = EmployeeBadge.objects.all().values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(badge_list, request)\n serializer = EmployeeBadgeListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def findRatings():\n if request.method == 'POST':\n connector = appEngine.connect()\n rating = int(request.form['rating'])\n joinTable = connector.execute(\"SELECT movie.movieName, actor.actorName, rating.rating FROM movie INNER JOIN rating ON movie.movieID=rating.movie_ID INNER JOIN movie_actor ON movie.movieID=movie_actor.movie_ID INNER JOIN actor ON movie_actor.actor_ID=actor.actorID WHERE rating.rating >= (?);\", (rating))\n result = {'data': [dict(zip(tuple(joinTable.keys()), i)) for i in joinTable.cursor]}\n return result\n return render_template('rating_search.html')",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def starred(request):\n stars = models.Account.current_user_account.stars\n if not stars:\n issues = []\n else:\n starred_issue_keys = [ndb.Key(models.Issue, i) for i in stars]\n issues = [issue for issue in ndb.get_multi(starred_issue_keys)\n if issue and issue.view_allowed]\n _load_users_for_issues(issues)\n _optimize_draft_counts(issues)\n return respond(request, 'starred.html', {'issues': issues})",
"def lookup_search_term():\n while True:\n search_query = input('Show entries containing (in name or notes): ')\n if validate_lookup_search_term_format(search_query):\n break\n print('** Please enter search term **')\n return (Entry.select().where(Entry.employee_name.contains(search_query)) |\n Entry.select().where(Entry.task_notes.contains(search_query)))",
"def get_mean_for_user(df,genres, userID):\n #PROFIL UŻYTWKONIKA#\n\n\n mean_for_user = {}\n for genre in genres:\n mean_for_user[genre] = df[(df['userID'] == userID ) & (df['genre'] == genre)]['rating'].mean()\n change_nan(mean_for_user)\n return mean_for_user",
"def onlinedata(star):\n data = None\n if not isinstance(star, list):\n star = [star]\n for s in star:\n # Stacking the results one after each in a numpy array.\n s = correctname(s)\n print(('Star : {0}'.format(s)))\n d = query(s)\n if data is None:\n data = np.array(d)\n else:\n data = np.hstack((data, d))\n df = pd.DataFrame(data)\n df = correctcoordinates(df)\n return df",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def getUserAverageStars(user, auth):\n url = 'https://api.github.com/users/{}/repos'.format(user)\n r = requests.get(url=url, auth=auth)\n stars = [rep['stargazers_count'] for rep in r.json()]\n return mean(stars)",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def extended_rating(self, soup):\n logging.info('Getting hotel extended rating.')\n extended_rating = {}\n if soup.select_one('div.v2_review-scores__body.v2_review-scores__body--compared_to_average') is None:\n logging.error('Cant get extended rating.')\n extended_rating = {}\n else:\n for rating in soup.select_one(\n 'div.v2_review-scores__body.v2_review-scores__body--compared_to_average').findAll(\n 'li', {\"class\": \"v2_review-scores__subscore\"}):\n rating_name = rating.find(\"div\", {\"class\": \"c-score-bar\"}).contents[0].text.strip()\n rating_score = rating.find(\"div\", {\"class\": \"c-score-bar\"}).contents[1].text\n extended_rating[rating_name] = rating_score\n return extended_rating",
"def query_employee_skill(self):\n\n query = \"select Skill_Descrpt, Emp_Fname, Emp_Lname from \" \\\n \"skill, employee, empskill \" \\\n \"where employee.Emp_ID = empskill.Emp_ID \" \\\n \"and skill.Skill_ID = empskill.Skill_ID \"\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)",
"def suggest_names(request):\n if request.method == \"GET\":\n contains = request.GET[\"q\"]\n artikel_list = Artikel.objects.filter(naziv__icontains=contains)\n if artikel_list:\n if len(artikel_list) > 12:\n artikel_list = artikel_list[:12]\n\n return render_to_response('invoices/lookup_artikel.html',\n {'seznam_artiklov': artikel_list}) #,\n #context_instance=RequestContext(request))",
"def all_prods(request):\n products = Product.objects.all()\n stars = Product.objects.annotate(\n avg_review=Avg('productreview__rating'),\n )\n context = {\n 'products': products,\n 'stars': stars\n }\n return render(request, \"products.html\", context)"
] | [
"0.7377244",
"0.71312815",
"0.61359555",
"0.6098041",
"0.5693139",
"0.5631095",
"0.5615941",
"0.55251753",
"0.5348024",
"0.52622336",
"0.51969576",
"0.4985614",
"0.4914094",
"0.48876992",
"0.48755857",
"0.48753193",
"0.48228306",
"0.48139346",
"0.48138267",
"0.48138267",
"0.48016977",
"0.47979796",
"0.47936067",
"0.47595736",
"0.4732856",
"0.47312123",
"0.47268027",
"0.46967357",
"0.46912196",
"0.46695814"
] | 0.7438995 | 0 |
Returns stars top {top_number} list according to {kind} (category, keyword) {id} (kind_id) | def stars_top_employee_lists(request, top_number, kind, id):
try:
if request.method == 'GET':
if kind == 'category':
top_list = Star.objects.filter(category__id=id).values(
'to_user__pk',
'to_user__username',
'to_user__first_name',
'to_user__last_name',
                'to_user__level',
                'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]
elif kind == 'keyword':
top_list = Star.objects.filter(keyword__id=id).values(
'to_user__pk',
'to_user__username',
'to_user__first_name',
'to_user__last_name',
'to_user__level',
'to_user__avatar').annotate(num_stars=Count('to_user')).order_by('-num_stars')[:top_number]
else:
return Response(status=status.HTTP_412_PRECONDITION_FAILED)
serializer = StarTopEmployeeLists(top_list, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
except Exception as e:
raise APIException(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_top_stars():\n topStars = {'1': {'amm': [{'name': 'G1_1', 'x': 2509.0, 'y': 555.0},\n {'name': 'G1_2', 'x': 2294.0, 'y': 1638.5}],\n 'ben': [{'name': 'G1_1', 'x': 2534.5, 'y': 563.5},\n {'name': 'G1_2', 'x': 2320.0, 'y': 1643.0}],\n 'jlu': [{'name': 'G1_1', 'x': 2532.0, 'y': 561.0},\n {'name': 'G1_2', 'x': 2317.5, 'y': 1644.0}]},\n '2': {'amm': [{'name': 'G2_1', 'x': 1659.0, 'y': 1595.4},\n {'name': 'G2_2', 'x': 507.0, 'y': 1394.5}],\n 'ben': [{'name': 'G2_1', 'x': 1665.5, 'y': 1602.0},\n {'name': 'G2_2', 'x': 513.5, 'y': 1401.0}],\n 'jlu': [{'name': 'G2_1', 'x': 1663.0, 'y': 1599.5},\n {'name': 'G2_2', 'x': 511.0, 'y': 1398.0}]},\n '3': {'amm': [{'name': 'G3_1', 'x': 507.0, 'y': 2344.0},\n {'name': 'G3_2', 'x': 1819.0, 'y': 2877.5}],\n 'ben': [{'name': 'G3_1', 'x': 522.0, 'y': 2386.5},\n {'name': 'G3_2', 'x': 1828.5, 'y': 2933.0}],\n 'jlu': [{'name': 'G3_1', 'x': 519.0, 'y': 2384.0},\n {'name': 'G3_2', 'x': 1826.0, 'y': 2930.5}]},\n '4': {'amm': [{'name': 'G4_1', 'x': 3470.0, 'y': 2781.5},\n {'name': 'G4_2', 'x': 2441.0, 'y': 3481.0}],\n 'ben': [{'name': 'G4_1', 'x': 3509.0, 'y': 2834.0},\n {'name': 'G4_2', 'x': 2469.3, 'y': 3519.5}],\n 'jlu': [{'name': 'G4_1', 'x': 3505.0, 'y': 2831.5},\n {'name': 'G4_2', 'x': 2467.0, 'y': 3516.9}]}}\n\n users = ['amm', 'ben', 'jlu']\n frames = ['58', '59', '60']\n chips = ['1', '2', '3', '4']\n\n root = 'S20121230S00'\n for user in users:\n for frame in frames:\n for chip in chips:\n starlist = '{0}{1}_{2}_{3}.lis'.format(root, frame,\n chip, user)\n\n move_stars_up(starlist, topStars[chip][user])\n\n return",
"def top_by_ratings(self, n, metric=average):\n return top_movies",
"def top_by_num_of_ratings(self, n):\n return top_movies",
"def get_top_recipes(df, sort_params=None, count=10):\n if not sort_params:\n logging.warning(\"Column names to soty by are not defined.\")\n return df\n\n return df.sort_values(sort_params[\"names\"],\n ascending=sort_params[\"order\"]).head(count)",
"async def get_top_trending_tags_summary():\n # Same results, more overhead:\n #return [tag['name'] for tag in await get_trending_tags('', 50)]\n sql = \"\"\"\n SELECT category\n FROM hive_posts_cache\n WHERE is_paidout = '0'\n GROUP BY category\n ORDER BY SUM(payout) DESC\n LIMIT 50\n \"\"\"\n return query_col(sql)",
"def top_ten(subreddit):\n url = \"https://www.reddit.com/r/\" + subreddit + \"/hot.json?limit=10\"\n identify = {\"User-Agent\": \"Requests library from Python\",\n \"From\": \"[email protected]\"}\n to_print = []\n hot = requests.get(url, headers=identify, allow_redirects=False)\n if hot.status_code == 404:\n print(\"None\")\n return 0\n if hot.status_code == 200:\n hot = hot.json()\n hot = hot[\"data\"]\n hot = hot[\"children\"]\n for items in hot:\n del items[\"kind\"]\n for data in hot:\n to_print.append(data[\"data\"])\n hot = to_print\n to_print = []\n for dictio in hot:\n to_print.append(dictio[\"title\"])\n for itera in to_print:\n print(itera)",
"def top_ten(subreddit):\n\n limit = \"10\"\n\n url = \"https://www.reddit.com/r/{}/hot.json?limit={}\".format(subreddit,\n limit)\n\n user_agent = {\"User-Agent\": \"Python\"}\n response = requests.get(url, headers=user_agent, allow_redirects=False)\n if response.status_code >= 300:\n print(\"None\")\n else:\n for elem in response.json().get(\"data\").get(\"children\"):\n print(elem.get(\"data\").get(\"title\"))",
"def test_top_stars(self):\n self._create_stars()\n self._create_observations()\n expected = [\n {\n \"star_id\": self.star.id,\n \"star__name\": self.star.name,\n \"observations_count\": 10,\n },\n {\n \"star_id\": self.periodic_star.id,\n \"star__name\": self.periodic_star.name,\n \"observations_count\": 8,\n },\n ]\n top_stars = list(Observation.objects.top_stars())\n self.assertEqual(top_stars, expected)",
"def top_ten(subreddit):\n\n user_agent = {'User-agent': 'Mozilla/5.0 (Macintosh; \\\nIntel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) \\\nChrome/39.0.2171.95 Safari/537.36'}\n\n res = requests.get('https://www.reddit.com/r/{}/hot.json?limit=10'.format(\n subreddit), headers=user_agent)\n\n if res.status_code == 404:\n print(None)\n\n else:\n for sub in res.json().get(\"data\").get(\"children\"):\n print(sub.get(\"data\").get(\"title\"))",
"def top_ten(subreddit):\n\n if subreddit is None or not isinstance(subreddit, str):\n print(\"None\")\n\n user_agent = {'User-agent': 'Google Chrome Version 81.0.4044.129'}\n params = {'limit': 10}\n url = 'https://www.reddit.com/r/{}/hot/.json'.format(subreddit)\n\n response = get(url, headers=user_agent, params=params)\n all_data = response.json()\n\n try:\n raw1 = all_data.get('data').get('children')\n\n for i in raw1:\n print(i.get('data').get('title'))\n\n except:\n print(\"None\")",
"def get_popularity_based_topk(self, top_k=10, sort_top_k=False):\n\n test_scores = np.array([self.item_frequencies])\n\n logger.info('Getting top K')\n top_items, top_scores = get_top_k_scored_items(\n scores=test_scores, top_k=top_k, sort_top_k=sort_top_k\n )\n\n return pd.DataFrame(\n {\n self.col_item: [\n self.index2item[item] for item in top_items.flatten()\n ],\n self.col_prediction: top_scores.flatten(),\n }\n )",
"def top_ten(subreddit):\n headers = {\"User-Agent\": \"Holberton\"}\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(\n subreddit)\n req = requests.get(url, headers=headers)\n\n if req.status_code != 200:\n print(None)\n return\n redit = req.json().get(\"data\").get(\"children\")\n for chil in redit:\n print(chil.get(\"data\").get(\"title\"))",
"def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results",
"def top_girls(self):\n return [girl for girl in self._db.girls.find().sort('rating', pymongo.DESCENDING).limit(5)]",
"def top_ten(subreddit):\n try:\n info = requests.get('https://www.reddit.com/r/{}/hot.json?limit=10'\n .format(subreddit), allow_redirects=False,\n headers={'User-Agent': 'Custom'}).json().get(\n 'data').get('children')\n for child in info:\n print(child.get('data').get('title'))\n except:\n print('None')",
"def top_tracks(genre):\n\tartist = random.choice(genre_artist[genre])\n\ttop_tracks = search_for_artist_top_tracks(artist)\n\titems = []\n\tif top_tracks:\n\t\tfor track in top_tracks:\n\t\t\titems.append({\"artist\": track[\"artists\"][0][\"name\"], \"popularity\": track[\"popularity\"], \"track\": track[\"name\"],\n\t\t\t\t \"preview_url\": track[\"preview_url\"], \"album_image_url\": track[\"album\"][\"images\"][2][\"url\"]})\n\t\titems = sorted(items, key=lambda x: x['popularity'], reverse=True)\n\t\tfor item in items:\n\t\t\tdel item['popularity']\n\t\treturn items\n\telse:\n\t\treturn None",
"def get_user_top_choices( self, user_id, n = 5 ):\t\n\t\tuser_df = ( self.df[ self.df['user_id'] == user_id ][[ 'business_id', 'stars' ]]\n\t\t\t\t\t.sort_values( ['stars'], ascending = False )\n\t\t\t\t\t.head(n) )\n\t\treturn user_df",
"def top_ten(subreddit):\n h = requests.utils.default_headers()\n h.update({'User-Agent': 'My User Agent 1.0'})\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(subreddit)\n r = requests.get(url, headers=h).json()\n result = r.get('data', {}).get('children', [])\n if not result:\n print(None)\n for i in result:\n print(i.get('data').get('title'))",
"def __get_top_with_detail(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productId', keep=\"first\")[\n :top]\n\n return result",
"def top_ten(subreddit):\n url = \"https://www.reddit.com/r/\" + subreddit + \"/top/.json\"\n r = requests.get(url,\n headers={'User-agent': 'norman'},\n params={\"limit\": 10},\n allow_redirects=False)\n if r.status_code == 200:\n for dic in r.json().get('data').get('children'):\n print(dic.get('data').get('title'))\n else:\n print(\"None\")",
"def top(self, **kwargs) -> Dict[str, Any]:",
"def top_ten(subreddit):\n header = {\"User-Agent\": \"Holberton\"}\n url = \"https://www.reddit.com/r/{}/hot.json?limit=10\".format(subreddit)\n response = requests.get(url, headers=header, allow_redirects=False)\n if response.status_code == 200:\n\n for item in response.json().get(\"data\", None).get(\"children\", None):\n print(item.get(\"data\", None).get(\"title\", None))\n else:\n print(None)\n return",
"def get_top_tweets():\n Tweet.top_tweets = [(k, v) for k, v in sorted(Tweet.hashtag_counter.items(), key=lambda item: item[1], reverse=True)]\n top_10_tweets = {}\n top_10_tweets['top_tweets'] = []\n for tweet in Tweet.top_tweets[:10]:\n top_10_tweets['top_tweets'].append({'hashtag': \"#\"+tweet[0], 'count': tweet[1]})\n return top_10_tweets",
"def get_most_popular(self):\n\t\tpopular_rated = self.data_final[self.data_final['Rating'] == 10]\n\t\tpopular_jokes = popular_rated.groupby('JokeID').count().reset_index()\n\t\tpopular_jokes = popular_jokes[['JokeID','Rating']]\n\t\tpopular_jokes.columns = ['JokeID','Number_rated10']\n\t\ttop_joke = popular_jokes.sort_values(by=['Number_rated10'], ascending=False).head(1)\n\t\ttop_joke_val = top_joke['JokeID'].values[0]\n\t\tjokes_list = sorted(set(self.data_final['JokeID']))\n\t\tjoke_num = jokes_list.index(top_joke_val)\n\t\ttop_joke_desc = self.data_jokes[self.data_jokes['JokeID'] == top_joke_val].values[0][1]\n\n\t\treturn top_joke_desc, joke_num",
"def stars_keyword_list_detail(request, keyword_id):\n if request.method == 'GET':\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(keyword=keyword).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarTopEmployeeLists(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def top_ten(subreddit):\n url = 'https://www.reddit.com/r/{}/hot.json'.format(subreddit)\n response = get(url, headers=headers, params=query, allow_redirects=False)\n if response.status_code is not 200:\n return print('None')\n\n r = response.json()\n data = r.get('data')\n children = data['children']\n for item in children:\n print(item.get('data')['title'])",
"def top_artists_from_API(api_results):\r\n df = pd.DataFrame(api_results[\"items\"])\r\n cols = [\"name\",\"id\",\"genres\",\"popularity\",\"uri\"]\r\n return df[cols]",
"def topTags(db, topN=1000):\n c=db.cursor()\n c.execute(\"\"\"\n SELECT\n tag\n FROM tags\n GROUP BY tag\n ORDER BY COUNT(*) DESC\n LIMIT %d\n \"\"\" % topN)\n tops = [tag0[0] for tag0 in c.fetchall()]\n c.close()\n return tops",
"async def top_specs(self):\r\n players = await self.get_players()\r\n specs = []\r\n for player in players:\r\n specs.append(player['specId'])\r\n await self.bot.send_message('Top 3v3 Composition:')\r\n for key in self.specs:\r\n if specs.count(int(key)) > 0:\r\n await self.bot.send_message('{:s}: {:d} ({:.2f}%)'.format(\r\n self.specs[key],\r\n specs.count(int(key)),\r\n float(specs.count(int(key))/965.0)*100)\r\n )",
"def top_ten(subreddit):\n url = \"https://api.reddit.com/r/{}/hot?limit=10\".format(subreddit)\n response = requests.get(url, headers={\"User-Agent\": \"Python3\"})\n if str(response) != \"<Response [200]>\": # response.status_code != 200\n print(None)\n return\n response = response.json()\n child = response[\"data\"][\"children\"]\n for tittle in child:\n print(tittle[\"data\"][\"title\"])"
] | [
"0.60313934",
"0.60311204",
"0.5865894",
"0.57242155",
"0.56809145",
"0.5665485",
"0.56539917",
"0.56519616",
"0.56417096",
"0.5641546",
"0.5620258",
"0.5525258",
"0.5505354",
"0.55012465",
"0.54801244",
"0.5477889",
"0.54741967",
"0.5462364",
"0.5446692",
"0.54427594",
"0.5441486",
"0.5430479",
"0.542932",
"0.5419446",
"0.5405425",
"0.5395042",
"0.53819185",
"0.53742576",
"0.53685683",
"0.535471"
] | 0.6483316 | 0 |
Returns stars list grouped by keyword or result list if you use ?search= | def stars_keyword_list(request):
if request.method == 'GET':
if request.GET.get('search'):
search_term = request.GET.get('search')
star_list = Star.objects.filter(
Q(keyword__name__icontains=search_term)).values(
'keyword__pk',
'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars')
else:
star_list = Star.objects.all().values(
'keyword__pk',
'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars')
paginator = PageNumberPagination()
results = paginator.paginate_queryset(star_list, request)
serializer = StarKeywordList(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_keyword_list_detail(request, keyword_id):\n if request.method == 'GET':\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(keyword=keyword).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarTopEmployeeLists(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def search_plans(self, term, planlove=False):\n get = {'mysearch': term,\n 'planlove': int(bool(planlove))}\n response = self._get_page('search.php', get=get)\n soup = bs4.BeautifulSoup(response.text, 'html5lib')\n results = soup.find('ul', {'id': 'search_results'})\n if results is None:\n return [] # no results\n # results are grouped by the plan\n # on which the result was found\n user_groups = results.findAll(\n 'div', {'class': 'result_user_group'})\n resultlist = []\n for group in user_groups:\n user = group.find('a', {'class': 'planlove'}).contents[0]\n count = group.find('span').contents[0]\n # now extract snippets\n snippetlist = group.findAll('li')\n snippets = []\n for li in snippetlist:\n tag = li.find('span')\n tag.hidden = True # prevents BS from wrapping contents in\n # <span> upon conversion to unicode string\n snip = tag.decode(formatter=self._html_esc) # soup to unicode\n snip = self._canonicalize_plantext(snip)\n snippets.append(snip)\n resultlist.append((str(user), int(count), snippets))\n return resultlist",
"def search():\n\tif not request.vars.search_term:\n\t\tredirect(URL('index'))\n\tterm = request.vars.search_term\n\torigterm = term\n\tterm = term.replace(' ','|')\n\tartists = db.executesql(\"select distinct(m1.id), m1.art_name, m1.artist_type, m1.country, m1.b_year,m1.b_month,m1.b_date,m1.e_year,m1.e_month,m1.e_day,ts_rank(to_tsvector(m1.art_name),to_tsquery('\"+term+\"')) rank from art_info m1 where to_tsvector('english',m1.art_name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\talbums = db.executesql(\"select distinct(m1.id),m2.name,m1.art_id,m1.art_name,m1.rel_type,m1.count,ts_rank(to_tsvector(m2.name),to_tsquery('\"+term+\"')) rank from rel_art m1, release_name m2, release_group m3 where m3.name = m2.id and m3.id = m1.id and to_tsvector('english',m2.name) @@ to_tsquery('\"+term+\"') order by rank desc limit 20;\")\n\tsongs = db.executesql(\"select m2.id, m1.name, m3.art_id, m3.art_name, m3.rel_id, m3.rel_name from track_name m1, recording m2, rec_rel_art m3 where m1.id = m2.name and m2.id = m3.rec_id and lower(m1.name) LIKE lower('%%\"+origterm+\"%%') limit 20;\")\n\treturn dict(songs=songs, albums=albums, artists=artists)",
"def results():\n\n queryName = request.form['query']\n queryStars = request.form['stars']\n \n datasource = DataSource()\n listOfRestaurantNames = datasource.searchRestaurantsByNameAndMinimumStars(queryName, queryStars)\n restaurants = datasource.generateRestaurantObjects(listOfRestaurantNames[:15])\n\n return render_template('results.html', restaurants=restaurants)",
"def show_results():\n\n\tuser_query = request.args.get(\"search\")\n\tsearch_activity = SearchActivity(user_id=session.get('user_id'), search_query=user_query, datetime = datetime.now())\n\n\tdb.session.add(search_activity)\n\tdb.session.commit()\n\tsearch_items_not_filtered_list = user_search(user_query)\n\tfound_items = []\n\t\n\tfor item in search_items_not_filtered_list:\n\t\tTaxonomy_obj = db.session.query(Taxonomy).filter(Taxonomy.path.like(\"%Food%\")).filter_by(category_node=item[u'categoryNode']).all()\n\t\tfor obj in Taxonomy_obj:\n\t\t\tif item[u'categoryNode'] == obj.category_node:\t\n\t\t\t\tfound_items.append({\n\t\t\t\t\t\"name\": item.get(u'name', \"\"), \n\t\t\t\t\t\"item_id\": item.get(u'itemId', \"\"),\n\t\t\t\t\t\"category\": item.get(u'categoryPath', \"\"), \n\t\t\t\t\t\"sale_price\": format(item.get(u'salePrice', \"\"), \".2f\"), \n\t\t\t\t\t\"description\": unescape(item.get(u'shortDescription', \"\")), \n\t\t\t\t\t\"customer_rating_img\": item.get(u'customerRatingImage', \"\"),\n\t\t\t\t\t\"thumbnail_image\": item.get(u'thumbnailImage', \"\")\n\t\t\t\t\t})\n\t\t\t\t\n\treturn render_template(\"searchresults.html\", found_items=found_items)",
"def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a",
"def shortsearch(term,location):\n results = search(term,location)['listings']\n result = []\n for business in results:\n result.append([business['id'],business['name'],\"Yellow Pages\"])\n return result",
"def google_search(keyword):\n keyword = urllib2.quote(keyword)\n\n url = SEARCH_API % keyword\n\n results = simplejson.loads(urllib2.urlopen(url).read())\n return [ el for el in results['responseData']['results'] ]",
"def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results",
"def show_search_results():\n\n #Get values from search-box via AJAX\n current_keyword = request.form.get('search').lower()\n print \"**********************\"\n print current_keyword\n print \"**********************\"\n tweets = get_tweets_by_api(term=current_keyword)\n\n result = []\n\n for tweet in tweets:\n # Exclude retweets since they appear as duplicatses to endu ser\n if tweet.retweeted_status is None:\n # Convert tweet text from unicode to text\n tweet_id = tweet.id\n text = unicodedata.normalize('NFKD', tweet.text).encode('ascii', 'ignore')\n # Find URL in text and bind to url\n # url = re.search('((?:http|https)(?::\\\\/{2}[\\\\w]+)(?:[\\\\/|\\\\.]?)(?:[^\\\\s\"]*))', text)\n url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', text)\n # Remove URL from text\n text_wo_url = re.sub(r'^https?:\\/\\/.*[\\r\\n]*', '', text, flags=re.MULTILINE)\n # Handle / Name\n user = unicodedata.normalize('NFKD', tweet.user.screen_name).encode('ascii', 'ignore')\n # Count of favorites\n favorite_count = tweet.favorite_count\n #Return dictionary of hashtags with hashtag as key and number of occurances as value\n if tweet.hashtags:\n # Convert hashtags from unicode to string\n ht_list = []\n for hashtag in tweet.hashtags:\n ht_str = unicodedata.normalize('NFKD', hashtag.text).encode('ascii', 'ignore')\n ht_list.append(ht_str.lower())\n hashtags = Counter(ht_list)\n else:\n hashtags = tweet.hashtags\n # Convert tweet from unicode to datetime\n created_at = tweet.created_at\n # format created_at string to ISO 8610\n created_at_str = time.strftime('%Y-%m-%d %H:%M:%S', time.strptime(created_at, '%a %b %d %H:%M:%S +0000 %Y'))\n # create a moment from the string\n created_at = moment.date(created_at_str, 'YYYY-MM-DD HH:mm:ss')\n result.append({'created_at': created_at_str, 'tweet_text': text_wo_url, 'user': user,\n 'favorite_count': favorite_count, 'hashtags': hashtags,\n 'url': url, 'tweet_id': tweet_id})\n\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n print result\n print \"&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\"\n\n return jsonify(result=result) #, tweets",
"def search():\n\n post_data = request.get_json()\n\n # BONUS: Validate the arguments?\n if post_data:\n if not('username' in post_data):\n return jsonify({'Error': 'Invalid user is given'})\n\n if not('pattern' in post_data):\n return jsonify({'Error': 'Invalid pattern is given'})\n\n else:\n return jsonify({\"Error\": \"'application/json' was not set for mine-type and accept\"})\n \n username = post_data['username']\n pattern = post_data['pattern']\n\n result = {}\n \n # set the key\n dsKey = datastore.Key(username)\n\n # BONUS: Can we cache results in a datastore/db?\n # validate above\n if ds.contains(dsKey):\n gists = ds.get(dsKey)\n else:\n gists = gists_for_user(username)\n\n # store a key for the datastore\n ds.put(dsKey, gists)\n\n matches = []\n for gist in gists:\n # REQUIRED: Fetch each gist and check for the pattern\n txt_version = str(gist)\n search_gist = re.findall(pattern, txt_version, re.IGNORECASE)\n if search_gist:\n matches.append(gist)\n\n result['status'] = 'success'\n result['username'] = username\n result['pattern'] = pattern\n result['matches'] = matches\n\n return jsonify(result)",
"def getStars(queries, lcs_fold, query_path=None, progb_txt=\"Querying stars: \"):\n ORDINARY_QUERY_KEY = \"QUERY:\"\n\n stars = []\n for query in tqdm(queries, desc=progb_txt):\n query = query.strip()\n\n if query.startswith(ORDINARY_QUERY_KEY):\n stars += getStarsFromRemoteDb(\n query[len(ORDINARY_QUERY_KEY):], query_path)\n\n else:\n stars += getStarsFromFolder(query, lcs_fold)\n\n if not stars:\n raise QueryInputError(\"There no stars. Your query: %s\" % queries)\n\n return stars",
"def findRatings():\n if request.method == 'POST':\n connector = appEngine.connect()\n rating = int(request.form['rating'])\n joinTable = connector.execute(\"SELECT movie.movieName, actor.actorName, rating.rating FROM movie INNER JOIN rating ON movie.movieID=rating.movie_ID INNER JOIN movie_actor ON movie.movieID=movie_actor.movie_ID INNER JOIN actor ON movie_actor.actor_ID=actor.actorID WHERE rating.rating >= (?);\", (rating))\n result = {'data': [dict(zip(tuple(joinTable.keys()), i)) for i in joinTable.cursor]}\n return result\n return render_template('rating_search.html')",
"def cat_results():\n\n cats = petfinder.search_data_map()\n cats = list(cats.values())\n\n return render_template('search_results.html',\n cats=cats)",
"def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })",
"def search_json(request):\n query = request.GET.get('q')\n books = []\n authors = []\n sections = []\n if len(query) >= 3:\n for book in Book.objects.filter(title__icontains=query):\n books.append({\n 'title': book.title,\n 'url': book.get_absolute_url(),\n })\n for author in Author.objects.filter(name__icontains=query):\n authors.append({\n 'title': author.name,\n 'url': author.get_absolute_url(),\n })\n for section in Section.objects.filter(title__icontains=query):\n sections.append({\n 'title': section.title,\n 'url': section.get_absolute_url(),\n })\n\n return JsonResponse({\n 'results': {\n 'books': {\n 'name': 'Books',\n 'results': books,\n },\n 'authors': {\n 'name': 'Authors',\n 'results': authors,\n },\n 'sections': {\n 'name': 'Sections',\n 'results': sections,\n },\n }\n })",
"def search(self, term):",
"def get(self, request, format=None):\n user = request.user\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(request, user)\n keywords = request.GET.get('tags', '')\n result = {\n 'keywords': [],\n 'trademark': 0\n }\n\n if keywords != '':\n for word in keywords.split(','):\n payload = {\n 'apikey': settings.KEYWORDTOOL,\n 'keyword': '[{0}]'.format(word),\n 'output': 'json',\n 'country': 'us',\n 'language': 'en',\n 'metrics': 'true',\n 'metrics_location': '2840',\n 'metrics_language': 'en'\n }\n word = word.lower()\n\n word_result = Word.objects.filter(name=word).first()\n\n if word_result:\n data = word_result.results\n else:\n data = False\n try:\n data_keywordtool = requests.get(\n 'http://api.keywordtool.io/v2/search/suggestions/amazon', params=payload)\n if data_keywordtool.status_code == 200:\n results = data_keywordtool.json()\n created_word = Word.objects.create(\n name=word, results=results)\n data = created_word.results\n except Exception as e:\n pass\n\n list_keywords = []\n if data:\n for item in data['results']:\n for sub_item in data['results'][item]:\n if 'volume' in sub_item and 'string' in sub_item:\n if sub_item['volume'] > 300 and not sub_item['string'] in list_keywords:\n list_keywords.append(sub_item['string'])\n result['keywords'].append({'name': sub_item['string'].replace(\n '[', '').replace(']', ''), 'volume': sub_item['volume'], 'trademark': False})\n\n return Response(result)",
"def search(self, query, maxhits=100):",
"def search(term: str):\n api_key = current_app.config[\"GIPHY_API_KEY\"]\n search_result = giphy_api_bridge.search_gif(api_key, term, 5)\n return json.dumps(search_result, default=lambda o: o.__dict__, indent=2)",
"def dec_search(\r\n self,\r\n r_star: List[int],\r\n ) -> List[int]:\r\n # Decrypt r_star and sort it according to timestamp t\r\n decrypted_updates: List[Update] = list(map(lambda e: self._decrypt_update(e), r_star))\r\n decrypted_updates.sort(key=lambda x: x[0])\r\n\r\n keyword_documents_dict: Dict[str, List[int]] = {}\r\n for update in decrypted_updates:\r\n # Unpack entry (see utils.Update)\r\n (t, op, ind, w) = update\r\n\r\n if w not in keyword_documents_dict:\r\n keyword_documents_dict[w] = []\r\n\r\n documents_list: List[int] = keyword_documents_dict[w]\r\n if op == Op.ADD and ind not in documents_list:\r\n # Add ind to the results for this keyword\r\n documents_list.append(ind)\r\n keyword_documents_dict[w] = documents_list\r\n elif op == Op.DEL and ind in documents_list:\r\n # Remove ind from the results for this keyword\r\n documents_list.remove(ind)\r\n keyword_documents_dict[w] = documents_list\r\n\r\n # Combine the ind values for all keywords and remove duplicates\r\n results = [ind for sub_results in keyword_documents_dict.values() for ind in sub_results]\r\n return list(set(results))",
"def listSearches(self, authenticationToken):\r\n pass",
"def search():\n query = request.form.get(\"query\")\n category = list(mongo.db.tips.find({\"$text\": {\"$search\": query}}))\n return render_template(\"tips.html\", category=category)",
"def get_search_results(query):\n global index, doc_names\n result = ranked = list()\n doc_list = set(doc_names.keys())\n flag = 0\n for word in query:\n if word in index:\n flag = 1\n doc_list = doc_list.intersection(index[word].keys())\n else:\n return []\n\n if flag != 0:\n for doc_id in doc_list:\n positions = list()\n for word in query:\n positions.append(index[word][doc_id])\n doc_result = [(doc_id, x) for x in position_merge(positions)]\n result += doc_result\n ranked = sorted(result, key=lambda x: (x[0], x[1]))\n return ranked",
"def search_recipes(\n *,\n keyword: Optional[str] = Query(None, min_length=3, example=\"chicken\"),\n max_results: Optional[int] = 10,\n) -> dict:\n if not keyword:\n # we use Python list slicing to limit results\n # based on the max_results query parameter\n return {\"results\": RECIPES[:max_results]}\n\n results = filter(lambda recipe: keyword.lower() in recipe[\"label\"].lower(), RECIPES)\n return {\"results\": list(results)[:max_results]}",
"def search(self, query):",
"def search_helper():\n\n if request.args.get(\"movie_name\"):\n movie_name = request.args.get(\"movie_name\")\n movie = Movie.query.filter(Movie.name == movie_name).one()\n session['movie'] = movie.name\n\n else:\n print 'RANDOMLY PICKING A MOVIE'\n movie = random.choice(Movie.query.all())\n\n color_list = get_colors_from_movie(movie)\n print 'Originally got colors %s from Movie %s' % (sorted(color_list), movie.name)\n\n result_dict = etsy.get_listing_items(color_list)\n\n print 'Colors returned %s' % (sorted(result_dict['colors']))\n \n best_dict = etsy.get_image_urls(result_dict, movie.id)\n \n (top_listing, bottom_listing, accessory_listing, dress_listing,\n shoe_listing, bag_listing) = etsy.get_listing_urls(best_dict)\n\n print 'returning ' , result_dict['colors']\n return (result_dict['colors'], movie, best_dict, top_listing, bottom_listing, accessory_listing, dress_listing,\n shoe_listing, bag_listing)",
"def search(self, query, mediatype=None):\n items = utils.listItems(self, '/search?query=%s' % quote(query))\n if mediatype:\n return [item for item in items if item.type == mediatype]\n return items"
] | [
"0.685482",
"0.6766883",
"0.6648922",
"0.6097524",
"0.5980909",
"0.5949772",
"0.57064515",
"0.5693047",
"0.5673603",
"0.5615363",
"0.55775267",
"0.5569931",
"0.5540077",
"0.5528783",
"0.5505158",
"0.5449459",
"0.54458284",
"0.54412043",
"0.54386306",
"0.5397043",
"0.5381772",
"0.5371085",
"0.53706187",
"0.5342045",
"0.53415805",
"0.5327198",
"0.53239006",
"0.5317234",
"0.5305601",
"0.5304488"
] | 0.78138244 | 0 |
Returns stars list detail for keyword id. | def stars_keyword_list_detail(request, keyword_id):
if request.method == 'GET':
keyword = get_object_or_404(Keyword, pk=keyword_id)
stars = Star.objects.filter(keyword=keyword).values(
'to_user__pk',
'to_user__username',
'to_user__first_name',
'to_user__last_name',
'to_user__level',
'to_user__avatar').annotate(num_stars=Count('keyword')).order_by('-num_stars')
paginator = PageNumberPagination()
results = paginator.paginate_queryset(stars, request)
serializer = StarTopEmployeeLists(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stars_keyword_list(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n search_term = request.GET.get('search')\n star_list = Star.objects.filter(\n Q(keyword__name__icontains=search_term)).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n else:\n star_list = Star.objects.all().values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(star_list, request)\n serializer = StarKeywordList(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def listofstars():\n a = []\n for star in Star.select():\n a.append(star.name)\n return a",
"def getSpecific(self, keyword, key):",
"def DisplayIdea(keyword):\n logger.info('Found Keyword with text [%s]' % keyword['KEYWORD_TEXT'])\n logger.info(' Keyword Idea search volume: %s' % keyword['SEARCH_VOLUME'])\n logger.info(' Keyword Idea average CPC: %s' % keyword['AVERAGE_CPC'])\n logger.info(' Keyword Idea categories: %s' % keyword['CATEGORY_PRODUCTS_AND_SERVICES'])",
"def list(show=0):\n global stars_\n if len(stars_) == 0:\n print \"No stars have been selected, go use 'stars()'\"\n return\n if show == 0:\n i=0\n for s in stars_:\n i=i+1\n print i,s[0],s[1],s[2],s[3]\n else:\n if show > 0 and show <= len(stars_):\n s = stars_[show-1]\n print show,s[0],s[1],s[2],s[3]\n else:\n print \"Bad star index\"",
"def star_rating(table, record_id, splitstars=False):\n import uuid\n id = uuid.uuid4()\n row=db(db.plugin_wiki_rating.tablename==table)(db.plugin_wiki_rating.record_id==record_id).select().first()\n rating = row.rating if row else 0\n callback = URL('plugin_wiki', 'star_rate', args = [table,record_id])\n incr = 0.5 if splitstars else 1\n return TAG[''](DIV(_id='star'+str(id),_class='rating'),\n SCRIPT(\"jQuery(document).ready(function(){jQuery('%(uid)s').rating('%(callback)s',{increment:%(incr)s, maxvalue:5, curvalue:%(rating)s});});\" % dict(uid='#star'+str(id), callback=callback,incr=incr, rating=rating)))",
"def get_keywords():\n \n #get all movies from db\n movies_df = movie_helper.get_movies_df() \n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n \n #if imbdid exists use it to look up the API\n if (row['imdbId']):\n \n #get list of keywords and created delimted string\n movie = ia.get_movie(str(row['imdbId']), info='keywords')\n try:\n keywords = \",\".join(movie['keywords'])\n except:\n keywords = None\n \n #update the movies table in the db\n database_helper.update_data(\"movies\", update_params = {\"keywords\" : keywords}, select_params = {\"movieId\" : row[\"movieId\"]})\n pbar.update(1)",
"def values(self):\n if '%' in self.starid:\n query = \"\"\"SELECT * from ngc2236 where starid like '%s'\"\"\" % self.starid\n else:\n query = \"\"\"SELECT * from ngc2236 where starid = '%s'\"\"\" % self.starid\n result = self.wifsip.query(query)\n values = [r for r in result[0]]\n if '%' in self.starid:\n self.starid=values[0]\n return values",
"def get_song(_id):\r\n return [Song.song_json(Song.query.filter_by(id=_id).first())]\r\n # Song.song_json() coverts our output to the json format defined earlier\r\n # the filter_by method filters the query by the id\r\n # since our id is unique we will only get one result\r\n # the .first() method will get that first value returned\r",
"def mark_star(self, star_id):\n\n ra, dec = self.db.get_star(star_id)[2:4]\n kwargs = dict(layer = self.MARKERS_LAYER,\n edgecolor = '#24ff29',\n s = self.MARK_RADIUS)\n self.aplpy_plot.show_markers(ra, dec, **kwargs)\n self.navig.home()\n\n self.selected_star_id = star_id\n self.goto_button.set_sensitive(True)",
"def get_keyword(self, keywordId, **kwargs) -> ApiResponse:\n return self._request(fill_query_params(kwargs.pop('path'), keywordId), params=kwargs)",
"def sense_id(self, id:Text):\n words=Delegator().by_sense_id(id=id)\n pprint(words)",
"def get_rating(self):\n self.rating = imdb.get_title_ratings(self.ID)['rating']",
"def get_k13_member_stars(mwscid, mwscname, outpath, overwrite=1, p_0=61):\n\n if os.path.exists(outpath) and not overwrite:\n print('found {} and not overwrite. return'.format(outpath))\n return\n if os.path.exists(outpath) and overwrite:\n os.remove(outpath)\n\n # if buggy, look at the links at e.g.,\n # http://cdsarc.u-strasbg.fr/viz-bin/getCatFile_Redirect/?-plus=-%2b&J/A%2bA/558/A53/stars/2m_0763_Platais_5.dat\n mwsc_urlname = mwscname.replace('_','%5F')\n url = (\n \"http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/fits?-plus=-+&J/A%2BA/558/A53/stars/2m%5F{:s}%5F{:s}.dat\".\n format(mwscid, mwsc_urlname)\n )\n\n t = Table.read(url)\n\n c = SkyCoord(t['RAhour'], t['DEdeg'], unit=(u.hourangle, u.degree))\n\n t['RAdeg'] = c.ra.value\n\n sel = (\n (t['Ps'] == 1)\n &\n (t['Pkin'] > p_0)\n &\n (t['PJKs'] > p_0)\n &\n (t['PJH'] > p_0)\n )\n\n print('Got {} stars in {} nbhd from K+13 query'.format(len(t), mwscname))\n t = t[sel]\n print('... and {} member stars'.format(len(t)))\n\n df = t.to_pandas()\n\n return df",
"def snippet_detail(request, snippet_id):\n snippet = get_object_or_404(Snippet, pk=snippet_id)\n return render_to_response('cab/snippet_detail.html',\n { 'object': snippet,\n 'num_ratings': snippet.rating_set.count(),\n 'rating_score': Rating.objects.score_for_snippet(snippet.id) },\n context_instance=RequestContext(request))",
"def show_place(id):\n\n # theplace = query_db('''select places.name, places.address, places.city, places.zipcode, places.place_id AS pid, AVG(rating) AS avgrating from places, reviews WHERE places.place_id=? AND reviews.place_id=places.place_id''', [id], one=True)\n theplace = query_db('''select name, address, city, zipcode, place_id from places WHERE place_id=?''', [id], one=True)\n reviews = query_db('''select * from reviews WHERE place_id=?''', [id])\n avg = query_db('''select AVG(rating) from reviews WHERE place_id=?''', [id], one=True)[0]\n if avg is None:\n avg = 0\n print theplace\n\n return render_template('place.html', place=theplace, reviews=reviews, rating=avg)",
"def get_keywords_for_movie(url):\n pass",
"def get_song_by_id(self, song_id: int):\n #print(\"song_id: \",song_id)\n search_object = {'query': {'term': {\"_id\": song_id}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1, FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index=SONGS_INDEXNAME, body=search_object)\n #print(\"response: \",response)\n dct = {\"song_name\":response[\"hits\"][\"hits\"][0]['_source'][FIELD_SONGNAME],\n \"total_hashes\":response[\"hits\"][\"hits\"][0]['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":response[\"hits\"][\"hits\"][0]['_source'][FIELD_FILE_SHA1]}\n #print(\"dct: \",dct)\n return dct",
"def star(request):\n account = models.Account.current_user_account\n account.user_has_selected_nickname() # This will preserve account.fresh.\n if account.stars is None:\n account.stars = []\n keyid = request.issue.key.id()\n if keyid not in account.stars:\n account.stars.append(keyid)\n account.put()\n return respond(request, 'issue_star.html', {'issue': request.issue})",
"def get_mean_movie_rating(self, movie_id):\n return self.mean_movie_rating[self.mean_movie_rating['movie_id'] == movie_id]['rating'].item()",
"def snippetDetail(requeset, pk, format = None):",
"def get_keyword(self, collection_id, name):\n sql = \"\"\"SELECT keyword.name, keyword.args, keyword.doc\n FROM keyword_table as keyword\n WHERE keyword.collection_id == ?\n AND keyword.name like ?\n \"\"\"\n cursor = self._execute(sql, (collection_id,name))\n # We're going to assume no library has duplicate keywords\n # While that in theory _could_ happen, it never _should_,\n # and you get what you deserve if it does.\n row = cursor.fetchone()\n if row is not None:\n return {\"name\": row[0],\n \"args\": json.loads(row[1]),\n \"doc\": row[2],\n \"collection_id\": collection_id\n }\n return {}",
"def get(self, kf_id):\n st = Study.query.get(kf_id)\n if st is None:\n abort(404, 'could not find {} `{}`'\n .format('study', kf_id))\n return StudySchema().jsonify(st)",
"def get_ratings(self):\n return self.ratings",
"def get_ratings(self):\n return self.ratings",
"def find_some_item_from_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n print('{:10s}{:10s}{:10s}'.format('일련번호', '평균', 'Grade'))\n print(target_list[['average', 'grade']].to_string(header=False, col_space=10))",
"def single_prod(request, pk):\n product = get_object_or_404(Product, pk=pk)\n stars = Product.objects.filter(id=pk).annotate(\n avg_review=Avg('productreview__rating')\n )\n context = {\n 'product': product,\n 'stars': stars,\n }\n return render(request, 'aproduct.html', context)",
"def get_liked_songs(self, station_id):\n\n feedbacks = self.get_station_feedbacks(station_id)\n songs = []\n for feedback in feedbacks:\n songs.append({\n \"name\": feedback[\"songTitle\"],\n \"album\": feedback[\"albumTitle\"],\n \"artist\": feedback[\"artistName\"]\n })\n return songs"
] | [
"0.65570277",
"0.6077957",
"0.5929901",
"0.52924275",
"0.5283511",
"0.5109503",
"0.50915295",
"0.5078839",
"0.5013255",
"0.50105697",
"0.50094837",
"0.5008347",
"0.5002635",
"0.49825704",
"0.4980135",
"0.49639466",
"0.49519765",
"0.48904246",
"0.48874703",
"0.48666564",
"0.4782236",
"0.47807088",
"0.4773573",
"0.47415987",
"0.47352698",
"0.4722441",
"0.4722441",
"0.46968946",
"0.46834746",
"0.46711615"
] | 0.7541413 | 0 |
This endpoint saves a badge assignment to an employee | def give_badge_to(request, badge_id, to_employee_id, from_employee_id):
if to_employee_id == from_employee_id:
content = {'detail': config.USER_UNABLE_TO_GIVE_BADGES_ITSELF}
return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
elif request.method == 'POST':
badge = get_object_or_404(Badge, pk=badge_id)
to_employee = get_object_or_404(Employee, pk=to_employee_id)
from_employee = get_object_or_404(Employee, pk=from_employee_id)
try:
employee_badge = EmployeeBadge.objects.create(to_user=to_employee, assigned_by=from_employee, badge=badge)
except Exception as e:
print(e)
content = {'detail': config.BADGE_UNIQUE_CONSTRAINT_FAILED}
return Response(content, status=status.HTTP_406_NOT_ACCEPTABLE)
serializer = EmployeeBadgeSerializer(employee_badge)
return Response(serializer.data, status=status.HTTP_201_CREATED) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def post(self):\n data = EmployeeRegister.parser.parse_args()\n new_employee_id = str(uuid.uuid4())\n\n while EmployeeModel.find_by_id(new_employee_id):\n # if this id is already in use\n new_employee_id = str(uuid.uuid4())\n\n employee = EmployeeModel(**data, employee_id=new_employee_id)\n employee.save_to_db()\n\n return {\"message\": \"Employee successfully added to the system\"}, 201 # 201 - Created",
"def post(self):\n return self.get_request_handler(request.headers).create_new_employment_status(request)",
"def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)",
"def post(self):\n request, error_message = flask_request_response.message_request(\n _api_intput_pb2.AssignTask, ASSIGN_TASK_API, POST_REQUEST\n )\n if error_message is not None:\n return flask_request_response.error_response(\n [error_message[\"err_message\"]], ASSIGN_TASK_API, POST_REQUEST\n )\n try:\n app.logger.error(\"In API calling assign_task_query_response\")\n assign_task_response = assign_task_query_response(\n request.assigned_by, request.assigned_to_list,\n request.chapter_key\n )\n app.logger.info(assign_task_response)\n return flask_request_response.json_response(\n assign_task_response,\n ASSIGN_TASK_API, POST_REQUEST, 200\n )\n except Exception as err:\n return flask_request_response.error_response(\n [str(err)], ASSIGN_TASK_API, POST_REQUEST\n )",
"def add_badge(name, description, tier, image):\n\n badge = Badge.objects.get_or_create(name=name)[0]\n badge.description = description\n badge.tier = tier\n badge.icon = image\n badge.save()\n return badge",
"def update(self, request, pk):\n serializer = data_serializers.UpdateEmployeeRequestSerializer(data=request.data)\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n try:\n new_employee_entity = self.controller.update_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee_entity)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.ObjectEntityDoesNotExist\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def post_another_try(self, request): # SECOND EXAMPLE\n model = self.create_booking(request)\n client.historio().push(model, get_current_user_id(), source='assignment', source_id=model.id) # Magic happens\n # Magic is done",
"def _assign(request, obj, person_id):\n try:\n if request.method == \"POST\":\n person_id = request.POST.get('person_1', None)\n\n if person_id is None:\n obj.assigned_to = None\n else:\n person = Person.objects.get(pk=person_id)\n obj.assigned_to = person\n\n obj.save()\n\n except Person.DoesNotExist:\n raise Http404(\"No person found matching the query.\")",
"def put(self, employee_id):\n\n employee = EmployeeModel.find_by_id(employee_id)\n if employee is None:\n return {'message': \"There is no employee with this ID, or your access_token is invalid.\"}, 404\n else:\n \"\"\" check if employee entered the building today\"\"\"\n if WorkdayModel.find_latest_workday(employee.id):\n \"\"\"checking if employee already entered building today\"\"\"\n last_workday = WorkdayModel.find_latest_workday(employee.id)\n\n if last_workday.time_in.day == datetime.today().day:\n last_workday.time_out = datetime.today()\n # calculate hours_worked| .time converts to H:M\n duration = last_workday.time_out - last_workday.time_in\n # duration is a datetime.timedelta\n duration = (datetime.min + duration).time()\n last_workday.hours_worked = duration\n try:\n last_workday.save_to_db()\n except:\n return {'message': 'An error occurred updating worked hours'}, 500\n\n return last_workday.json()\n\n return {'message': 'First use of card, or employee did not start work today'}, 200",
"def send_badge(self, badge):\n pass",
"def enterprise_save(request):\r\n action = tool.get_param_by_request(request.POST, 'action', 'add', str)\r\n _id = tool.get_param_by_request(request.POST, 'id', 0, int)\r\n career_id = tool.get_param_by_request(request.POST, 'career_id', 0, int)\r\n img_title = tool.get_param_by_request(request.POST, 'img_title', '', str)\r\n img_url = request.FILES.get('img_url', \"\")\r\n old_image_path = tool.get_param_by_request(request.POST, 'old_image_path', \"\", str)\r\n\r\n image_path = old_image_path # 如果更新数据时,未更改图片,image_url为空,设置图片的路径为老路径\r\n if img_url:\r\n image_path = tool.upload(img_url, settings.UPLOAD_IMG_PATH)\r\n\r\n enterprise = APIResult()\r\n if action == \"add\":\r\n enterprise = api_enterprise.insert_career_page_enterprise(image_path, img_title, career_id)\r\n elif action == \"edit\":\r\n enterprise = api_enterprise.update_career_page_enterprise(_id, image_path, img_title, career_id)\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n\r\n return HttpResponseRedirect('/course/careerIntroduce/edit/?action=show&id='+str(career_id))",
"def post(self, request):\n data = request.data\n skill_data = data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n Employee = EmployeeDetail.objects.create(department=department, manager=manager, **data)\n Employee.save()\n for skill in skill_data:\n skill_add, create = Skill.objects.get_or_create(name=skill)\n Employee.skills.add(skill_add)\n return Response(\n data=request.data\n )",
"def add_employee(self, empl):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee values(default,%s,%s,%s,%s,%s,%s,%s,%s)',\n (empl.name, empl.email, empl.office, empl.research_group, empl.title, empl.internOrExtern,\n empl.active, empl.promotor))\n cursor.execute('SELECT LASTVAL()')\n eid = cursor.fetchone()[0]\n empl.id = eid\n # get id and return updated object\n self.dbconnect.commit()\n except(Exception, self.dbconnect.get_error()) as error:\n self.dbconnect.rollback()\n raise Exception('\\nUnable to save Employee!\\n(%s)' % (error))",
"def _save_grade(self):\r\n student = self._student('POST', key='grader_id')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n # Update the number of essays the student has graded\r\n student.grade_peer_essay()\r\n return self._success_response({})",
"def save(self, *args, **kwargs):\n\n self.grade = EmployeeGrade.get_grade(\n self.timecard.reporting_period.end_date,\n self.timecard.user\n )\n\n self.submitted = self.timecard.submitted\n\n p_pl = self.project.profit_loss_account # Project PL info.\n u_pl = self.timecard.user.user_data.profit_loss_account # User PL info.\n rp = self.timecard.reporting_period # TimecardObject reporting period.\n\n if p_pl and \\\n p_pl.account_type == 'Revenue' and \\\n p_pl.as_start_date < rp.end_date and \\\n p_pl.as_end_date > rp.end_date:\n self.revenue_profit_loss_account = p_pl\n else:\n self.revenue_profit_loss_account = None\n\n if u_pl and \\\n u_pl.account_type == 'Expense' and \\\n u_pl.as_start_date < rp.end_date and \\\n u_pl.as_end_date > rp.end_date:\n\n self.expense_profit_loss_account = u_pl\n else:\n self.expense_profit_loss_account = None\n\n\n super(TimecardObject, self).save(*args, **kwargs)",
"def add_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('INSERT INTO employee(id, name, email, office, extra_info, picture_location, research_group, '\n 'title, is_external, is_admin, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);',\n (obj.e_id, obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active))\n\n self.dbconnect.commit()\n return obj\n except:\n self.dbconnect.rollback()\n raise",
"def post(\n self,\n email,\n company_name,\n location,\n job_profile,\n salary,\n username,\n password,\n security_question,\n security_answer,\n notes,\n date_applied,\n status,\n):",
"def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def save_agea(self, data, suffix=''):\n self.title = data.get('title', self.title)\n self.question = data.get('question', self.question)\n self.raw_question = data.get('raw_question',self.raw_question)\n self.raw_solution= data.get('raw_solution',self.raw_solution)\n self.max_attempts = data.get('max_attempts', self.max_attempts)\n # Validate points before saving\n points = data.get('points', self.points)\n # Check that we are an int\n try:\n points = int(points)\n except ValueError:\n raise JsonHandlerError(400, '\"Score to be graded out of\" must be an integer')\n\n # Check that we are positive\n if points < 0:\n raise JsonHandlerError(400, '\"Score to be graded out of\" must be a positive integer')\n self.points = points\n\n # Validate weight before saving\n \n weight = data.get('weight', self.weight)\n # Check that weight is a float.\n if weight:\n try:\n weight = float(weight)\n except ValueError:\n raise JsonHandlerError(400, 'Weight must be a decimal number')\n # Check that we are positive\n if weight < 0:\n raise JsonHandlerError(\n 400, 'Weight must be a positive decimal number'\n )\n self.weight = weight \n submission = self.get_question()\n if submission:\n uploaded_submission = submission.get(\"question\").get(\"filename\", None)\n if uploaded_submission:\n question = self._question_storage_path(self.raw_question['sha1'], self.raw_question['filename'])\n question = os.path.join(IMAGEDIFF_ROOT, question)\n actual=total_marks(question)\n if actual < points:\n raise JsonHandlerError(400, '\"Score to be graded out of\" should be less than equal to the maximum attainable score for the question paper you uploaded')\n \n self.save()\n log.info(self)\n \n #self.weight = data.get('weight', self.max_score())",
"def put(self, id):\n empleadoactualizar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoactualizar:\n reg = api.payload\n empleadoactualizar.employee_id = reg['employee_id']\n empleadoactualizar.name = reg['name']\n empleadoactualizar.age = reg['age']\n empleadoactualizar.position = reg['position']\n empleadoactualizar.fechaingreso = datetime.date.fromisoformat(reg['fechaingreso'])\n db.session.merge(empleadoactualizar)\n db.session.commit()\n return 201\n api.abort(404)",
"def post(self, request):\n call_id = request.POST['call-id']\n engineer_id = request.POST['engineer-id']\n next = request.POST['next']\n if not next:\n next = reverse('calls:calls_list_view')\n if not call_id or not engineer_id:\n messages.add_message(request, messages.INFO, 'Failed - Invalid details')\n return redirect(next)\n CallAllocation.objects.filter(\n call=get_object_or_404(CallRegister, pk=call_id)\n ).filter(status='P').update(status='E')\n CallAllocation.objects.create(\n call = get_object_or_404(CallRegister, pk=call_id),\n engineer_assigned = get_object_or_404(Engineer, pk=engineer_id),\n added_by = self.request.user\n )\n messages.add_message(request, messages.INFO, 'Success - Call allocated successfully!')\n return redirect(next)",
"def save_initiative(self, request, pk=None):\n order = request.data.get('order')\n\n for member in order:\n if member['is_monster']:\n encounter_monster = EncounterMonster.objects.get(pk=member['id'])\n encounter_monster.initiative = member['initiative']\n encounter_monster.current_hp = encounter_monster.monster.hit_points\n encounter_monster.save()\n else:\n encounter_player = EncounterPlayer.objects.get(pk=member['id'])\n encounter_player.initiative = member['initiative']\n encounter_player.save()\n\n encounter = Encounter.objects.get(pk=pk)\n encounter.round = 1\n encounter.save()\n serializer = EncounterDetailSerializer(encounter)\n return Response(serializer.data)",
"def post(self):\n employee = Employee(**self.data)\n _dict = Employee.encode(employee)\n\n _id = DatabaseManager.insert(Collection.EMPLOYEES, _dict)\n employee_dict = DatabaseManager.find_document_by_id(\n Collection.EMPLOYEES, _id, True\n )\n return employee_dict",
"def update_employee(self, obj):\n cursor = self.dbconnect.get_cursor()\n try:\n cursor.execute('UPDATE employee '\n 'SET name = %s, email = %s, office = %s, extra_info = %s, picture_location = %s, '\n 'research_group = %s, title = %s, is_external = %s, is_admin = %s, is_active = %s '\n 'WHERE id = %s;',\n (obj.name, obj.email, obj.office, obj.extra_info, obj.picture_location, obj.research_group,\n obj.title, obj.is_external, obj.is_admin, obj.is_active, obj.e_id))\n self.dbconnect.commit()\n except:\n self.dbconnect.rollback()\n raise",
"def event_register_collaborator(request, event_id, employee_id):\n if request.method == 'PUT':\n event = get_object_or_404(Event, pk=event_id, is_registration_open=True)\n collaborator = get_object_or_404(Employee, pk=employee_id)\n event.collaborators.add(collaborator)\n event.save()\n serializer = EventSimpleRegistrationSerializer(event)\n return Response(serializer.data, status=status.HTTP_202_ACCEPTED)",
"def put(self, uuid: str):\n try:\n employee = self.service.update_employee(\n self.schema, uuid, request.json\n )\n except ValidationError as error:\n return error.messages, 400\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200",
"def update(self, request, pk=None):\n\n missing_keys = self._get_missing_keys()\n if len(missing_keys) > 0:\n return Response(\n {'message':\n f'Request body is missing the following required properties: {\", \".join(missing_keys)}'\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n user = User.objects.get(id=request.auth.user.id)\n\n expense = Expenses.objects.get(pk=pk)\n expense.date_purchased = request.data[\"date_purchased\"]\n expense.cost = request.data[\"cost\"]\n expense.image = request.data[\"image\"]\n expense.user = user\n\n supply_type = Supply_Type.objects.get(\n pk=request.data[\"supply_type_id\"])\n expense.supply_type = supply_type\n\n expense.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)",
"def updateBadgeInfo(badgeId, name, description, badgeEnabled):\n parameters = {\n\t \"name\": name,\n\t \"description\": description,\n\t \"enabled\": badgeEnabled\n }\n url = f\"https://badges.roblox.com/v1/badges/{badgeId}\"\n r = requests.patch(url, params=parameters, cookies=cookie)\n j = json.loads(r.text)\n return j",
"def create_employee(self,personal_identity):\r\n new_emp = Employee(*personal_identity)\r\n registration_str = new_emp.get_registration_str()\r\n\r\n return_value = self.save_object_to_DB(\"employee\",registration_str)\r\n return return_value"
] | [
"0.5923621",
"0.5880477",
"0.56466275",
"0.56447726",
"0.5616239",
"0.55937636",
"0.5498253",
"0.5423412",
"0.5403294",
"0.5400293",
"0.53704214",
"0.5318688",
"0.5316724",
"0.5301786",
"0.5289739",
"0.52713543",
"0.5261288",
"0.5256878",
"0.52554864",
"0.5237651",
"0.5233833",
"0.521807",
"0.52116764",
"0.517655",
"0.5167315",
"0.5161469",
"0.5141716",
"0.5132661",
"0.5130535",
"0.510204"
] | 0.72410214 | 0 |
Returns the list of badges assigned to an employee | def badges_employee_list(request, employee_id):
if request.method == 'GET':
employee = get_object_or_404(Employee, pk=employee_id)
        employee_badges = EmployeeBadge.objects.filter(to_user=employee)
        paginator = PageNumberPagination()
        results = paginator.paginate_queryset(employee_badges, request)
serializer = EmployeeBadgeSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def employee_list_group_by_badges_detail(request, badge_id):\n if request.method == 'GET':\n badge = get_object_or_404(Badge, pk=badge_id)\n employee_list = EmployeeBadge.objects.filter(badge=badge).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_list, request)\n serializer = EmployeeGroupedListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def employee_list_group_by_badges(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n search_term = request.GET.get('search')\n badge_list = EmployeeBadge.objects.filter(\n Q(badge__name__icontains=search_term)).values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n else:\n badge_list = EmployeeBadge.objects.all().values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(badge_list, request)\n serializer = EmployeeBadgeListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]",
"def get_badges(self) -> List:\n LOGGER.info('Get all badges')\n\n with self.client.create_session() as session:\n badges = session.query(RDSBadge).all()\n\n results = []\n for badge in badges:\n results.append(Badge(badge_name=badge.rk,\n category=badge.category))\n\n return results",
"def get_employees(self):\n return self.employees",
"def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))",
"def getEmployees(self):\n return self.employees",
"def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list",
"def get(self) -> Iterable[Union[Mapping, int, None]]:\n badges = self.client.get_badges()\n return marshal({'badges': badges}, badges_fields), HTTPStatus.OK",
"def employees(self) -> object:\n return self._employees",
"def getRobloxBadges(userId):\n url = f\"https://accountinformation.roblox.com/v1/users/{userId}/roblox-badges\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)",
"def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees",
"def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))",
"def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return",
"def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado",
"def test_should_give_a_list_for_badges(self):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/badge_retrieval.yaml'):\n self.assertTrue(isinstance(badgr.badges, list))",
"def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp",
"def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list",
"def generateEmployees(self):\r\n\r\n # Name\r\n maleNames = ['Perry Lovan', 'Horacio Arvidson', 'Gale Skipworth', 'Joshua Lodge', 'Noble Shutter', 'Kristopher Talor', 'Jarod Harrop', 'Joan Henrichs', 'Wilber Vitiello', 'Clayton Brannum', 'Joel Sennett', 'Wiley Maffei', 'Clemente Flore', 'Cliff Saari', 'Miquel Plamondon', 'Erwin Broadus', 'Elvin Defibaugh', 'Ramon Vaquera', 'Roberto Koval', 'Micah Sumter', 'Wyatt Cambareri', 'Jamal Delarosa', 'Franklyn Hayles', 'Riley Haslett', 'Robt Fincher', 'Abraham Denzer', 'Darius Jude', 'Phillip Sunderman', 'August Kindel', 'Jospeh Mawson', 'Damion Postma', 'Gregorio Pasco', 'Rosendo Downing', 'Chance Plascencia', 'Jewell Pankratz', 'Jerrell Tarrance', 'Michal Bliss', 'Josue Larocque', 'Aaron Harpster', 'Zack Hildebrant', 'Frank Souders', 'Lindsay Bechard', 'Agustin Marks', 'Mathew Fredericksen', 'Ivan Hanline', 'Michael Otto', 'Max Oberlander', 'Ricky Mckellar', 'Bernard Friedt', 'King Lorentzen']\r\n femaleNames = ['Lorretta Vansickle', 'Loura Steimle', 'Neomi Fritz', 'Vernie Vanderveen', 'Dede Poehler', 'Margarete Espinoza', 'Leda Leonardo', 'Fae Strand', 'Nichol Winford', 'Danika Ridgeway', 'Elvira Balentine', 'Sharell Xie', 'Sheree Booker', 'Emely Conine', 'Justina Kleve', 'Pia Maxton', 'Sophia Lark', 'Nilsa Albee', 'Felipa Seman', 'Jeraldine Watkins', 'Susann Sowards', 'Asha Irion', 'Shay Koran', 'Rosio Jahn', 'Rachal Slaven', 'Beryl Byron', 'Jona Lira', 'Margert Strite', 'Talia Beauregard', 'Jacqueline Vella', 'Rolande Mccready', 'Margret Hickerson', 'Precious Confer', 'Evita Nicolai', 'Fredda Groner', 'Laquanda Bracken', 'Alana Saddler', 'Melania Harring', 'Shae Everette', 'Marlyn Mcfalls', 'Madeline Nicols', 'Fonda Webster', 'Fumiko Steffy', 'Virginia Sprinkle', 'Lula Frisch', 'Mari Mulherin', 'Alecia Remillard', 'Jeanna Halderman', 'Ocie Waldrep', 'Theresa Knouse']\r\n\r\n for i in range(self.num_of_employees):\r\n\r\n # Clock in an hour before opening, 6 hours after, or 12 hours after\r\n clockIn = random.choice([7, 13, 19])\r\n\r\n # Clock out after 5 hours, 10 hours, or 15 hours\r\n clockOut = random.choice([13, 19, 23])\r\n while clockOut <= clockIn:\r\n clockOut = random.choice([13, 19, 23])\r\n\r\n # Hourly wage\r\n wage = random.choice([8, 9, 10, 12, 20])\r\n\r\n gender = random.choice(['M', 'F'])\r\n if gender == 'M':\r\n name = random.choice(maleNames)\r\n else:\r\n name = random.choice(femaleNames)\r\n\r\n self.c.execute(\"INSERT INTO Employee (Name, ClockIn, ClockOut, Wage) VALUES (?, ?, ?, ?)\", (name, clockIn, clockOut, wage))\r\n self.conn.commit()\r\n\r\n if self.print_employees:\r\n print(\"\\nName:\", name)\r\n print(\"Clock in:\", clockIn)\r\n print(\"Clock out:\", clockOut)\r\n print(\"Wage:\", wage)",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def filter_badges_for_printing(self, badge_list, **params):\n\n if 'badge_type' in params:\n return badge_list.filter(Attendee.badge_type == params['badge_type'])\n elif 'dealer_only' in params:\n return badge_list.filter(Attendee.ribbon.in_([c.DEALER_RIBBON, c.DEALER_ASST_RIBBON]))\n elif 'badge_upgrade' in params:\n return badge_list.filter(Attendee.amount_extra == params['badge_upgrade'])\n else:\n return badge_list",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200",
"def get_sample_award_badge_data(self):\n return {\n \"recipient\": {\n \"identity\": \"[email protected]\"\n },\n \"notify\": True,\n \"evidence\": [{\n \"url\": \"http://example.com/\",\n \"narrative\": \"Joe completed all...\"\n }]\n }",
"def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees",
"def get_attendance(employee, date):\n # empty list to append the date come from database, after convert it from tuple to string\n day = []\n # excute sql query to get list of data each date come as tuple [('2020-04-01',)]\n FetchDay = c.execute(\"SELECT day FROM Attendance where employee=:employee\", {\n 'employee': employee})\n # get all date as list of tuples\n day_as_tuple = c.fetchall()\n\n # iterate over list of tuple and append each date to day list\n for days in day_as_tuple:\n for ele in days:\n day.append(ele)\n\n # test the case to check if date in day list or not\n if date in day:\n attended = True\n else:\n attended = False\n\n # make report as dictionary\n report = {}\n report['attended'] = attended\n # Time duration function to compute time duration\n duration = TimeDuration(employee, date)\n report['duration'] = str(duration)[:5]\n return report",
"def gather_employee_entries(self):\n user_inputs = [\n self.emp_lname.get(), self.emp_mi.get(), self.emp_fname.get(),\n self.emp_hiredate.get()\n ]\n\n return self.check_input_empty(user_inputs)",
"def get_hale_ages(gbd_round_id: int) -> Tuple[List[int], List[int]]:\n # Get age group IDs used in this GBD round and sort chronologically.\n demo_df = db_queries.get_demographics('epi', gbd_round_id)\n age_group_ids = demo_df[columns.AGE_GROUP_ID]\n age_spans = get_age_spans()\\\n .query(f'{columns.AGE_GROUP_ID} in @age_group_ids')\\\n .sort_values(columns.AGE_GROUP_YEARS_START)\n\n # Get under-one age groups (including birth) and full list of HALE age\n # groups.\n under_one_ages = [age_groups.BIRTH] + age_spans.loc[\n age_spans[columns.AGE_GROUP_YEARS_START] < 1, columns.AGE_GROUP_ID\n ].tolist()\n hale_ages = [age_groups.UNDER_ONE] + age_spans.loc[\n ~age_spans[columns.AGE_GROUP_ID].isin(under_one_ages),\n columns.AGE_GROUP_ID\n ].tolist()\n\n return hale_ages, under_one_ages"
] | [
"0.6881834",
"0.66980416",
"0.6510214",
"0.61874956",
"0.6009835",
"0.5942125",
"0.593319",
"0.5839908",
"0.5740555",
"0.5684213",
"0.5672405",
"0.5652698",
"0.56246674",
"0.5577624",
"0.5495039",
"0.547461",
"0.539064",
"0.536127",
"0.53156245",
"0.52861315",
"0.5280018",
"0.5278964",
"0.5265705",
"0.51969403",
"0.51960045",
"0.5178339",
"0.51746076",
"0.51711196",
"0.517",
"0.5136038"
] | 0.74351066 | 0 |
Returns the badge list annotated with an employee count per badge, or a filtered result list when the ?search= query parameter is used | def employee_list_group_by_badges(request):
if request.method == 'GET':
if request.GET.get('search'):
search_term = request.GET.get('search')
badge_list = EmployeeBadge.objects.filter(
Q(badge__name__icontains=search_term)).values(
'badge__pk',
'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')
else:
badge_list = EmployeeBadge.objects.all().values(
'badge__pk',
'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')
paginator = PageNumberPagination()
results = paginator.paginate_queryset(badge_list, request)
serializer = EmployeeBadgeListSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def badges_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_bages = EmployeeBadge.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_bages, request)\n serializer = EmployeeBadgeSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def employee_list_group_by_badges_detail(request, badge_id):\n if request.method == 'GET':\n badge = get_object_or_404(Badge, pk=badge_id)\n employee_list = EmployeeBadge.objects.filter(badge=badge).values(\n 'to_user__pk',\n 'to_user__username',\n 'to_user__first_name',\n 'to_user__last_name',\n 'to_user__level',\n 'to_user__avatar')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_list, request)\n serializer = EmployeeGroupedListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def legal_universal_search_results():\n advisory_opinions = advisory_opinions_search_results()\n regulations = regulations_search_results()\n total_count = advisory_opinions['total_advisory_opinions'] + regulations['total_regulations']\n results = {\"total_all\": total_count}\n results.update(advisory_opinions)\n results.update(regulations)\n return results",
"def shortsearch(term,location):\n results = search(term,location)['listings']\n result = []\n for business in results:\n result.append([business['id'],business['name'],\"Yellow Pages\"])\n return result",
"def search():\n return {\n \"status\": \"UP\",\n }, 200",
"def get(self, request):\n city_code = request.GET.get(\"city_code\", \"6624397033787067229\")\n checkin_date = request.GET.get(\"checkin_date\", \"20191026\")\n checkout_date = request.GET.get(\"checkout_date\", \"20191027\")\n url = \"https://hermes.goibibo.com/hotels/v9/search/data/v3/\" + city_code + \"/\" + checkin_date + \"/\" \\\n + checkout_date + \"/1-2-0\"\n\n params = {\n \"s\": \"popularity\",\n \"cur\": \"INR\",\n \"f\": \"{}\",\n \"sb\": \"0\",\n \"ud\": \"\",\n \"ai\": \"1\",\n \"asi\": \"0\",\n \"st\": \"voy\",\n \"vt\": \"city\",\n \"eid\": city_code,\n \"pid\": \"0\",\n \"im\": \"true\"\n }\n response = requests.get(url, params=params)\n resp = []\n if response.status_code == 200:\n response_data = response.json().get(\"data\", [])\n for obj in response_data:\n data_dict = {\n \"hotel_name\": obj.get(\"hn\", \"\"),\n \"star_rating\": obj.get(\"gr\", \"\"),\n \"image_url\": obj.get(\"t\", \"\"),\n \"price\": obj.get(\"opr\", \"\"),\n \"rating_count\": obj.get(\"grc\", \"\"),\n \"badge\": obj.get(\"bt\", \"\"),\n \"location\": obj.get(\"l\", \"\"),\n \"info\": obj.get(\"ut\", \"\")\n }\n resp.append(data_dict)\n return Response(resp)",
"def get(self):\n global hits\n return {\"hits\": hits}, 200",
"def list(self, request):\n encounters = Encounter.objects.all()\n serializer = EncounterListSerializer(encounters, many=True)\n return Response(serializer.data)",
"def getResults():",
"def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports",
"def search(self, query, maxhits=100):",
"def report_current(request):\n apps = Application.objects.filter(app_status__name__icontains='Current').order_by('acronym', 'release')\n return render_to_response('application/search_results.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));",
"def test_query_params_employer(session, params, expected_number_of_hits):\n result = get_search(session, params)\n compare(result['total']['value'], expected_number_of_hits)",
"def search_all_giphies() -> \"Tuple[Response, int]\":\n response: \"List[Dict]\" = retriever.retrieve_giphies(**request.get_json())\n success: \"int\" = 200 if len(response) > 0 else 404\n return jsonify(response), success",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):\n\t\tleave_ids = super (HolidaysType, self)._search (args, offset=offset, limit=limit, order=order, count=count,\n\t\t access_rights_uid=access_rights_uid)\n\t\tif not count and not order and self._context.get ('employee_id'):\n\t\t\tleaves = self.browse (leave_ids)\n\t\t\tsort_key = lambda l: (not l.limit, l.virtual_remaining_leaves)\n\t\t\treturn leaves.sorted (key=sort_key, reverse=True).ids\n\t\treturn leave_ids",
"def do_search(self, *args, **kwargs):\n return [{}]",
"def get_search_results(client, search_string, page):\n resp = client.search(search_string, page)\n resp_json = resp.json()\n search_results = resp_json[\"results\"]\n if search_results is None or len(search_results) == 0:\n if search_string:\n logger.info(uxstring.UxString.empty_listing.format(search_string))\n else:\n logger.info(uxstring.UxString.no_app_in_marketplace)\n\n return 0\n\n total_pages = resp_json[\"total_pages\"]\n logger.info(\"\\nPage {}/{}\".format(page + 1, total_pages), fg=\"green\")\n content = market_search_formatter(search_results)\n logger.info(content)\n return total_pages",
"def get_examinee():\n try:\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n getting_own_results = is_self(user_id)\n if examiner or getting_own_results:\n results_query = db.session.query(User, func.count(ExamRecording.user_id)).\\\n outerjoin(ExamRecording, ExamRecording.user_id==User.user_id).\\\n group_by(User.user_id)\n\n results, next_page_exists = filter_results(results_query, User)\n users = []\n for u, er_count in results:\n users.append({\n **u.to_dict(),\n 'exam_recordings':er_count\n })\n return jsonify({'users':users, 'next_page_exists':next_page_exists}), 200\n \n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except (Exception, exc.SQLAlchemyError) as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500",
"def get_summaries(query, **kwargs):\n kwargs.update(stop=40)\n results = search(query, **kwargs)\n return results",
"def list(self, request):\n user = request.auth.user\n events = Event.objects.order_by('datetime')\n search_text = self.request.query_params.get('q', None)\n if search_text is not None:\n events = events.filter(\n Q(cost__icontains=search_text)\n )\n search_text = self.request.query_params.get('date', None)\n if search_text is not None:\n events = events.filter(\n Q(datetime__icontains=search_text)\n )\n for event in events:\n event.bookmarked = None\n try:\n Bookmark.objects.get(event=event, user=user)\n event.bookmarked = True\n except Bookmark.DoesNotExist:\n event.bookmarked = False\n # game = self.request.query_params.get('gameId', None)\n # if game is not None:\n # events = events.filter(game__id=game)\n serializer = EventSerializer(\n events, many=True, context={'request': request})\n return Response(serializer.data)",
"def enumerate_appointments(age, gender, nb=2, price=60.):",
"def search_results():\n skip = int(flask.request.args.get(\"skip\", \"0\"))\n limit = int(flask.request.args.get(\"limit\", \"20\"))\n\n obj = {}\n\n # query : will be event kit in case of triage information\n uidstr = flask.request.args.get(\"query\", None)\n\n if uidstr == None:\n obj[\"error\"] = \"Missing search ID\"\n\n uidstr = json.loads(uidstr)\n\n obj[\"query\"] = {}\n obj[\"query\"][\"uid\"] = uidstr\n obj[\"clips\"] = []\n states = backend.get_search_sessions()\n obj[\"sessions\"] = []\n for astate in states:\n obj[\"sessions\"].append(str(astate))\n try:\n uid = uuid.UUID(uidstr)\n state = backend.get_iqr_search_state(uid)\n # use the uid of the state and get the information from the database\n col = str(state.uuid)\n obj[\"collection\"] = col\n searchdb[col].ensure_index([(\"model_id\", pymongo.ASCENDING),(\"probability\", pymongo.DESCENDING) ])\n # Force probabilities\n obj[\"positives\"] = list(state.positives)\n obj[\"negatives\"] = list(state.negatives)\n log = \"\"\n for id in state.positives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 1.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 1.0001\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n\n for id in state.negatives:\n # log = log + \"Found %d\"%(searchdb[col].find({\"model_id\" : \"FUSION\", \"clip_id\" : id}).count()) + \", \"\n # res = searchdb[col].update({\"model_id\" : \"FUSION\", \"clip_id\" : id}, {\"$set\" : { \"probability\" : 0.0}})\n # log = log + \"Done %d\"%id + \", \"\n news = searchdb[col].find_one({\"model_id\" : \"FUSION\", \"clip_id\" : id})\n news[\"probability\"] = 0.0\n searchdb[col].save(news)\n log = log + \"Now : \" + str(news)\n\n obj[\"log\"] = log\n\n allres = searchdb[col].find({\"model_id\" : \"FUSION\"}).sort([(\"probability\", pymongo.DESCENDING)]).skip(skip).limit(limit)\n rank = skip + 1\n for one in allres:\n aclip = {}\n aclip[\"score\"] = one[\"probability\"]\n aclip[\"id\"] = \"HVC\" + str(one[\"clip_id\"]).zfill(6)\n clipobj = db[\"clips\"].find_one({\"id\" : \"HVC\" + str(one[\"clip_id\"]).zfill(6)},{\"duration\" : 1})\n aclip[\"duration\"] = clipobj[\"duration\"]\n aclip[\"rank\"] = rank\n rank = rank + 1\n obj[\"clips\"].append(aclip)\n obj[\"count\"] = len(obj[\"clips\"])\n\n except Exception as e:\n obj[\"error\"] = str(type(e)) + \": \" + str(e)\n return jsonify(obj)\n\n obj[\"next\"] = \"http://localhost:5003/iqr/search_results?\" + urllib.urlencode({\"uid\" : uid, \"skip\" : skip+limit } )\n return jsonify(obj)",
"def event_list(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n request_terms = request.GET.get('search')\n search_terms_array = request_terms.split()\n\n initial_term = search_terms_array[0]\n event_list = Event.objects.annotate(\n num_participants=Count('participants', distinct=True),\n num_collaborators=Count('collaborators', distinct=True)).filter(\n Q(title__icontains=initial_term) |\n Q(description__icontains=initial_term))\n if len(search_terms_array) > 1:\n for term in range(1, len(search_terms_array)):\n event_list = event_list.filter(Q(title__icontains=search_terms_array[term]) |\n Q(description__icontains=search_terms_array[term]))\n else:\n event_list = Event.objects.annotate(\n num_participants=Count('participants', distinct=True),\n num_collaborators=Count('collaborators', distinct=True)).all()\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(event_list, request)\n serializer = EventSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def search_by_email(self, request, **kwargs):\n self.method_check(request, allowed=['get'])\n self.throttle_check(request)\n\n keyword = request.GET['keyword']\n members = Member.objects.filter(email__icontains=keyword)\n\n bundles = []\n\n for member in members:\n bundle = self.build_bundle(obj=member, request=request)\n bundles.append(self.full_dehydrate(bundle, for_list=True))\n\n return self.create_response(request, bundles)",
"def search_results(request):\r\n mdict = request.matchdict\r\n rdict = request.GET\r\n\r\n if 'terms' in mdict:\r\n phrase = \" \".join(mdict['terms'])\r\n else:\r\n phrase = rdict.get('search', '')\r\n\r\n if rdict.get('search_mine') or 'username' in mdict:\r\n with_user = True\r\n else:\r\n with_user = False\r\n\r\n username = None\r\n if with_user:\r\n if 'username' in mdict:\r\n username = mdict.get('username')\r\n elif request.user and request.user.username:\r\n username = request.user.username\r\n\r\n # with content is always in the get string\r\n search_content = asbool(rdict.get('with_content', False))\r\n\r\n conn_str = request.registry.settings.get('sqlalchemy.url', False)\r\n searcher = get_fulltext_handler(conn_str)\r\n\r\n # check if we have a page count submitted\r\n page = rdict.get('page', 0)\r\n count = rdict.get('count', 10)\r\n\r\n try:\r\n res_list = searcher.search(\r\n phrase,\r\n content=search_content,\r\n username=username if with_user else None,\r\n ct=count,\r\n page=page\r\n )\r\n except ValueError:\r\n request.response.status_int = 404\r\n ret = {'error': \"Bad Request: Page number out of bound\"}\r\n return _api_response(request, ret)\r\n\r\n constructed_results = []\r\n for res in res_list:\r\n return_obj = dict(res)\r\n return_obj['tags'] = [dict(tag[1]) for tag in res.tags.items()]\r\n\r\n # the hashed object is there as well, we need to pull the url and\r\n # clicks from it as total_clicks\r\n return_obj['url'] = res.hashed.url\r\n return_obj['total_clicks'] = res.hashed.clicks\r\n\r\n constructed_results.append(return_obj)\r\n\r\n return _api_response(request, {\r\n 'search_results': constructed_results,\r\n 'result_count': len(constructed_results),\r\n 'phrase': phrase,\r\n 'page': page,\r\n 'with_content': search_content,\r\n 'username': username,\r\n })",
"def search_yelp(params):\n url = 'https://api.yelp.com/v3/businesses/search'\n headers = {'Authorization': 'Bearer ' + os.environ['YELP_KEY']}\n resp = requests.get(url=url, params=params, headers=headers)\n responses = resp.json()\n return responses",
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def search_unified():\n result_types = flask.request.args.get('result_types').split(',')\n\n # TODO(david): Cache this.\n course_dicts = []\n if 'courses' in result_types:\n courses = sorted(list(m.Course.objects().only('id', 'name',\n '_keywords', 'department_id', 'number')),\n key=lambda c: c.id)\n course_dicts = [{\n 'label': c.id,\n 'name': c.name,\n 'type': 'course',\n 'tokens': c._keywords,\n 'department_id': c.department_id,\n 'number': c.number\n } for c in courses]\n\n friend_dicts = []\n if 'friends' in result_types:\n user = view_helpers.get_current_user()\n if user:\n friends = user.get_friends()\n friend_dicts = [{\n 'label': f.name,\n 'program': f.short_program_name,\n 'type': 'friend',\n 'id': f.id,\n 'pic': f.profile_pic_urls['square'],\n 'tokens': [f.first_name, f.last_name]\n } for f in friends]\n\n prof_dicts = []\n if 'professors' in result_types:\n professors = m.Professor.objects().only('id',\n 'first_name',\n 'last_name',\n 'departments_taught')\n prof_dicts = [{\n 'label': p.name,\n 'departments_taught': p.departments_taught,\n 'type': 'prof',\n 'prof_id': p.id,\n 'name': p.name,\n 'tokens': [p.first_name, p.last_name, 'professor']\n } for p in professors]\n\n return api_util.jsonify({\n 'friends': friend_dicts,\n 'courses': course_dicts,\n 'professors': prof_dicts\n })",
"def search_professors(search_term):\n print(\"Professor to search\", search_term)\n if search_term == \"\" or search_term is None:\n return json.dumps([])\n else:\n # pandas_index_list = elastic_dash.search_professors(search_term)\n pandas_index_list = elastic_dash.search_personnel(search_term)\n print(\"pandas index list \", pandas_index_list)\n return json.dumps(pandas_index_list)"
] | [
"0.6890771",
"0.6407852",
"0.57139814",
"0.5629032",
"0.558691",
"0.5570377",
"0.5480534",
"0.5445521",
"0.5406145",
"0.5389485",
"0.53879863",
"0.5364861",
"0.53562313",
"0.5354165",
"0.53436494",
"0.5332263",
"0.530441",
"0.5293682",
"0.5283176",
"0.5282777",
"0.5282735",
"0.52586937",
"0.52425945",
"0.5229607",
"0.5226615",
"0.5222059",
"0.52158135",
"0.5209311",
"0.51992154",
"0.5191385"
] | 0.7933099 | 0 |
Returns the list of employees holding the given badge; you must provide badge_id | def employee_list_group_by_badges_detail(request, badge_id):
if request.method == 'GET':
badge = get_object_or_404(Badge, pk=badge_id)
employee_list = EmployeeBadge.objects.filter(badge=badge).values(
'to_user__pk',
'to_user__username',
'to_user__first_name',
'to_user__last_name',
'to_user__level',
'to_user__avatar')
paginator = PageNumberPagination()
results = paginator.paginate_queryset(employee_list, request)
serializer = EmployeeGroupedListSerializer(results, many=True)
return paginator.get_paginated_response(serializer.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def employee_list_group_by_badges(request):\n if request.method == 'GET':\n if request.GET.get('search'):\n search_term = request.GET.get('search')\n badge_list = EmployeeBadge.objects.filter(\n Q(badge__name__icontains=search_term)).values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n else:\n badge_list = EmployeeBadge.objects.all().values(\n 'badge__pk',\n 'badge__name').annotate(num_employees=Count('to_user')).order_by('-num_employees')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(badge_list, request)\n serializer = EmployeeBadgeListSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def badges_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_bages = EmployeeBadge.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_bages, request)\n serializer = EmployeeBadgeSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def get_badges(self) -> List:\n LOGGER.info('Get all badges')\n\n with self.client.create_session() as session:\n badges = session.query(RDSBadge).all()\n\n results = []\n for badge in badges:\n results.append(Badge(badge_name=badge.rk,\n category=badge.category))\n\n return results",
"def get_employees(self):\n return self.employees",
"def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))",
"def _get_employee_info() -> List[List[str]]:\n return [\n ['100', 'Dave', 'Team Leader'],\n ['101', 'Ram', 'Developer'],\n ['102', 'Raj', 'Developer'],\n ['103', 'Rahul', 'Tester'],\n ]",
"def stars_employee_list_group_by_category(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'category__pk',\n 'category__name').annotate(num_stars=Count('category')).order_by('-num_stars', 'category__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeCategoriesSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def getEmployees(self):\n return self.employees",
"def get_emp_list(self):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.employee as employee, count(*) as attendance_days\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=True)\n\t\treturn emp_list",
"def stars_employee_list_group_by_keyword(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee).values(\n 'keyword__pk',\n 'keyword__name').annotate(num_stars=Count('keyword')).order_by('-num_stars', 'keyword__name')\n paginator = PageNumberPagination()\n result = paginator.paginate_queryset(employee_stars, request)\n serializer = StarEmployeeKeywordsSerializer(result, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp",
"def get_employees_directory(self):\n response = requests.get(self._base_url + \"employees/directory\",\n auth=(self._api_key, \"pass\"),\n headers={'Accept': 'application/json'})\n if response.status_code != 200:\n response.raise_for_status()\n emps_json = json.loads(response.text)['employees']\n return {int(e['id']): Employee(e['displayName'],\n e['firstName'],\n e['lastName'],\n e['nickname']) for e in emps_json}",
"def list_employees(order_by=\"id\"):\n ret = {}\n status, result = _query(action=\"employees\", command=\"directory\")\n root = ET.fromstring(result)\n for cat in root:\n if cat.tag != \"employees\":\n continue\n for item in cat:\n emp_id = next(iter(item.values()))\n emp_ret = {\"id\": emp_id}\n for details in item:\n emp_ret[next(iter(details.values()))] = details.text\n ret[emp_ret[order_by]] = emp_ret\n return ret",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)",
"def filter_badges_for_printing(self, badge_list, **params):\n\n if 'badge_type' in params:\n return badge_list.filter(Attendee.badge_type == params['badge_type'])\n elif 'dealer_only' in params:\n return badge_list.filter(Attendee.ribbon.in_([c.DEALER_RIBBON, c.DEALER_ASST_RIBBON]))\n elif 'badge_upgrade' in params:\n return badge_list.filter(Attendee.amount_extra == params['badge_upgrade'])\n else:\n return badge_list",
"def employees(self) -> object:\n return self._employees",
"def get_employees(self):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employee')\n\n employees = list()\n for row in cursor:\n employee = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])\n employees.append(employee)\n return employees",
"def get_all(self):\n total_expense_reports = []\n get_count = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'EEXPENSES',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'RECORDID',\n 'WHENCREATED',\n 'WHENPOSTED',\n 'TOTALENTERED',\n 'STATE',\n 'TOTALDUE',\n 'DESCRIPTION',\n 'CURRENCY',\n 'BASECURR',\n 'MEMO'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n expense_reports = self.format_and_send_request(data)['data']['EEXPENSES']\n total_expense_reports = total_expense_reports + expense_reports\n offset = offset + pagesize\n return total_expense_reports",
"def get_hale_ages(gbd_round_id: int) -> Tuple[List[int], List[int]]:\n # Get age group IDs used in this GBD round and sort chronologically.\n demo_df = db_queries.get_demographics('epi', gbd_round_id)\n age_group_ids = demo_df[columns.AGE_GROUP_ID]\n age_spans = get_age_spans()\\\n .query(f'{columns.AGE_GROUP_ID} in @age_group_ids')\\\n .sort_values(columns.AGE_GROUP_YEARS_START)\n\n # Get under-one age groups (including birth) and full list of HALE age\n # groups.\n under_one_ages = [age_groups.BIRTH] + age_spans.loc[\n age_spans[columns.AGE_GROUP_YEARS_START] < 1, columns.AGE_GROUP_ID\n ].tolist()\n hale_ages = [age_groups.UNDER_ONE] + age_spans.loc[\n ~age_spans[columns.AGE_GROUP_ID].isin(under_one_ages),\n columns.AGE_GROUP_ID\n ].tolist()\n\n return hale_ages, under_one_ages",
"def stars_employee_list_group_by_category_detail(request, employee_id, category_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n category = get_object_or_404(Category, pk=category_id)\n stars = Star.objects.filter(to_user=employee, category=category).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)",
"def stars_employee_list_group_by_keyword_detail(request, employee_id, keyword_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n keyword = get_object_or_404(Keyword, pk=keyword_id)\n stars = Star.objects.filter(to_user=employee, keyword=keyword).order_by('-date')\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(stars, request)\n serializer = StarSmallSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200",
"def list_eip_groups(self, id=None, name=None, status=None,\n marker=None, max_keys=None, config=None):\n path = self._get_path()\n params = {}\n if id is not None:\n params[b'id'] = id\n if name is not None:\n params[b'name'] = name\n if status is not None:\n params[b'status'] = status\n if marker is not None:\n params[b'marker'] = marker\n if max_keys is not None:\n params[b'maxKeys'] = max_keys\n return self._send_request(http_methods.GET, path,\n params=params, config=config)",
"def get_sal_slip_list(self, as_dict=False):\n\t\tcondition= ''\n\t\temp_list=[]\n\t\tif self.is_for_all==0:\n\t\t\tif not self.selected_employees:\n\t\t\t\tfrappe.throw(_(\"No employees for the mentioned criteria\"))\n\t\t\t#emp_list = [cstr(d.employee) for d in self.selected_employees]\n\t\t\temp_list = frappe.db.sql_list(\"\"\"\n\t\t\t\tselect\n\t\t\t\t\temployee from `tabAttendance Salary Tool Employee`\n\t\t\t\twhere\n\t\t\t\t\tparent = '%(parent)s' \n\t\t\t\"\"\"%{\"parent\": self.name})\n\t\t\tcondition+= \"\"\" and t1.employee IN %(employees)s \"\"\"\n\t\tif self.is_open_period==0:\n\t\t\tif not self.start_date or not self.end_date:\n\t\t\t\tfrappe.throw(_(\"Satart Date and End Date are Mandatories\"))\n\t\t\tcondition= \"\"\" and attendance_date >= %(start_date)s and attendance_date <= %(end_date)s\"\"\"\n\t\temp_list = frappe.db.sql(\"\"\"\n\t\t\tselect\n\t\t\t\tt1.name\n\t\t\tfrom\n\t\t\t\t`tabAttendance` t1\n\t\t\twhere\n\t\t\t\tt1.attendance_salary_tool is null\n\t\t\t\tand t1.docstatus = 1 and t1.status='Present'\n\t\t\t\t{condition} group by t1.employee order by t1.employee asc\n\t\t\"\"\".format(condition=condition),{\"employees\": tuple(emp_list),\"start_date\": self.start_date,\"end_date\": self.end_date}, as_dict=as_dict)\n\n\t\treturn emp_list",
"async def get_all_investigators(request):\n client_key = general.get_request_key_header(request)\n investigator_list = await security_messaging.get_investigators(request.app.config.VAL_CONN, client_key)\n\n investigator_list_json = []\n for address, dp in investigator_list.items():\n investigator_list_json.append({\n 'public_key': dp.public_key,\n 'name': dp.name\n })\n return response.json(body={'data': investigator_list_json},\n headers=general.get_response_headers())",
"def list_by_group(self, id_egroup):\n\n if id_egroup is None:\n raise InvalidParameterError(\n u'The identifier of Group Equipament is invalid or was not informed.')\n\n url = 'equipment/group/' + str(id_egroup) + '/'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)",
"def get_newhire_tickets(group_id):\n url = f\"{BASE_URL}/api/v2/tickets\"\n headers = {\"AUTHorization\": f\"Basic {AUTH}\"}\n r = requests.get(url, headers=headers)\n if r.ok:\n print(f\"Got list of all new hire tickets.\")\n else:\n logging.debug(f\"Error - {r.status_code} - {r.content}\")\n tickets = r.json()[\"tickets\"]\n ticket_ids = set()\n last_hour = datetime.now() - timedelta(hours=1)\n\n for ticket in tickets:\n update_time = datetime.strptime(ticket[\"updated_at\"], \"%Y-%m-%dT%H:%M:%SZ\")\n # Check for tickets modified in the last hour\n if update_time > last_hour:\n # Verify the subject and group are related to New Hire Onboarding\n if \"New Hire\" in ticket[\"subject\"] and ticket[\"group_id\"] == group_id:\n start_date = get_start_date(ticket[\"id\"])\n # Check to see if ticket due date was already updated\n if start_date == ticket[\"due_by\"][0:10]:\n print(f'Ticket {ticket[\"id\"]} already updated.')\n else:\n ticket_ids.add(ticket[\"id\"])\n add_ticket_note(ticket[\"id\"], ticket[\"due_by\"][0:10])\n\n return ticket_ids",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado"
] | [
"0.76944005",
"0.6957184",
"0.611837",
"0.56083935",
"0.55651563",
"0.5556811",
"0.55224836",
"0.54797256",
"0.54534733",
"0.54494524",
"0.5444034",
"0.5382721",
"0.534674",
"0.5346019",
"0.53341836",
"0.52874357",
"0.5269832",
"0.5198266",
"0.51640195",
"0.5162273",
"0.5160529",
"0.51335466",
"0.5093009",
"0.5054957",
"0.5027952",
"0.50245744",
"0.50060314",
"0.49763733",
"0.4976321",
"0.49711955"
] | 0.7924092 | 0 |
Create a RegressionData object from a sklearn estimator. | def from_sklearn(cls, estimator, feature_matrix, property_vector):
return cls(
module=estimator.__module__,
estimator_name=estimator.__class__.__name__,
feature_matrix=feature_matrix,
property_vector=property_vector,
parameters=estimator.get_params(),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, estimator, **kwargs):\n super(LogisticRegression, self).__init__(\n estimator, **kwargs)\n\n self.estimator = estimator",
"def _load(cls,data:{}, X=None):\n estimator = cls()\n estimator.load_data(data=data)\n estimator.load_X(X=X)\n return estimator",
"def linear_regression_sklearn(data):\n# Split the data into training/testing sets\n dataset = np.array(data)\n\n X_train = dataset[:,0].reshape(-1,1)\n y_train = dataset[:,1]\n\n# Create linear regression object\n regr = linear_model.LinearRegression()\n\n# Train the model using the training sets\n regr.fit(X_train, y_train)\n\n return (regr.coef_[0], regr.intercept_)",
"def __init__(self, estimator = LogisticRegression()): \n\t self.estimator = estimator",
"def test_compatibility_with_sklearn(self) -> type(None):\n check_estimator(StackingRegressor)",
"def _build_regression(endog, exog, model, lasso_positive, alpha):\n if model=='Ridge':\n mod = Ridge(alpha=alpha)\n elif model=='Lasso':\n mod = Lasso(alpha=alpha, positive=lasso_positive)\n else:\n raise ValueError(\"Model must be of type Ridge or Lasso\")\n \n mod.fit(endog, exog)\n return mod",
"def __init__(self, estimator=LogisticRegression(), theta=0.1, demote=True):\n # TODO: assert that estimator has a predict_proba method.\n self.estimator = estimator\n self.theta = theta\n self.demote = demote",
"def _fit_base_estimator(self, X, y):\n if not isinstance(X,pd.DataFrame) and self._feature_columns is not None and self._label_binarier is False:\n\n X=pd.DataFrame(X,index=None,columns=self._feature_columns,dtype=np.float)\n\n #X=pd.DataFrame(X,columns=self._feature_columns,dtype=np.float)\n X['model']=X['model'].astype(np.int)#.astype('category')\n #print('transpose')\n if isinstance(self.base_estimator_,GBDTLRClassifier):\n return sklearn.base.clone(self.base_estimator_).fit(X, y, gbdt__categorical_feature=[65])\n else:\n return sklearn.base.clone(self.base_estimator_).fit(X, y,categorical_feature=[65])\n\n if self._label_binarier is True:\n return sklearn.base.clone(self.base_estimator_).fit(X,y)",
"def sklearn_model(train_data):\n X, y = train_data\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model = LogisticRegression(\n multi_class=\"multinomial\", solver=\"lbfgs\", max_iter=1000\n )\n model.fit(X, y)\n return model",
"def train_model(X_train: pd.DataFrame, y_train: pd.DataFrame) -> LinearRegression:\n randomForestClassifier = RandomForestClassifier(\n n_estimators=100, max_depth=2, random_state=0\n )\n\n randomForestClassifier.fit(X_train, y_train)\n return randomForestClassifier",
"def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.linear_model\n self.model = sklearn.linear_model.LogisticRegression",
"def nnRegression(data):",
"def __init__(self, binner=None, estimator=None, n_jobs=None, verbose=False):\n if estimator is None:\n estimator = LinearRegression()\n if binner in ('tree', None):\n binner = DecisionTreeRegressor(min_samples_leaf=2)\n RegressorMixin.__init__(self)\n PiecewiseEstimator.__init__(self, binner=binner, estimator=estimator,\n n_jobs=n_jobs, verbose=verbose)",
"def from_object(cls, estimator, feature_matrix, property_vector, parameters=None):\n try:\n estimator_name = estimator.__class__.__name__\n except AttributeError:\n estimator_name = estimator.__name__\n\n return cls(\n module=estimator.__module__,\n estimator_name=estimator_name,\n feature_matrix=feature_matrix,\n property_vector=property_vector,\n parameters=parameters,\n )",
"def fit(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n y = df.values\n if y.shape[1] == 1:\n y = y.ravel()\n X = date_part(df.index, method=self.datepart_method)\n from autots.models.sklearn import retrieve_regressor\n\n multioutput = True\n if y.ndim < 2:\n multioutput = False\n elif y.shape[1] < 2:\n multioutput = False\n self.model = retrieve_regressor(\n regression_model=self.regression_model,\n verbose=0,\n verbose_bool=False,\n random_seed=2020,\n multioutput=multioutput,\n )\n self.model = self.model.fit(X, y)\n self.shape = df.shape\n return self",
"def test_regressor_age_estimator(x, reg_model):\n return reg_model.predict(x)",
"def to_sklearn(self):\n import sklearn.pipeline as skp\n\n steps = []\n for step in self.steps:\n steps += [(step[0], step[1].to_sklearn())]\n return skp.Pipeline(steps)",
"def test_sklearn_compatible_estimator(estimator: Any, check: Any) -> None:\n check(estimator)",
"def is_sklearn_regressor(obj):\n return is_sklearn_estimator(obj) and sklearn_scitype(obj) == \"regressor\"",
"def build_benchmark_model(X, y):\n model = LinearRegression()\n model = model.fit(X, y)\n \n \"\"\"Displays model summary.\"\"\"\n print(\"Model coefficient: {}\".format(model.coef_))\n print(\"Model intercept: {}\".format(model.intercept_))\n \n return model",
"def _regressor_fit(X, y, params, n_iter):\n\n X_is_sparse = sp.issparse(X)\n y_is_sparse = sp.issparse(y)\n\n js_state = libpytsetlini.regressor_fit(\n X, X_is_sparse,\n y, y_is_sparse,\n _params_as_json_bytes(params),\n n_iter)\n\n return js_state",
"def get_trained_model(dataframe, features, target, method='logistic'):\n if method == 'logistic':\n model = LogisticRegression()\n model.fit(dataframe[features], dataframe[target])\n return model\n else:\n raise NotImplementedError",
"def get_default_estimator():\n return LogisticRegression()",
"def fit(self, X, y):\n features = 2 \n forest = [self.Tree(features) for i in range(self.n_estimators)] \n estimators = []\n \n for tree in forest:\n# mylist = list(range(len(X.columns)))\n# sample_index = np.random.choice(mylist, size=features , replace=True, p=None)\n# X_data = None \n# for j in range(len(sample_index)):\n# X_data = pd.concat([X_data, X[:, i]] , axis=1, ignore_index=True).reset_index() \n estimator = tree\n estimator.fit(X, y)\n estimators.append(estimator)\n self.estimators = estimators\n return",
"def fit(self, X, y):\n features = 2 \n forest = [self.Tree(features) for i in range(self.n_estimators)] \n estimators = []\n \n for tree in forest:\n# mylist = list(range(len(X.columns)))\n# sample_index = np.random.choice(mylist, size=features , replace=True, p=None)\n# X_data = None \n# for j in range(len(sample_index)):\n# X_data = pd.concat([X_data, X[:, i]] , axis=1, ignore_index=True).reset_index() \n estimator = tree\n estimator.fit(X, y)\n estimators.append(estimator)\n self.estimators = estimators\n return",
"def __init__(self, data_table, answers):\n\t\tBasicRegression.__init__(self,data_table,answers)\n\t\tself.add_intercept()",
"def __init__(\n self,\n estimator = SGDClassifier(),\n ):\n self.estimator = estimator",
"def fit(self):\n \n # Open an existing model and get the training & test dataset and targets\n train_test_df, target_df = self._get_model_and_data(target=True, set_feature_def=True)\n \n # Check that the estimator is an supervised ML algorithm\n if self.model.estimator_type not in [\"classifier\", \"regressor\"]:\n err = \"Incorrect usage. The estimator specified is not a known classifier or regressor: {0}\".format(self.model.estimator)\n raise Exception(err)\n \n # Check which validation strategy is to be used, if any\n # For an explanation of cross validation in scikit-learn see: http://scikit-learn.org/stable/modules/cross_validation.html#multimetric-cross-validation\n if self.model.time_series_split > 0:\n self.model.validation = \"timeseries\"\n # Set up cross validation to be performed using TimeSeriesSplit\n self.model.cv = TimeSeriesSplit(n_splits=self.model.time_series_split, max_train_size=self.model.max_train_size)\n elif self.model.cv > 0:\n self.model.validation = \"k-fold\"\n elif self.model.test_size > 0:\n self.model.validation = \"hold-out\"\n else:\n self.model.validation = \"external\"\n\n if self.model.validation == \"hold-out\": \n # Split the data into training and testing subsets\n self.X_train, self.X_test, self.y_train, self.y_test = \\\n train_test_split(train_test_df, target_df, test_size=self.model.test_size, random_state=self.model.random_state)\n else:\n self.X_train = train_test_df\n self.y_train = target_df\n \n # Add the training and test data to the model if required\n if self.model.retain_data:\n self.model.X_train = self.X_train\n self.model.y_train = self.y_train\n \n try:\n self.model.X_test = self.X_test\n self.model.y_test = self.y_test\n except AttributeError:\n pass\n \n # Scale the targets and increase stationarity if required\n if self.model.scale_target or self.model.make_stationary:\n # Set up the target transformer\n self.model.target_transformer = TargetTransformer(scale=self.model.scale_target, make_stationary=self.model.make_stationary, stationarity_lags=self.model.stationarity_lags,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Fit the transformer to the training targets\n self.model.target_transformer = self.model.target_transformer.fit(self.y_train)\n\n # Apply the transformer to the training targets\n self.y_train = self.model.target_transformer.transform(self.y_train)\n # Drop samples where the target cannot be transformed due to insufficient lags\n self.X_train = self.X_train.iloc[len(self.X_train)-len(self.y_train):] \n \n # Add lag observations to the samples if required\n if self.model.lags or self.model.lag_target:\n # Check if the current sample will be included as an input, or whether we only use lag observations for predictions\n extrapolate = 1 if self.model.current_sample_as_input else 0\n # Add the lag observations\n self.X_train = self._add_lags(self.X_train, self.y_train, extrapolate=extrapolate, update_features_df=True)\n # Drop targets for samples which were dropped due to null values after adding lags.\n if len(self.y_train) > len(self.X_train):\n self.y_train = self.y_train.iloc[len(self.y_train)-len(self.X_train):]\n\n # If this is a Keras estimator, we require the preprocessing to return a data frame instead of a numpy array\n prep_return = 'df' if self.model.using_keras else 'np'\n\n # Construct the preprocessor\n prep = Preprocessor(self.model.features_df, return_type=prep_return, scale_hashed=self.model.scale_hashed, 
scale_vectors=self.model.scale_vectors,\\\n missing=self.model.missing, scaler=self.model.scaler, logfile=self.logfile, **self.model.scaler_kwargs)\n\n # Setup a list to store steps for the sklearn pipeline\n pipe_steps = [('preprocessor', prep)]\n\n if self.model.dim_reduction:\n # Construct the dimensionality reduction object\n reduction = self.decomposers[self.model.reduction](**self.model.dim_reduction_args)\n \n # Include dimensionality reduction in the pipeline steps\n pipe_steps.append(('reduction', reduction))\n self.model.estimation_step = 2\n else:\n self.model.estimation_step = 1 \n\n # If this is a Keras estimator, update the input shape and reshape the data if required\n if self.model.using_keras:\n # Update the input shape based on the final number of features after preprocessing\n self._keras_update_shape(prep)\n\n # Add the Keras build function, architecture and prediction_periods to the estimator keyword arguments\n self.model.estimator_kwargs['build_fn'] = self._keras_build_fn\n self.model.estimator_kwargs['architecture'] = self.model.architecture\n self.model.estimator_kwargs['prediction_periods'] = self.model.prediction_periods\n\n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(10)\n \n # Check than an identifier has been provided for sorting data if this is a sequence prediction problem\n if self.model.lags or len(self.model.first_layer_kwargs[\"input_shape\"]) > 1:\n assert len(self.model.original_features_df[self.model.original_features_df['variable_type'].isin([\"identifier\"])]) == 1, \\\n \"An identifier is mandatory when using lags or with sequence prediction problems. Define this field in your feature definitions.\"\n\n # Cater for multi-step predictions\n if self.model.prediction_periods > 1:\n # Transform y to a vector of values equal to prediction_periods\n self.y_train = utils.vectorize_array(self.y_train, steps=self.model.prediction_periods)\n # Drop values from x for which we don't have sufficient y values\n self.X_train = self.X_train.iloc[:-len(self.X_train)+len(self.y_train)]\n\n # Add a pipeline step to update the input shape and reshape the data if required\n # This transform will also add lag observations if specified through the lags parameter\n # If lag_target is True, an additional feature will be created for each sample using the previous value of y \n reshape = Reshaper(first_layer_kwargs=self.model.first_layer_kwargs, logfile=self.logfile)\n pipe_steps.append(('reshape', reshape))\n self.model.estimation_step += self.model.estimation_step\n\n # Avoid tensorflow error for keras models\n # https://github.com/tensorflow/tensorflow/issues/14356\n # https://stackoverflow.com/questions/40785224/tensorflow-cannot-interpret-feed-dict-key-as-tensor\n kerasbackend.clear_session()\n \n # Try assuming the pipeline involves a grid search\n try:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Prepare the grid search using the previously set parameter grid\n grid_search = GridSearchCV(estimator=estimator, param_grid=self.model.param_grid, **self.model.grid_search_args)\n \n # Add grid search to the pipeline steps\n pipe_steps.append(('grid_search', grid_search))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to 
the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n\n # Get the best parameters and the cross validation results\n grid_search = self.model.pipe.named_steps['grid_search']\n self.model.best_params = grid_search.best_params_\n self.model.cv_results = grid_search.cv_results_\n\n # Get the best estimator to add to the final pipeline\n estimator = grid_search.best_estimator_\n\n # Update the pipeline with the best estimator\n self.model.pipe.steps[self.model.estimation_step] = ('estimator', estimator)\n\n except AttributeError:\n # Construct an estimator\n estimator = self.algorithms[self.model.estimator](**self.model.estimator_kwargs)\n\n # Add the estimator to the pipeline steps\n pipe_steps.append(('estimator', estimator))\n\n # Construct the sklearn pipeline using the list of steps\n self.model.pipe = Pipeline(pipe_steps)\n\n if self.model.validation in [\"k-fold\", \"timeseries\"]:\n # Perform K-fold cross validation\n self._cross_validate()\n\n # Fit the training data to the pipeline\n if self.model.using_keras:\n # https://stackoverflow.com/questions/54652536/keras-tensorflow-backend-error-tensor-input-10-specified-in-either-feed-de\n session = tf.Session()\n kerasbackend.set_session(session)\n with session.as_default():\n with session.graph.as_default():\n sys.stdout.write(\"\\nMODEL: {}, INPUT SHAPE: {}\\n\\n\".format(self.model.name, self.model.first_layer_kwargs['input_shape']))\n y = self.y_train.values if self.y_train.shape[1] > 1 else self.y_train.values.ravel()\n self.model.pipe.fit(self.X_train, y)\n else:\n self.model.pipe.fit(self.X_train, self.y_train.values.ravel())\n \n if self.model.validation == \"hold-out\": \n # Evaluate the model using the test data \n self.calculate_metrics(caller=\"internal\")\n \n if self.model.calc_feature_importances:\n # Select the dataset for calculating importances\n if self.model.validation == \"hold-out\":\n X = self.X_test\n y = self.y_test # Already a numpy array after calculate_metrics\n else:\n X = self.X_train\n y = self.y_train.values.ravel()\n \n # Calculate model agnostic feature importances\n self._calc_importances(X = X, y = y)\n\n # Persist the model to disk\n self.model = self.model.save(self.model.name, self.path, overwrite=self.model.overwrite, compress=self.model.compress)\n \n # Update the cache to keep this model in memory\n self._update_cache()\n \n # Prepare the output\n if self.model.validation != \"external\": \n message = [[self.model.name, 'Model successfully trained, tested and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model has a score of {1:.3f} against the test data.\"\\\n .format(self.model.estimator, self.model.score), self.model.score]]\n else:\n message = [[self.model.name, 'Model successfully trained and saved to disk.',\\\n time.strftime('%X %x %Z', time.localtime(self.model.state_timestamp)),\\\n \"{0} model score unknown as test_size was <= 0.\"\\\n .format(self.model.estimator), 
np.NaN]]\n \n self.response = pd.DataFrame(message, columns=['model_name', 'result', 'time_stamp', 'score_result', 'score'])\n \n # Send the reponse table description to Qlik\n self._send_table_description(\"fit\")\n \n # Debug information is printed to the terminal and logs if the paramater debug = true\n if self.model.debug:\n self._print_log(4)\n \n # Finally send the response\n return self.response",
"def fit_model(X, y):\n\n # Create cross-validation sets from the training data\n # sklearn version 0.18: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)\n # sklearn versiin 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)\n cv_sets = ShuffleSplit(n_splits=10, test_size=0.20, random_state=42)\n\n # TODO: Create a decision tree regressor object\n regressor = DecisionTreeRegressor()\n\n # TODO: Create a dictionary for the parameter 'max_depth' with a range from 1 to 10\n params = {'max_depth': np.arange(1, 11)}\n\n # TODO: Transform 'performance_metric' into a scoring function using 'make_scorer'\n scoring_fnc = make_scorer(performance_metric)\n\n # TODO: Create the grid search cv object --> GridSearchCV()\n # Make sure to include the right parameters in the object:\n # (estimator, param_grid, scoring, cv) which have values 'regressor', 'params', 'scoring_fnc', and 'cv_sets' respectively.\n grid = GridSearchCV(regressor, params, scoring=scoring_fnc, cv=cv_sets)\n\n # Fit the grid search object to the data to compute the optimal model\n grid = grid.fit(X, y)\n\n # Return the optimal model after fitting the data\n return grid.best_estimator_",
"def _fit(self):\n\n\t\tclf = LogisticRegression()\n\t\tclf.fit(inputs, labels)\n\n\t\treturn clf"
] | [
"0.61592203",
"0.608703",
"0.6083207",
"0.6022593",
"0.6017327",
"0.59459114",
"0.5923836",
"0.5880928",
"0.5872907",
"0.5844896",
"0.5699999",
"0.56314605",
"0.5628858",
"0.5625192",
"0.5571561",
"0.5568077",
"0.5566561",
"0.5545005",
"0.55440325",
"0.5529227",
"0.552391",
"0.55171263",
"0.5491852",
"0.5460279",
"0.5460279",
"0.5451983",
"0.5440325",
"0.54401076",
"0.54012907",
"0.5399419"
] | 0.67040175 | 0 |
Get the ECI for the cluster expansion. This just divides coefficients by the corresponding multiplicities. External terms are dropped since their fitted coefficients do not represent ECI. | def eci(self):
num_ext_terms = len(self._subspace.external_terms) # check for extra terms
coefs = self.coefs[:-num_ext_terms] if num_ext_terms else self.coefs[:]
eci = coefs.copy()
eci = eci / self._subspace.function_total_multiplicities
return eci | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eci(self):\n return self.__eci",
"def get_Ec(self):\n return self.Ec",
"def expansion(self, niters=-1):\n _cgco.gcoExpansion(self.handle, np.intc(niters), self.energyTempArray)\n return self._convertEnergyBack(self.energyTempArray[0])",
"def get_E(J,k):\n E = -2 * J * np.cos(k) # energyeigenvalue \n return E",
"def list_coe_clusters(self):\n return list(self.container_infrastructure_management.clusters())",
"def orbital_eccentricity(self):\n return self._orbital_eccentricity",
"def get_Ecc_n(self, eccType=\"ed\", r_power=2, order=2, where=\"\", orderBy=\"event_id\"):\n eccArray = self.getEccentricities(eccType=eccType, r_power=r_power, order=order, orderBy=orderBy)\n return eccArray[:,0] + 1j*eccArray[:,1]",
"def hecke_coeff(self, expansion, ch, (a,b,c), k) :\n character_eval = expansion.parent()._character_eval_function()\n \n ell = self.__l\n coeff = 0\n for t1 in self.__l_divisors[ell]:\n for t2 in self.__l_divisors[t1]:\n for V in self.get_representatives(t1/t2):\n aprime, bprime, cprime = self.change_variables(V,(a,b,c))\n if aprime % t1 == 0 and bprime % t2 == 0 and cprime % t2 == 0:\n try:\n coeff = coeff + character_eval(V, ch) * t1**(k-2)*t2**(k-1)*expansion[( ch, ((ell*aprime) //t1**2,\n (ell*bprime) //t1//t2,\n (ell*cprime) //t2**2) )]\n except KeyError, msg:\n raise ValueError, '%s' %(expansion,msg)\n return coeff",
"def _get_eci_chem_pot(self):\n bf = self.atoms.get_calculator().BC.basis_functions\n bf_change_vec = np.zeros((1, len(bf)))\n for i, func in enumerate(bf):\n for key, num in self.groups[0].items():\n bf_change_vec[0, i] += func[key] * num\n\n for key, num in self.groups[1].items():\n bf_change_vec[0, i] -= func[key] * num\n pinv = np.linalg.pinv(bf_change_vec)\n mu_vec = pinv.dot(np.array([self._chem_pot]))\n\n chem_pot_dict = {}\n for i in range(len(mu_vec)):\n chem_pot_dict[\"c1_{}\".format(i)] = mu_vec[i]\n return chem_pot_dict",
"def cse_elastic(energy, elements, stoic):\n i = 0\n cs_el = 0\n gamma = float(1.+energy/ElectronMass)\n beta = sqrt(1.-1./(gamma*gamma))\n for Z in elements:\n Z = float(Z)\n cs_el += 1.0e-14*1.4e-6*(Z**1.5)*(1.-(0.26*Z/(137.*beta)))/(beta*beta)*stoic[i]\n i += 1\n\n return cs_el",
"def aic_c(self):\n if hasattr(self, '_aic_c'):\n return self._aic_c\n else:\n k = len(self.params)\n n = self.data['n'].sum()\n self._aic_c = self.aic() + (2*k**2 + 2*k)/(n - k - 1)\n return self._aic_c",
"def ci_OLS(OLSMod):\r\n if hasattr(OLSMod, 'xtx'):\r\n xtx = OLSMod.xtx # (array) k x k projection matrix (includes constant)\r\n elif hasattr(OLSMod, 'hth'):\r\n xtx = OLSMod.hth # (array) k x k projection matrix (includes constant)\r\n diag = np.diagonal(xtx)\r\n scale = xtx/diag \r\n eigval = np.linalg.eigvals(scale)\r\n max_eigval = max(eigval)\r\n min_eigval = min(eigval)\r\n ci_result = sqrt(max_eigval/min_eigval)\r\n \r\n return ci_result",
"def exp(self):\n return ComplexNumber(\n math.cos(self.imaginary) * (math.e ** self.real),\n math.sin(self.imaginary) * (math.e ** self.real),\n )",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def energyK(k):\r\n C1 = 9.7846113e-07\r\n C2 = 12.263868e0 \r\n E = (-1.0 + np.sqrt(1.0 + 4.0 * C1 * C2**2 * k**2))/(2.0 * C1)\r\n return E",
"def make_cijkl_E_nu(E=200, nu=0.3):\n lambd = E * nu / (1 + nu) / (1 - 2 * nu)\n mu = E / 2 / (1 + nu)\n cij = np.zeros((6, 6))\n cij[(0, 1, 2), (0, 1, 2)] = lambd + 2 * mu\n cij[(0, 0, 1, 1, 2, 2), (1, 2, 0, 2, 0, 1)] = lambd\n cij[(3, 4, 5), (3, 4, 5)] = mu\n # check symmetry\n assert np.allclose(cij, cij.T)\n # convert to order 4 tensor\n coord_mapping = {\n (1, 1): 1,\n (2, 2): 2,\n (3, 3): 3,\n (2, 3): 4,\n (1, 3): 5,\n (1, 2): 6,\n (2, 1): 6,\n (3, 1): 5,\n (3, 2): 4,\n }\n\n cijkl = np.zeros((3, 3, 3, 3))\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n u = coord_mapping[(i + 1, j + 1)]\n v = coord_mapping[(k + 1, l + 1)]\n cijkl[i, j, k, l] = cij[u - 1, v - 1]\n return cijkl, cij",
"def computeEnergy(self):\n\t\tGmo = self.Gmo\n\t\te = self.e\n\t\tself.Ec = 0.0\n\n\t\tfor i in range( self.nocc ):\n\t\t\tfor j in range( self.nocc ):\n\t\t\t\tfor a in range( self.nocc,self.norb ):\n\t\t\t\t\tfor b in range( self.nocc,self.norb ):\n\t\t\t\t\t\tself.Ec += 0.25*(Gmo[i,j,a,b]*Gmo[a,b,i,j])/(e[i]+e[j]-e[a]-e[b])\n\n\t\treturn self.E0 + self.Ec",
"def EC(f):\n return dmp_ground_EC(f.rep, f.lev, f.dom)",
"def E0_sum(r, k, fiber_radius, eps_out, eps_in, E0_mod, nmin_sc, nmax_sc, case):\n\n # refractive index of the cylinder relative \n # to that of the surrounding medium\n m = np.sqrt(eps_in / eps_out)\n E0 = Mie_scat_cyl.Es(r[0], r[1], r[2], k, fiber_radius,\n m, E0_mod, nmin_sc, nmax_sc, case)\n\n r_car = pol2cart(r)\n kvec_car = np.array([-k, 0, 0]) # normal incidence\n exp_factor = np.exp(1j * np.dot(kvec_car, r_car))\n if case == 1:\n Einc_car = np.array([0, 0, E0_mod], dtype=complex) * exp_factor\n # Ez is the same in pol and in cart coordinates\n E0 += Einc_car\n elif case == 2:\n Einc_car = np.array([0, E0_mod, 0], dtype=complex) * exp_factor\n E0 += vec_cart2pol(r_car, Einc_car)\n\n return(E0)",
"def GetEigenvector(self, i):\n return _hypre.HypreAME_GetEigenvector(self, i)",
"def _calc_interaction_expansion(self):\n # preevaluate expansions for volume and surface phase functions\n # this returns symbolic code to be then further used\n\n volexp = self.V.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n brdfexp = self.SRF.legexpansion(self.t_0, self.t_ex,\n self.p_0, self.p_ex,\n self.geometry).doit()\n\n # preparation of the product of p*BRDF for coefficient retrieval\n # this is the eq.23. and would need to be integrated from 0 to 2pi\n fPoly = expand(2 * sp.pi * volexp * brdfexp)\n\n # do integration of eq. 23\n expr = self._integrate_0_2pi_phis(fPoly)\n\n # now we do still simplify the expression to be able to express\n # things as power series of cos(theta_s)\n theta_s = sp.Symbol('theta_s')\n replacements = [(sp.sin(theta_s) ** i,\n expand((1. - sp.cos(theta_s) ** 2)\n ** sp.Rational(i, 2)))\n for i in range(1, self.SRF.ncoefs + self.V.ncoefs - 1)\n if i % 2 == 0]\n\n res = expand(expr.xreplace(dict(replacements)))\n\n return res",
"def eigs(self):\n return np.concatenate(self.operator.eigenvalues)",
"def getEccentricities(self, eccType=\"ed\", r_power=2, order=2, where=\"\", orderBy=\"event_id\"):\n whereClause = \"ecc_id=%d and r_power=%d and n=%d\" % (self._ecc_id(eccType), r_power, order)\n if where:\n whereClause += \" and \" + where\n return np.asarray(self.db.selectFromTable(\"eccentricities\", (\"ecc_real, ecc_imag\"), whereClause=whereClause, orderByClause=orderBy))",
"def energy(self):\n nocc, ntot, gmo, e = self.nocc, self.ntot, self.gmo, self.e\n\n Ec = 0.0\n for i in range(nocc):\n for j in range(nocc):\n for a in range(nocc, ntot):\n for b in range(nocc, ntot):\n Ec += gmo[i, a, j, b]*(2*gmo[i, a, j, b] - gmo[i, b, j, a])/\\\n (e[i] + e[j] - e[a] - e[b])\n\n self.Ec = Ec\n self.E_mp2 = Ec + self.E_scf\n\n print('@MP2 correlation energy: {:15.10f}\\n'.format(self.Ec))\n print('@Total MP2 energy: {:15.10f}\\n'.format(self.E_mp2))\n\n return self.E_mp2",
"def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)",
"def get_e1_elec(mol, g1, atom, coord, complexsymmetric: bool, nelec,\n g0_ghf = None):\n\n if g0_ghf is None:\n m = scf.RHF(mol)\n m.verbose = 0\n m.kernel()\n g0_rhf = m.mo_coeff\n g0 = rhf_to_ghf(g0_rhf, nelec)\n else:\n g0 = g0_ghf\n\n p0 = get_p0(g0, complexsymmetric, nelec)\n p1 = get_p1(g0, g1, complexsymmetric, nelec)\n\n hcore0 = get_hcore0(mol)\n pi0 = get_pi0(mol)\n hcore1 = get_hcore1(mol, atom, coord)\n pi1 = get_pi1(mol, atom, coord)\n\n f0_prime_1e = hcore0\n f1_prime_1e = hcore1\n\n f0_prime_2e = 0.5 * np.einsum(\"ijkl,jl->ik\", pi0, p0)\n f1_prime_2e = (0.5 * np.einsum(\"ijkl,jl->ik\", pi1, p0)\n + 0.5 * np.einsum(\"ijkl,jl->ik\", pi0, p1))\n\n e1_elec_1e = (np.einsum(\"ij,ji->\",f0_prime_1e, p1)\n + np.einsum(\"ij,ji->\",f1_prime_1e, p0))\n e1_elec_2e = (np.einsum(\"ij,ji->\",f0_prime_2e, p1)\n + np.einsum(\"ij,ji->\",f1_prime_2e, p0))\n\n e1_elec = e1_elec_1e + e1_elec_2e\n\n return e1_elec",
"def GetEigenvector(self, i):\n return _hypre.HypreLOBPCG_GetEigenvector(self, i)",
"def eccentricity(self):\n return self.b / self.a",
"def getExponent(self):\n return _libsbml.ASTNode_getExponent(self)",
"def get_eccentricity(self, ellipse):\r\n a = ellipse.get_width()\r\n b = ellipse.get_height()\r\n if b > a:\r\n a, b = b, a\r\n c = np.sqrt(a**2 - b**2)\r\n return fdiv(c, a)"
] | [
"0.6564559",
"0.57694936",
"0.5583941",
"0.55318445",
"0.5458844",
"0.54127294",
"0.5366535",
"0.53557855",
"0.53367984",
"0.52990323",
"0.5223814",
"0.51811206",
"0.51741844",
"0.5160664",
"0.5150545",
"0.5145208",
"0.51438606",
"0.51201993",
"0.51041776",
"0.51036525",
"0.5095057",
"0.5080486",
"0.5067263",
"0.5063794",
"0.5053806",
"0.5045298",
"0.5018033",
"0.50161403",
"0.50136596",
"0.49981073"
] | 0.72708476 | 0 |
Get tuple of cluster interaction tensors. Tuple of ndarrays where each array is the interaction tensor for the corresponding orbit of clusters. | def cluster_interaction_tensors(self):
interaction_tensors = (self.coefs[0],) + tuple(
sum(
m * self.eci[orbit.bit_id + i] * tensor
for i, (m, tensor) in enumerate(
zip(orbit.bit_combo_multiplicities, orbit.correlation_tensors)
)
)
for orbit in self._subspace.orbits
)
return interaction_tensors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def interactions(self) -> Sequence[Interaction[_C_out, Tuple[int,...]]]:\n return self._simulation.interactions",
"def interactions(self) -> Sequence[Interaction[_C_out, _A_out]]:\n ...",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._interactions",
"def interactions(self) -> Sequence[Interaction[_C_out,_A_out]]:\n return self._simulation.interactions",
"def three_dimensional(self, z): # Maybe I misunderstood the task. My method looks weird\n return (self.x, self.y, z)",
"def _get_cluster_components(self):\n print(\"Connecting to cluster...\")\n self.cluster.connect_to_cluster()\n print(\"Connected!\")\n print(\"Collecting information from the cluster...\")\n return self.cluster.get_components()",
"def k_clusters(old_ops, max_outputs, mut):\n \n # DM construction\n matrix = starting_centroids(old_ops, max_outputs, mut)\n\n\n # Clustering\n seed = []\n for i in matrix.OPs:\n seed.append(i)\n centroids = cluster(old_ops, seed, mut)\n disto = distortion(centroids, old_ops, mut)\n\n return centroids, disto",
"def select_all_clusters(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM clusters\")\n\n rows = cur.fetchall()\n\n # for row in rows:\n # print(row)\n\n return np.array(rows)[:,0], np.array(rows)[:,2], np.array(rows)[:,3], np.array(rows)[:,4]",
"def clusters(self) -> ndarray:\n return self._clusters",
"def cluster_feature(feature_mat, k):\n whitened = whiten(feature_mat.transpose())\n centroid, distortion = kmeans(whitened, k)\n\n return centroid, distortion",
"def get_cluster_elements(self):\n \n copy = deepcopy(self.cluster_elements)\n return copy",
"def classify_clusters_o3d(cloud: object, labels: np.ndarray) -> Tuple[list, list]:\n cloud_np = np.asarray(cloud.points)\n indices = list(dict.fromkeys(labels))\n if (-1 in indices):\n indices.remove(-1)\n clusters = [[] for i in indices]\n for (i, point) in enumerate(cloud_np, start=0):\n if (labels[i] != -1):\n clusters[labels[i]].append(point)\n for el in clusters:\n el = np.vstack(el)\n cluster_objects = [Cluster(np.asarray(el), sensor='lidar') for el in clusters]\n\n return (cluster_objects, indices)",
"def get_atom_connectivity(self):\n m, connectivity = self.owner, []\n for index, i in enumerate(self.rix):\n for j in self.rix[index + 1:]:\n a1 = m.rings[i].aix\n a2 = m.rings[j].aix\n if set(a1).intersection(a2):\n connectivity.append((i, j))\n return tuple(connectivity)",
"def get_clusters(cluster_path): #{{{\n print 'loading cluster info'\n indicesToParticle = pickle.load(open(cluster_path+\"/verticesToParticle.p\",\"rb\"))\n indicesOnCluster = pickle.load(open(cluster_path+\"/verticesOnCell.p\",\"rb\"))\n maxIndices = pickle.load(open(cluster_path+\"/maxVertices.p\",\"rb\"))\n print 'done'\n\n return indicesToParticle, indicesOnCluster, maxIndices #}}}",
"def get_centroids(self, dim):\n cdef np.ndarray[float64, mode='c', ndim=2] out\n\n if dim == 0:\n return self.coors\n\n else:\n out = np.empty((self.mesh.topology.num[dim], self.dim),\n dtype=np.float64)\n mesh_get_centroids(self.mesh, &out[0, 0], dim)\n\n return out",
"def cluster(self):\n logger.debug(\"Beginning feature based clustering on %d clusters.\" % len(self.c2b))\n # Merge the two nearest clusters until we can't.\n #\n while self.mergeNearestClusters():\n pass\n logger.debug(\"After clustering, there are now %d clusters remaining.\" % len(self.c2b))\n return self.c2b.values()",
"def get_clusters(self):\r\n\r\n return self.__clusters",
"def clusters(self):\n return self._clusters",
"def clusters(self):\n raise NotImplementedError",
"def get_clusters(self):\n return self._clusters",
"def get_word_cluster_pairs(cls, clusters, words):\n\n print \"Getting the associations with clusters\", clusters\n\n associations = db.session.query(cls.cluster_id, cls.word).filter(\n cls.cluster_id.in_(clusters), cls.word.in_(words)).all()\n\n return associations",
"def cluster_ids(self):\n return self.model.cluster_ids",
"def clusters(self):\n\t\tif self._record is None:\n\t\t return []\n\t\tclusters = [i for i in self._record.features if i.type == 'cluster']\n\t\treturn clusters",
"def get_clusters_adjacencies(adjacency, clusters: list):\n clusters.sort(key=lambda t: len(t), reverse=True)\n id_to_cluster = get_id_to_cluster(clusters, adjacency.shape[0])\n num_clusters = len(clusters)\n mat = np.zeros((num_clusters, num_clusters))\n rows, cols = adjacency.nonzero()\n for i, j in zip(rows, cols):\n weight = adjacency[i, j]\n src_cluster = id_to_cluster[i]\n dest_cluster = id_to_cluster[j]\n mat[src_cluster, dest_cluster] += weight\n return mat",
"def co_vertexes(self):\n theta = self.orientation + np.pi / 2\n shifts = np.array([np.cos(theta), np.sin(theta)]) * self.b\n return self.coords + (shifts[:, None] * [-1, 1]).T",
"def get_cluster_dstructure(self, curs, mcl_id, splat_table, mcl_table):\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tcluster = self.get_basic_cluster_dstructure(curs, mcl_id, splat_table, mcl_table)\n\t\tif cluster:\t#not None\n\t\t\tcluster.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, cluster.vertex_set)\n\t\t\tcluster.go_no2information = self.get_information_of_go_functions(curs, cluster.go_no2association_genes, \\\n\t\t\t\tlen(cluster.vertex_set), no_of_total_genes)\n\t\t\tcluster.edge_cor_2d_list, cluster.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, cluster.edge_set)\n\t\t\t#graph = self.graph_from_node_edge_set(cluster.vertex_set, cluster.edge_set)\n\t\treturn cluster\n\t\t\n\t\t\"\"\"\n\t\tprint \"vertex_set\"\n\t\tprint cluster.vertex_set\n\t\tprint \"edge_set\"\n\t\tprint cluster.edge_set\n\t\trecurrence_list_2d = ['recurrence_array']+cluster.recurrence_array\n\t\trecurrence_list_2d_1 = ['recurrence_array_1']+cluster.recurrence_array\n\t\trecurrence_list_2d = [recurrence_list_2d, recurrence_list_2d_1]\n\t\tself.column_output('/tmp/yh/recurrence_array',recurrence_list_2d)\n\n\t\tprint cluster.splat_connectivity\n\t\tprint \"connectivity\"\n\t\tprint cluster.connectivity\n\t\tprint \"connectivity_original\"\n\t\tprint cluster.connectivity_original\n\t\tcor_list_2d = []\n\t\tsig_list_2d = []\n\t\tfor i in range(len(cluster.edge_set)):\n\t\t\tcor_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_cor_2d_list[i])\n\t\t\tsig_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_sig_2d_list[i])\n\t\tself.column_output('/tmp/yh/edge_cor_2d_list', cor_list_2d)\n\t\tself.column_output('/tmp/yh/edge_sig_2d_list', sig_list_2d)\n\n\t\tgo_no_list_2d = []\n\t\tfor go_no,information in cluster.go_no2information.iteritems():\n\t\t\tgo_no_list_2d.append(list(information)+[len(cluster.go_no2association_genes[go_no])])\n\t\t#self.column_output('/tmp/yh/go_no_list_2d', go_no_list_2d)\n\t\t\"\"\"",
"def nt_3d_centers(cif_file, consider_all_atoms):\n result =[]\n try:\n structure = MMCIFParser().get_structure(cif_file, cif_file)\n except Exception as e:\n warn(f\"\\n{cif_file.split('/')[-1]} : {e}\", error=True)\n with open(runDir + \"/errors.txt\", \"a\") as f:\n f.write(f\"Exception in nt_3d_centers({cif_file.split('/')[-1]})\\n\")\n f.write(str(e))\n f.write(\"\\n\\n\")\n return result\n for model in structure:\n for chain in model:\n for residue in chain:\n if consider_all_atoms:\n temp_list = []\n for atom in residue:\n temp_list.append(atom.get_coord())\n lg = len(temp_list)\n summ = np.sum(temp_list, axis = 0)\n res_isobaricentre = [summ[0]/lg, summ[1]/lg, summ[2]/lg]\n result.append([res_isobaricentre[0], res_isobaricentre[1], res_isobaricentre[2]])\n else:\n coordinates = None\n for atom in residue:\n if atom.get_name() == \"C1'\":\n coordinates = atom.get_coord()\n if coordinates is None:\n # Residue has no C1'\n res = np.nan\n else:\n res = [coordinates[0], coordinates[1], coordinates[2]]\n result.append(res)\n return(result)",
"def get_clusters(self):\n\n return self.__clusters",
"def cluster_items(xs: np.ndarray, k: int):\n kmeans = KMeans(n_clusters=k).fit(xs)\n\n centroids = kmeans.cluster_centers_\n labels = kmeans.labels_\n\n return centroids, labels",
"def get_entities(doc, clusters):\n ent_clusts = []\n for clust in clusters:\n ent_clust = []\n for (s, e) in clust:\n ent_clust.append(doc[s : e + 1])\n ent_clusts.append(ent_clust)\n return ent_clusts"
] | [
"0.5650275",
"0.5396234",
"0.5260104",
"0.52348673",
"0.51911455",
"0.5157701",
"0.5040121",
"0.50043637",
"0.49888366",
"0.496246",
"0.49001786",
"0.48809347",
"0.4871462",
"0.48085994",
"0.47944516",
"0.4790466",
"0.47883955",
"0.47878823",
"0.4782386",
"0.47713706",
"0.475943",
"0.474521",
"0.47138602",
"0.47014573",
"0.46986556",
"0.46804994",
"0.46771228",
"0.46578115",
"0.4638507",
"0.46369794"
] | 0.8034195 | 0 |
Get expansion structure. Prim structure with only sites included in the expansion (i.e. sites with partial occupancies) | def expansion_structure(self):
return self.cluster_subspace.expansion_structure | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getExpansion(self, data):\n pass",
"def test_get_systems_expanded(self):\n pass",
"def get_expansion(block, expansion=None):\n if isinstance(expansion, int):\n assert expansion > 0\n elif expansion is None:\n if hasattr(block, 'expansion'):\n expansion = block.expansion\n elif issubclass(block, BasicBlock):\n expansion = 1\n elif issubclass(block, Bottleneck):\n expansion = 4\n else:\n raise TypeError(f'expansion is not specified for {block.__name__}')\n else:\n raise TypeError('expansion must be an integer or None')\n return expansion",
"def get_expansion(block, expansion=None):\n if isinstance(expansion, int):\n assert expansion > 0\n elif expansion is None:\n if hasattr(block, 'expansion'):\n expansion = block.expansion\n elif issubclass(block, ViPNAS_Bottleneck):\n expansion = 1\n else:\n raise TypeError(f'expansion is not specified for {block.__name__}')\n else:\n raise TypeError('expansion must be an integer or None')\n return expansion",
"def test_get_projects_expanded(self):\n pass",
"def get_expanded_def_and_includes(self):\n self.expanded_def = {}\n self.includes = {}\n if 'merge' in self.sdef['df'].keys():\n self.process_merge(self.expanded_def, self.sdef['df']['merge'], self.includes)\n self.merge_def(self.expanded_def, self.sdef, self.includes)\n # merge any attributes to self.attributes for later processing\n if 'attributes' in self.expanded_def:\n self.attributes.update(self.expanded_def['attributes'])\n del self.expanded_def['attributes']",
"def get_expand(self):\n\n return self.props[\"expand\"]",
"def Find_Lowest_Energy_Structure_Electrostatics(self):\n n_Na = self.structure.composition['Na']\n n_S = self.structure.composition['S']\n n_O = self.structure.composition['O']\n n_N = self.structure.composition['N']\n n_Fe = self.structure.composition['Fe']\n\n n_Fe_reduced = self.variable_magnetization_dict['Fe']['n_reduced']\n n_Fe_oxidized = n_Fe-n_Fe_reduced \n\n N_charge = ( 2.*n_O-6.*n_S-n_Na-3.*n_Fe_oxidized-2.*n_Fe_reduced )/n_N\n\n oxidation_states = {'Na':+1, 'Fe':+3, 'O':-2,'S':+6,'N':N_charge}\n Fe_2plus = pymatgen.Specie('Fe',oxidation_state=+2)\n\n structure_with_charges = self.structure.copy()\n structure_with_charges.add_oxidation_state_by_element(oxidation_states) \n\n # identify Fe sites\n list_Fe_indices = []\n for i,site in enumerate(structure_with_charges):\n if site.specie.symbol == 'Fe':\n list_Fe_indices.append(i)\n\n # Generate all possible permutation of sites and compute \n # Ewald energy\n ewald_model = EwaldElectrostaticModel(acc_factor=6)\n list_reduced_sets = []\n list_ewald_energy = []\n for reduced_set in itertools.combinations(list_Fe_indices,n_Fe_reduced):\n list_reduced_sets.append(reduced_set) \n\n struct = structure_with_charges.copy()\n for i in reduced_set:\n struct.replace(i, Fe_2plus)\n\n list_ewald_energy.append(ewald_model.get_energy(struct))\n\n if len(list_ewald_energy) == 0:\n # all sites are oxidized. No sorting involved \n list_reduced_site_indices = []\n list_oxidized_site_indices = list_Fe_indices\n else:\n # some reduction takes place. Identify best electrostatic choice\n\n imin = np.argmin(list_ewald_energy)\n\n list_reduced_site_indices = list_reduced_sets[imin] \n list_oxidized_site_indices = []\n for i in list_Fe_indices:\n if i not in list_reduced_site_indices: \n list_oxidized_site_indices.append(i) \n\n\n return list_reduced_site_indices, list_oxidized_site_indices",
"def GetExpansionState(self):\n \n root = self._window.GetRootItem()\n if not root:\n return []\n if self._window.HasFlag(wx.TR_HIDE_ROOT):\n return self.GetExpansionStateOfChildren(root)\n else:\n return self.GetExpansionStateOfItem(root)",
"def expand_dictation_expansion(expansion):\n def is_unprocessed(e):\n if isinstance(e, AlternativeSet):\n jsgf_only_alt = False\n\n # Not necessarily dictation only, that scenario is handled by\n # expansion sequence and SequenceRule.\n dictation_alt = False\n for c in e.children:\n if dictation_in_expansion(c):\n dictation_alt = True\n else:\n jsgf_only_alt = True\n\n if jsgf_only_alt and dictation_alt:\n return True\n elif (isinstance(e, (OptionalGrouping, KleeneStar))\n and dictation_in_expansion(e)):\n return True\n else:\n return False\n\n def first_unprocessed_expansion(e):\n \"\"\"\n Find the first AlternativeSet or OptionalGrouping (if any) with both\n descendants containing and not containing Dictation expansions.\n :type e: Expansion\n :return: Expansion | None\n \"\"\"\n filtered = filter_expansion(e, is_unprocessed, TraversalOrder.PostOrder)\n if not filtered:\n return None\n else:\n return filtered[0]\n\n def find_expansion(e, goal):\n result = filter_expansion(e, lambda x: x == goal)\n if not result:\n return None\n else:\n return result[0]\n\n def process(e):\n \"\"\"\n Process an expansion recursively and return a list of expanded expansions.\n :type e: Expansion\n :return: list\n \"\"\"\n result = []\n current = first_unprocessed_expansion(e)\n\n # Handle cases where no processing is required\n if not current:\n return [e]\n\n copies = []\n if isinstance(current, AlternativeSet):\n dictation_children = [] # again, not necessarily only dictation.\n jsgf_only_children = []\n for child in current.children:\n if dictation_in_expansion(child):\n dictation_children.append(child)\n else:\n jsgf_only_children.append(child)\n\n # Create a replacements list, create copies of the expansion tree and\n # replace the copy of the AlternativeSet currently being processed\n if len(jsgf_only_children) == 1:\n replacements = jsgf_only_children\n else:\n replacements = [AlternativeSet(*jsgf_only_children)]\n replacements.extend(dictation_children)\n\n elif isinstance(current, (OptionalGrouping, KleeneStar)):\n # Handle not required - remove from a copy\n copy = deepcopy(current.root_expansion)\n copy_x = find_expansion(copy, current)\n copy_parent = copy_x.parent\n ancestor = copy_parent\n\n # Traverse up the parent tree and remove copy_x or one of its ancestors\n # where there is another child\n while ancestor:\n if ancestor.children > 1:\n ancestor.children.remove(copy_x)\n break\n\n copy_x = ancestor\n ancestor = ancestor.parent\n\n # copy_x or one of its ancestors was removed from the tree correctly\n # If this isn't true, the expansion is an empty tree and shouldn't be\n # added.\n if ancestor:\n copies.append(copy)\n\n # Let replacement loop handle required\n if isinstance(current, OptionalGrouping):\n replacements = [current.child]\n else:\n replacements = [Repeat(current.child)]\n else:\n replacements = []\n\n for replacement in replacements:\n # Find the copy of the current AlternativeSet being processed\n copy = deepcopy(current.root_expansion)\n copy_x = find_expansion(copy, current)\n copy_parent = copy_x.parent\n if copy_parent:\n index = copy_parent.children.index(copy_x)\n copy_parent.children.remove(copy_x)\n copy_parent.children.insert(index, replacement)\n else:\n # copy is the root expansion.\n copy = replacement\n copies.append(copy)\n\n for copy in copies:\n next_unprocessed = first_unprocessed_expansion(copy)\n if not next_unprocessed and copy not in result:\n result.append(copy)\n else:\n # Process the next unprocessed expansion and add the result\n 
result.extend(process(next_unprocessed))\n\n return result\n\n processed = process(expansion)\n\n return processed",
"def stack_search(start, expand, mode='bfs', build_inv=False):\r\n\r\n if mode not in ('bfs', 'dfs'):\r\n raise ValueError('mode should be bfs or dfs', mode)\r\n rval_set = set()\r\n rval_list = list()\r\n if mode == 'bfs':\r\n start_pop = start.popleft\r\n else:\r\n start_pop = start.pop\r\n expand_inv = {}\r\n while start:\r\n l = start_pop()\r\n if id(l) not in rval_set:\r\n rval_list.append(l)\r\n rval_set.add(id(l))\r\n expand_l = expand(l)\r\n if expand_l:\r\n if build_inv:\r\n for r in expand_l:\r\n expand_inv.setdefault(r, []).append(l)\r\n start.extend(expand_l)\r\n assert len(rval_list) == len(rval_set)\r\n if build_inv:\r\n return rval_list, expand_inv\r\n return rval_list",
"def get_structure_from_mp(formula):\n m = MPRester()\n entries = m.get_entries(formula, inc_structure=\"final\")\n if len(entries) == 0:\n raise ValueError(\"No structure with formula %s in Materials Project!\" %\n formula)\n elif len(entries) > 1:\n warnings.warn(\"%d structures with formula %s found in Materials \"\n \"Project. The lowest energy structure will be returned.\" %\n (len(entries), formula))\n return min(entries, key=lambda e: e.energy_per_atom).structure",
"def final_structure(self):\n final_structure = getattr(self.vasprun_obj, 'final_structure', None)\n if not final_structure:\n return None\n return get_data_node('structure', pymatgen=final_structure)",
"def to_pmg_structure(self):\n\n if not _pmg_present:\n raise ModuleNotFoundError(\"Pymatgen is not present. Please \"\n \"install Pymatgen and try again\")\n\n site_properties = {'force:': self.forces, 'std': self.stds}\n\n return pmgstruc.Structure(lattice=self.cell,\n species=self.species_labels,\n coords=self.positions,\n coords_are_cartesian=True,\n site_properties=site_properties\n )",
"def _full_structure_geometry(self):\n # Characterized borehole structures\n borehole_structures = self._characterize_shearzones()\n\n # Tunnel shearzone data\n tunnel_structures = self.tunnel_structures\n\n structures = pd.concat(\n [borehole_structures, tunnel_structures], ignore_index=True, sort=False\n )\n\n # Fill NaN-values in all columns to 0 except in column 'shearzone', for which we do nothing.\n structures = structures.fillna(\n value={**{s: 0 for s in borehole_structures}, **{\"shearzone\": np.nan}}\n )\n\n mapping = {\n \"x\": \"x\",\n \"y\": \"y\",\n \"z\": \"z\",\n \"depth\": \"depth\",\n \"upward_gradient\": \"upward_gradient\",\n \"azimuth\": \"azimuth_bh\",\n }\n borehole_to_global_coords(structures, **mapping)\n\n return structures",
"def expansion_steps(self):\n return self._p",
"def structures(self):\n pdb = self.name\n residues = self.__residues__(pdb)\n return Structure(list(residues), pdb=pdb)",
"def get_prob(cls, expansion, **given):\n fields = 'parent lmk rel deg'\n params = dict((f, None) for f in fields.split())\n params.update(given)\n return cls.query.filter_by(expansion=expansion, **params).one()",
"def getStructure(sname):\n x = resolve(vs_defs, sname.split(\".\"))\n if x is not None:\n return x()\n\n return None",
"def expansion_matrix_xl(self):\n return self._base_nlp.expansion_matrix_xl()",
"def __call__(self, data):\n return self.getExpansion(data)",
"def get_structure(self, sid):\n return Structure.from_dict(\n self.structures.get_entry(\n pk=sid, _fields=[\"lattice\", \"sites\", \"charge\"]\n ).result()\n )",
"def get_structure(self):\n return self.fragment.chain.model.structure",
"def structure(self):\n return self.cluster_subspace.structure",
"def _generateExpandableState(self, obj, **args):\n result = []\n if not args.get('mode', None):\n args['mode'] = self._mode\n args['stringType'] = 'expansion'\n indicators = self._script.formatting.getString(**args)\n state = obj.getState()\n if state.contains(pyatspi.STATE_EXPANDABLE):\n if state.contains(pyatspi.STATE_EXPANDED):\n result.append(indicators[1])\n else:\n result.append(indicators[0])\n return result",
"def test_get_software_set_expanded(self):\n pass",
"def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):\n structure_entry = self._lookup_course(structure_id, head_validation=head_validation)\n root = structure_entry.structure['root']\n result = self._load_items(structure_entry, [root], depth, **kwargs)\n return result[0]",
"def sst_expanded(self, is_expanded):\n self._p('[sst_expanded] {}'.format(int(bool(is_expanded))))",
"def get_structure(self):\n return self.structure",
"def test_expand_experiments():\n template_script = get_template_script()\n experiment_systems = utils.CombinatorialLeaf(['explicit-system', 'implicit-system', 'hydration-system'])\n template_script['experiments']['system'] = experiment_systems\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=1, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 2\n\n exp_builder = ExperimentBuilder(script=template_script, job_id=2, n_jobs=2)\n experiments = list(exp_builder._expand_experiments())\n assert len(experiments) == 1"
] | [
"0.5775837",
"0.5456535",
"0.54396933",
"0.5356344",
"0.514245",
"0.5062866",
"0.5056621",
"0.50404763",
"0.49497902",
"0.49316373",
"0.49132568",
"0.48724383",
"0.48683625",
"0.48390302",
"0.48267677",
"0.48048848",
"0.4803488",
"0.47871587",
"0.47768342",
"0.47751993",
"0.47552323",
"0.47523063",
"0.47492224",
"0.4736728",
"0.47343984",
"0.4717232",
"0.47055945",
"0.46847045",
"0.46744046",
"0.4660167"
] | 0.722945 | 0 |
Get Orbit ids corresponding to each ECI in the Cluster Expansion. If the Cluster Expansion includes external terms, these are not included in the list since they are not associated with any orbit. | def eci_orbit_ids(self):
return self._subspace.function_orbit_ids | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_electrode_indeces(electrical_series, electrode_ids):\n electrode_table_region = list(electrical_series.electrodes.to_dataframe().index)\n return [elect_idx for elect_idx, elect_id in enumerate(electrode_table_region) if elect_id in electrode_ids]",
"def get_eids(self):\n return [d['eid'] for d in self._json]",
"def getExpAccessions(cf):\n\tplatform = cf.get_parameter('platform')\n\tsrafetchxml = cf.get_input('srafetchxml')\n\tsraexplist = cf.get_output('sraexplist')\n\tsraxmlparser = SRAXMLParser()\n\truns = sraxmlparser.parse(srafetchxml)\n\twriter = csv.writer(open(sraexplist, 'wb'), quoting=csv.QUOTE_NONE)\n\twriter.writerow(['NCBISRAExpID'])\n\taccessions = []\n\tfor run in runs:\n\t\tif platform and \\\n\t\t\tnot run.platform == platform:\n\t\t\tcontinue\n\t\telif not run.exp_accession in accessions:\n\t\t\twriter.writerow([run.exp_accession])\n\t\t\taccessions.append(run.exp_accession)\n\tcf.write_log(\"GetExpAccessions: wrote %s experiment accessions\" % len(accessions))\n\treturn constants.OK",
"def interiors(self):\n return self.substrates.interiors",
"def get_indexes(self):\n indexes = []\n for c in self.components:\n indexes.extend(c.get_indexes())\n return indexes",
"def create_identity_list():\n identity_list = []\n\n # Note, just doing it like this incase some cards want to be removed from INDEXES dictionary\n # Add the rows (and cols) into the identity array\n for i in range(len(INDEXES)):\n identity_list.append([0.0 for j in range(len(INDEXES))])\n\n return identity_list",
"def getAngleIndices(self):\n coord_types, atom_indices = self.force_field.getInternalCoordinateDefinitions()\n angle_indices = np.where((coord_types == 'A') | (coord_types == 'D') | (coord_types == 'I'))[0]\n return angle_indices",
"def eula_ids(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"eula_ids\")",
"def eci(self):\n return self.__eci",
"def course_id_list(self):\r\n\r\n return self.q(css='article.course').attrs('id')",
"def getExons(self):\n rtrn = []\n for i in range(0,len(self.exonStarts)):\n rtrn.append(Interval(self.chr,self.exonStarts[i],self.exonEnds[i],self.strand,name = self.name+\"_exon_\"+str(i+1)))\n return rtrn",
"def jw_number_indices(n_electrons, n_qubits):\n occupations = itertools.combinations(range(n_qubits), n_electrons)\n indices = [sum([2**n for n in occupation]) for occupation in occupations]\n return indices",
"def complex_ids(self):\n\n return self._complex_ids",
"def _identities_iter(self):\n cc = self.coordinate_conversion\n for prop in (\"standard_name\", \"grid_mapping_name\"):\n n = cc.get_parameter(prop, None)\n if n is not None:\n yield f\"{prop}:{n}\"\n\n ncvar = self.nc_get_variable(None)\n if ncvar is not None:\n yield f\"ncvar%{ncvar}\"",
"def orthologueGeneIds(self):\n\t\tgeneIds = []\n\t\tfor geneId,row in self._dataframe.iterrows():\n\t\t\tfor item in row['Orthologue'].split(','):\t# looks like 'ENSG00003435:Gene1,ENSG00002525:Gene2' (multiple orthologues possible)\n\t\t\t\tif item.split(':')[0]: geneIds.append(item.split(':')[0])\n\t\treturn list(set(geneIds))",
"def cluster_interaction_tensors(self):\n interaction_tensors = (self.coefs[0],) + tuple(\n sum(\n m * self.eci[orbit.bit_id + i] * tensor\n for i, (m, tensor) in enumerate(\n zip(orbit.bit_combo_multiplicities, orbit.correlation_tensors)\n )\n )\n for orbit in self._subspace.orbits\n )\n return interaction_tensors",
"def getIDs(self):\n return self.multiengine.getIDs()",
"def cluster_ids(self):\n return self.model.cluster_ids",
"def get_es_ids(self):\n search = self.search.source(['uri']).sort(['uri'])\n es_ids = [item.meta.id for item in search.scan()]\n return es_ids",
"def i_coords(self):\n ref_x = np.arange(-self.ref_w / 2, self.ref_w / 2 + 0.002, 0.002)\n\n if self.ref_shape == 'c': # Curved reflector\n dist_coords1 = [(ref_x[i], pos_on_semicircle(ref_x[i], self.R, self.c_xy)) for i in range(self.I)]\n dist_coords2 = [(ref_x[i + 1], pos_on_semicircle(ref_x[i + 1], self.R, self.c_xy)) for i in range(self.I)]\n a_i = [distance(dist_coords1[i], dist_coords2[i]) for i in range(self.I)]\n\n cx_i = [ref_x[i] + (ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n cy_i = [pos_on_semicircle(x, self.R, self.c_xy) for x in cx_i]\n i_coords = list(zip(cx_i, cy_i))\n else: # Flat reflector\n a_i = [(ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n cx_i = [ref_x[i] + (ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n i_coords = [(x, self.h) for x in cx_i]\n d = {'ref_x': ref_x, 'A_i': a_i, 'I_coords': i_coords, 'cx_i': cx_i}\n\n return d",
"def get_conjugate_acids_of(chebi_ent):\n if hasattr(chebi_ent, 'OntologyParents'):\n return [ent.chebiId for ent in chebi_ent.OntologyParents if\n (ent.type == \"is conjugate acid of\")]\n else:\n return []",
"def ccf_ids(self):\n return self._ccf_ids",
"def seq_xref_ids(entry):\n\n xref_ids = []\n exon_data = exons(entry)\n for ids in xref_data(entry).values():\n for exon in exon_data:\n for xref_id in ids:\n key = \"{xref_id}-{gene_id}-{chr}:{start}..{stop}\".format(\n xref_id=xref_id,\n gene_id=primary_id(entry),\n chr=exon.chromosome_name,\n start=exon.primary_start,\n stop=exon.primary_end,\n )\n xref_ids.append((key, exon))\n\n return xref_ids",
"def getEccentricities(self, eccType=\"ed\", r_power=2, order=2, where=\"\", orderBy=\"event_id\"):\n whereClause = \"ecc_id=%d and r_power=%d and n=%d\" % (self._ecc_id(eccType), r_power, order)\n if where:\n whereClause += \" and \" + where\n return np.asarray(self.db.selectFromTable(\"eccentricities\", (\"ecc_real, ecc_imag\"), whereClause=whereClause, orderByClause=orderBy))",
"def object_ids(self):\n return self._extract_set('id')",
"def list_coe_clusters(self):\n return list(self.container_infrastructure_management.clusters())",
"def get_ids(self, text):\n\n tokens = [token.orth for token in self.tokenizer(text)]\n ids = []\n for token in tokens:\n try:\n id = self.vocab.vectors.key2row[token]\n except KeyError:\n id = self.oov_id\n\n ids.append(id)\n\n return ids",
"def all_orcids_embedded(self):\n results = [self.orcid_embedded]\n # Business rule: the author must be curated in order to consider her\n # author_record_metadata.json_model.orcid_embedded.\n if self.is_curated and self.recid:\n results.append(self.author_record_metadata.json_model.orcid_embedded)\n results = list(filter(bool, results))\n # Ensure all orcids actually have the same value.\n if len(results) > 1:\n for i in range(len(results)-1):\n if results[i]['value'] != results[i+1]['value']:\n # TODO specific exception\n raise Exception('This guy have multiple different orcids')\n return results",
"def orbital_eccentricity(self):\n return self._orbital_eccentricity",
"def getChipCoreAndCxId(layer):\n core_ids = []\n cx_ids = []\n chip_ids = []\n for id in layer.nodeIds:\n _, chip_id, core_id, cx_id, _, _ = layer.net.resourceMap.compartment(id)\n chip_ids.append(chip_id)\n core_ids.append(core_id)\n cx_ids.append(cx_id)\n return np.array(chip_ids), np.array(core_ids), np.array(cx_ids)"
] | [
"0.58896",
"0.5580595",
"0.5454563",
"0.54299253",
"0.54212594",
"0.54058343",
"0.5400522",
"0.5378499",
"0.5372512",
"0.53594655",
"0.53535104",
"0.53434646",
"0.5342312",
"0.5337998",
"0.5327088",
"0.53255314",
"0.53094417",
"0.5304125",
"0.5278686",
"0.52694434",
"0.52672243",
"0.52495384",
"0.52265245",
"0.52247083",
"0.52232885",
"0.52147883",
"0.5213274",
"0.52114856",
"0.52055055",
"0.52015483"
] | 0.75284874 | 0 |
Calculate the cluster weights. The cluster weights are defined as the weighted sum of the squared ECIs, where the weights are the ordering multiplicities. | def effective_cluster_weights(self):
weights = np.array(
[
np.sum(
self._subspace.function_ordering_multiplicities[
self._subspace.function_orbit_ids == i
]
* self.eci[self.eci_orbit_ids == i] ** 2
)
for i in range(len(self._subspace.orbits) + 1)
]
)
return weights | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_clusterable_weights(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def compute_weights(self):\n\n # Init lists\n weights, weights_k_idx = [], []\n for i_order in range(self.n_orders): # For each orders\n\n weights_n, k_idx_n = self.get_w(i_order) # Compute weights\n\n # Convert to sparse matrix\n # First get the dimension of the convolved grid\n n_kc = np.diff(self.i_bounds[i_order]).astype(int)[0]\n\n # Then convert to sparse\n weights_n = atoca_utils.sparse_k(weights_n, k_idx_n, n_kc)\n weights.append(weights_n), weights_k_idx.append(k_idx_n)\n\n return weights, weights_k_idx",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef",
"def get_weights(self):",
"def test_weighting_implementation():\n\n # generate two locusts of points\n npts = 100\n epsilon = 0.05\n # cluster 1\n coords1 = generate_locus_of_3d_points(npts, 0.1, 0.1, 0.1, epsilon=epsilon)\n # cluster 2\n coords2 = generate_locus_of_3d_points(npts, 0.9, 0.9, 0.9, epsilon=epsilon)\n\n # generate orientation vectors for cluster 1\n vectors1 = generate_aligned_vectors(len(coords1))\n\n # generate a random index value to check for each cluster\n idx = np.random.randint(npts)\n idx2 = np.random.randint(npts)\n\n # calculate dot product between vectors1 and cluster 2\n r = np.sqrt((0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2)\n # s, vector between coords1 and cluster2\n s = np.zeros((3))\n s[0] = coords2[idx2, 0] - coords1[idx, 0]\n s[1] = coords2[idx2, 1] - coords1[idx, 1]\n s[2] = coords2[idx2, 2] - coords1[idx, 2]\n\n # calculate dot product between orientation and direction between cluster 1 and 2\n angles = angles_between_list_of_vectors(vectors1[idx], s)\n costheta = np.cos(angles) # dot product between vectors\n\n idx_costheta = costheta\n\n # define radial bins\n rbins = np.array([0.0, 0.1, r + 2.0 * epsilon])\n\n # define weights appropiate for weighting function\n weights1 = np.zeros((npts, 4))\n weights1[idx] = 1.0\n weights1[:, 1] = vectors1[:, 0]\n weights1[:, 2] = vectors1[:, 1]\n weights1[:, 3] = vectors1[:, 2]\n weights2 = np.zeros(npts)\n weights2[idx2] = 1.0\n\n # calculate weighted counts\n\n # weighting 1\n # calculate weighted counts\n weighted_counts, counts = positional_marked_npairs_3d(\n coords1,\n coords2,\n rbins,\n period=None,\n weights1=weights1,\n weights2=weights2,\n weight_func_id=1,\n num_threads=1,\n )\n\n msg = \"weighted counts do not match expected result given the weighting function\"\n assert np.isclose(weighted_counts[-1], idx_costheta, rtol=0.01 / npts), msg",
"def get_weight(self):\n return self.W * self.get_z_mean()",
"def weights(self) -> List[float]:",
"def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))",
"def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = .5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc",
"def weights(self):\n return self.__weights",
"def weights(self):\n return self.__weights",
"def weights(self):\n return self.__weights",
"def weights(self):\n return self.__weights",
"def weights(self):\n return self.__weights",
"def _weightAndScaleClusters(self, features, featureGroups, clusterFeatures, weightingStrategy):\n # initialize structure\n weights = np.zeros(len(features))\n for f, feature in enumerate(features):\n # scale the data\n data = np.asarray(clusterFeatures[feature])\n # using Z normalization allows the data that is truly far apart to be streched,\n ## while data that is close together remains clustered.\n ## This does not work well if SMALL relative differences SHOULD make a big difference in clustering,\n ## or if LARGE relative distances should NOT make a big difference in clustering!\n loc, scale = mathUtils.normalizationFactors(data, mode='z')\n clusterFeatures[feature] = (data - loc)/scale\n # weight the data --> NOTE doesn't really work like we think it does!\n _, metric, ID = feature.split('|', 2)\n if weightingStrategy == 'uniform':\n weight = 1.0\n else:\n # TODO when this gets moved to an input spec, we won't need to check it here.\n ## for now, though, it's the only option.\n self.raiseAnError(RuntimeError, 'Unrecognized weighting strategy: \"{}\"!'.format(weightingStrategy))\n weights[f] = weight\n for f, feature in enumerate(features):\n clusterFeatures[feature] = clusterFeatures[feature] * weights[f]\n return clusterFeatures",
"def compute_geom_weights(self):\n weights = np.zeros([np.size(self._triangles, 0), 3])\n tris_pts = self._tris_pts\n for ipt in range(3):\n p0 = tris_pts[:, (ipt) % 3, :]\n p1 = tris_pts[:, (ipt+1) % 3, :]\n p2 = tris_pts[:, (ipt-1) % 3, :]\n alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])\n alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])\n # In the below formula we could take modulo 2. but\n # modulo 1. is safer regarding round-off errors (flat triangles).\n angle = np.abs(np.mod((alpha2-alpha1) / np.pi, 1.))\n # Weight proportional to angle up np.pi/2 ; null weight for\n # degenerated cases 0. and np.pi (Note that `angle` is normalized\n # by np.pi)\n weights[:, ipt] = 0.5 - np.abs(angle-0.5)\n return weights",
"def calculate_cost(self, medoids, clusters):\n cost = 0.0\n for i in range(0, len(medoids)):\n for j in range(0, len(clusters[i])):\n cost += distance.sqeuclidean(medoids[i], clusters[i][j])\n return cost\n pass",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def weights(self):\n return self._weights",
"def calculateWeights(self):\n return self.distances #En lo que encontramos una funcion que represente",
"def calculate_weight(self, element, total_cores_used, total_disk_used,\n total_memory_used):\n cpu_capacity = self.model.get_resource_from_id(\n resource.ResourceType.cpu_cores).get_capacity(element)\n\n disk_capacity = self.model.get_resource_from_id(\n resource.ResourceType.disk).get_capacity(element)\n\n memory_capacity = self.model.get_resource_from_id(\n resource.ResourceType.memory).get_capacity(element)\n\n score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /\n float(cpu_capacity))\n\n # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0\n if disk_capacity == 0:\n score_disk = 0\n else:\n score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /\n float(disk_capacity))\n\n score_memory = (\n 1 - (float(memory_capacity) - float(total_memory_used)) /\n float(memory_capacity))\n # TODO(jed): take in account weight\n return (score_cores + score_disk + score_memory) / 3",
"def get_weight(self):\n # FIXME: BELUM ADA KEPUTUSAN\n return 0",
"def weights(self):\r\n\t\treturn None",
"def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g",
"def _compute_weights(self):\n with variable_scope.variable_scope('compute_weights'):\n self.layer.W = nn_impl.l2_normalize(\n self.layer.v, axis=self.norm_axes) * self.layer.g",
"def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)",
"def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W"
] | [
"0.7190651",
"0.70356905",
"0.6451532",
"0.6440331",
"0.6369305",
"0.63520586",
"0.63319385",
"0.62741387",
"0.6229008",
"0.6212792",
"0.61735576",
"0.61735576",
"0.61735576",
"0.61735576",
"0.61735576",
"0.613884",
"0.61196315",
"0.6102148",
"0.6101437",
"0.6101437",
"0.6101437",
"0.6101437",
"0.6097487",
"0.60858065",
"0.60496974",
"0.60477704",
"0.60390514",
"0.60390514",
"0.60376996",
"0.60250014"
] | 0.82398254 | 0 |
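A brief note on the weight definition in the preceding row (illustrative only; the symbols below are my own notation, not taken from the dataset): if orbit $i$ groups correlation functions $f$ with ordering multiplicities $m_f$ and ECIs $J_f$, the per-orbit weight computed by the list comprehension in the document code is

$$w_i \;=\; \sum_{f \,\in\, \text{orbit } i} m_f \, J_f^{2}.$$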
Get the feature matrix used in the fit. If not given, an identity matrix of length num_corrs is returned. | def feature_matrix(self):
return self._feat_matrix | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def matrix_features(self):\n return self._matrix_features",
"def compute_feature_matrix(sequences, split, dinuc=False, model='cterm'):\n if model == 'cterm':\n X = compute_cterm_feature_matrix(sequences, split, dinuc=dinuc)\n else:\n X = compute_nterm_feature_matrix(sequences, split, dinuc=dinuc)\n return X",
"def get_feature_matrix(images, all_features):\n timestamp = time()\n heigth = len(all_features)\n width = len(images)\n\n feature_matrix = zeros((heigth, width))\n for y, feature in enumerate(all_features):\n for x, image in enumerate(images):\n feature_matrix[y][x] = feature.calculate(image)\n\n stdout.write(\"\\rget feature matrix: {}\\r\".format(time() - timestamp))\n\n return feature_matrix",
"def _make_random_matrix(self, n_components, n_features):",
"def get_feature_oriented_matrix(self):\n nbr_features = self.hyperparameters.time_series_depth\n matrix = np.ones(shape=(nbr_features, nbr_features), dtype=np.float)\n np.fill_diagonal(matrix, val=0)\n return matrix",
"def get_matrix(df, features, output):\n #add a constant column as coefficient for w0\n df[\"constant\"] = 1.0\n feature_x, output_y = df[features].astype(float), df[output].astype(int)\n return feature_x, output_y",
"def load_feature_matrix(src):\n feat_mat = smat_util.load_matrix(src)\n if isinstance(feat_mat, np.ndarray):\n feat_mat = np.ascontiguousarray(feat_mat)\n elif isinstance(feat_mat, smat.spmatrix):\n feat_mat = feat_mat.tocsr()\n feat_mat.sort_indices()\n return feat_mat",
"def get_games_features_matrix(tfidf_matrix: csr_matrix,\n games_genres_matrix: csr_matrix,\n price_matrix: csr_matrix,\n params: Dict[str, Any]) -> csr_matrix:\n logging.getLogger(__name__).debug('Games features matrix calculating...')\n if params['isOtherFeatures']:\n # scale games_genres_matrix\n games_genres_sum = np.log(np.sum(games_genres_matrix, axis=0)) + 1\n games_genres_matrix = games_genres_matrix / games_genres_sum\n # price_matrix has been scaled\n games_features_matrix = sp.hstack([tfidf_matrix, games_genres_matrix, price_matrix])\n else:\n games_features_matrix = tfidf_matrix\n logging.getLogger(__name__).debug('games_features_matrix.shape: ' + str(games_features_matrix.shape))\n return games_features_matrix",
"def matrix(self):\n if self._matrix is None:\n self._matrix = self.get_face_matrix()\n return self._matrix",
"def get_corrmat(self, f):\n return self._get_corrmat(f)",
"def build_feature_matrix(self, dataset):\n # Create the dictionary of feature functions if it is not created\n if len(features.features_fun_dict) == 0:\n i = 0\n for o in getmembers(features):\n if isfunction(o[1]):\n features.features_fun_dict[i] = o[1]\n i += 1\n features.num_features = len(features.features_fun_dict)\n\n matrix = np.zeros([dataset.shape[0], features.num_features])\n\n # For each sample in dataset, call every feature function and store its value\n for i in range(dataset.shape[0]):\n for j in range(features.num_features):\n args = getargspec(features.features_fun_dict[j]).args\n if len(args) == 2:\n matrix[i, j] = features.features_fun_dict[j](dataset[i], self.inv_vocab)\n else:\n matrix[i, j] = features.features_fun_dict[j](dataset[i])\n\n # Return sparse matrix with the features (needed by the classifier)\n return csr_matrix(matrix)",
"def confusion_matrix(self):\n return np.array([[self.tn, self.fp],\n [self.fn, self.tp]])",
"def __get_feature_mat(self, (cluster, articleID)):\n feat = self.feature_cache.get((cluster, articleID))\n\n if feat is None:\n feat = np.outer(self.user_feat[cluster],\n self.article_feat[articleID])\n self.feature_cache[(cluster, articleID)] = feat\n\n return feat",
"def get_users_features_matrix(games_features_matrix: csr_matrix, users_games_matrix: csr_matrix) -> csr_matrix:\n logging.getLogger(__name__).debug('Users features matrix calculating...')\n users_features_matrix = users_games_matrix * games_features_matrix\n logging.getLogger(__name__).debug('users_features_matrix.shape: ' + str(users_features_matrix.shape))\n return users_features_matrix",
"def get_feature_matrix(N, Xtrain, D):\n for i in range(D+1):\n if i == 0:\n X = [1] * N\n else:\n X = np.vstack([np.power(Xtrain, i), X])\n X = X.transpose()\n return X",
"def get_feature(self, *clip_id_or_ids):\n assert all(isinstance(e, int) for e in clip_id_or_ids), \\\n \"Not given an integer or a valid iterable over integers!\"\n\n with self._rw_lock.read_lock():\n # rows = num of IDs given, cols = width of feature matrix\n with SimpleTimer(\"Allocating return matrix\", self._log):\n # noinspection PyUnresolvedReferences\n # -> matrix class DOES have ``dtype`` property...\n ret_mat = matrix(ndarray((len(clip_id_or_ids),\n self._feature_mat.shape[1]),\n self._feature_mat.dtype))\n for i, cid in enumerate(clip_id_or_ids):\n feature_idx = self._cid2idx_map[cid]\n ret_mat[i, :] = self._feature_mat[feature_idx, :]\n return ret_mat",
"def getMatrix(self) -> CMatrix4:\n ...",
"def default_single_feature_X(self):\n X = 2 * np.random.rand(self.num_examples, self.num_features)\n ones = np.ones((self.num_examples, 1))\n return np.concatenate((ones, X), axis=1)",
"def nr_features(self):\n if self.is_predict_only:\n return clib.xlinear_get_int_attr(self.model_chain, \"nr_features\")\n else:\n return self.model_chain[0].nr_features",
"def feature_selection(feature_matrix, missing_threshold=90, correlation_threshold=0.95):\n \n feature_matrix = pd.get_dummies(feature_matrix)\n n_features_start = feature_matrix.shape[1]\n print('Original shape: ', feature_matrix.shape)\n\n # Find missing and percentage\n missing = pd.DataFrame(feature_matrix.isnull().sum())\n missing['percent'] = 100 * (missing[0] / feature_matrix.shape[0])\n missing.sort_values('percent', ascending = False, inplace = True)\n\n # Missing above threshold\n missing_cols = list(missing[missing['percent'] > missing_threshold].index)\n n_missing_cols = len(missing_cols)\n\n # Remove missing columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in missing_cols]]\n print('{} missing columns with threshold: {}.'.format(n_missing_cols,\n missing_threshold))\n \n # Zero variance\n unique_counts = pd.DataFrame(feature_matrix.nunique()).sort_values(0, ascending = True)\n zero_variance_cols = list(unique_counts[unique_counts[0] == 1].index)\n n_zero_variance_cols = len(zero_variance_cols)\n\n # Remove zero variance columns\n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in zero_variance_cols]]\n print('{} zero variance columns.'.format(n_zero_variance_cols))\n \n # Correlations\n corr_matrix = feature_matrix.corr()\n\n # Extract the upper triangle of the correlation matrix\n upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k = 1).astype(np.bool))\n\n # Select the features with correlations above the threshold\n # Need to use the absolute value\n to_drop = [column for column in upper.columns if any(upper[column].abs() > correlation_threshold)]\n\n n_collinear = len(to_drop)\n \n feature_matrix = feature_matrix[[x for x in feature_matrix if x not in to_drop]]\n print('{} collinear columns removed with threshold: {}.'.format(n_collinear,\n correlation_threshold))\n \n total_removed = n_missing_cols + n_zero_variance_cols + n_collinear\n \n print('Total columns removed: ', total_removed)\n print('Shape after feature selection: {}.'.format(feature_matrix.shape))\n return feature_matrix",
"def get_feature_importances(self):\n X,y = self.define_dataset(self.df, self.col_list, self.target_var)\n\n # execute search\n search = self.set_Randomized_search(self.model)\n\n X_train, X_test, y_train, y_test= self.holdout(X, y)\n X_train_sc, X_test_sc = self.scale(X_train, X_test)\n res = search.fit(X_train_sc, y_train)\n\n #model = self.set_model(self.model)\n\n\n if (self.model == \"Lasso\") | (self.model == \"Ridge\"):\n\n model = self.set_model(self.model)\n best = model.set_params(**res.best_params_)\n best.fit(X_train_sc,y_train)\n features = best.coef_\n\n else:\n #RandomForest or XGBoost\n model = self.set_model(self.model)\n best = model.set_params(**res.best_params_)\n best.fit(X_train_sc,y_train)\n features = pd.DataFrame(best.feature_importances_,\n index = X_train.columns,\n columns=['importance']).sort_values('importance', ascending=False)\n\n return features",
"def num_features(self):\n if self.x is None:\n return 0\n return 1 if self.x.dim() == 1 else self.x.size(1)",
"def get_confusion_matrix(self):\n return confusion_matrix(self.test_y, self.predict())",
"def cofactorMatrix(self):\n returnvalue = Matrix()\n for i in range(self._height):\n newRow = list()\n for j in range(self._width):\n newRow.append(self.cofactor(i, j))\n returnvalue.addRow(*newRow)\n return returnvalue",
"def get_matrix(self):\n return self._matrix[:3, :]",
"def predict_mat(self):\n mat = self.covs_mat.dot(self.alpha)\n return mat.reshape(self.shape)",
"def testLogisticRegression_MatrixData(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n classifier = tf.contrib.learn.DNNClassifier(\n feature_columns=cont_features,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=1))\n\n classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)\n scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.9)\n self.assertLess(scores['loss'], 0.3)",
"def testMultiClass_MatrixData(self):\n cont_features = [\n tf.contrib.layers.real_valued_column('feature', dimension=4)]\n\n classifier = tf.contrib.learn.DNNClassifier(\n n_classes=3,\n feature_columns=cont_features,\n hidden_units=[3, 3],\n config=tf.contrib.learn.RunConfig(tf_random_seed=1))\n\n classifier.fit(input_fn=_iris_input_multiclass_fn, steps=200)\n self.assertTrue('centered_bias_weight' in classifier.get_variable_names())\n scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=1)\n self.assertGreater(scores['accuracy'], 0.8)\n self.assertLess(scores['loss'], 0.3)",
"def _make_random_matrix(self, n_components, n_features):\n #random_state = check_random_state(self.random_state)\n return _gaussian_random_matrix(\n n_components, n_features, random_state=self.random_state\n )",
"def modeling(matrix):\n cv = CountVectorizer()\n cv_fit = cv.fit_transform(matrix)\n return cv, cv_fit"
] | [
"0.5865127",
"0.5651641",
"0.55444366",
"0.547327",
"0.538161",
"0.53447926",
"0.52134264",
"0.52046114",
"0.51970637",
"0.5180495",
"0.5131346",
"0.51012635",
"0.5094566",
"0.5063523",
"0.50486535",
"0.5048352",
"0.503609",
"0.5031869",
"0.50282717",
"0.5007706",
"0.5006746",
"0.50027597",
"0.49543795",
"0.49299183",
"0.49283648",
"0.49228966",
"0.49153244",
"0.4889923",
"0.48688915",
"0.48660997"
] | 0.6299052 | 0 |
Compute the vector of cluster interaction values for a given structure. A cluster interaction is simply the sum of all cluster expansion terms over the same orbit, so the vector has one entry per orbit. | def cluster_interactions_from_structure(
self, structure, normalized=True, scmatrix=None, site_mapping=None
):
if scmatrix is None:
scmatrix = self._subspace.scmatrix_from_structure(structure)
occu = self.cluster_subspace.occupancy_from_structure(
structure, scmatrix=scmatrix, site_mapping=site_mapping, encode=True
)
indices = self._subspace.get_orbit_indices(scmatrix)
interactions = self._subspace.evaluator.interactions_from_occupancy(
occu, indices.container
)
if not normalized:
interactions *= self._subspace.num_prims_from_matrix(scmatrix)
return interactions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cluster_interaction_tensors(self):\n interaction_tensors = (self.coefs[0],) + tuple(\n sum(\n m * self.eci[orbit.bit_id + i] * tensor\n for i, (m, tensor) in enumerate(\n zip(orbit.bit_combo_multiplicities, orbit.correlation_tensors)\n )\n )\n for orbit in self._subspace.orbits\n )\n return interaction_tensors",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def compute_covar_from_instance_centroids(instance_centroids):\n\n cov_mat_allStructures = {}\n radii_allStructures = {}\n ellipsoid_matrix_allStructures = {}\n for name_s, centroids in sorted(instance_centroids.items()):\n centroids2 = np.array(centroids)\n cov_mat = np.cov(centroids2.T)\n cov_mat_allStructures[name_s] = cov_mat\n u, s, vt = np.linalg.svd(cov_mat)\n # print name_s, u[:,0], u[:,1], u[:,2],\n radii_allStructures[name_s] = np.sqrt(s)\n ellipsoid_matrix_allStructures[name_s] = vt\n\n return cov_mat_allStructures, radii_allStructures, ellipsoid_matrix_allStructures",
"def enumerate_clusterings(self):\n\n # Initialize an empty list of clusterings. Each element of the list\n # is a dictionary mapping NOEs to the signatures they are clustered to\n # in a solution. Each clustering is initialize with all uniquely\n # clusterable NOEs as keys mapping to their unique clusters\n\n clusterings = []\n\n while True:\n\n # Run the solver and get a solution back\n\n solution = self.solve()\n\n # If UNSAT, then flush aux clauses from the formula and return\n # all the clusterings we found so far\n\n if not solution:\n self.flush()\n return clusterings\n\n # Iterate over the clustering variables set to true by in the\n # discovered solution. Forbid this clustering from reoccuring and\n # add it to the list of found clusterings\n\n clause = []\n clustering = {}\n for node in self.clustering_variables.keys():\n if len(node.clusters) == 1:\n clustering[node] = list(node.clusters)[0]\n\n for vtype, node, cluster in solution:\n if vtype == Formula.CST_VAR:\n clustering[node] = cluster\n clause.append(-self.clustering_variables[node][cluster])\n\n self.add_clause(clause)\n clusterings.append(clustering)",
"def effective_cluster_weights(self):\n weights = np.array(\n [\n np.sum(\n self._subspace.function_ordering_multiplicities[\n self._subspace.function_orbit_ids == i\n ]\n * self.eci[self.eci_orbit_ids == i] ** 2\n )\n for i in range(len(self._subspace.orbits) + 1)\n ]\n )\n return weights",
"def _get_eci_chem_pot(self):\n bf = self.atoms.get_calculator().BC.basis_functions\n bf_change_vec = np.zeros((1, len(bf)))\n for i, func in enumerate(bf):\n for key, num in self.groups[0].items():\n bf_change_vec[0, i] += func[key] * num\n\n for key, num in self.groups[1].items():\n bf_change_vec[0, i] -= func[key] * num\n pinv = np.linalg.pinv(bf_change_vec)\n mu_vec = pinv.dot(np.array([self._chem_pot]))\n\n chem_pot_dict = {}\n for i in range(len(mu_vec)):\n chem_pot_dict[\"c1_{}\".format(i)] = mu_vec[i]\n return chem_pot_dict",
"def com(self):\n\n\t\tcom = vector3d()\n\n\t\tcom.x = 0.0; com.y = 0.0; com.z = 0.0\n\t\tnAt = 0.0\n\n\t\tfor chain in self.chain:\n\t\t\tfor residue in chain.residue:\n\t\t\t\tfor atom in residue.atom:\n\t\t\t\t\tcom.x += atom.coord.x\n\t\t\t\t\tcom.y += atom.coord.y\n\t\t\t\t\tcom.z += atom.coord.z\n\n\t\t\t\t\tnAt += 1.0\n\n\t\tif nAt == 0:\n\t\t\tprint \"ERROR: zero atoms present for COM calculation!\"\n\t\t\tsys.exit()\n\n\t\tcom /= nAt\n\t\treturn com",
"def classify_defect_clusters_modifier(frame, data):\n\n if data.particles.count == 0:\n # No particles there to classify, create empty properties anyway\n data.particles_.create_property('Si_V', dtype=int, components=1)\n data.particles_.create_property('Si_I', dtype=int, components=1)\n data.particles_.create_property('Si_C', dtype=int, components=1)\n data.particles_.create_property('C_V', dtype=int, components=1)\n data.particles_.create_property('C_I', dtype=int, components=1)\n data.particles_.create_property('C_Si', dtype=int, components=1)\n return\n\n # TODO Create numpy arrays containing the number of Si vacancies,\n # interstitials, etc for each particle site in `data.particles`. These\n # next lines are just placeholders!\n si_vacancy = data.particles[\"vacancy_mask\"][...] * data.particles[\"Is Si Site\"][...]\n si_interstitial = (data.particles[\"Is Si Site\"][...] & (data.particles[\"Si Occupancy\"][...] > 1)) * (\n data.particles[\"Si Occupancy\"][...] - 1) + (\n (data.particles[\"Is C Site\"][...]) * data.particles[\"Si Occupancy\"][...]) - (\n data.particles[\"Is C Site\"][...] & data.particles[\"antisite_mask\"][...])\n si_antisite = data.particles[\"antisite_mask\"][...] * data.particles[\"Is Si Site\"][...]\n c_vacancy = data.particles[\"vacancy_mask\"][...] * data.particles[\"Is C Site\"][...]\n c_interstitial = (data.particles[\"Is C Site\"][...] & (data.particles[\"C Occupancy\"][...] > 1)) * (\n data.particles[\"C Occupancy\"][...] - 1) + (\n (data.particles[\"Is Si Site\"][...]) * data.particles[\"C Occupancy\"][...]) - (\n data.particles[\"Is Si Site\"][...] & data.particles[\"antisite_mask\"][...])\n c_antisite = data.particles[\"antisite_mask\"][...] * data.particles[\"Is C Site\"][...]\n\n\n data.particles_.create_property('Si_V', data=si_vacancy.astype(int))\n data.particles_.create_property('Si_I', data=si_interstitial.astype(int))\n data.particles_.create_property('Si_C', data=si_antisite.astype(int))\n data.particles_.create_property('C_V', data=c_vacancy.astype(int))\n data.particles_.create_property('C_I', data=c_interstitial.astype(int))\n data.particles_.create_property('C_Si', data=c_antisite.astype(int))",
"def GlobalClusteringCoefficient(graph):\n coef = np.mean(list(nx.clustering(graph).values()))\n return coef",
"def get_modularity3(adjacency, clusters):\n\n rows, cols = adjacency.shape\n num_ids = adjacency.shape[0]\n id_to_cluster = get_id_to_cluster(clusters, num_ids)\n degrees = np.sum(adjacency, axis=1)\n total_weight = np.sum(adjacency)\n sum = 0\n for i in range(rows):\n for j in range(cols):\n if id_to_cluster[i] == id_to_cluster[j]:\n sum += adjacency[i, j] - (degrees[i] * degrees[j]) / total_weight\n sum = sum / total_weight\n return sum",
"def predict(self, structure, normalized=False, scmatrix=None, site_mapping=None):\n corrs = self.cluster_subspace.corr_from_structure(\n structure,\n scmatrix=scmatrix,\n normalized=normalized,\n site_mapping=site_mapping,\n )\n return np.dot(self.coefs, corrs)",
"def sum_cluster(self, labelled_cluster):\n # assumes len(cluster) > 0\n sum_ = labelled_cluster[0][1].copy()\n for (label, vector) in labelled_cluster[1:]:\n sum_ += vector\n\n if self.sigma_cl1:\n sum_ = self.add_gaussian(sum_, np.sqrt(2)*self.sigma_cl1)\n\n return sum_",
"def get_cluster_dstructure(self, curs, mcl_id, splat_table, mcl_table):\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tcluster = self.get_basic_cluster_dstructure(curs, mcl_id, splat_table, mcl_table)\n\t\tif cluster:\t#not None\n\t\t\tcluster.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, cluster.vertex_set)\n\t\t\tcluster.go_no2information = self.get_information_of_go_functions(curs, cluster.go_no2association_genes, \\\n\t\t\t\tlen(cluster.vertex_set), no_of_total_genes)\n\t\t\tcluster.edge_cor_2d_list, cluster.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, cluster.edge_set)\n\t\t\t#graph = self.graph_from_node_edge_set(cluster.vertex_set, cluster.edge_set)\n\t\treturn cluster\n\t\t\n\t\t\"\"\"\n\t\tprint \"vertex_set\"\n\t\tprint cluster.vertex_set\n\t\tprint \"edge_set\"\n\t\tprint cluster.edge_set\n\t\trecurrence_list_2d = ['recurrence_array']+cluster.recurrence_array\n\t\trecurrence_list_2d_1 = ['recurrence_array_1']+cluster.recurrence_array\n\t\trecurrence_list_2d = [recurrence_list_2d, recurrence_list_2d_1]\n\t\tself.column_output('/tmp/yh/recurrence_array',recurrence_list_2d)\n\n\t\tprint cluster.splat_connectivity\n\t\tprint \"connectivity\"\n\t\tprint cluster.connectivity\n\t\tprint \"connectivity_original\"\n\t\tprint cluster.connectivity_original\n\t\tcor_list_2d = []\n\t\tsig_list_2d = []\n\t\tfor i in range(len(cluster.edge_set)):\n\t\t\tcor_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_cor_2d_list[i])\n\t\t\tsig_list_2d.append([repr(cluster.edge_set[i])]+cluster.edge_sig_2d_list[i])\n\t\tself.column_output('/tmp/yh/edge_cor_2d_list', cor_list_2d)\n\t\tself.column_output('/tmp/yh/edge_sig_2d_list', sig_list_2d)\n\n\t\tgo_no_list_2d = []\n\t\tfor go_no,information in cluster.go_no2information.iteritems():\n\t\t\tgo_no_list_2d.append(list(information)+[len(cluster.go_no2association_genes[go_no])])\n\t\t#self.column_output('/tmp/yh/go_no_list_2d', go_no_list_2d)\n\t\t\"\"\"",
"def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means",
"def get_eccentricity(self, h_arr, k_arr):\n n = len(self.planets)\n h, k = h_arr, k_arr\n eccentricities = []\n for j in range(n): \n # eccentricities.append(np.sqrt(h[j]**2+k[j]**2))\n eccentricities.append(np.real(np.sqrt(h[j]*np.conjugate(h[j])+k[j]*np.conjugate(k[j]))))\n return np.array(eccentricities)",
"def _get_cluster_orbit(cluster, ofile, advance=False, **kwargs):\n nsnap = int(kwargs.get(\"nsnap\", cluster.nsnap))\n ounits = kwargs.get(\"ounits\", None)\n\n # Read in orbital information from orbit\n if nsnap != 0 and not advance:\n for i in range(0, int(nsnap) + 1):\n data = ofile.readline().split()\n else:\n data = ofile.readline().split()\n\n if \"gc_orbit.dat\" in ofile.name:\n # Saved orbit from doing a grep of NBODY6 or NBODY6++ logfile\n\n if len(data) == 18:\n xgc = float(data[9])\n ygc = float(data[10])\n zgc = float(data[11])\n vxgc = float(data[12])\n vygc = float(data[13])\n vzgc = float(data[14])\n else:\n xgc = float(data[8])\n ygc = float(data[9])\n zgc = float(data[10])\n vxgc = float(data[11])\n vygc = float(data[12])\n vzgc = float(data[13])\n else:\n tphys = float(data[0])\n xgc = float(data[1])\n ygc = float(data[2])\n zgc = float(data[3])\n vxgc = float(data[4])\n vygc = float(data[5])\n vzgc = float(data[6])\n\n if cluster.tphys == 0.0:\n cluster.tphys = tphys\n\n if ounits == None and \"gc_orbit.dat\" in ofile.name:\n ounits = \"kpckms\"\n\n cluster.add_orbit(xgc, ygc, zgc, vxgc, vygc, vzgc, ounits)\n\n return",
"def find_clusters(struct, connected_matrix):\n n_atoms = len(struct.species)\n if n_atoms == 0:\n return [0, 0, 0]\n if 0 in np.sum(connected_matrix, axis=0):\n return [0, 1, 0]\n\n cluster_sizes = []\n clusters = []\n visited = [False for item in range(n_atoms)]\n connected_matrix += np.eye(len(connected_matrix))\n\n def visit(atom, atom_cluster):\n visited[atom] = True\n new_cluster = set(np.where(connected_matrix[atom] != 0)[0]).union(atom_cluster)\n atom_cluster = new_cluster\n for new_atom in atom_cluster:\n if not visited[new_atom]:\n visited[new_atom] = True\n atom_cluster = visit(new_atom, atom_cluster)\n return atom_cluster\n\n for i in range(n_atoms):\n if not visited[i]:\n atom_cluster = set()\n cluster=visit(i, atom_cluster)\n clusters.append(cluster)\n cluster_sizes.append(len(cluster))\n\n max_cluster = max(cluster_sizes)\n min_cluster = min(cluster_sizes)\n return [max_cluster, min_cluster, clusters]",
"def get_repulsion_vector(item: str) -> np.ndarray:\n return self._centroids[np.random.choice(cluster_ids, p=sim_map[item])]",
"def nt_3d_centers(cif_file, consider_all_atoms):\n result =[]\n try:\n structure = MMCIFParser().get_structure(cif_file, cif_file)\n except Exception as e:\n warn(f\"\\n{cif_file.split('/')[-1]} : {e}\", error=True)\n with open(runDir + \"/errors.txt\", \"a\") as f:\n f.write(f\"Exception in nt_3d_centers({cif_file.split('/')[-1]})\\n\")\n f.write(str(e))\n f.write(\"\\n\\n\")\n return result\n for model in structure:\n for chain in model:\n for residue in chain:\n if consider_all_atoms:\n temp_list = []\n for atom in residue:\n temp_list.append(atom.get_coord())\n lg = len(temp_list)\n summ = np.sum(temp_list, axis = 0)\n res_isobaricentre = [summ[0]/lg, summ[1]/lg, summ[2]/lg]\n result.append([res_isobaricentre[0], res_isobaricentre[1], res_isobaricentre[2]])\n else:\n coordinates = None\n for atom in residue:\n if atom.get_name() == \"C1'\":\n coordinates = atom.get_coord()\n if coordinates is None:\n # Residue has no C1'\n res = np.nan\n else:\n res = [coordinates[0], coordinates[1], coordinates[2]]\n result.append(res)\n return(result)",
"def SquareClusteringCoefficient(graph):\n coef = np.mean(list(nx.square_clustering(graph).values()))\n return coef",
"def evaluate_clusters(self, cluster_formulas, value='weighted_sum'):\n num_elems = len(self.labels)\n total_val = {}\n num_cl = len(cluster_formulas)\n clustered_points_num = 0\n print(\"\\n\\n\")\n print(\"Sufficiently big clusters: {}\".format(num_cl))\n for c, formula, val in cluster_formulas:\n c_size = len([l for l in self.labels if l == c])\n clustered_points_num += c_size\n\n if value == 'weighted_sum':\n total_val[c] = val * c_size / num_elems\n elif value == 'sum':\n total_val[c] = val * 1\n\n clust_val = sum(total_val.values())\n self.clustering_value = total_val\n print(\"Value of clustering: {}\".format(clust_val))\n return clust_val",
"def compute_centrifugal(self):\r\n # update the coordinates\r\n self.get_coords()\r\n\r\n # compute the centrifugal force\r\n self.centrifugal.assign(project(\r\n -1*self.rho*cross(self.omega, cross(self.omega, self.r)), self.V))",
"def evaluate(self, clustering):\n # Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements",
"def _get_amuse_particles(\n particles, units=\"kpckms\", origin=\"galaxy\", ofile=None, **kwargs\n):\n\n cluster = StarCluster(\n len(particles),\n tphys=0.0,\n units=units,\n origin=origin,\n ctype=\"amuse\",\n **kwargs\n )\n i_d = np.linspace(1, len(particles), len(particles), dtype=\"int\")\n\n m = particles.mass.value_in(u.MSun)\n\n if units == \"pckms\":\n x = particles.x.value_in(u.parsec)\n y = particles.y.value_in(u.parsec)\n z = particles.z.value_in(u.parsec)\n vx = particles.vx.value_in(u.kms)\n vy = particles.vy.value_in(u.kms)\n vz = particles.vz.value_in(u.kms)\n\n elif units == \"kpckms\":\n x = particles.x.value_in(u.kpc)\n y = particles.y.value_in(u.kpc)\n z = particles.z.value_in(u.kpc)\n vx = particles.vx.value_in(u.kms)\n vy = particles.vy.value_in(u.kms)\n vz = particles.vz.value_in(u.kms)\n\n else:\n print(\"PLEASE SPECIFY UNITS\")\n return 0\n\n cluster.add_stars(x, y, z, vx, vy, vz, m, i_d, do_key_params=True)\n\n if origin == \"galaxy\":\n if ofile == None:\n cluster.find_centre()\n else:\n _get_cluster_orbit(cluster, ofile, advance=advance, **kwargs)\n\n if kwargs.get(\"do_key_params\", True):\n do_order=kwargs.get(\"do_key_params\", True)\n cluster.to_cluster()\n cluster.find_centre()\n cluster.to_centre(do_key_params=True, do_order=do_order)\n cluster.to_galaxy()\n\n elif origin == \"cluster\":\n if kwargs.get(\"do_key_params\", True):\n do_order=kwargs.get(\"do_key_params\", True)\n # Estimate centre of distribution\n cluster.find_centre()\n cluster.to_centre(do_key_params=True, do_order=do_order)\n cluster.to_cluster()\n\n if ofile != None:\n _get_cluster_orbit(cluster, ofile, advance=advance, **kwargs)\n\n return cluster",
"def calculate(self) -> complex128:\n\n if not self.__can_calculation_be_performed():\n raise AttributeError\n\n v_vectors = self.__calculate_v_vectors()\n\n permanent = 0\n for v_vector in v_vectors:\n v_sum = sum(v_vector)\n addend = pow(-1, v_sum)\n # Binomials calculation\n for i in range(len(v_vector)):\n addend *= binom(self.__input_state[i], v_vector[i])\n\n # Product calculation\n product = 1\n for j in range(len(self.__input_state)):\n if self.__output_state[j] == 0: # There's no reason to calculate the sum if t_j = 0\n continue\n # Otherwise we calculate the sum\n product_part = 0\n for i in range(len(self.__input_state)):\n product_part += (self.__input_state[i] - 2 * v_vector[i]) * self.__matrix[j][i]\n product_part = pow(product_part, self.__output_state[j])\n product *= product_part\n addend *= product\n permanent += addend\n\n permanent /= pow(2, sum(self.__input_state))\n\n return permanent",
"def calculate_cb_vecs(self, clusters):\n if not clusters or not clusters[0]:\n return None\n\n # :param:`n` is the dimension of the vectors\n n = len(clusters[0][0])\n # Initialize the codebook vectors to 0\n cb_vectors = np.zeros([n * self.K]).reshape(self.K, n)\n for i in range(self.K):\n sum = np.zeros([n], dtype=np.uint).reshape(1, n)\n for vector in clusters[i]:\n sum += vector\n # divide the sum of the vectors by the size of the cluster\n cb_vectors[i] = np.divide(sum, len(clusters[i]))\n return cb_vectors",
"def clustering(self) -> 'outputs.ClusteringResponse':\n return pulumi.get(self, \"clustering\")",
"def __cluster_simi(self, i, j):\n sum_ = 0.\n for si in self.__indexclusters[i]:\n for sj in self.__indexclusters[j]:\n simi = self.__sample_simi(si, sj)\n sum_ += simi\n return sum_ / (len(self.__indexclusters[i]) * len(self.__indexclusters[j]))",
"def get_soap_vec(struct: Structure) -> NDArray:\n adaptor = AseAtomsAdaptor()\n species_ = [str(el) for el in struct.composition.elements]\n dummy_structure = struct.copy()\n for el in species_:\n dummy_structure.replace_species({str(el): DUMMY_SPECIES})\n soap_desc = SOAP(species=[DUMMY_SPECIES], r_cut=5, n_max=8, l_max=6, periodic=True)\n vecs = soap_desc.create(adaptor.get_atoms(dummy_structure))\n return vecs",
"def vector(molec, dihed, nonH, energy):\n #Torison\n if dihed:\n pass\n #XYZ\n else:\n coords = ()\n if nonH:\n for atom in molec.atoms:\n coords += atom.coords\n else:\n for atom in molec.atoms:\n if atom.atomicnum > 1:\n coords += atom.coords\n #Energy\n if energy:\n coords += (molec.energy/10.0,)\n return coords"
] | [
"0.59573764",
"0.56333023",
"0.54057556",
"0.53692955",
"0.52924997",
"0.51834893",
"0.51754993",
"0.5105054",
"0.50476855",
"0.5041256",
"0.50108224",
"0.50036925",
"0.49943978",
"0.49627247",
"0.4951808",
"0.49466053",
"0.49453312",
"0.4925825",
"0.49249965",
"0.4916185",
"0.48957744",
"0.48939958",
"0.48905924",
"0.48876455",
"0.48856544",
"0.4883854",
"0.48677883",
"0.48584744",
"0.4850701",
"0.48495448"
] | 0.62799895 | 0 |
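The reduction described in the preceding row's query (summing all cluster expansion terms that belong to the same orbit) can be sketched in a few lines of NumPy. This is a minimal illustration, not the library's implementation; the names `corrs`, `eci`, and `orbit_ids` are assumptions introduced here.

```python
import numpy as np

def interactions_from_correlations(corrs, eci, orbit_ids):
    # Sum the ECI-weighted correlation terms that share an orbit id;
    # entry i of the result is the cluster interaction for orbit i.
    corrs, eci, orbit_ids = map(np.asarray, (corrs, eci, orbit_ids))
    n_orbits = int(orbit_ids.max()) + 1
    return np.array(
        [np.dot(eci[orbit_ids == i], corrs[orbit_ids == i]) for i in range(n_orbits)]
    )

# Three correlation terms grouped into two orbits (0 and 1):
# orbit 0: 2.0 * 1.0 = 2.0; orbit 1: -1.0 * 0.5 + 4.0 * 0.25 = 0.5
print(interactions_from_correlations([1.0, 0.5, 0.25], [2.0, -1.0, 4.0], [0, 1, 1]))
```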
Remove fit coefficients or ECIs with small values. Removes ECIs and orbits in the ClusterSubspace that have ECI/parameter values smaller than the given threshold. This will change the fit's error metrics (i.e., RMSE) a little, but it should not be by much. If they change a lot, the threshold used is probably too high and important functions are being pruned. This will not refit the ClusterExpansion. Note that if you refit after pruning, the ECIs will probably change and hence so will the fit performance. | def prune(self, threshold=0, with_multiplicity=False):
coefs = self.eci if with_multiplicity else self.coefs
bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]
self.cluster_subspace.remove_corr_functions(bit_ids)
# Update necessary attributes
ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))
ids_complement.sort()
self.coefs = self.coefs[ids_complement]
if self._feat_matrix is not None:
self._feat_matrix = self._feat_matrix[:, ids_complement]
if hasattr(self, "eci"): # reset cache
del self.eci
if hasattr(self, "cluster_interaction_tensors"): # reset cache
del self.cluster_interaction_tensors
# reset the evaluator
self._set_evaluator_data(set_orbits=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def trimCompo(self, threshold):\n newCompo = {}\n for key,value in self.m_compo.items():\n if value > threshold:\n newCompo[ key ] = value\n self.m_compo = newCompo",
"def prune_outliers_from_dataset(datasets: tuple, threshold: float, verbose: bool) -> tuple:\n # Printing pruning control flow\n outlier_frac = 1.0 - threshold\n\n non_outlier_datasets = []\n pruned_training_size = 0\n\n for dataset in datasets:\n outliers_predict = EllipticEnvelope(\n contamination=outlier_frac).fit(dataset[0]).predict(dataset[0])\n\n pruned_atts, pruned_targs = [], [] # = data\n\n for i, pred in enumerate(outliers_predict):\n if pred != -1:\n pruned_atts.append(dataset[0][i])\n pruned_targs.append(dataset[1][i])\n\n non_outlier_data = (pruned_atts, pruned_targs)\n\n # Make certain that both training and test will have the same shape when modeling!\n\n size_outliers = len(pruned_targs)\n\n # Training set's outliers\n if pruned_training_size == 0:\n pruned_training_size = len(non_outlier_data[0])\n\n if verbose:\n print(\n f\"initalised training non-outliers to size {size_outliers}\")\n\n # If test set's # of non-outliers are smaller than training set's, prune training set empathetically\n elif pruned_training_size > size_outliers:\n test_shape = size_outliers\n training_data_to_prune_more = non_outlier_datasets[0]\n\n non_outlier_datasets[0] = (\n training_data_to_prune_more[0][:test_shape], training_data_to_prune_more[1][:test_shape])\n\n pruned_training_size = len(non_outlier_data[0][0])\n\n if verbose:\n print(f\"pruning training non-outliers to size {test_shape}\")\n\n # Any other case (test outliers greater than training, or UNLIKELY, if equal)\n else:\n non_outlier_data = (\n non_outlier_data[0][:pruned_training_size], non_outlier_data[1][:pruned_training_size])\n\n if verbose:\n print(\n f\"pruning test non-outliers to size {pruned_training_size}\")\n\n # Add finished dataset to the complete dataset: (training data, test data)\n non_outlier_datasets.append(non_outlier_data)\n\n return non_outlier_datasets",
"def delete_small_clusters(new_centroids, centroid_counter, threshold):\n\n out_centroids = []\n for n in range(len(new_centroids)):\n if centroid_counter[n] > threshold:\n out_centroids.append(new_centroids[n])\n out_centroids = np.array(out_centroids)\n return out_centroids",
"def truncate(coeffs, threshold=99):\n sortedindex = np.argsort(np.abs(coeffs))[::-1]\n Ncoeff = coeffs.shape[-1]\n cutoff = np.int(np.round(Ncoeff*threshold/100.))\n \n# print \"Keeping %2.0f %% (N=%s) of the biggest coefficients\"%(threshold,cutoff)\n\n coeffs_trunc = coeffs.copy() \t\t\t# copy of all coeff\n coeffs_trunc[sortedindex[cutoff:]] = 0 \t\t# put coeff\n \n return coeffs_trunc",
"def replace_outliers(data, threshold=4):\n zscores = stats.zscore(data)\n mean, std = data.mean(), data.std()\n data.loc[zscores >= threshold] = mean + std * threshold\n data.loc[zscores <= -threshold] = mean - std * threshold\n\n return data",
"def delete_outliers_of_data_before(data: np.ndarray, qi_inspect: int, threshold: int):\n idx_to_del = []\n done = False\n for j in range(data.shape[0]):\n if data[j, qi_inspect] < threshold:\n if not done:\n idx_to_del = j\n done = True\n else:\n idx_to_del = np.append(idx_to_del, j)\n return np.delete(data, idx_to_del, axis=0)",
"def prune_dims(variances, threshold=0.005):\r\n scale_z = np.sqrt(variances)\r\n return scale_z >= threshold",
"def remove_small_boxes(boxes, min_size):\r\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\r\n keep = (ws >= min_size) & (hs >= min_size)\r\n keep = np.where(keep)[0]\r\n return keep",
"def non_max_suppression(bboxes, iou_threshold, threshold, box_format=\"corners\"):\n\n # 49 x 6 \n assert type(bboxes) == list\n # print(bboxes)\n bboxes = [box for box in bboxes if box[1] > threshold]\n bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)\n bboxes_after_nms = []\n # print(bboxes)\n while bboxes:\n chosen_box = bboxes.pop(0)\n bbox_temp = bboxes.copy()\n bboxes = []\n for box in bbox_temp: # not the same class or not overlap a lot \n if box[0] != chosen_box[0] or intersection_over_union(torch.tensor(chosen_box[2:]),torch.tensor(box[2:]), box_format=box_format,) < iou_threshold:\n bboxes.append(box)\n\n bboxes_after_nms.append(chosen_box)\n # print(\"NMS: \" + str(len(bboxes_after_nms)))\n return bboxes_after_nms",
"def purge_control(df, fold=3, exterminate=False):\n df = df.copy()\n # Find maxima for each peak in all blank controls\n maxima = df[blanks].max(axis=1)\n\n # In case of extermination turn to NA all peaks which contains intensity > 0 in any of blanks\n if exterminate:\n df.loc[maxima[maxima != 0].index, samples_wo_controls_qc] = np.nan\n\n # Otherwise turn to NA values whose intensities less than fold * blank_intensity\n else:\n # Find samples where peaks' concentration less than in blank times fold multiplier\n less_than_blank = df[samples_wo_controls_qc].apply(lambda x: x < maxima * fold)\n # Purge observations with abundance less than blank one\n df[less_than_blank] = np.nan\n # Should I subtract blank value from samples?\n # df[samples_wo_controls_qc] = df[samples_wo_controls_qc].sub(maxima, axis=0)\n return df",
"def remove_outliers(df, std_threshold: float = 3):\n\n df = df[np.abs(df - df.mean()) <= (std_threshold * df.std())]\n return df",
"def prune_trie(trie, threshold):\n\tnode = trie.root\n\tpq = []\n\tfor i in node.children.keys():\n\t\tpq.append((node.children[i],node.children[i].char))\n\twhile len(pq) > 0:\n\t\tcur_node, char = pq.pop()\n\t\tif cur_node.isEnd == False:\n\t\t\tfor i in cur_node.children.keys():\n\t\t\t\tpq.append((cur_node.children[i],char + cur_node.children[i].char))\n\t\telse:\n\t\t\tif cur_node.weight < threshold:\n\t\t\t\tdelete(trie, char)\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn trie",
"def remove_outliers(self, std_tol=1.5):\r\n from lsst.analysis import outlier\r\n for tnum in numpy.unique(self.data[\"tiles\"]):\r\n self.decimate(outlier.valid(self, self.data[\"tiles\"]==tnum, std_tol=std_tol))",
"def filter_by_size(self, thin_df, thin_image, size_threshold=14):\n for i, file in enumerate(thin_df):\n if file is not None:\n # Filter MPIL by predefined size threshold\n cut_thin_label = file[file['area'] < size_threshold].label.values\n file['length_keep'] = file.apply(lambda row: row.area >= size_threshold, axis=1)\n\n if len(cut_thin_label) > 0:\n cut_idx = np.isin(thin_image[i], cut_thin_label)\n # Remove filtered instances after filtering\n thin_image[i][cut_idx] = 0\n\n return thin_image",
"def prune_margin(receptive_box, imsize, threshold=0):\n im_width = imsize[1]\n im_height = imsize[0]\n\n xmin = util.where(receptive_box[:, 0] >= 0 - threshold)\n ymin = util.where(receptive_box[:, 1] >= 0 - threshold)\n xmax = util.where(receptive_box[:, 2] < im_width + threshold)\n ymax = util.where(receptive_box[:, 3] < im_height + threshold)\n\n val1 = util.intersect1d(xmin, ymin)\n val2 = util.intersect1d(xmax, ymax)\n valid_ids = torch.sort(torch.unique(util.intersect1d(val1, val2)))[0]\n\n pruned_receptive_box = receptive_box[valid_ids]\n\n return pruned_receptive_box, valid_ids",
"def remove_low_variance(X, threshold=0.0):\n selector = VarianceThreshold(threshold=threshold)\n return selector.fit_transform(X)",
"def non_max_suppression_all_classes(boxes, scores, labels, iou_threshold=0.5):\n excluded_indices = []\n for i in range(0,len(boxes)):\n obj1_box, _, obj1_label = boxes[i], scores[i], labels[i]\n for j in range(i+1,len(boxes)):\n obj2_box, _, obj2_label = boxes[j], scores[j], labels[j]\n if (get_iou(obj1_box, obj2_box) > iou_threshold):\n #print('excluding idx={}, class={}, score={}, bbox={}'.format(j, obj2_label, obj2_score, obj2_box))\n excluded_indices.append(j)\n \n excluded_indices = list(set(excluded_indices)) #Elimina indices repetidos\n included_indices = [idx for idx in range(len(boxes)) if idx not in excluded_indices]\n #print(included_indices)\n return included_indices",
"def DropSmallEntries(self, tol):\n return _hypre.HypreParMatrix_DropSmallEntries(self, tol)",
"def delete_small_trajectories(trajectories, best_parameters):\n print('Filtering small trajectories...', end = ' ')\n size = best_parameters['min_size']\n pop_ind =[]\n for k, trajectory in enumerate(trajectories):\n traj = vis.get_points(trajectory)\n if len(np.unique(traj, axis = 0))<=size:\n pop_ind.append(k)\n for index in sorted(pop_ind, reverse = True):\n del trajectories[index]\n print('Done.')",
"def remove_outliers(self, tolerance: int = 2):\r\n\r\n # Find the median distance between the rows of the field in pixels\r\n d = []\r\n for i in range(1, len(self.lines)):\r\n d.append(abs(self.lines[i][1] - self.lines[i-1][1]))\r\n \r\n row_dist = median(d)\r\n\r\n # Iterate through all of the rows\r\n for row_num in range(len(self.rows)):\r\n i = 0\r\n\r\n # Iterate through each plant in the row\r\n while i < len(self.rows[row_num]):\r\n\r\n # Find each plants distance from it's line\r\n c = self.rows[row_num][i].get_center()\r\n dis = abs(c[0]\r\n * self.lines[row_num][0]\r\n + self.lines[row_num][1]\r\n - c[1])\r\n\r\n # If it's too far off, remove it\r\n if dis < row_dist/tolerance:\r\n i += 1\r\n else:\r\n self.remove_plant_by_center(c)",
"def _importance_based_graph_cut(self, graph, threshold):\n for node, data in graph.nodes_iter(data=True):\n if float(data['importance']) < threshold:\n graph.remove_node(node)\n return",
"def prune_values(self, threshold):\n changed = False\n new_table = dict()\n for assignment in self._table.keys():\n prob = self._table[assignment]\n if prob >= threshold:\n new_table[assignment] = prob\n else:\n changed = True\n\n self._table = new_table\n return changed",
"def filter_contigs(self, criteria):\n eligible_contigs = self.passed.contigs[self.passed.contigs > 10]\n not_enough_contigs = self.passed.contigs[self.passed.contigs <= 10]\n # TODO Define separate function for this\n med_abs_dev = abs(eligible_contigs - eligible_contigs.median()).mean()\n self.med_abs_devs[\"contigs\"] = med_abs_dev\n # Define separate function for this\n # The \"deviation reference\"\n dev_ref = med_abs_dev * self.contigs\n self.dev_refs[\"contigs\"] = dev_ref\n self.allowed[\"contigs\"] = eligible_contigs.median() + dev_ref\n self.failed[\"contigs\"] = eligible_contigs[\n abs(eligible_contigs - eligible_contigs.median()) > dev_ref\n ].index\n eligible_contigs = eligible_contigs[\n abs(eligible_contigs - eligible_contigs.median()) <= dev_ref\n ]\n eligible_contigs = pd.concat([eligible_contigs, not_enough_contigs])\n eligible_contigs = eligible_contigs.index\n self.passed = self.passed.loc[eligible_contigs]",
"def remove_small_objects(img, min_size=7500):\n img2 = np.copy(img)\n img2 = np.uint8(img2)\n nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(img2, connectivity=8)\n # connectedComponentswithStats yields every seperated component with information on each of them, such as size\n # the following part is just taking out the background which is also considered a component, but most of the time we don't want that.\n sizes = stats[1:, -1]\n nb_components = nb_components - 1\n\n # your answer image\n # for every component in the image, you keep it only if it's above min_size\n for i in range(0, nb_components):\n if sizes[i] < min_size:\n img2[output == i + 1] = 0\n\n return img2",
"def remove_small_cc(binary, thres=10):\n cc, n_cc = measure.label(binary)\n binary2 = np.copy(binary)\n for n in range(1, n_cc + 1):\n area = np.sum(cc == n)\n if area < thres:\n binary2[cc == n] = 0\n return binary2",
"def display_outlayers_above(self, threshold):\n\n from peleffy.topology import Molecule\n from IPython.display import display\n\n compound_ids, smiles_tags, _ = self._read_dataset()\n cid_to_smiles = dict()\n\n for cid, smiles_tag in zip(compound_ids, smiles_tags):\n cid_to_smiles[cid] = smiles_tag\n\n for cid, diff, expv in zip(self.results['cids'],\n self.results['differences'],\n self.results['experimental_values']):\n if abs(diff - expv) > 10:\n smiles = cid_to_smiles[cid]\n print('-' * max((len(cid) + len(smiles) + 3), 47))\n print(cid, '-', smiles)\n print('-' * max((len(cid) + len(smiles) + 3), 47))\n mol = Molecule(smiles=smiles)\n print(' - Experimental difference: '\n + '{: 10.1f} kcal/mol'.format(expv))\n print(' - Predicted difference: '\n + '{: 10.1f} kcal/mol'.format(diff))\n print(' - Absolute error: '\n + '{: 10.1f} kcal/mol'.format(abs(diff - expv)))\n display(mol)",
"def remove_outliers(clusters):\n pixel_sums = {} \n outliers = []\n\n for cluster, nodes in clusters.items():\n if len(nodes) > 1:\n pixel_sums[cluster] = []\n for node in nodes:\n pixel_sums[cluster].append(sum(sum(extract_2D[node])))\n\n for cluster, psums in pixel_sums.items():\n med = np.median(psums)\n m_psums = [abs(x - med) for x in psums]\n mad = np.median(m_psums)\n \n if mad == 0:\n next \n else:\n for i, proj in enumerate(psums): \n z = 0.6745*(proj - med)/mad\n if abs(z) > 3.5:\n outliers.append((cluster, clusters[cluster][i]))\n\n clusters[\"outliers\"] = [o[1] for o in outliers]\n \n for outlier in outliers:\n cluster, node = outlier[0], outlier[1]\n clusters[cluster].remove(node)\n print('class_avg node {0} was removed from cluster {1} as an outlier'.format(node, cluster))",
"def threshold_col_del(self, threshold):\n self.data = self.data.dropna(thresh=threshold*len(self.data), axis=1) \n self.X = self.data.drop(self.target, axis =1)\n self.y = self.data[self.target]",
"def non_maximum_suppression(boxes, confs, overlap_threshold, top_k):\n eps = 1e-15\n \n boxes = np.asarray(boxes, dtype='float32')\n \n pick = []\n x1, y1, x2, y2 = boxes.T\n \n idxs = np.argsort(confs)\n area = (x2 - x1) * (y2 - y1)\n \n while len(idxs) > 0:\n i = idxs[-1]\n \n pick.append(i)\n if len(pick) >= top_k:\n break\n \n idxs = idxs[:-1]\n \n xx1 = np.maximum(x1[i], x1[idxs])\n yy1 = np.maximum(y1[i], y1[idxs])\n xx2 = np.minimum(x2[i], x2[idxs])\n yy2 = np.minimum(y2[i], y2[idxs])\n \n w = np.maximum(0, xx2 - xx1)\n h = np.maximum(0, yy2 - yy1)\n I = w * h\n \n overlap = I / (area[idxs] + eps)\n # as in Girshick et. al.\n \n #U = area[idxs] + area[i] - I\n #overlap = I / (U + eps)\n \n idxs = idxs[overlap <= overlap_threshold]\n \n return pick",
"def remove_hi_confidence_chromosome(G):\n to_remove = []\n for nd in G.nodes():\n if get_length_from_spades_name(nd) > PARAMS.CHROMOSOME_LEN_THRESH and \\\n G.nodes[nd]['score'] < PARAMS.CHROMOSOME_SCORE_THRESH:\n to_remove.append(nd)\n to_remove.append(rc_node(nd))\n G.remove_nodes_from(to_remove)\n logger.info(\"Removed %d long, likely chromosomal nodes\" % len(set(to_remove)))"
] | [
"0.62536013",
"0.57788783",
"0.5763838",
"0.5722056",
"0.571018",
"0.5643245",
"0.5627789",
"0.55441475",
"0.5527665",
"0.55141956",
"0.5495865",
"0.54696786",
"0.5461309",
"0.5422423",
"0.53910226",
"0.5389687",
"0.53792",
"0.5369068",
"0.53648835",
"0.5363127",
"0.5340759",
"0.533493",
"0.53178173",
"0.52869153",
"0.5285524",
"0.5284276",
"0.52751875",
"0.52749294",
"0.5270539",
"0.5246159"
] | 0.6851633 | 0 |
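A minimal usage sketch for the prune entry above, assuming an already-fitted ClusterExpansion object that exposes the coefs array and prune method shown in that snippet; the helper name and threshold value are illustrative and not part of the dataset.

def prune_small_terms(expansion, threshold=1e-4, with_multiplicity=False):
    # Count correlation functions before and after pruning so the caller can
    # see how many terms fell below the threshold.
    n_before = len(expansion.coefs)
    expansion.prune(threshold=threshold, with_multiplicity=with_multiplicity)
    return n_before - len(expansion.coefs)

As the docstring above warns, refitting after this call would likely shift both the ECIs and the fit's error metrics.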
Set the orbit and cluster interaction data in the evaluator. | def _set_evaluator_data(self, set_orbits=False):
if set_orbits:
self._subspace.evaluator.reset_data(
get_orbit_data(self._subspace.orbits),
self._subspace.num_orbits,
self._subspace.num_corr_functions,
)
flat_interaction_tensors = tuple(
np.ravel(tensor, order="C")
for tensor in self.cluster_interaction_tensors[1:]
)
self._subspace.evaluator.set_cluster_interactions(
flat_interaction_tensors, offset=self.cluster_interaction_tensors[0]
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, data):\n\n self.data = data\n self.calculator = Calculator(descriptors, ignore_3D=True)\n self.described_molecules = self.featurize()",
"def __init__(self, synonyme_cache, intersection_cache, data, evaluator):\n self.SYNSET_CACHE = synonyme_cache\n self.INTERSECTION_CACHE = intersection_cache\n self.pen = Penalties()\n self.evaluator = evaluator\n self.data = data",
"def setUp(self):\n\n # Load the data\n dataset = tagging.data.DataSet.from_fits(DATA_PATH, extension=1)\n\n # Assign all as field.\n dataset.data[\"FIELD/CLUSTER\"] = \"FIELD\"\n\n # [TODO] Delete benchmarks\n clusters = (\"Cha_I\", \"Br81\", \"M15\", \"NGC2808\", \"NGC6633\", \"IC4665\", \n \"NGC104\", \"gamma2_Vel\", \"GJ880\", \"NGC4815\", \"NGC2547\", \"NGC5927\",\n \"NGC4833\", \"NGC1851\", \"NGC2243\", \"NGC3532\", \"NGC6752\", \"Br25\", \n \"NGC4372\", \"NGC6705\", \"M67\", \"NGC2516\", \"Trumpler20\")\n\n # Assign all as members.\n for cluster in clusters:\n members = dataset.assign_cluster_members(cluster,\n lambda row: row[\"TARGET\"].startswith(cluster))\n\n # Special hack:\n if cluster == \"Trumpler20\":\n members += dataset.assign_cluster_members(cluster,\n lambda row: row[\"TARGET\"].startswith(\"Trumpler_20\"))\n\n logger.info(\"Assigned stars to {} clusters\".format(len(clusters)))\n self.dataset = dataset\n return None",
"def setup(self):\n super().setup()\n self.ctx.restart_calc = None\n self.ctx.inputs = AttributeDict(self.exposed_inputs(XspectraCalculation, 'xspectra'))\n\n self.ctx.inputs.parameters = self.ctx.inputs.parameters.get_dict()",
"def setData(self, data):\n self.data = data\n dagPath, components = self.__getGeometryComponents()\n self.setInfluenceWeights(dagPath, components)\n self.setBlendWeights(dagPath, components)\n\n for attr in ['skinningMethod', 'normalizeWeights']:\n cmds.setAttr('%s.%s' % (self.node, attr), self.data[attr])",
"def fun_set(self):\n\n self.type.set(self.xtl._scattering_type)\n # self.energy_kev.set(8)\n self.theta_offset.set(self.xtl._scattering_theta_offset)\n self.theta_min.set(self.xtl._scattering_min_theta)\n self.theta_max.set(self.xtl._scattering_max_theta)\n self.twotheta_min.set(self.xtl._scattering_min_two_theta)\n self.twotheta_max.set(self.xtl._scattering_max_two_theta)\n\n if self.orientation.get() == 'Reflection':\n self.direction_h.set(self.xtl._scattering_specular_direction[0])\n self.direction_k.set(self.xtl._scattering_specular_direction[1])\n self.direction_l.set(self.xtl._scattering_specular_direction[2])\n else:\n self.direction_h.set(self.xtl._scattering_parallel_direction[0])\n self.direction_k.set(self.xtl._scattering_parallel_direction[1])\n self.direction_l.set(self.xtl._scattering_parallel_direction[2])",
"def set_data(self, data):\n self.closeContext()\n self.clear()\n self.clear_messages()\n\n self.data = data\n if data is not None:\n n_instances = len(data)\n n_attrs = len(data.domain.attributes)\n self.infoLabel.setText(\"%i instances on input\\n%i attributes\" % (\n n_instances, n_attrs))\n\n self.graph_variables = [var for var in data.domain.attributes\n if var.is_continuous]\n if len(self.graph_variables) < 1:\n self.Information.not_enough_attrs()\n else:\n groupvars = [var for var in data.domain.variables +\n data.domain.metas if var.is_discrete]\n\n if len(groupvars) > 0:\n self.cb_attr.addItems([str(var) for var in groupvars])\n self.group_var = str(groupvars[0])\n self.group_variables = groupvars\n self.update_group_var()\n else:\n self._setup_plot()\n\n self.selection = []\n self.openContext(data)\n self.select_data_instances()\n self.commit()",
"def __init__(self, experiment_data):\n self._experiment_data = experiment_data",
"def setgeo(rundata):\n#-------------------\n\n try:\n geodata = rundata.geodata\n except:\n print \"*** Error, this rundata has no geodata attribute\"\n raise AttributeError(\"Missing geodata attribute\")\n\n # == setgeo.data values ==\n geodata.variable_dt_refinement_ratios = True ## Overrides clawdata.inratt, above\n\n geodata.igravity = 1\n geodata.gravity = 9.81\n geodata.icoordsys = 2\n geodata.Rearth = 6367.5e3\n geodata.icoriolis = 0\n\n # == settsunami.data values ==\n geodata.sealevel = 0.\n geodata.drytolerance = 1.e-2\n geodata.wavetolerance = 1.e-1 ##\n geodata.depthdeep = 1.e6 ## Definition of \"deep\" water\n geodata.maxleveldeep = 10 ## Restriction on the number of deep water levels\n geodata.ifriction = 1 ## Friction switch. 0=off, 1=on\n # geodata.coeffmanning =0.0\n geodata.coeffmanning =.025\n geodata.frictiondepth = 10.\n\n #okushiri_dir = '/Users/FrankGonzalez/daily/modeling/tsunami-benchmarks/github/' \\\n #+ 'FrankGonzalez/geoclaw-group/benchmarks/bp09' ##\n okushiri_dir = '..' ## this directory\n \n # == settopo.data values ==\n geodata.topofiles = []\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/OK24.tt1']) ## 24-s, ~550-740 m Entire Domain (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n okushiri_dir + '/OK08.tt1']) ## 8-s, ~184-247 m Okushiri (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n okushiri_dir + '/OK03.tt1']) ## 2.67 s (8/3s), ~61-82 m Okushiri (Dmitry's version of Kansai U.)\n geodata.topofiles.append([1, 1, 1, 0., 1.e10, \\\n okushiri_dir + '/AO15.tt1']) ## 0.53-0.89 s, ~16.5-20.4 m, Aonae (Dmitry's version of Kansai U.)\n # geodata.topofiles.append([1, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/MO01.tt1']) ## 0.89 s, ~20-27 m, Monai (Dmitry's version of Kansai U.)\n # geodata.topofiles.append([1, 1, 1, 0., 1.e10, \\\n # okushiri_dir + '/MB05.tt1']) ## 0.13-0.18 s, ~4 m Monai (Dmitry's version of Kansai U.)\n\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth40_138.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth40_140.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth42_138.txt']) ## JODC 500 m\n # geodata.topofiles.append([-3, 1, 1, 0, 1.e10, \\\n # okushiri_dir + '/depth42_140.txt']) ## JODC 500 m\n \n # == setdtopo.data values ==\n geodata.dtopofiles = []\n # for moving topography, append lines of the form: (<= 1 allowed for now!)\n # [topotype, minlevel,maxlevel,fname]\n geodata.dtopofiles.append([1,2,3, okushiri_dir + '/HNO1993.txyz']) ## Dmitry N.'s version of Kansai U.\n\n # == setqinit.data values ==\n geodata.iqinit = 0\n geodata.qinitfiles = []\n # for qinit perturbations, append lines of the form: (<= 1 allowed for now!)\n # [minlev, maxlev, fname]\n #geodata.qinitfiles.append([1, 1, 'hump.xyz'])\n\n # == setregions.data values ==\n geodata.regions = []\n # to specify regions of refinement append lines of the form\n # [minlevel,maxlevel,t1,t2,x1,x2,y1,y2]\n # Note: Level 1 = 24 s & Levels [2,3,4,5] = RF [3,3,3,8] => Res of 8 sec to 8/3 sec to 8/9 to 1/9 sec/cell\n # Grid Limits\n # Name x1 x2 y1 y2\n # OK24 137.53666670 141.53000000 39.53666670 44.26333330\n # HNO 138.50000000 140.55000000 40.51666670 43.30000000\n # OK08 138.50111110 140.55222220 40.52111110 43.29888890\n # OK03 139.38925930 139.66407410 41.99592590 
42.27074070\n # AO15 139.43419750 139.49987650 42.03118520 42.07251850\n # MO01 139.41123460 139.43320990 42.07790120 42.14580250\n # MB05 139.41385190 139.42639510 42.09458550 42.10343920\n \n #geodata.regions.append([1, 1, 0., 1e9, 0.0, 360.0, -90.0, 90.0]) ## OK24: 24-s, ~550-740 m Entire Domain\n geodata.regions.append([1, 2, 0., 1e9, 138.5, 139.7, 41.4, 43.3]) ## OK08: 8-s, ~184-247 m Okushiri \n geodata.regions.append([1, 3, 0., 1e9, 139.39, 139.6, 42.0, 42.25]) ## OK03: 2.67 s (8/3s), ~61-82 m Okushiri \n # geodata.regions.append([1, 4, 0., 1e9, 139.42, 139.57, 42.03, 42.23]) ## AO15: 0.53-8/9 s, ~16.5-20.4 m, Aonae \n #geodata.regions.append([1, 4, 0., 1e9, 139.40, 139.46, 42.03, 42.22]) ## West coast Okushiri\n geodata.regions.append([4, 4, 90., 1e9, 139.42, 139.431, 42.07, 42.12])\n \n\n # == setgauges.data values ==\n geodata.gauges = []\n # for gauges append lines of the form [gaugeno, x, y, t1, t2]\n \n # geodata.gauges.append([1,139.429211710298,42.188181491811,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([3,139.411185686023,42.162762869034,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([5,139.418261206409,42.137404393442,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([6,139.428035766149,42.093012384481,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([7,139.426244998662,42.116554785296,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([8,139.423714744650,42.100414145210,0.0,1e9]) ## Tsuji Obs\n geodata.gauges.append([9,139.428901803617,42.076636582137,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([10,139.427853421935,42.065461519438,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([11,139.451539852594,42.044696547058,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([12,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs\n # geodata.gauges.append([13,139.456528443496,42.051692262353,0.0,1e9]) ## Tsuji Obs\n # \n # == setfixedgrids.data values ==\n\n geodata.fixedgrids = []\n \n for g in geodata.gauges:\n xg = g[1]\n yg = g[2]\n xg1 = xg - 0.001\n xg2 = xg + 0.002\n yg1 = yg - 0.001\n yg2 = yg + 0.002\n nx = 31\n ny = 31\n gaugeno = g[0]\n if gaugeno == 9:\n xg2 = xg + 0.003\n nx = 41\n if gaugeno == 8:\n xg1 = xg - 0.002\n xg2 = xg + 0.001\n yg1 = yg - 0.002\n yg2 = yg + 0.001\n \n geodata.fixedgrids.append([210.0,360.0,11,xg1,xg2,yg1,yg2,nx,ny,0,1])\n geodata.regions.append([5, 5, 180., 1e9, xg1,xg2,yg1,yg2])\n \n \n return rundata\n\n # end of function setgeo\n # ----------------------",
"def fillData(self):\n self.textexpt.SetValue(c.getExperimentFolder(self._user))\n self.textfold.SetValue(c.getDataFolder(self._user))\n self.textfile.SetValue(c.getDataFile(self._user))\n self.prependscan.SetValue(c.getPrependScan(self._user))",
"def setUp(self):\r\n self.colwell_data1 = asarray(colwell_data1)\r\n self.colwell_data2 = asarray(colwell_data2)\r\n\r\n self.est1 = AbstractPointEstimator(asarray([0, 1, 2, 3, 4, 5]))\r\n self.est2 = AbstractPointEstimator(self.colwell_data1)\r\n self.est3 = AbstractPointEstimator(self.colwell_data2)",
"def setgeo(rundata):\n#-------------------\n\n try:\n geodata = rundata.geodata\n except:\n print \"*** Error, this rundata has no geodata attribute\"\n raise AttributeError(\"Missing geodata attribute\")\n\n # == setgeo.data values ==\n\n geodata.variable_dt_refinement_ratios = True\n\n geodata.igravity = 1\n geodata.gravity = 9.81\n geodata.icoordsys = 2\n geodata.Rearth = 6367.5e3\n geodata.icoriolis = 0\n\n # == settsunami.data values ==\n geodata.sealevel = 0.\n geodata.drytolerance = 1.e-3\n geodata.wavetolerance = 1.e-1\n geodata.depthdeep = 1.e2\n geodata.maxleveldeep = 4\n geodata.ifriction = 1\n geodata.coeffmanning =.025\n geodata.frictiondepth = 200.\n\n\n # == settopo.data values ==\n geodata.topofiles = []\n # for topography, append lines of the form\n # [topotype, minlevel, maxlevel, t1, t2, fname]\n geodata.topofiles.append([3, 1, 1, 0., 1e10, 'ebanda.asc'])\n \n\n # == setdtopo.data values ==\n geodata.dtopofiles = []\n # for moving topography, append lines of the form: (<= 1 allowed for now!)\n # [topotype, minlevel, maxlevel, fname]\n geodata.dtopofiles.append([3,1,3,'BandaArc1852.tt3'])\n\n geodata.iqinit = 0\n geodata.qinitfiles = []\n\n # == setgauges.data values ==\n geodata.gauges = []\n # for gauges append lines of the form [gaugeno,x,y,t1,t2]\n geodata.gauges.append([1, 109.000, -7.789, 0., 1e10]) #Cialciap\n geodata.gauges.append([2, 109.040, -7.722, 0., 1e10]) #Cialciap Bay\n geodata.gauges.append([3, 110.292, -8.027, 0., 1e10]) #Bantul\n geodata.gauges.append([4, 111.086, -8.233, 0., 1e10]) #Pacitan\n geodata.gauges.append([5, 111.558, -8.319, 0., 1e10]) #Pelang Beach\n geodata.gauges.append([6, 111.968, -8.286, 0., 1e10]) #Sine Beach\n geodata.gauges.append([7, 112.982, -8.326, 0., 1e10]) #Guying\n geodata.gauges.append([8, 113.176, -8.286, 0., 1e10]) #Muara\n geodata.gauges.append([9, 113.461, -8.383, 0., 1e10]) #Puger\n geodata.gauges.append([10, 113.336, -8.506, 0., 1e10]) #Barung Island\n geodata.gauges.append([11, 114.110, -8.621, 0., 1e10]) #Lampon\n geodata.gauges.append([12, 114.396, -8.231, 0., 1e10]) #Banyuwani\n geodata.gauges.append([13, 112.880, -7.278, 0., 1e10]) #Surabiya\n geodata.gauges.append([14, 114.965, -8.533, 0., 1e10]) #Tabanan\n geodata.gauges.append([15, 115.144, -8.697, 0., 1e10]) #Kuta\n geodata.gauges.append([16, 115.193, -8.848, 0., 1e10]) #Nusa Dua\n geodata.gauges.append([17, 116.064, -8.586, 0., 1e10]) #Mataram\n geodata.gauges.append([18, 115.260, -8.727, 0., 1e10]) #Sanur\n geodata.gauges.append([19, 116.031, -8.873, 0., 1e10]) #Sepi Bay\n geodata.gauges.append([20, 116.135, -8.872, 0., 1e10]) #Serangan Beach\n geodata.gauges.append([21, 116.283, -8.902, 0., 1e10]) #Kuta Lombok\n geodata.gauges.append([22, 116.400, -8.868, 0., 1e10]) #Awang Bay\n geodata.gauges.append([23, 116.466, -8.924, 0., 1e10]) #Surga Beach\n geodata.gauges.append([24, 116.744, -8.918, 0., 1e10]) #Maluk\n geodata.gauges.append([25, 116.833, -9.047, 0., 1e10]) #Tongo\n geodata.gauges.append([26, 117.199, -9.023, 0., 1e10]) #Linyuk\n geodata.gauges.append([27, 117.762, -8.939, 0., 1e10]) #Leppu\n geodata.gauges.append([28, 118.377, -8.785, 0., 1e10]) #Huu\n geodata.gauges.append([29, 118.172, -8.780, 0., 1e10]) #Rontu Beach\n geodata.gauges.append([30, 119.403, -8.729, 0., 1e10]) #Mantea Alley\n geodata.gauges.append([31, 119.374, -9.788, 0., 1e10]) #Nihiwatu\n geodata.gauges.append([32, 119.466, -9.742, 0., 1e10]) #Waigalli\n geodata.gauges.append([33, 119.945, -9.975, 0., 1e10]) #Tarimbang Beach\n geodata.gauges.append([34, 120.183, -10.233, 0., 1e10]) 
#Lalindi\n geodata.gauges.append([35, 120.264, -10.257, 0., 1e10]) #Manoekangga\n geodata.gauges.append([36, 120.546, -10.241, 0., 1e10]) #Baing\n geodata.gauges.append([37, 120.312, -9.661, 0., 1e10]) #Waingapu\n geodata.gauges.append([38, 119.871, -8.501, 0., 1e10]) #Labun Badjo\n geodata.gauges.append([39, 120.604, -8.822, 0., 1e10]) #Mborong\n geodata.gauges.append([40, 123.560, -10.166, 0., 1e10]) #Kupang\n geodata.gauges.append([41, 121.824, -10.491, 0., 1e10]) #Baa",
"def __init__(self):\n super().__init__()\n self.data_set_loc = conf.config_section_mapper(\"filePath\").get(\"data_set_loc\")\n self.data_extractor = DataExtractor(self.data_set_loc)\n actor_actor_matrix_obj.fetchActorActorSimilarityMatrix()",
"def set_simulation(self, param, operators, mesh):\n self.param = param\n self.operators = operators\n self.mesh = mesh\n\n # Set simulation for the components\n self.electrolyte.set_simulation(param, operators, mesh)\n self.interface.set_simulation(param, mesh)",
"def set_equations(self, *args, **kwargs):\n pass",
"def set_gear_data(self, geardata):\n\n self.__init__(geardata, self.modifications)",
"def startEvaluationMode(self):\r\n self.storeDataRef = self.dataRef",
"def setup_simulation(self, **kwargs):\n\n self.distance = self.config[\"site\"][\"distance\"]\n self.num_substations = self.config[\"num_substations\"]\n\n self.initialize_substructure_production()\n self.initialize_installation_vessel()",
"def setup(self):\n\n for name, infos in Rt.geom_dict.items():\n if name in Rt.optim_var_dict:\n self.add_input(name, val=infos[1][0])",
"def SetGeData(self, *args, **kwargs):\n pass",
"def setData(self,data):\n self.data=data\n self.leaf=True",
"def evaluator(self, evaluator):\n self.__evaluator = evaluator",
"def run(self, data):\n\t\treduced_data = PCA(n_components=2).fit_transform(data)\n\n\t\t# Run the algorithm\n\t\tself.estimator.fit_transform(reduced_data)\n\n\t\t# Save all relevent properties\n\t\tself.input_data = data\n\t\tself.centroids = self.estimator.cluster_centers_\n\t\tself.node_positions = reduced_data\n\t\tself.labels = self.estimator.labels_\n\n\t\t# Enable visualising when debugging\n\t\t# self.visualize(reduced_data)",
"def set_up_and_parameterise_experiment(self):\n # Update experiment using capacity\n capacity = self._parameter_values[\"Nominal cell capacity [A.h]\"]\n for op_conds in self.experiment.operating_conditions_steps:\n if op_conds.type == \"C-rate\":\n op_conds.type = \"current\"\n op_conds.value = op_conds.value * capacity\n\n # Update terminations\n termination = op_conds.termination\n for term in termination:\n term_type = term[\"type\"]\n if term_type == \"C-rate\":\n # Change type to current\n term[\"type\"] = \"current\"\n # Scale C-rate with capacity to obtain current\n term[\"value\"] = term[\"value\"] * capacity\n\n # Add time to the experiment times\n dt = op_conds.duration\n if dt is None:\n if op_conds.type == \"current\":\n # Current control: max simulation time: 3h / C-rate\n Crate = op_conds.value / capacity\n dt = 3 / abs(Crate) * 3600 # seconds\n else:\n # max simulation time: 1 day\n dt = 24 * 3600 # seconds\n op_conds.duration = dt\n\n # Set up model for experiment\n self.set_up_and_parameterise_model_for_experiment()",
"def experiments_init(self):\n pass",
"def setExperiment(self, **kwargs):\n # If the dictionary robot value is 'tb1' then change the button Style\n global robot_Selected_Value\n if kwargs['robot'] =='1':\n robot_Selected_Value = 'TB1'\n elif kwargs['robot'] =='2':\n robot_Selected_Value = 'TB2'\n elif kwargs['robot'] =='3':\n robot_Selected_Value = 'TB3'\n elif kwargs['robot'] =='4':\n robot_Selected_Value = 'TB4'\n elif kwargs['set'] =='OK':\n # CONFIGURATION VARIABLES\n robot_Type_Value = self.robot_Selection_Type.currentText()\n robot_Role_Value = self.robot_Selection_Role.currentText()\n robot_Task_Value = self.robot_Selection_Task.currentText()\n robot_Behavior_Value = self.robot_Selection_Behavior.currentText()\n robot_Experiment_Value = self.robot_Selection_Experiment.currentText()\n # XML CREATION\n environmentXMLFile = et.Element('EXP_CONFIGURATIONS')\n comment = et.Comment(\"Experiment Configuration and Variables\")\n environmentXMLFile.append(comment)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_SELECTED')\n environmentConfig.text = str(robot_Selected_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_TYPE')\n environmentConfig.text = str(robot_Type_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_ROLE')\n environmentConfig.text = str(robot_Role_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_TASK')\n environmentConfig.text = str(robot_Task_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_BEHAVIOR')\n environmentConfig.text = str(robot_Behavior_Value)\n environmentConfig = et.SubElement(environmentXMLFile, 'ROBOT_EXPERIMENT')\n environmentConfig.text = str(robot_Experiment_Value)\n try:\n tree = et.ElementTree(environmentXMLFile)\n tree.write('experimentConfig.xml', encoding='utf8')\n sendFiles.sshSendFiles()\n operationSucess()\n except Exception:\n operationError()",
"def experiment_init(self):\n raise NotImplementedError(\"this needs to be implemented!\")",
"def setUp(self):\r\n self.colwell_data1 = asarray(colwell_data1)\r\n self.colwell_data2 = asarray(colwell_data2)\r\n\r\n self.samp_data1 = asarray([1, 2, 3, 4, 5])\r\n self.samp_data2 = asarray([1, 3, 4, 5])\r\n\r\n self.estimator1 = Chao1MultinomialPointEstimator(self.colwell_data1)\r\n self.estimator2 = Chao1MultinomialPointEstimator(self.colwell_data2)\r\n self.estimator3 = Chao1MultinomialPointEstimator(self.samp_data1)\r\n self.estimator4 = Chao1MultinomialPointEstimator(self.samp_data2)",
"def _setVals(self, qubit_id=0, instr=0, notify=False, block=False, action=False):\n self.qubit_id = qubit_id\n self.instr = instr\n self.notify = notify\n self.block = block\n self.action = action",
"def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)"
] | [
"0.57773596",
"0.5721634",
"0.56163263",
"0.54464465",
"0.54463744",
"0.54107165",
"0.5388258",
"0.5385944",
"0.5348698",
"0.5345845",
"0.52903944",
"0.5274697",
"0.52731305",
"0.5257818",
"0.5226652",
"0.52054065",
"0.5167687",
"0.51592535",
"0.515142",
"0.51405585",
"0.5131831",
"0.51308584",
"0.5130674",
"0.51270044",
"0.5117059",
"0.51148045",
"0.51081544",
"0.51059276",
"0.51004475",
"0.5099418"
] | 0.8182642 | 0 |
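A small sketch of the flattening step performed by _set_evaluator_data above, using stand-in NumPy arrays; the tuple layout (constant offset first, then per-orbit interaction tensors) is read off the snippet and the example values are arbitrary.

import numpy as np

# Stand-in for cluster_interaction_tensors: element 0 is the constant offset,
# the remaining elements are per-orbit interaction tensors.
tensors = (0.5, np.arange(4.0).reshape(2, 2), np.arange(8.0).reshape(2, 2, 2))

# Flatten every tensor after the offset in C (row-major) order, matching the
# order="C" ravel used before the data is handed to the evaluator.
flat_interaction_tensors = tuple(np.ravel(t, order="C") for t in tensors[1:])
offset = tensors[0]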
Create ClusterExpansion from serialized MSONable dict. | def from_dict(cls, d):
reg_data_dict = deepcopy(d.get("regression_data"))
if reg_data_dict is not None:
reg_data_dict["feature_matrix"] = np.array(
d["regression_data"]["feature_matrix"]
)
reg_data_dict["property_vector"] = np.array(
d["regression_data"]["property_vector"]
)
reg_data = RegressionData(**reg_data_dict)
else:
reg_data = None
cluster_expansion = cls(
ClusterSubspace.from_dict(d["cluster_subspace"]),
coefficients=np.array(d["coefs"]),
regression_data=reg_data,
)
# update copy of feature matrix to keep any changes
if d["feature_matrix"] is not None:
cls._feat_matrix = np.array(d["feature_matrix"])
return cluster_expansion | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def copy(self):\n return ClusterExpansion.from_dict(self.as_dict())",
"def from_dict(cls, dikt) -> 'ShardingDescriptor':\n return util.deserialize_model(dikt, cls)",
"def _from_string(cls, serialized):\r\n parse = cls.URL_RE.match(serialized)\r\n if not parse:\r\n raise InvalidKeyError(cls, serialized)\r\n\r\n parse = parse.groupdict()\r\n if parse['definition_id']:\r\n parse['definition_id'] = cls.as_object_id(parse['definition_id'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})",
"def from_dict(cls, dikt) -> 'Spacecraft':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Espacio':\n return deserialize_model(dikt, cls)",
"def _from_string(cls, serialized):\r\n parse = cls.parse_url(serialized)\r\n\r\n if parse['version_guid']:\r\n parse['version_guid'] = cls.as_object_id(parse['version_guid'])\r\n\r\n return cls(**{key: parse.get(key) for key in cls.KEY_FIELDS})",
"def test_create_from_serialized(self, molecule):\n serialized_molecule = molecule.to_dict()\n molecule_copy = Molecule(serialized_molecule)\n assert molecule == molecule_copy",
"def _from_string(cls, serialized):\r\n course_key = CourseLocator._from_string(serialized)\r\n parsed_parts = cls.parse_url(serialized)\r\n block_id = parsed_parts.get('block_id', None)\r\n if block_id is None:\r\n raise InvalidKeyError(cls, serialized)\r\n return cls(course_key, parsed_parts.get('block_type'), block_id)",
"def from_dict(cls, dikt) -> 'InstallmentRequest':\n return util.deserialize_model(dikt, cls)",
"def from_json(cls, json_str: str) -> MissionStickerRequest:\n return cls.from_dict(json.loads(json_str))",
"def from_dict(cls, dikt) -> 'PipelineDefinition':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'JobOutputRequest':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Operations':\n return util.deserialize_model(dikt, cls)",
"def from_str(cls, data: AnyStr) -> \"JobManifest\":\n as_dict = json.loads(data)\n as_dict[\"creation_time\"] = datetime.datetime.fromisoformat(\n as_dict[\"creation_time\"]\n )\n return cls(**as_dict)",
"def deserialize(cls, serd):\r\n return cls(*serd)",
"def from_dict(cls, dikt) -> 'CatalogDataCategoryTreeInterface':\n return deserialize_model(dikt, cls)",
"def from_json(cls, s, **kwargs):\n return loads(s, cls, **kwargs)",
"def from_dict(cls, dikt) -> 'PathComputationContext':\n return util.deserialize_model(dikt, cls)",
"def test_molecule_subclass_from_dict(self):\n orig_mol = Molecule.from_smiles(\"CCO\")\n mol = MyMol.from_dict(orig_mol.to_dict())\n assert isinstance(mol, MyMol)",
"def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])",
"def from_dict(cls, dikt) -> 'LightSourceMaterialSchema':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'CardBlockRequest':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'Expression':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, tree_dict):\n\n class ProxyReaction:\n def __init__(self, smiles, metadata):\n self.smiles = smiles\n self.metadata = metadata\n\n obj = cls()\n obj._stock = []\n ReactionTree._parse_tree_dict(tree_dict, obj, ProxyReaction)\n\n obj._find_repeating_patterns()\n return obj",
"def from_dict(cls, dikt) -> 'MeshNgdResponse':\n return util.deserialize_model(dikt, cls)",
"def from_dict(cls, dikt) -> 'RequestMnemonicModel':\n return util.deserialize_model(dikt, cls)",
"def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)",
"def from_json(cls, s):\n\n d = json.loads(s, object_pairs_hook=OrderedDict)\n return cls.from_definition(d)",
"def from_json(self, scaler_json):\n\n json_dict=json.loads(scaler_json)\n\n # Basic fields\n for key in ['feature_range', 'copy',\n 'n_features_in_', 'n_samples_seen_']:\n self.__setattr__(key, json_dict[key])\n\n # Some fields need to be numpy arraysget\n for key in ['scale_', 'min_', 'data_min_', 'data_max_', 'data_range_']:\n self.__setattr__(key, np.array(json_dict[key]))"
] | [
"0.5919251",
"0.57916635",
"0.56996584",
"0.56544054",
"0.54264385",
"0.53941065",
"0.5385412",
"0.5363183",
"0.52725023",
"0.5242033",
"0.52273315",
"0.5222105",
"0.52039033",
"0.51954126",
"0.5185823",
"0.5161533",
"0.51568764",
"0.5140203",
"0.5136915",
"0.51132333",
"0.51095796",
"0.51088655",
"0.50905615",
"0.50645703",
"0.5018488",
"0.5007627",
"0.5003408",
"0.4994869",
"0.4994869",
"0.49945673"
] | 0.705797 | 0 |
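A hedged round-trip sketch for the from_dict entry above; it assumes the expansion also exposes the MSONable as_dict() counterpart, as the copy() snippet among the negatives suggests, and nothing beyond that.

def roundtrip(expansion):
    # Serialize to a plain, JSON-compatible dict and rebuild the expansion;
    # coefficients, regression data, and the cached feature matrix are
    # restored from the serialized copy.
    d = expansion.as_dict()
    return type(expansion).from_dict(d)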
Goes through all open orders on a pair, figures out if they're in range, and wipes and recreates them if adjustment is needed. | def manage_orders(self):
for coin, pair_info in self.usdt_pairs.items():
orders = self.kc.get_orders(pair_info["symbol"], status="active")
self.log(coin, orders["totalNum"])
if orders["totalNum"]:
self.log(len(orders["items"]))
for order in orders["items"]:
self.log(order)
self.log(mp.mpf())
# ticker = current price action, bid/ask, etc
ticker = self.kc.get_ticker(pair_info["symbol"])
self.log(ticker)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def update_open_trades(self, pair: str):\n\n remove_indexes = []\n\n for index, trade in enumerate(self.trades[pair]['open']):\n if await self._handle_deferred_push(trade):\n remove_indexes.append(index)\n elif await self._handle_deferred_sell(trade):\n remove_indexes.append(index)\n elif await self._handle_stop_loss(trade):\n remove_indexes.append(index)\n\n if not trade['filled']:\n await self._trade_methods['update'](trade)\n\n for index in reversed(remove_indexes):\n del self.trades[pair]['open'][index]\n\n self.save_attr('trade_stats', max_depth=2, filter_items=[pair], filter_keys=[self.time_prefix])\n self.save_attr('last_trades', max_depth=1, filter_items=[pair])\n self.save_attr('trades', max_depth=1, filter_items=[pair])",
"def converge_orders(self, buy_orders, sell_orders, order_status):\n\n tickLog = self.exchange.get_instrument()['tickLog']\n to_amend = []\n to_create = []\n to_cancel = []\n buys_matched = 0\n sells_matched = 0\n existing_orders = self.exchange.get_orders()\n\n # Check all existing orders and match them up with what we want to place.\n # If there's an open one, we might be able to amend it to fit what we want.\n for order in existing_orders:\n if order['ordType'] != 'Limit':\n continue\n try:\n if (order['side'] == 'Buy' and (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 7)):\n desired_order = buy_orders[buys_matched]\n buys_matched += 1\n elif (order['side'] == 'Sell' and (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 8)):\n desired_order = sell_orders[sells_matched]\n sells_matched += 1\n elif (order['price'] == buy_orders[buys_matched]['price'] and order_status == 6):\n to_cancel.append(order)\n buys_matched += 1\n continue\n elif (order['price'] == sell_orders[sells_matched]['price'] and order_status == 6):\n to_cancel.append(order)\n sells_matched += 1\n continue\n else:\n continue\n\n # Found an existing order. Do we need to amend it?\n if desired_order['orderQty'] != order['leavesQty'] or (\n # If price has changed, and the change is more than our RELIST_INTERVAL, amend.\n desired_order['price'] != order['price'] and\n abs((desired_order['price'] / order['price']) - 1) > 0):\n to_amend.append({'orderID': order['orderID'], 'orderQty': order['cumQty'] + desired_order['orderQty'],\n 'price': desired_order['price'], 'side': order['side']})\n # Found an stop existing order. Do we need to amend it?\n\n except IndexError:\n # Will throw if there isn't a desired order to match. 
In that case, cancel it.\n if ((order_status == 2 and order['side'] == 'Sell') or (order_status == 1 and self.running_qty > 0) or (order_status == 4 and order['side'] == 'Buy') or (order_status == 3 and self.running_qty < 0) or (order_status == 7 and order['side'] == 'Buy') or (order_status == 8 and order['side'] == 'Sell')):\n to_cancel.append(order)\n\n if (order_status == 0 or order_status == 4 or order_status == 3 or order_status == 1 or order_status == 5 or order_status == 7):\n while buys_matched < len(buy_orders):\n to_create.append(buy_orders[buys_matched])\n buys_matched += 1\n if (order_status == 0 or order_status == 2 or order_status == 1 or order_status == 3 or order_status == 5 or order_status == 8):\n while sells_matched < len(sell_orders):\n to_create.append(sell_orders[sells_matched])\n sells_matched += 1\n\n if len(to_amend) > 0:\n for amended_order in reversed(to_amend):\n reference_order = [o for o in existing_orders if o['orderID'] == amended_order['orderID']][0]\n logger.info(\"Amending %4s: %d @ %.*f to %d @ %.*f (%+.*f)\" % (\n amended_order['side'],\n reference_order['leavesQty'], tickLog, reference_order['price'],\n (amended_order['orderQty'] - reference_order['cumQty']), tickLog, amended_order['price'],\n tickLog, (amended_order['price'] - reference_order['price'])\n ))\n # This can fail if an order has closed in the time we were processing.\n # The API will send us `invalid ordStatus`, which means that the order's status (Filled/Canceled)\n # made it not amendable.\n # If that happens, we need to catch it and re-tick.\n try:\n self.exchange.amend_bulk_orders(to_amend)\n except requests.exceptions.HTTPError as e:\n errorObj = e.response.json()\n if errorObj['error']['message'] == 'Invalid ordStatus':\n logger.warn(\"Amending failed. Waiting for order data to converge and retrying.\")\n sleep(0.5)\n return self.place_orders()\n else:\n logger.error(\"Unknown error on amend: %s. Exiting\" % errorObj)\n sys.exit(1)\n\n if len(to_create) > 0:\n logger.info(\"Creating %d orders:\" % (len(to_create)))\n for order in reversed(to_create):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['orderQty'], tickLog, order['price']))\n self.exchange.create_bulk_orders(to_create)\n\n # Could happen if we exceed a delta limit\n if len(to_cancel) > 0:\n logger.info(\"Canceling %d orders:\" % (len(to_cancel)))\n for order in reversed(to_cancel):\n logger.info(\"%4s %d @ %.*f\" % (order['side'], order['leavesQty'], tickLog, order['price']))\n self.exchange.cancel_bulk_orders(to_cancel)",
"def clearOrderList(self):\r\n\t\tself.pair.orders = []",
"def __handle_open_orders(self):\n portfolio = self.get_portfolio_object()\n # only take complete orders\n orders = [order for order in portfolio.orders if order.status == Status.confirmed]\n time_zone = TraderBase.get_timezone()\n now = datetime.datetime.now(time_zone)\n for order in orders:\n price = self.db_tool.session.query(Series)\\\n .filter(order.stock_id == Series.stock_id) \\\n .filter(Series.date.between(order.date, now)) \\\n .filter(order.price >= Series.pricehigh)\\\n .order_by(Series.date.asc()).first()\n if price:\n order.status = Status.completed\n order.date = price.date\n self.connect_related_order(order)\n else:\n diff = now - order.date.replace(tzinfo=time_zone)\n hours = diff.total_seconds() / 60\n if hours >= self.expire_in_hours:\n self.logger.info(\"Order is expired because limit {} for {} \"\n \"was not reached during the day\".\n format(order.price, order.stock_id))\n order.status = Status.expired\n portfolio.cash -= order.price_complete",
"def close_orders(self):",
"def cleanup_orders(self):\n removed = False\n with self.lock:\n remove_orders = [order for order in self.orders.values() if\n order.get_value() == 0 or\n order.order_state == ORDER_PICKEDUP or\n order.order_state == ORDER_WASTED]\n for item in remove_orders:\n print(\"< cleanup thread removed order {} from shelf {}\"\n .format(item, self.temperature))\n del self.orders[item.id]\n if item.get_value() == 0:\n item.order_state = ORDER_WASTED\n removed = True\n return removed",
"def generate_orders(self, good):\n surplus = self.inventory.surplus(good)\n if surplus >= 1: # sell inventory\n # the original only old one item here\n sell_amount = surplus\n order = self.create_sell_order(good, surplus)\n if order:\n # print('{} sells {} {}'.format(self.pop_job.title, sell_amount, good.name))\n self.market.sell(order)\n else: # buy more\n shortage = self.inventory.shortage(good)\n free_space = self.inventory.empty_space\n\n if shortage > 0:\n if shortage <= free_space:\n # enough space for ideal order\n limit = shortage\n else:\n # not enough space for ideal order\n limit = math.floor(free_space / shortage)\n\n if limit > 0:\n order = self.create_buy_order(good, limit)\n if order:\n # print('{} buys {} {}'.format(self.pop_job.title, limit, good.name))\n self.market.buy(order)\n # else:\n # print(\"{} has no shortage of {} (has shortage: {})\".format(self.pop_job.title, good.title, shortage))",
"def checkTrailingStop(self):\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n for position in open_positions:\n\n last_price = position[\"Last_Price\"]\n\n high_price = position[\"High_Price\"]\n\n five_percent = round(high_price * 0.95, 2)\n\n if last_price > high_price:\n\n self.open_positions.update_one({\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": self.asset_type}, {\n \"$set\": {\"High_Price\": last_price}})\n\n # CHECK IF LAST PRICE < 5% OF HIGH PRICE\n elif last_price < five_percent and self.user[\"Accounts\"][self.account_id][\"Trailing_Stop_Active\"]:\n\n queued = self.queue.find_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": position[\"Symbol\"], \"Strategy\": position[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n # IF TRUE AND NOT IN QUEUE, SELL OUT POSITION\n if not queued:\n\n trade_data = {\n \"Symbol\": position[\"Symbol\"],\n \"Aggregation\": position[\"Aggregation\"],\n \"Strategy\": position[\"Strategy\"],\n \"Asset_Type\": position[\"Asset_Type\"],\n \"Account_ID\": self.account_id\n }\n\n trade_data[\"Side\"] = \"SELL\"\n\n if self.asset_type == \"OPTION\":\n\n trade_data[\"Exp_Date\"] = position[\"Exp_Date\"]\n\n trade_data[\"Pre_Symbol\"] = position[\"Pre_Symbol\"]\n\n trade_data[\"Side\"] = \"SELL_TO_CLOSE\"\n\n self.placeOrder(trade_data, position)\n\n msg = f\"Symbol {position['Symbol']} is selling due to 5% drop of high price - TRADER: {self.user['Name']}\"\n\n self.logger.INFO(msg)",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": self.asset_type, \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Aggregation\": order[\"Aggregation\"],\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n if self.asset_type == \"OPTION\":\n\n other[\"Pre_Symbol\"] = order[\"Pre_Symbol\"]\n\n other[\"Exp_Date\"] = order[\"Exp_Date\"]\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"], \"Asset_Type\": self.asset_type})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def sellOutOptions(self):\n\n open_positions = self.open_positions.find(\n {\"Trader\": self.user[\"Name\"], \"Asset_Type\": \"OPTION\"})\n\n dt = getDatetime()\n\n for position in open_positions:\n\n day_before = (position[\"Exp_Date\"] -\n timedelta(days=1)).strftime(\"%Y-%m-%d\")\n\n if day_before == dt.strftime(\"%Y-%m-%d\"):\n\n trade_data = {\n \"Symbol\": position[\"Symbol\"],\n \"Pre_Symbol\": position[\"Pre_Symbol\"],\n \"Side\": \"SELL_TO_CLOSE\",\n \"Aggregation\": position[\"Aggregation\"],\n \"Strategy\": position[\"Strategy\"],\n \"Asset_Type\": position[\"Asset_Type\"],\n \"Exp_Date\": position[\"Exp_Date\"]\n }\n\n self.placeOrder(trade_data, position)",
"def remove_working_order_rows(self):\n for key, working_order in enumerate(self.working_order):\n if not working_order['quantity']:\n self.working_order.pop(key)",
"def __clean_orders(self):\n canceled_id = []\n for order_id, order in self.orders_dict.items():\n if order[\"status\"] == \"canceled\":\n canceled_id.append(order_id)\n for id in canceled_id:\n del self.orders_dict[id]",
"def remove_orders(frame: pd.DataFrame, mode: str, side: str, perc: int = 0) -> dict:\n empty_output = dict(asks=pd.Series(), bids=pd.Series())\n\n if perc == 0:\n return dict(asks=frame['asks'], bids=frame['bids'])\n\n bids = frame['bids'].to_numpy().copy()\n asks = frame['asks'].to_numpy().copy()\n\n if mode == 'market':\n if frame.loc[0, :].sum() == 0:\n return empty_output # Only considering limit orders for adjustments\n else:\n rem_bid = bids[0] * perc\n rem_ask = asks[0] * perc\n\n elif mode == 'liquidity':\n rem_bid = min((sum(bids) + sum(asks)) * perc / 2, sum(bids))\n rem_ask = min((sum(bids) + sum(asks)) * perc / 2, sum(asks))\n elif mode == 'execution':\n close_volume = calculate_uncross(bids=frame['bids'], asks=frame['asks'])['trade_vol']\n rem_bid = close_volume * perc\n rem_ask = close_volume * perc\n\n else:\n raise KeyError(f\"Incorrect value for mode submitted : {mode}\")\n\n if side in ['bid', 'both']:\n b = len(bids) - 1\n while rem_bid > 0:\n if bids[0] != 0:\n local_vol = bids[0]\n bids[0] = local_vol - min(local_vol, rem_bid)\n rem_bid -= min(rem_bid, local_vol)\n else:\n local_vol = bids[b]\n bids[b] = local_vol - min(local_vol, rem_bid)\n rem_bid -= min(rem_bid, local_vol)\n b -= 1\n\n if side in ['ask', 'both']:\n a = 1\n while rem_ask > 0:\n if asks[0] != 0:\n local_vol = asks[0]\n asks[0] = local_vol - min(local_vol, rem_ask)\n rem_ask -= min(rem_ask, local_vol)\n else:\n local_vol = asks[a]\n asks[a] = local_vol - min(local_vol, rem_ask)\n rem_ask -= min(rem_ask, local_vol)\n a += 1\n\n ret_df = pd.DataFrame([asks, bids], index=['end_close_vol_ask', 'end_close_vol_bid'], columns=frame.index).T\n return dict(asks=ret_df['end_close_vol_ask'], bids=ret_df['end_close_vol_bid'])",
"async def update_adjusted_tick_data(self, pair: str):\n\n base = config['trade_base']\n pair_base = pair.split('-')[0]\n\n try:\n last_time = self.last_adjusted_close_times[pair]\n start_index = self.close_times[pair].index(last_time) + 1\n\n except ValueError:\n self.log.error(\"{} has no adjusted close times.\", pair)\n last_time = 0\n start_index = 0\n\n diff = len(self.close_times[pair]) - start_index\n if diff != 1:\n self.log.debug(\"{} got diff {}, source length {}, last time {}.\",\n pair, diff, len(self.close_times[pair]), last_time)\n\n if base == pair_base:\n self.adjusted_close_values[pair] = self.close_values[pair]\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n await self._update_volume_derivatives(pair, diff, start_index)\n await self._truncate_adjusted_tick_data(pair)\n return\n\n convert_pair = '{}-{}'.format(base, pair_base)\n missing = 0\n\n for index in range(diff):\n try:\n convert_value = self.close_values[convert_pair][start_index + index]\n except IndexError:\n convert_value = self.close_values[convert_pair][-1]\n missing += 1\n\n close_value = self.close_values[pair][start_index + index]\n self.adjusted_close_values[pair].append(close_value * convert_value)\n\n if missing > 0:\n self.log.debug(\"{} padded {} values at end.\", pair, missing)\n\n self.last_adjusted_close_times[pair] = self.close_times[pair][-1]\n await self._update_volume_derivatives(pair, diff, start_index)\n await self._truncate_adjusted_tick_data(pair)",
"def test_decreasing_stop_price__with_open_positions(self):\n position_sizer = self.simple_position_sizer\n self.broker.get_open_orders.return_value = []\n\n # Set the last available price to 100, fraction_at_risk to 0.1, stop price would be in this case\n # equal to 100 * (1 - 0.1) = 90\n self.timer.now.return_value = str_to_date(\"2017-01-01\") + RelativeDelta(hours=7)\n self.last_price = 100\n fraction_at_risk = 0.1\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = position_sizer.size_signals([signal], use_stop_losses=True)\n stop_order_1 = [o for o in orders if isinstance(o.execution_style, StopOrder)][0]\n\n # Simulate placing the orders - broker should return them as open orders\n self.broker.get_open_orders.return_value = orders\n\n # Simulate next day price change to a price above the previous stop_price - StopOrder is not triggered\n self.last_price = 91\n\n # Size signals once again (the next day). The new StopOrder stop price should not be lower than the\n # previous one (90)\n self.timer.now.return_value = str_to_date(\"2017-01-02\") + RelativeDelta(hours=7)\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = position_sizer.size_signals([signal], use_stop_losses=True)\n\n stop_order_2 = [o for o in orders if isinstance(o.execution_style, StopOrder)][0]\n self.assertTrue(stop_order_1.execution_style.stop_price == stop_order_2.execution_style.stop_price)",
"def fill_short_gaps(ls_ls_prices, lim):\n dict_corrections = {}\n for indiv_ind, ls_prices in enumerate(ls_ls_prices):\n day_ind = 0\n while (day_ind < len(ls_prices)) and (ls_prices[day_ind] != ls_prices[day_ind]):\n day_ind += 1\n while day_ind < len(ls_prices):\n if ls_prices[day_ind] != ls_prices[day_ind]:\n relative_day = 0\n while (day_ind + relative_day < len(ls_prices)) and\\\n (ls_prices[day_ind + relative_day] != ls_prices[day_ind + relative_day]) :\n relative_day += 1\n if relative_day < lim and day_ind + relative_day != len(ls_prices):\n ls_ls_prices[indiv_ind] = ls_ls_prices[indiv_ind][:day_ind] +\\\n [ls_ls_prices[indiv_ind][day_ind - 1] for x in range(relative_day)] +\\\n ls_ls_prices[indiv_ind][day_ind + relative_day:]\n dict_corrections.setdefault(indiv_ind, []).append((day_ind - 1, day_ind + relative_day))\n day_ind += relative_day\n else:\n day_ind += 1\n return ls_ls_prices, dict_corrections",
"def shrink_offset_pairs(self):\n\n def int_from_block(i):\n u, v = self.blocks[i].bounds\n block_bytes = self.shrink_target.buffer[u:v]\n return int_from_bytes(block_bytes)\n\n def block_len(i):\n return self.blocks[i].length\n\n # Try reoffseting every pair\n def reoffset_pair(pair, o):\n n = len(self.blocks)\n # Number of blocks may have changed, need to validate\n valid_pair = [\n p\n for p in pair\n if p < n and int_from_block(p) > 0 and self.is_payload_block(p)\n ]\n\n if len(valid_pair) < 2:\n return\n\n m = min([int_from_block(p) for p in valid_pair])\n\n new_blocks = [\n self.shrink_target.buffer[u:v]\n for u, v in self.shrink_target.all_block_bounds()\n ]\n for i in valid_pair:\n new_blocks[i] = int_to_bytes(int_from_block(i) + o - m, block_len(i))\n buffer = hbytes().join(new_blocks)\n return self.incorporate_new_buffer(buffer)\n\n def is_non_zero_payload(block):\n return not block.all_zero and self.is_payload_block(block.index)\n\n for block_i, block_j in self.each_pair_of_blocks(\n is_non_zero_payload, is_non_zero_payload\n ):\n i = block_i.index\n j = block_j.index\n\n value_i = int_from_block(i)\n value_j = int_from_block(j)\n\n offset = min(value_i, value_j)\n Integer.shrink(\n offset, lambda o: reoffset_pair((i, j), o), random=self.random\n )",
"def compact(levels, start):\n\n # print(\"Compacting book\")\n # self.print()\n last_level = None\n for i in range(start, -1, -1):\n level = levels[i]\n if last_level:\n if level.price == last_level.price:\n last_level.qty += level.qty\n last_level.order_count += level.order_count\n del levels[i]\n else:\n break\n else:\n last_level = level",
"def _repair_crossed_asks(self, ask):\r\n while len(self.asks) and self.asks[0].price < ask:\r\n volume = self.asks[0].volume\r\n self._update_total_ask(-volume)\r\n self.asks.pop(0)\r\n self._valid_ask_cache = -1\r\n #self.debug(\"### repaired ask\")\r",
"def test_decreasing_stop_price__no_open_positions(self):\n position_sizer = self.simple_position_sizer\n self.broker.get_positions.return_value = []\n\n # Set the last available price to 100, fraction_at_risk to 0.1, stop price would be in this case\n # equal to 100 * (1 - 0.1) = 90\n self.last_price = 100\n fraction_at_risk = 0.1\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = position_sizer.size_signals([signal], use_stop_losses=True)\n stop_order_1 = [o for o in orders if isinstance(o.execution_style, StopOrder)][0]\n\n # Simulate placing the orders - broker should return them as open orders\n self.broker.get_open_orders.return_value = orders\n\n # Simulate next day price change to a price above the previous stop_price - StopOrder is not triggered\n self.last_price = 91\n\n # Size signals once again (the next day). The new StopOrder stop price should not be lower than the\n # previous one (90)\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = position_sizer.size_signals([signal], use_stop_losses=True)\n\n stop_order_2 = [o for o in orders if isinstance(o.execution_style, StopOrder)][0]\n self.assertTrue(stop_order_1.execution_style.stop_price > stop_order_2.execution_style.stop_price)",
"def slot_user_order(self, dummy_sender, data):\r\n (price, volume, typ, oid, status) = data\r\n found = False\r\n removed = False # was the order removed?\r\n opened = False # did the order change from 'post-pending' to 'open'\"?\r\n voldiff = 0 # did the order volume change (full or partial fill)\r\n if \"executing\" in status:\r\n # don't need this status at all\r\n return\r\n if \"post-pending\" in status:\r\n # don't need this status at all\r\n return\r\n if \"removed\" in status:\r\n for i in range(len(self.owns)):\r\n if self.owns[i].oid == oid:\r\n order = self.owns[i]\r\n\r\n # work around MtGox strangeness:\r\n # for some reason it will send a \"completed_passive\"\r\n # immediately followed by a \"completed_active\" when a\r\n # market order is filled and removed. Since \"completed_passive\"\r\n # is meant for limit orders only we will just completely\r\n # IGNORE all \"completed_passive\" if it affects a market order,\r\n # there WILL follow a \"completed_active\" immediately after.\r\n if order.price == 0:\r\n if \"passive\" in status:\r\n # ignore it, the correct one with\r\n # \"active\" will follow soon\r\n return\r\n\r\n self.debug(\r\n \"### removing order %s \" % oid,\r\n \"price:\", self.gox.quote2str(order.price),\r\n \"type:\", order.typ)\r\n\r\n # remove it from owns...\r\n self.owns.pop(i)\r\n\r\n # ...and update own volume cache in the bids or asks\r\n self._update_level_own_volume(\r\n order.typ,\r\n order.price,\r\n self.get_own_volume_at(order.price, order.typ)\r\n )\r\n removed = True\r\n break\r\n else:\r\n for order in self.owns:\r\n if order.oid == oid:\r\n found = True\r\n self.debug(\r\n \"### updating order %s \" % oid,\r\n \"volume:\", self.gox.base2str(volume),\r\n \"status:\", status)\r\n voldiff = volume - order.volume\r\n opened = (order.status != \"open\" and status == \"open\")\r\n order.volume = volume\r\n order.status = status\r\n break\r\n\r\n if not found:\r\n # This can happen if we added the order with a different\r\n # application or the gox server sent the user_order message\r\n # before the reply to \"order/add\" (this can happen because\r\n # actually there is no guarantee which one arrives first).\r\n # We will treat this like a reply to \"order/add\"\r\n self.add_own(Order(price, volume, typ, oid, status))\r\n\r\n # The add_own() method has handled everything that was needed\r\n # for new orders and also emitted all signals already, we\r\n # can immediately return here because the job is done.\r\n return\r\n\r\n # update level own volume cache\r\n self._update_level_own_volume(\r\n typ, price, self.get_own_volume_at(price, typ))\r\n\r\n # We try to help the strategy with tracking the orders as good\r\n # as we can by sending different signals for different events.\r\n if removed:\r\n reason = self.gox.msg[\"user_order\"][\"reason\"]\r\n self.signal_own_removed(self, (order, reason))\r\n if opened:\r\n self.signal_own_opened(self, (order))\r\n if voldiff:\r\n self.signal_own_volume(self, (order, voldiff))\r\n self.signal_changed(self, None)\r\n self.signal_owns_changed(self, None)",
"async def _truncate_adjusted_tick_data(self, pair: str):\n\n truncate = len(self.close_times[pair]) - self.min_tick_length\n if truncate > 60:\n del self.base_24hr_volumes[pair][1][:truncate]\n del self.adjusted_close_values[pair][:truncate]",
"def on_order(self, order: OrderData):\n self.position_calculator.update_position(order)\n\n self.current_pos = self.position_calculator.pos\n self.avg_price = self.position_calculator.avg_price\n\n if order.status == Status.ALLTRADED and order.vt_orderid in (self.long_orders + self.short_orders):\n\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n if order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n self.last_filled_order = order\n\n for ids in (self.long_orders + self.short_orders + self.profit_orders):\n self.cancel_order(ids)\n\n if abs(self.position_calculator.pos) < self.fixed_size:\n return\n\n step = self.get_step()\n\n # tick 存在且仓位数量还没有达到设置的最大值.\n if self.tick and abs(self.position_calculator.pos) < self.max_pos_size * self.fixed_size:\n buy_price = order.price - step * self.grid_step\n sell_price = order.price + step * self.grid_step\n\n buy_price = min(self.tick.bid_price_1 * (1 - 0.0001), buy_price)\n sell_price = max(self.tick.ask_price_1 * (1 + 0.0001), sell_price)\n\n long_ids = self.buy(buy_price, self.fixed_size)\n short_ids = self.sell(sell_price, self.fixed_size)\n\n self.long_orders.extend(long_ids)\n self.short_orders.extend(short_ids)\n\n if order.status == Status.ALLTRADED and order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n if abs(self.position_calculator.pos) < self.fixed_size:\n self.cancel_all()\n\n if not order.is_active():\n if order.vt_orderid in self.long_orders:\n self.long_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.short_orders:\n self.short_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.profit_orders:\n self.profit_orders.remove(order.vt_orderid)\n\n elif order.vt_orderid in self.stop_orders:\n self.stop_orders.remove(order.vt_orderid)\n\n self.put_event()",
"def killQueueOrder(self):\n # CHECK ALL QUEUE ORDERS AND CANCEL ORDER IF GREATER THAN TWO HOURS OLD\n queue_orders = self.queue.find(\n {\"Trader\": self.user[\"Name\"], \"Account_ID\": self.account_id})\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n two_hours_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(hours=2), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n ten_minutes_ago = datetime.strptime(datetime.strftime(\n dt_central - timedelta(minutes=10), \"%Y-%m-%d %H:%M:%S\"), \"%Y-%m-%d %H:%M:%S\")\n\n for order in queue_orders:\n\n order_date = order[\"Date\"]\n\n order_type = order[\"Order_Type\"]\n\n id = order[\"Order_ID\"]\n\n forbidden = [\"REJECTED\", \"CANCELED\", \"FILLED\"]\n\n if two_hours_ago > order_date and (order_type == \"BUY\" or order_type == \"BUY_TO_OPEN\") and id != None and order[\"Order_Status\"] not in forbidden:\n\n # FIRST CANCEL ORDER\n resp = self.tdameritrade.cancelOrder(id)\n\n if resp.status_code == 200 or resp.status_code == 201:\n\n other = {\n \"Symbol\": order[\"Symbol\"],\n \"Order_Type\": order[\"Order_Type\"],\n \"Order_Status\": \"CANCELED\",\n \"Strategy\": order[\"Strategy\"],\n \"Account_ID\": self.account_id,\n \"Trader\": self.user[\"Name\"],\n \"Date\": getDatetime()\n }\n\n self.other.insert_one(other)\n\n self.queue.delete_one(\n {\"Trader\": self.user[\"Name\"], \"Symbol\": order[\"Symbol\"], \"Strategy\": order[\"Strategy\"]})\n\n self.logger.INFO(\n f\"CANCELED ORDER FOR {order['Symbol']} - TRADER: {self.user['Name']}\", True)\n\n # IF QUEUE ORDER DATE GREATER THAN 10 MINUTES OLD AND ORDER ID EQUALS NONE, SEND ALERT\n if ten_minutes_ago > order_date and order[\"Order_ID\"] == None and order[\"Account_ID\"] == self.account_id:\n\n if order[\"Symbol\"] not in self.no_ids_list:\n\n self.logger.ERROR(\n \"QUEUE ORDER ID ERROR\", f\"ORDER ID FOR {order['Symbol']} NOT FOUND - TRADER: {self.user['Name']} - ACCOUNT ID: {self.account_id}\")\n\n self.no_ids_list.append(order[\"Symbol\"])\n\n else:\n\n if order[\"Symbol\"] in self.no_ids_list:\n\n self.no_ids_list.remove(order[\"Symbol\"])",
"def row_inout_eliminate(values):\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values:\n \n location = values[box][0]\n \n if location in location_dict.keys():\n outside = location_dict[location][0]\n \n if str(6) not in box: #only look at periods 1-5\n \n following_activity = inout_dict[box][0]\n if following_activity not in solved_values:\n temp_list = list(values[following_activity])\n \n for locations_next in values[following_activity]:\n \n if location_dict[locations_next][0] == outside and outside == True:\n \n try:\n temp_list.remove(locations_next)\n except:\n pass\n \n \n values[following_activity] = temp_list\n\n return values",
"def update_past_orders(self):\n\n #TODO: Implement a method to grab the order history for just one stock\n all_past_orders = self.portfolio.all_past_orders() #This is REALLY inefficient (takes forever)\n\n #Now pre-parse into commonly used categories\n self.past_orders = all_past_orders[all_past_orders['symbol']==self.symbol] #Past orders for only this stock\n self.filled_orders = self.past_orders[self.past_orders['state']=='filled'] #Only orders that were filled (not canceled)\n\n return True",
"def zeroDummyChargesAndRemovePerturbations( intop, outtop):\n\n #Read input topology\n file = open( intop, 'r')\n text = file.readlines()\n file.close()\n\n #Parse atoms section, making appropriate modifications\n atomlinenrs = extract_section( text, 'atoms')\n for linenr in atomlinenrs:\n #Retrieve line\n line = text[linenr]\n linenc, comments = stripcomments(line)\n #Split line\n elements = line.split() \n nelements = len(elements)\n elements_nc = linenc.split()\n nelements_nc = len( elements_nc )\n #Skip if not all elements found for a perturbed atom\n if nelements_nc < 10: continue\n\n #Parse line\n atom = dict()\n atom['nr'] = int(elements[0])\n atom['type'] = elements[1] \n atom['resnr'] = int(elements[2])\n atom['residue'] = elements[3]\n atom['atom'] = elements[4]\n atom['cgnr'] = int(elements[5]) \n atom['charge'] = float(elements[6])\n atom['mass'] = float(elements[7])\n atom['typeB'] = elements[8]\n atom['chargeB'] = float( elements[9] )\n atom['massB'] = float( elements[10] )\n atom['comment'] = ''\n for elem in elements[12:]:\n atom['comment']+= elem + ' '\n\n #Make A state charges equal B state charges\n atom['charge'] = atom['chargeB']\n\n #Make A state type same as B state type unless dummy\n if not '_dummy' in atom['typeB']:\n atom['type'] = atom['typeB']\n\n\n #Compose and reinsert line\n line = \"%(nr)6d %(type)10s %(resnr)6d %(residue)6s %(atom)6s %(cgnr)6d %(charge)10.5f %(mass)10.6f %(typeB)10s %(chargeB)10.5f %(mass)10.6f ; %(comment)s\\n\" % atom\n text[linenr] = line\n \n\n #Next handle bonds section and copy B parameters to A, if present, otherwise keep just A parameters (no change needed)\n indices = extract_section( text, 'bonds')\n for index in indices:\n #Extract the line\n line = text[index]\n (linestripped, comments) = stripcomments(line)\n elements = line.split()\n nelements = len( linestripped.split() ) #Length of line without comments\n\n #Skip if no B elements found (just keep A parameters)\n if (nelements < 7): continue\n\n #Parse line\n bond = dict()\n bond['i'] = int( elements[0] )\n bond['j'] = int( elements[1] )\n bond['function'] = int( elements[2] )\n bond['Req'] = float( elements[3] )\n bond[ 'Keq'] = float( elements[4] )\n bond['ReqB'] = float( elements[5] )\n bond[ 'KeqB'] = float( elements[6] )\n bond['comments'] = ''\n for elem in elements[7:]:\n bond['comments'] += elem + ' '\n #Compose a new line\n line = \"%(i)5d %(j)5d %(function)5d%(ReqB)12.4e%(KeqB)12.4e %(ReqB)12.4e%(KeqB)12.4e ; %(comments)s\\n\" % bond\n #Insert line\n text[index] = line\n\n #Next handle angle section and copy B parameters to A\n indices = extract_section( text, 'angles')\n for index in indices:\n #Extract line with and without comments\n line = text[ index ]\n linenocomments, c = stripcomments( line )\n\n elements = line.split()\n #Skip line (keep as is) if there is no B state\n if ( len(linenocomments.split() ) < 8): continue\n\n #Otherwise, get it and make sure A state set to B state\n #Parse line\n angle = dict()\n angle['i'] = int(elements[0] )\n angle['j'] = int(elements[1] )\n angle['k'] = int(elements[2] )\n angle['function'] = int(elements[3] )\n angle['thetaB'] = float(elements[6] )\n angle['cthB'] = float(elements[7] )\n angle['comments'] = ''\n for elem in elements[8:]:\n angle['comments']+= elem + ' '\n #Construct new line\n line = \"%(i)5d %(j)5d %(k)5d %(function)5d%(thetaB)12.4e%(cthB)12.4e %(thetaB)12.4e%(cthB)12.4e ; %(comments)s\\n\" % angle\n #Insert new line\n text[index] = line\n \n\n #Next handle torsions section and copy B parameters to A\n 
indices = extract_section( text, 'dihedrals' ) \n #Note there are multiple possible formats of this section depending on the particular torsional type used\n\n for index in indices:\n #Extract line with and without comments\n line = text[ index ]\n linenocomments, c = stripcomments( line )\n elements = line.split()\n nelements = len(elements)\n nelements_nocomments = len( linenocomments.split() )\n\n #Function type is normally element 4; skip if don't have that\n if nelements_nocomments < 4: continue \n\n #Else parse\n i = int( elements[0] )\n j = int( elements[1] )\n k = int( elements[2] )\n l = int( elements[3] )\n func = int( elements[4] )\n\n if func==3: #Handle type 3 (RBs for AMBER torsions)\n if nelements_nocomments < 16: continue #Skip if not enough data/no perturbation\n C = list()\n for element in elements[5:17]:\n C.append( float (element ))\n comment = ''\n for element in elements[18:]:\n comment+= element+' '\n\n #Construct new line\n line = \" %-4s %-4s %-4s %-4s %3d%12.5f%12.5f%12.5f%12.5f%12.5f%12.5f %12.5f%12.5f%12.5f%12.5f%12.5f%12.5f ; %s \\n\" % (i, j, k, l, func, C[6], C[7], C[8], C[9], C[10], C[11], C[6], C[7], C[8], C[9], C[10], C[11], comment)\n\n elif func==9 or func == 1: #Handle type 9, new format, or 1, old format impropers\n if nelements_nocomments < 11: continue #Skip if not enough data/no perturbation\n phase = float( elements[5] )\n kd = float( elements[6] )\n pn = int( elements[7] )\n phaseB = float( elements[8] )\n kdB = float( elements[9] )\n pnB = int( elements[10] )\n comment = ''\n\n for element in elements[11:]:\n comment+= element + ' '\n\n #Construct new line\n line = \"-4s %-4s %-4s %-4s %3d%12.5f%12.5f%3df%12.5f%12.5f%3d ; %s \\n\" % (i, j, k, l, func, phaseB, kdB, pnB, phaseB, kdB, pnB, comment ) \n\n\n\n else:\n raise StandardError('[ dihedrals ] function type %s not supported...' % function ) \n\n #Insert line\n text[index] = line\n\n\n\n #Write out resulting topology\n file = open(outtop, 'w')\n file.writelines( text)\n file.close()",
"def place_orders(context, data):\r\n log.info(\"*********Monthly flags: %s\" % context.flags)\r\n \r\n context.sell = []\r\n context.buy = []\r\n \r\n # Go through flags to determine buy/sell signals\r\n for asset, flags in context.flags.items():\r\n # If up > down and multiple blue flags, add to buy\r\n if flags['UP'] > flags['DOWN'] and flags['UP'] > 1:\r\n context.buy.append(asset)\r\n \r\n # If down > up and multiple down flags, add to sell\r\n elif flags['DOWN'] > flags['UP'] and flags['DOWN'] > 1:\r\n context.sell.append(asset)\r\n \r\n # If both SPY and QQQ are buys, rebalance weightings and check components\r\n if sid(8554) in context.buy and sid(19920) in context.buy:\r\n rebalance_weightings(context)\r\n \r\n # Reset down sequence\r\n context.first_down_sequence = set()\r\n \r\n # Reset SPY and QQQ to max weightings\r\n context.target_weights[sid(8554)] = context.max_weights[sid(8554)]\r\n context.target_weights[sid(19920)] = context.max_weights[sid(19920)]\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[sid(8554)] = round(context.target_weights[sid(8554)] * context.portfolio.portfolio_value / context.price[sid(8554)])\r\n context.target_shares[sid(19920)] = round(context.target_weights[sid(19920)] * context.portfolio.portfolio_value / context.price[sid(19920)])\r\n \r\n # If not overweighting:\r\n if not context.overweighting:\r\n context.buy.remove(sid(8554))\r\n context.buy.remove(sid(19920))\r\n \r\n # Check components\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio > 1, add to buy\r\n if asset != sid(8554) and asset != sid(19920) and ratio > 1:\r\n context.buy.append(asset)\r\n \r\n # If SPY is a sell, check UP ratios for components\r\n if sid(8554) in context.sell:\r\n for asset, ratio in context.up_ratios.items():\r\n # If UP ratio < 1, add to sell\r\n if asset != sid(8554) and asset != sid(19920) and ratio < 1:\r\n context.sell.append(asset)\r\n \r\n \r\n \r\n # First month at end August 2017: set all other assets to max weighting, except take UP ratio of JKL to be <1 so sell 20% of weighting\r\n if context.first_iteration:\r\n log.info('First iteration')\r\n \r\n # Initialise weightings\r\n rebalance_weightings(context)\r\n context.first_iteration = False\r\n \r\n for asset, weight in context.max_weights.items(): \r\n # JKL\r\n if asset == sid(26451):\r\n context.sell.append(asset)\r\n\r\n context.target_weights[asset] = weight\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n buy_overweight = []\r\n remaining_cash = context.portfolio.cash\r\n \r\n # Buy components first (before considering overweighting QQQ/SPY)\r\n for asset in sorted(context.buy, reverse=True):\r\n \r\n # This is an up sequence so no subsequent down sequence\r\n if asset in context.first_down_sequence:\r\n context.first_down_sequence.remove(asset) \r\n \r\n # Buy 50% of weighting\r\n log.info('UP flags for %s: Buy 50 percent' % asset)\r\n extra_weight = 0.5 * context.max_weights[asset]\r\n \r\n # Do not exceed max shares by weighting, UNLESS taking from cash from components (overweighting)\r\n if context.target_weights[asset] == context.max_weights[asset] or (context.target_weights[asset] > context.max_weights[asset] and context.overweighting):\r\n buy_overweight.append(asset)\r\n \r\n elif context.target_weights[asset] + extra_weight > context.max_weights[asset]:\r\n context.target_weights[asset] = 
context.max_weights[asset]\r\n \r\n else:\r\n context.target_weights[asset] += extra_weight\r\n \r\n # Convert weights to number of shares\r\n old_shares = context.target_shares[asset]\r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n remaining_cash -= (context.target_shares[asset] - old_shares) * context.price[asset]\r\n \r\n for asset in buy_overweight:\r\n if remaining_cash > 0:\r\n # If first overweight or 2 assets to be overweighted, take 50% of available cash\r\n if context.target_weights[asset] > context.max_weights[asset] or len(buy_overweight) > 1:\r\n log.info('Taking half of cash of value: %f' % (remaining_cash * 0.5))\r\n context.target_weights[asset] += 0.5 * remaining_cash / context.portfolio.portfolio_value\r\n \r\n # If second overweight, take all remaining cash\r\n else:\r\n log.info('Taking remaining of cash of value: %f' % (remaining_cash))\r\n context.target_weights[asset] += remaining_cash / context.portfolio.portfolio_value\r\n \r\n else:\r\n # If no cash, ignore\r\n log.info('UP flags for %s: No change' % asset)\r\n continue\r\n \r\n \r\n # For assets in sell list\r\n for asset in context.sell:\r\n \r\n # If asset already has 0 holdings, ignore\r\n if context.target_weights[asset] == 0:\r\n log.info('DOWN flags for %s: No change' % asset)\r\n continue\r\n \r\n # If first multiple down flags, sell 20% of UP weight\r\n elif asset not in context.first_down_sequence:\r\n log.info('First DOWN flags for %s: Sell 20 percent' % asset)\r\n context.target_weights[asset] -= 0.2 * context.max_weights[asset]\r\n context.first_down_sequence.add(asset)\r\n \r\n # If this is a subsequent down flag sequence, sell 40% of UP weight\r\n else:\r\n log.info('DOWN flags for %s: Sell 40 percent' % asset)\r\n context.target_weights[asset] -= 0.4 * context.max_weights[asset]\r\n \r\n # Ensure no short position\r\n if context.target_weights[asset] < 0:\r\n context.target_weights[asset] = 0\r\n \r\n # Convert weights to number of shares \r\n context.target_shares[asset] = round(context.target_weights[asset] * context.portfolio.portfolio_value / context.price[asset])\r\n \r\n print(context.target_weights)",
"def returnOpenOrders(self, currency_pair=\"all\"):\n pass",
"def generate_matched_orders(self, new_action, matched_queries):\n if self.sell_list and self.buy_list:\n break_flag = False\n if new_action == \"buy\":\n # for a new buy order, multipleq ueries from sell list are\n # matched as long as formula holds good\n max_buy_order = self.buy_list[-1]\n completed_sell_orders = 0\n for sell_order in self.sell_list:\n buy_qty = max_buy_order.order_qty\n if sell_order.stock_value <= max_buy_order.stock_value:\n sell_qty = sell_order.order_qty\n if buy_qty > sell_qty:\n completed_sell_orders += 1\n max_buy_order.order_qty = buy_qty - sell_qty\n matched_qty = sell_qty\n elif sell_qty == buy_qty:\n self.buy_list.pop()\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n else:\n self.buy_list.pop()\n sell_order.order_qty = sell_qty - buy_qty\n matched_qty = buy_qty\n break_flag = True\n matched_queries.append(\n \"%s %s %s %s\" % (sell_order.order_id,\n matched_qty,\n sell_order.stock_value,\n max_buy_order.order_id))\n else:\n break_flag = True\n if break_flag:\n break\n if completed_sell_orders:\n self.sell_list = self.sell_list[completed_sell_orders:]\n else:\n min_sell_order = self.sell_list[0]\n completed_buy_orders = 0\n # for a new sell order, multiple queries from buy list are\n # matched as long as formula holds good\n for index in range(len(self.buy_list)-1, -1, -1):\n break_flag = False\n buy_order = self.buy_list[index]\n sell_qty = min_sell_order.order_qty\n if min_sell_order.stock_value <= buy_order.stock_value:\n buy_qty = buy_order.order_qty\n if buy_qty > sell_qty:\n buy_order.order_qty = buy_qty - sell_qty\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n elif buy_qty == sell_qty:\n self.buy_list.pop()\n self.sell_list = self.sell_list[1:]\n matched_qty = sell_qty\n break_flag = True\n else:\n completed_buy_orders -= 1\n min_sell_order.order_qty = sell_qty - buy_qty\n matched_qty = buy_qty\n matched_queries.append(\n \"%s %s %s %s\" % (min_sell_order.order_id,\n matched_qty,\n min_sell_order.stock_value,\n buy_order.order_id))\n else:\n break_flag = True\n if break_flag:\n break\n if completed_buy_orders:\n self.buy_list = self.buy_list[:completed_buy_orders]"
] | [
"0.65187794",
"0.5971989",
"0.5893282",
"0.5487122",
"0.5485203",
"0.5469665",
"0.5453112",
"0.5382519",
"0.5372027",
"0.5354412",
"0.53433925",
"0.5335271",
"0.5335135",
"0.532721",
"0.5317983",
"0.5316886",
"0.53092206",
"0.52989537",
"0.52932405",
"0.52614486",
"0.52577066",
"0.52482885",
"0.5211207",
"0.520917",
"0.5189151",
"0.5153316",
"0.51423854",
"0.5139034",
"0.5136574",
"0.51349074"
] | 0.6033398 | 1 |
divides operand_1 by operand_2 | def calc(operand_1, operand_2):
return operand_1 / operand_2 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n\n return operand_1/operand_2",
"def calc(operand_1, operand_2):\n try:\n return operand_1/operand_2\n except ZeroDivisionError:\n return 0",
"def __div__(self, other, **kwargs):\n kwargs.update({'operator': 'div'})\n return self.__add__(other, **kwargs)",
"def division(a, b):\n return (a // b, a / b)",
"def div(self, a, b):\n return (a / b, a % b)",
"def calc(operand_1, operand_2):\n return operand_1 - operand_2",
"def calc(operand_1, operand_2):\n return operand_1 - operand_2",
"def division_algo(a, b):\n return a / b, a % b",
"def division(x, y):\n return x / y",
"def div(self, a, b):\n raise NotImplementedError",
"def division(self, a, b):\n if not check_arguments(a, b): # check if arguments are numbers\n self.last_result = a / b",
"def __div__(self, other):\n return self.__mul__(1 / other)",
"def __div__(self, other):\n return self.__mul__(1 / other)",
"def div(a, b):\n c = Calculator()\n result = c.div(a, b)\n click.echo('{} / {} = {}'.format(a, b, result))",
"def exquo(self, a, b):\n return a // b",
"def exquo(self, a, b):\n return a / b",
"def calc(operand_1, operand_2):\n\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def calc(operand_1, operand_2):\n return operand_1 + operand_2",
"def div2(left: float, right: float) -> float:\n return left / right",
"def di(o1, o2):\n return o1/o2",
"def divide(n1, n2):\n return n1 / n2",
"def div(x, y):\n return x / y",
"def __div__(self,that):\n return self.__opExpand2(that, np.divide)",
"def div(a,b):\r\n return a/b",
"def calculate(operandOne, operandTwo, operation):\r\n if operation == '+':\r\n return operandOne + operandTwo\r\n elif operation == '-':\r\n return operandOne - operandTwo\r\n elif operation == '*':\r\n return operandOne * operandTwo\r\n elif operation == '/':\r\n return operandOne // operandTwo",
"def divide(lhs, rhs):\n return _make.divide(lhs, rhs)"
] | [
"0.83308667",
"0.83308667",
"0.83308667",
"0.8282008",
"0.77688485",
"0.74107593",
"0.7272513",
"0.721974",
"0.71896094",
"0.71896094",
"0.716603",
"0.7139801",
"0.7103532",
"0.7100584",
"0.7084736",
"0.7084736",
"0.70549184",
"0.7040841",
"0.7022843",
"0.70023674",
"0.699961",
"0.699961",
"0.69732714",
"0.69554657",
"0.6952935",
"0.69499326",
"0.694661",
"0.6922334",
"0.6919944",
"0.6901217"
] | 0.8452331 | 0 |
Inserts value at position idx, shifting the original elements down the list, as needed. Note that inserting a value at len(self), which is equivalent to appending the value, is permitted. Raises IndexError if idx is invalid. | def insert(self, idx, value):
assert(isinstance(idx, int))
        nidx = self._normalize_idx(idx)
        if nidx < 0 or nidx > len(self.data):
            raise IndexError
        self.data.append(None)
        for i in range(len(self.data)-1,nidx,-1):
            self.data[i] = self.data[i-1]
        self.data[nidx] = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def insert(self, idx, element):\n if self._length == self._capacity: # Need to increase size\n self._grow_arr()\n\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n idx = min(self._length, idx) # Any index over the length is converted\n\n # Move values after idx one right to make room for new element\n for i in range(self._length, idx, -1):\n self._arr[i] = self._arr[i - 1]\n self._arr[idx] = element # Insert element at new blank space\n self._length += 1",
"def insert(self, index, value):\n self.__validate_index(index)\n self.__list = self.__list[:index] + [value] + self.__list[index:]\n return self.__list",
"def insert(self, index, value):\n self.__field.validate_element(value)\n return list.insert(self, index, value)",
"def insert(self, index, value):\n self.__field.validate_element(value)\n return list.insert(self, index, value)",
"def insert(self, index, value):\n if self._can_insert(index, value):\n list.insert(self, index, value)",
"def addAtIndex(self, index, val):\n if 0 <= index < len(self.nums):\n self.nums.insert(index, val)\n elif index == len(self.nums):\n self.nums.append(val)",
"def insertAtIndex(self, index, value):\n self._growCheck()\n super().insertAtIndex(index, value)",
"def insert(self, index, item):\n if index > len(self):\n raise IndexError\n elif index == 0:\n self.insert_first(item)\n else:\n self._rest.insert(index-1, item)",
"def insert(self, index, value):\n self.list.insert(index, value)",
"def __setitem__(self, index, value):\n missing = index - len(self) + 1\n if missing > 0:\n self.extend([None] * missing)\n list.__setitem__(self, index, value)",
"def insert(self, index, value):\n list.insert(self, index, value)\n self.emit('inserted', index, value)\n self.emit('modified')",
"def insert(self, index: int, item: Any) -> BaseList:\n super().insert(index, item)\n return self",
"def insert(self, index, newItem):\r\n if self.size() == len(self):\r\n self.grow()\r\n if index >= self.size():\r\n self._items[self.size()] = newItem\r\n else:\r\n index = max(index, 0)\r\n # Shift items down by one position\r\n for i in range(self.size(), index, -1):\r\n self._items[i] = self._items[i - 1]\r\n\r\n # Add new item and increment logical size\r\n self._items[index] = newItem\r\n self._logicalSize += 1",
"def insert(self, index: int, item: Any) -> None:\n if self.is_empty() and index != 0:\n raise IndexError\n # Insert at the beginning.\n elif index == 0:\n to_push = self._first\n # modify self._first\n self._first = item\n # Call insert on to_push onto _rest\n if not self._rest and to_push:\n self._rest = RecursiveList([to_push])\n else:\n self._rest.insert(0, to_push)\n # Append case, add at the end when _rest is None\n elif index == 1 and not self._rest:\n self._rest = RecursiveList([item])\n # Recurse on the rest of the list.\n else:\n if not self._rest:\n raise IndexError\n else:\n self._rest.insert(index - 1, item)",
"def addAtIndex(self, index, val):\n if index > 0 and not self.head:\n return\n \n tmp = Node(val)\n if index == 0 and not self.head:\n self.head = tmp\n self.tail = self.head\n return\n if index == 0 and self.head:\n tmp.nxt = self.head\n self.head = tmp \n return\n \n \n cur = self.head\n i = 1\n while i < index and cur:\n cur = cur.nxt\n i+=1\n if i == index:\n if not cur:\n if self.tail:\n self.tail.nxt = tmp\n self.tail = tmp\n else:\n self.head = tmp\n self.tail = tmp\n# print(\"KMG 1\")\n else:\n# print(\"inserting after the value %d\" %cur.val)\n tmp.nxt = cur.nxt\n cur.nxt = tmp\n if self.tail == cur:\n self.tail = tmp",
"def insert(self, index, value):\n # check the validity of index\n if index < 0 or index > self.n: # larger than no. of items\n print(\"Index Error; please input valid index\")\n return\n # if index==0, same as push_front\n if index==0:\n self.push_front(value)\n return\n # else,\n new_node = Node(value)\n temp_node = self.head\n for _ in range(index-1):\n temp_node = temp_node.next # traverse the list\n new_node.next = temp_node.next # temp_node is index-1 node\n temp_node.next = new_node\n self.n += 1",
"def insert(self, index, item):\n\n # Check if -len(self) <= index <= len(self)\n valid_index = (index <= len(self)) and (index >= -len(self))\n\n try:\n if not valid_index:\n raise IndexError\n\n if self.count == self.capacity:\n self._resize(2 * self.capacity)\n\n # Index '-1' for last item, '-len(self)' for first item\n if index < 0:\n index = index + len(self)\n\n # Move every item after the 'index' to the right\n for i in range(self.count, index - 1, -1):\n self.the_array[i] = self.the_array[i - 1]\n\n # Insert the 'item' at 'index'\n self.the_array[index] = item\n self.count += 1\n\n return True\n\n except IndexError:\n print(\"Index must be in between -len(self) to len(self).\")\n return False",
"def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value",
"def insert(self, value, index=0):\n # Error case: Index out of acceptable range\n if index < 0 or index > self._size:\n raise RangeError(\"index out of range.\")\n\n # Edge case 1: index == 0\n # Behave like push_front()\n if index == 0:\n self.push_front(value)\n return\n\n # Edge case 2: index == size\n # Behave like push_back()\n if index == self._size:\n self.push_back(value)\n return\n\n new_node = self.Node(value)\n i = 1\n current_node = self._head.next\n\n while(i < index):\n current_node = current_node.next\n i += 1\n\n new_node.next = current_node\n new_node.prev = current_node.prev\n current_node.prev.next = new_node\n current_node.prev = new_node\n self._size += 1",
"def __setitem__(self, idx, value):\n assert(isinstance(idx, int))\n nidx = self._normalize_idx(idx)\n if nidx >= len(self.data):\n raise IndexError\n self.data[nidx] = value",
"def insert(self, value, index=0):\n assert index < self._size, \"Limit Execeeded.\"\n\n node = Node(value)\n if index == 0:\n node.next = self._head\n self._head = node\n else:\n count = 0\n current = self._head\n while count < index - 1 and current:\n current = current.next\n count += 1\n current.next = node\n self._size += 1",
"def insert(self, index, item):\n # type: (int, Any) -> None\n return list.insert(self, index, self.ref(item))",
"def insert(self, val):\r\n if len(self.data) != self.len:\r\n self.data[self.len] = val\r\n else:\r\n self.data.append(val)\r\n if val in self.indices:\r\n self.indices[val].append(self.len)\r\n self.len += 1\r\n return False\r\n else:\r\n self.indices[val] = [self.len]\r\n self.len += 1\r\n return True",
"def addAtIndex(self, index: int, val: int) -> None:\n # If index is greater than the length, \n # the node will not be inserted.\n if index > self.size:\n return\n \n # [so weird] If index is negative, \n # the node will be inserted at the head of the list.\n if index < 0:\n index = 0\n \n # find predecessor and successor of the node to be added\n if index < self.size - index:\n pred = self.head\n for _ in range(index):\n pred = pred.next\n succ = pred.next\n else:\n succ = self.tail\n for _ in range(self.size - index):\n succ = succ.prev\n pred = succ.prev\n \n # insertion itself\n self.size += 1\n to_add = ListNode(val)\n to_add.prev = pred\n to_add.next = succ\n pred.next = to_add\n succ.prev = to_add",
"def insert(self, index, value):\n vim.current.buffer.insert(self._clamp(index), value)",
"def ListInsert(raw_list,insert_indice,value = None,padding = None):\n length = len(raw_list)\n if insert_indice+1 <= length:\n raw_list[insert_indice] = value\n else:\n for i in range(length,insert_indice):\n raw_list.append(padding)\n raw_list.append(value)",
"def addAtIndex(self, index: int, val: int) -> None:\n if self.size < index:\n return\n if index < 0:\n return\n\n new_node = Node(val)\n curr = self.head\n for _ in range(index):\n curr = curr.next\n new_node.next = curr.next\n curr.next = new_node\n self.size += 1",
"def insert(self, val):\n self.data.insert(0,val)\n self.size = self.size + 1",
"def addAtIndex(self, index: int, val: int) -> None:\n # 按题目要求,如果索引大于链表长度,不添加\n if index > self.cnt:\n return\n if index == self.cnt:\n # 插到末尾\n self.addAtTail(val)\n\n else:\n tmp = self.dummy\n if index < 0:\n index = 0\n for _ in range(index):\n tmp = tmp.next\n new = ListNode(val)\n new.pre = tmp\n new.next = tmp.next\n\n tmp.next = new\n new.next.pre = new\n\n self.cnt += 1",
"def insert(self, index, value):\n if self.head is None:\n self.append(value)\n return\n \n from_head = True if index >= 0 else False \n if from_head: \n node = self.head\n steps = index \n else:\n node = self.tail \n steps = abs(index) -1 \n while steps > 0 and node is not None:\n node = node.next_node if from_head else node.prev_node \n steps -= 1 \n \n if node is None:\n if from_head: \n self.append(value)\n return\n else:\n self.push_front(value)\n return\n if node is self.head:\n self.push_front(value)\n return\n else:\n new_node = DLLNode(value)\n new_node.next_node = node\n new_node.prev_node = node.prev_node\n node.prev_node.next_node = new_node\n node.prev_node = new_node \n return"
] | [
"0.76569957",
"0.74387354",
"0.7170466",
"0.7159848",
"0.6967311",
"0.69386035",
"0.6900636",
"0.68895215",
"0.6883031",
"0.67318654",
"0.67028767",
"0.665537",
"0.6628325",
"0.65901107",
"0.6585507",
"0.6529382",
"0.64873207",
"0.64773446",
"0.6460093",
"0.6458619",
"0.64099866",
"0.6404724",
"0.6404339",
"0.6355389",
"0.6342164",
"0.62931746",
"0.62693954",
"0.62684083",
"0.6237879",
"0.62314564"
] | 0.7784822 | 0 |
Deletes and returns the element at idx (which is the last element, by default). | def pop(self, idx=-1):
    to_ret = self.__getitem__(idx)
self.__delitem__(idx)
return to_ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pop(self, idx=-1):\n if idx < 0: # For negative indexing, convert to positive counterpart\n idx = self._convert_negative_index(idx)\n if not 0 <= idx < self._length: # Ignore indices outside of bounds\n raise IndexError(f'index {idx} out of bounds')\n\n element = self._arr[idx] # Save element so it can be returned\n # Move all elements after index i one forward to \"delete\" element\n for i in range(idx, self._length - 1):\n self._arr[i] = self._arr[i + 1]\n self._length -= 1\n self._check_shrink() # Shrink array if length is too small\n return element",
"def delete(self, idx):\n self.arr[idx] = self.arr[self.current-1]\n self.current -= 1",
"def delete_element(some_list, index):\n del some_list[index]\n return some_list",
"def remove(self, idx):\n indices = range(len(self))\n indices.remove(idx)\n return self.take(indices, axis=0).take(indices, axis=1)",
"def __delitem__(self, idx):\n self.pop(idx)",
"def pop(self, idx):\n tmp = np.copy(self.arr[idx])\n self.arr[idx] = self.arr[self.current-1] # the last one is moved before\n self.current -= 1\n return tmp",
"def _numpy_delete(x, idx):\n # NB: numpy.delete is not yet available in JAX\n mask = jnp.arange(x.shape[0] - 1) < idx\n return jnp.where(mask.reshape((-1,) + (1,) * (x.ndim - 1)), x[:-1], x[1:])",
"def remove(self, index):\n self.__validate_index(index)\n value = self.__list[index]\n self.__list = self.__list[:index] + self.__list[index + 1:]\n return value",
"def delete_at_index(self, index: int) -> T:\n pass",
"def delete_at_index(self, index: int) -> T:\n pass",
"def pop(self, index=None, last=True):\n if index == None:\n return super().pop(last)\n else:\n ret = self[index]\n self.remove(ret)\n return ret",
"def delete_at_index(self, index: int) -> T:\n try:\n previous_node = self.__get_node_at_index(index-1)\n except ValueError as e:\n if self.is_empty(): \n raise ValueError(\"List is empty\")\n elif index == 0:\n item = self.head.items\n self.head = self.head.link\n else:\n raise e\n else:\n item = previous_node.link.items\n previous_node.link = previous_node.link.link\n self.length -= 1\n return item",
"def deleteAtIndex(self, index):\n cur = self.head\n prev = None\n# self.display(\"deleteAtIndex, deleting value at index \"+str(index))\n if not index:\n head = head.nxt\n if self.tail == cur:\n self.tail = None\n del cur\n return\n \n i = 0\n while i < index and cur:\n prev = cur\n cur = cur.nxt\n i+=1\n if prev:\n if cur:\n prev.nxt = cur.nxt\n if self.tail == cur:\n self.tail = prev\n del cur",
"def __delitem__(self, idx):\n row, col = idx\n\n array_row = self._find_row_before(row)\n\n if (array_row.next_row == None or array_row.next_row.row_number > row):\n return\n\n target_row = array_row.next_row\n array_entry = self._find_column_before(target_row, col)\n\n if (array_entry.next_entry == None or array_entry.next_entry.column_number > col):\n return\n\n array_entry.next_entry = array_entry.next_entry.next_entry\n\n # If this row still has entries in it we are finished\n if target_row.row_sentinel.next_entry != None:\n return\n\n array_row.next_row = array_row.next_row.next_row",
"def deleteAtIndex(self, index):\n if 0 <= index < len(self.nums):\n self.nums = self.nums[:index] + self.nums[index+1:]",
"def deleteAtIndex(self, index: int) -> None:\n if index < 0 or index > self.cnt-1:\n return \n tmp = self.dummy\n for _ in range(index):\n tmp = tmp.next\n if index == self.cnt - 1:\n tmp.next = None\n else:\n tmp.next = tmp.next.next\n if tmp.next:\n tmp.next.pre = tmp\n self.cnt -= 1",
"def _remove_element(cls, d, idx):\n d[idx, 2] = 0\n return d",
"def pop(self, index=-1):\n # type: (int) -> Any\n return self.value(list.pop(self, index))",
"def remove_from_list(self,list_,index):\r\n try:\r\n return list_.pop(self._index_to_int(index))\r\n except IndexError:\r\n self._index_error(list_,index)",
"def delete(self):\n first = self.data[0]\n self.data.pop(0)\n self.size = self.size - 1\n return first",
"def __delitem__(self, index):\n # If input is a slice then delete all elements as determined\n # by the slice attributes, using an offset to account for the\n # changing size of the list.\n if isinstance(index, slice):\n offset = 0\n for i in xrange(*index.indices(len(self))):\n if i > -(len(self) + 1) or i < len(self):\n del self[i - offset]\n offset += 1\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n del cur_node.data_list[index]\n self.length -= 1\n\n self.__balance_node(prev_node, cur_node)",
"def removeChildAtIndex(self, index):\n self.__initChild()\n return self.__child.pop(index)",
"def delete(self, index):\n try:\n valid_index = (index < len(self)) and (index >= -len(self))\n if not valid_index:\n raise IndexError\n\n elif valid_index:\n if index < 0:\n index = index + len(self)\n # Move every item after the 'index' to left\n for i in range(index, self.count - 1):\n self.the_array[i] = self.the_array[i + 1]\n self.count -= 1\n\n if (self.capacity // 2 >= self.BASE_SIZE) and (self.count < self.capacity / 8):\n self._resize(self.capacity // 2)\n\n except IndexError:\n print(\"Index must be in between -len(self) to len(self).\")\n\n return valid_index",
"def __delitem__(self, index: Any) -> None:\n del self.contents[index]\n return",
"def deleteAtIndex(self, index):\n cur = self.head\n if cur == None:\n return\n elif index == 0:\n self.head = cur.next\n\n cur, i = self.head, 1\n while cur and i != index:\n cur = cur.next\n i += 1\n if cur.next == None:\n cur = None\n else:\n cur.next = cur.next.next",
"def remove_child(self, index):\n return self.next_population.pop(index)",
"def remove(self, index):\n item = self._items.pop(index)\n if item in self._data:\n data = self._data.pop(item)\n else:\n data = None\n return item, data",
"def remove(self, index):\n if index >= len(self):\n raise IndexError\n else:\n if index == 0:\n self.remove_first()\n else:\n self._rest.remove(index-1)",
"def deleteAtIndex(self, index):\n if index >= self.len:\n return\n p = self.head\n while index > 0:\n index -= 1\n p = p.next\n if p.next is self.tail:\n self.tail = p\n p.next = p.next.next\n self.len -= 1",
"def pop(self, i=None):\n if i is None:\n i = len(self) - 1\n val = self[i]\n del self[i]\n return val"
] | [
"0.80245477",
"0.7400927",
"0.7243163",
"0.72293675",
"0.71489924",
"0.70918417",
"0.7064182",
"0.70333284",
"0.6955016",
"0.6955016",
"0.68780965",
"0.68660593",
"0.6779757",
"0.6776178",
"0.6724961",
"0.67232394",
"0.66534007",
"0.6649281",
"0.66203725",
"0.66099906",
"0.6535594",
"0.6499819",
"0.6494609",
"0.64944154",
"0.64921975",
"0.64881796",
"0.6442411",
"0.6429975",
"0.64119184",
"0.64028287"
] | 0.75975555 | 1 |
Returns True if this ArrayList contains the same elements (in order) as other. If other is not an ArrayList, returns False. | def __eq__(self, other):
to_ret = False
if (not isinstance(other, ArrayList)) or (len(other.data) != len(self.data)):
return to_ret
for i in range(len(self.data)):
if self.data[i] != other.data[i]:
return to_ret
else:
pass
to_ret = True
return to_ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __eq__(self, other):\n\n for vert in self:\n if vert not in other:\n return False\n if len(self) == len(other):\n return True",
"def __eq__(self, other):\n if not isinstance(other, Vector):\n return False\n elif len(self) != len(other):\n return False\n else:\n for i, element in enumerate(self):\n if element != other[i]:\n return False\n return True",
"def __eq__(self, other):\r\n if self is other:\r\n return True\r\n if type(self) != type(other):\r\n return False\r\n if self.size() != other.size():\r\n return False\r\n for index in range(self.size()):\r\n if self[index] != other[index]:\r\n return False\r\n return True",
"def __eq__(self, other):\r\n # checks if self or other is not empty list (empty lists = false)\r\n if not self or not other:\r\n return False\r\n\r\n if (isinstance(self[0], (list, int, float)) and\r\n isinstance(other[0], (list, int, float))):\r\n return self.coordinate_positions_compare(other)\r\n\r\n elif (isinstance(self[0], (unicode, str)) and\r\n isinstance(other[0], (unicode, str))):\r\n return ''.join(self) == ''.join(other)\r\n else: # improper argument types: no (float / int or lists of list\r\n #and float / int pair) or two string / unicode lists pair\r\n return False",
"def almost_equals(self, other):\n if self.__class__ is other.__class__ and len(self) == len(other):\n for a, b in zip(self, other):\n if not a.almost_equals(b):\n return False\n return True\n else:\n return False",
"def __eq__(self, anotherset):\r\n if not isinstance(anotherset, LR0ItemSet):\r\n raise TypeError\r\n if len(self.itemlist) != len(anotherset.itemlist):\r\n return False\r\n for element in self.itemlist:\r\n if element not in anotherset.itemlist:\r\n return False\r\n return True",
"def __eq__(self, other):\r\n\t\treturn self._to_pylist() == other._to_pylist()",
"def __eq__(self, other: Any) -> bool:\n if not isinstance(other, OperationsList):\n return False\n if not self._operations_list == other._operations_list:\n return False\n return True",
"def __eq__(self, other):\n if not isinstance(other, VersionList):\n return False\n\n return self.to_dict() == other.to_dict()",
"def __eq__(self, other):\n for x in self:\n if x not in other:\n return False\n return True",
"def is_superset(self, other):\n \n for element in other:\n if element not in self:\n return False\n\n return True",
"def __eq__(self, other):\n if isinstance(other, OrderedDict):\n # FIXME: efficiency?\n # Generate both item lists for each compare\n return (self.items() == other.items())\n else:\n return False",
"def __eq__(self, other):\n if isinstance(other, DFCollection):\n return all([sdf == odf for sdf, odf in zip(self, other)])\n else:\n return False",
"def __eq__(self, other):\n if not len(self) == len(other):\n return False\n for key in self.ordered_list:\n if key not in other.ordered_list:\n return False\n if self[key] != other[key]:\n return False\n return True",
"def __eq__(self, other):\r\n\t\tif(not(self.checkCmp(other))):\r\n\t\t\treturn False\r\n\r\n\t\tcmpflag = True\r\n\t\tfor li1, li2 in zip(self.vector, other):\r\n\t\t\tif(li1 != li2):\r\n\t\t\t\tcmpflag = False\r\n\t\treturn cmpflag",
"def __eq__(self, other):\n\n if not self or not other: #either one of them is null\n return False\n\n if len(self.documents) != len(other.documents):\n return False\n\n for i in xrange(0, len(self.documents)):\n if self.documents[i].index != other.documents[i].index:\n return False\n\n return True",
"def __eq__(self, other):\n if len(self.intervals) != len(other.intervals):\n return False\n for i in range(0, len(self.intervals)):\n if self.intervals[i] != other.intervals[i]:\n return False\n return True",
"def equals(self, other):\n # Check keys\n diff = set(self.keys()).symmetric_difference(\n other.keys())\n if len(diff) != 0:\n return False\n for key in self.keys():\n if type(self[key]) != type(other[key]):\n return False\n this_value = self[key]\n other_value = other[key]\n if isinstance(this_value, list):\n if len(this_value) != len(other_value):\n return False\n result = all([t==o for t,o\n in zip(this_value, other_value)])\n else:\n result = this_value == other_value\n if not result:\n return False\n return True",
"def __eq__(self, other):\r\n\r\n # make sure that self and other are\r\n # of the same type. Otherwise, a stack\r\n # and a queue of the same elements are considered\r\n # equal\r\n if not self.is_same_type_as_other(other):\r\n return False\r\n\r\n elif len(self) != len(other):\r\n # if the two deques have a different number\r\n # of elements, then they are different\r\n return False\r\n\r\n else:\r\n # otherwise, we need to check element by element\r\n # to make sure that they all match up\r\n for self_element, other_element in zip(self, other):\r\n if self_element != other_element:\r\n return False\r\n\r\n return True",
"def __eq__(self, other: SymbolicObject) -> bool:\n\n if isinstance(other, ListObject):\n return self._subobjects == other._subobjects\n else:\n return False",
"def __eq__(self, other):\n h1 = [item for row in self.arr for item in row]\n h2 = [item for row in other.arr for item in row]\n for i in range(self.board_size * self.board_size):\n if h1[i] != h2[i]:\n return False\n return True",
"def __eq__(self, other):\n if len(self) != len(other):\n return False\n else:\n for val in self:\n try :\n if self[val] != other[val] :\n return False\n except (KeyError, TypeError) :\n return False\n for val in other:\n try :\n if self[val] != other[val] :\n return False\n except (KeyError, TypeError) :\n return False\n return True",
"def __eq__(self, other):\n for i in range(len(self.puzzle)):\n for j in range(len(self.puzzle[0])):\n if(self.puzzle[i][j] != other.puzzle[i][j]):\n return False\n return True",
"def __eq__(self, other):\n if self.size != other.size:\n return False\n if self.head != other.head or self.tail != other.tail:\n return False\n\n # Traverse through linked list and make sure all nodes are equal\n temp_self = self.head\n temp_other = other.head\n while temp_self is not None:\n if temp_self == temp_other:\n temp_self = temp_self.next_node\n temp_other = temp_other.next_node\n else:\n return False\n # Make sure other is not longer than self\n if temp_self is None and temp_other is None:\n return True\n return False",
"def __eq__(self, other):\n return isinstance(other, type(self)) and self.size == other.size",
"def __eq__(self, other):\n if not isinstance(other, CashFlowList):\n return False\n\n return self.__dict__ == other.__dict__",
"def pequal(self, other):\n if not isinstance(other, plist):\n return False\n if len(self) != len(other):\n return False\n try:\n for x, y in zip(self, other):\n if not x.pequal(y):\n return False\n except Exception:\n for x, y in zip(self, other):\n if x != y:\n return False\n return True",
"def __eq__(\n self: \"HereditaryStratumOrderedStoreList\",\n other: \"HereditaryStratumOrderedStoreList\",\n ) -> bool:\n # adapted from https://stackoverflow.com/a/4522896\n return (\n isinstance(\n other,\n self.__class__,\n )\n and self.__slots__ == other.__slots__\n and all(\n getter(self) == getter(other)\n for getter in [\n operator.attrgetter(attr) for attr in self.__slots__\n ]\n )\n )",
"def __eq__(self, other):\n return isinstance(other, Bag) and Counter(self.items) == Counter(other.items)",
"def __ne__(self, other):\n if not isinstance(other, VersionList):\n return True\n\n return self.to_dict() != other.to_dict()"
] | [
"0.73884636",
"0.7384937",
"0.71894115",
"0.70885074",
"0.70311356",
"0.69708467",
"0.6966033",
"0.6947643",
"0.6930064",
"0.69040084",
"0.690394",
"0.6902855",
"0.6864643",
"0.6862794",
"0.6860097",
"0.6842636",
"0.6841529",
"0.6812321",
"0.6763469",
"0.6750699",
"0.6732056",
"0.6731589",
"0.6680204",
"0.6635664",
"0.663238",
"0.6621896",
"0.6618171",
"0.66144073",
"0.6566504",
"0.65630144"
] | 0.834435 | 0 |
Set up the NM08 grid for pyFEHM | def make_NM08_grid(work_dir, log_base, max_range):
base_name = 'NM08'
dat = fdata.fdata(work_dir=work_dir)
dat.files.root = base_name
pad_1 = [1500., 1500.]
# Symmetric grid in x-y
base = log_base
dx = pad_1[0]
x1 = dx ** (1 - base) * np.linspace(0, dx, max_range) ** base
X = np.sort(list(pad_1[0] - x1) + list(pad_1[0] + x1)[1:] + [pad_1[0]])
# If no. z nodes > 100, temperature_gradient will not like it...
surface_deps = np.linspace(350, -750, 4)
cap_grid = np.linspace(-750, -1200, 4)
perm_zone = np.linspace(-1200., -2100., 30)
lower_reservoir = np.linspace(-2100, -3100, 10)
Z = np.sort(list(surface_deps) + list(cap_grid) + list(perm_zone)
+ list(lower_reservoir))
dat.grid.make('{}_GRID.inp'.format(base_name), x=X, y=X, z=Z,
full_connectivity=True)
    grid_dims = [3000., 3000.]  # 3 x 3 km grid in x-y
# Geology time
    dat.new_zone(1, 'surface_units', rect=[[-0.1, -0.1, 350 + 0.1],
[grid_dims[0] + 0.1,
grid_dims[1] + 0.1,
-750 - 0.1]],
permeability=[1.e-15, 1.e-15, 1.e-15], porosity=0.1,
density=2477, specific_heat=800., conductivity=2.2)
dat.new_zone(2, 'clay_cap', rect=[[-0.1, -0.1, -750],
[grid_dims[0] + 0.1,
grid_dims[1] + 0.1,
-1200 - 0.1]],
permeability=1.e-18, porosity=0.01, density=2500,
specific_heat=1200., conductivity=2.2)
return dat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def GLDAS025Cellgrid():\n return GLDAS025Grids(only_land=False)",
"def cell_setup(self):\n p = self.project\n c = p.NewCell(0, area=1000, with_surfacewater=True)\n vgm = cmf.VanGenuchtenMualem()\n vgm.fit_w0()\n for d in self.depth:\n vgm.Ksat = 10 * 2 ** -d\n c.add_layer(d, vgm)\n c.install_connection(cmf.Richards)\n c.install_connection(cmf.GreenAmptInfiltration)\n return CmfConnector(c, 5)",
"def init_grid(self):\n self.pts = np.array(\n np.meshgrid(\n np.arange(self.net_dim[0]) + 1,\n np.arange(self.net_dim[1]) + 1\n )\n ).reshape(2, np.prod(self.net_dim)).T\n if self.topo == \"hexagonal\":\n self.pts[:, 0] = self.pts[:, 0] + .5 * (self.pts[:, 1] % 2)\n self.pts[:, 1] = np.sqrt(3) / 2 * self.pts[:, 1]",
"def create_grids(self):\n \n par = self.par\n\n # a. retirement\n \n # pre-decision states\n par.grid_m_ret = nonlinspace(par.eps,par.m_max_ret,par.Nm_ret,par.phi_m)\n par.Nmcon_ret = par.Nm_ret - par.Na_ret\n \n # post-decision states\n par.grid_a_ret = nonlinspace(0,par.a_max_ret,par.Na_ret,par.phi_m)\n \n # b. working: state space (m,n,k) \n par.grid_m = nonlinspace(par.eps,par.m_max,par.Nm,par.phi_m)\n\n par.Nn = par.Nm\n par.n_max = par.m_max + par.n_add\n par.grid_n = nonlinspace(0,par.n_max,par.Nn,par.phi_n)\n\n par.grid_n_nd, par.grid_m_nd = np.meshgrid(par.grid_n,par.grid_m,indexing='ij')\n\n # c. working: w interpolant (and wa and wb and wq)\n par.Na_pd = np.int_(np.floor(par.pd_fac*par.Nm))\n par.a_max = par.m_max + par.a_add\n par.grid_a_pd = nonlinspace(0,par.a_max,par.Na_pd,par.phi_m)\n \n par.Nb_pd = np.int_(np.floor(par.pd_fac*par.Nn))\n par.b_max = par.n_max + par.b_add\n par.grid_b_pd = nonlinspace(0,par.b_max,par.Nb_pd,par.phi_n)\n \n par.grid_b_pd_nd, par.grid_a_pd_nd = np.meshgrid(par.grid_b_pd,par.grid_a_pd,indexing='ij')\n \n # d. working: egm (seperate grids for each segment)\n \n if par.solmethod == 'G2EGM':\n\n # i. dcon\n par.d_dcon = np.zeros((par.Na_pd,par.Nb_pd),dtype=np.float_,order='C')\n \n # ii. acon\n par.Nc_acon = np.int_(np.floor(par.Na_pd*par.acon_fac))\n par.Nb_acon = np.int_(np.floor(par.Nb_pd*par.acon_fac))\n par.grid_b_acon = nonlinspace(0,par.b_max,par.Nb_acon,par.phi_n)\n par.a_acon = np.zeros(par.grid_b_acon.shape)\n par.b_acon = par.grid_b_acon\n\n # iii. con\n par.Nc_con = np.int_(np.floor(par.Na_pd*par.con_fac))\n par.Nb_con = np.int_(np.floor(par.Nb_pd*par.con_fac))\n \n par.grid_c_con = nonlinspace(par.eps,par.m_max,par.Nc_con,par.phi_m)\n par.grid_b_con = nonlinspace(0,par.b_max,par.Nb_con,par.phi_n)\n\n par.b_con,par.c_con = np.meshgrid(par.grid_b_con,par.grid_c_con,indexing='ij')\n par.a_con = np.zeros(par.c_con.shape)\n par.d_con = np.zeros(par.c_con.shape)\n \n elif par.solmethod == 'NEGM':\n\n par.grid_l = par.grid_m\n\n # e. shocks\n assert (par.Neta == 1 and par.var_eta == 0) or (par.Neta > 1 and par.var_eta > 0)\n\n if par.Neta > 1:\n par.eta,par.w_eta = log_normal_gauss_hermite(np.sqrt(par.var_eta), par.Neta)\n else:\n par.eta = np.ones(1)\n par.w_eta = np.ones(1)\n\n # f. timings\n par.time_work = np.zeros(par.T)\n par.time_w = np.zeros(par.T)\n par.time_egm = np.zeros(par.T)\n par.time_vfi = np.zeros(par.T)",
"def set_grid(self,ug):\n self.grd=ug\n self.set_topology()",
"def define_grid(self):\n self.h_shape = int(\n np.round((self.h_stop - self.h_start) / self.h_step, 2)) + 1\n self.k_shape = int(\n np.round((self.k_stop - self.k_start) / self.k_step, 2)) + 1\n self.l_shape = int(\n np.round((self.l_stop - self.l_start) / self.l_step, 2)) + 1\n self.grid_origin = [self.h_start, self.k_start, self.l_start]\n self.grid_step = [int(np.rint(1.0/self.h_step)),\n int(np.rint(1.0/self.k_step)),\n int(np.rint(1.0/self.l_step))]\n self.grid_shape = [self.h_shape, self.k_shape, self.l_shape]\n self.grid_basis = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]",
"def gen_grids(self):\n self.dx = self.grid_width / self.grid_resol\n self.dk = 2 * np.pi/self.grid_width\n self.grid_x_shifted = -self.grid_width/2 + self.dx * np.arange(0, self.grid_resol)\n self.grid_x = self.grid_x_shifted + self.grid_center\n self.grid_k = - (np.pi * self.grid_resol)/self.grid_width + self.dk * np.arange(0, self.grid_resol)\n self.grid_k = np.roll(self.grid_k, int((self.grid_resol)/2))\n self.grid_kin = np.square(self.h)/ (2*self.m) * np.square(self.grid_k)",
"def setUp(self):\n self.grid = SudukuGrid(BaseCase)\n for i in range(81):\n self.grid[i] = SudukuAlphabet.VALUES[(i+(i//9)*3+i//27)%9]",
"def _load_grid(self):\n\n grid_metrics = ['nbe', 'ntsn', 'nbsn', 'ntve', 'nbve', 'art1', 'art2', 'a1u', 'a2u']\n grid_variables = ['lon', 'lat', 'x', 'y', 'lonc', 'latc', 'xc', 'yc',\n 'h', 'siglay', 'siglev']\n\n # Get the grid data.\n for grid in grid_variables:\n try:\n setattr(self.grid, grid, self.ds.variables[grid][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[grid].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[grid], attribute))\n setattr(self.atts, grid, attributes)\n except KeyError:\n # Make zeros for this missing variable so we can convert from the non-missing data below.\n if grid.endswith('c'):\n setattr(self.grid, grid, np.zeros(self.dims.nele).T)\n else:\n setattr(self.grid, grid, np.zeros(self.dims.node).T)\n except ValueError as value_error_message:\n warn('Variable {} has a problem with the data. Setting value as all zeros.'.format(grid))\n print(value_error_message)\n setattr(self.grid, grid, np.zeros(self.ds.variables[grid].shape))\n\n # Load the grid metrics data separately as we don't want to set a bunch of zeros for missing data.\n for metric in grid_metrics:\n if metric in self.ds.variables:\n setattr(self.grid, metric, self.ds.variables[metric][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[metric].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[metric], attribute))\n setattr(self.atts, metric, attributes)\n\n # Fix the indexing and shapes of the grid metrics variables. Only transpose and offset indexing for nbe.\n try:\n if metric == 'nbe':\n setattr(self.grid, metric, getattr(self.grid, metric).T - 1)\n else:\n setattr(self.grid, metric, getattr(self.grid, metric))\n except AttributeError:\n # We don't have this variable, so just pass by silently.\n pass\n\n try:\n self.grid.nv = self.ds.variables['nv'][:].astype(int) # force integers even though they should already be so\n self.grid.triangles = copy.copy(self.grid.nv.T - 1) # zero-indexed for python\n except KeyError:\n # If we don't have a triangulation, make one.\n triangulation = tri.Triangulation(self.grid.lon, self.grid.lat)\n self.grid.triangles = triangulation.triangles\n self.grid.nv = self.grid.triangles.T + 1\n\n # Fix broken triangulations if necessary.\n if self.grid.nv.min() != 1:\n if self._debug:\n print('Fixing broken triangulation. Current minimum for nv is {} and for triangles is {} but they '\n 'should be 1 and 0, respectively.'.format(self.grid.nv.min(), self.grid.triangles.min()))\n self.grid.nv = (self.ds.variables['nv'][:].astype(int) - self.ds.variables['nv'][:].astype(int).min()) + 1\n self.grid.triangles = copy.copy(self.grid.nv.T) - 1\n\n # If we've been given an element dimension to subsample in, fix the triangulation here. We should really do\n # this for the nodes too.\n if 'nele' in self._dims:\n if self._debug:\n print('Fix triangulation table as we have been asked for only specific elements.')\n print('Triangulation table minimum/maximum: {}/{}'.format(self.grid.nv[:, self._dims['nele']].min(),\n self.grid.nv[:, self._dims['nele']].max()))\n # Redo the triangulation here too.\n new_nv = copy.copy(self.grid.nv[:, self._dims['nele']])\n for i, new in enumerate(np.unique(new_nv)):\n new_nv[new_nv == new] = i\n self.grid.nv = new_nv + 1\n self.grid.triangles = new_nv.T\n\n # Update dimensions to match those we've been given, if any. 
Omit time here as we shouldn't be touching that\n # dimension for any variable in use in here.\n for dim in self._dims:\n if dim != 'time':\n setattr(self.dims, dim, len(self._dims[dim]))\n\n # Add compatibility for FVCOM3 (these variables are only specified on the element centres in FVCOM4+ output\n # files). Only create the element centred values if we have the same number of nodes as in the triangulation.\n # This does not occur if we've been asked to extract an incompatible set of nodes and elements, for whatever\n # reason (e.g. testing). We don't add attributes for the data if we've created it as doing so is a pain.\n for var in 'h_center', 'siglay_center', 'siglev_center':\n try:\n setattr(self.grid, var, self.ds.variables[var][:])\n # Save the attributes.\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[var].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[var], attribute))\n setattr(self.atts, var, attributes)\n except KeyError:\n if self.grid.nv.max() == len(self.grid.x):\n try:\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]), self.grid.triangles))\n except IndexError:\n # Maybe the array's the wrong way around. Flip it and try again.\n setattr(self.grid, var, nodes2elems(getattr(self.grid, var.split('_')[0]).T, self.grid.triangles))\n\n # Convert the given W/E/S/N coordinates into node and element IDs to subset.\n if self._bounding_box:\n self._dims['node'] = np.argwhere((self.grid.lon > self._dims['wesn'][0]) &\n (self.grid.lon < self._dims['wesn'][1]) &\n (self.grid.lat > self._dims['wesn'][2]) &\n (self.grid.lat < self._dims['wesn'][3])).flatten()\n self._dims['nele'] = np.argwhere((self.grid.lonc > self._dims['wesn'][0]) &\n (self.grid.lonc < self._dims['wesn'][1]) &\n (self.grid.latc > self._dims['wesn'][2]) &\n (self.grid.latc < self._dims['wesn'][3])).flatten()\n\n # If we've been given dimensions to subset in, do that now. Loading the data first and then subsetting\n # shouldn't be a problem from a memory perspective because if you don't have enough memory for the grid data,\n # you probably won't have enough for actually working with the outputs. 
Also update dimensions to match the\n # given dimensions.\n if 'node' in self._dims:\n self.dims.node = len(self._dims['node'])\n for var in 'x', 'y', 'lon', 'lat', 'h', 'siglay', 'siglev':\n try:\n node_index = self.ds.variables[var].dimensions.index('node')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[node_index] = self.dims.node\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, node in enumerate(self._dims['node']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], node]\n else:\n _temp[..., ni] = self.ds.variables[var][:, node]\n else:\n for ni, node in enumerate(self._dims['node']):\n _temp[..., ni] = self.ds.variables[var][..., node]\n except KeyError:\n if 'siglay' in var:\n _temp = np.empty((self.dims.siglay, self.dims.node))\n elif 'siglev' in var:\n _temp = np.empty((self.dims.siglev, self.dims.node))\n else:\n _temp = np.empty(self.dims.node)\n setattr(self.grid, var, _temp)\n if 'nele' in self._dims:\n self.dims.nele = len(self._dims['nele'])\n for var in 'xc', 'yc', 'lonc', 'latc', 'h_center', 'siglay_center', 'siglev_center':\n try:\n nele_index = self.ds.variables[var].dimensions.index('nele')\n var_shape = [i for i in np.shape(self.ds.variables[var])]\n var_shape[nele_index] = self.dims.nele\n if 'siglay' in self._dims and 'siglay' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglay')] = self.dims.siglay\n elif 'siglev' in self._dims and 'siglev' in self.ds.variables[var].dimensions:\n var_shape[self.ds.variables[var].dimensions.index('siglev')] = self.dims.siglev\n _temp = np.empty(var_shape)\n if 'siglay' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglay' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglay'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n elif 'siglev' in self.ds.variables[var].dimensions:\n for ni, nele in enumerate(self._dims['nele']):\n if 'siglev' in self._dims:\n _temp[..., ni] = self.ds.variables[var][self._dims['siglev'], nele]\n else:\n _temp[..., ni] = self.ds.variables[var][:, nele]\n else:\n for ni, nele in enumerate(self._dims['nele']):\n _temp[..., ni] = self.ds.variables[var][..., nele]\n except KeyError:\n # FVCOM3 files don't have h_center, siglay_center and siglev_center, so make var_shape manually.\n if var.startswith('siglev'):\n var_shape = [self.dims.siglev, self.dims.nele]\n elif var.startswith('siglay'):\n var_shape = [self.dims.siglay, self.dims.nele]\n else:\n var_shape = self.dims.nele\n _temp = np.zeros(var_shape)\n setattr(self.grid, var, _temp)\n\n # Check if we've been given vertical dimensions to subset in too, and if so, do that. 
Check we haven't\n # already done this if the 'node' and 'nele' sections above first.\n for var in 'siglay', 'siglev', 'siglay_center', 'siglev_center':\n short_dim = copy.copy(var)\n # Assume we need to subset this one unless 'node' or 'nele' are missing from self._dims. If they're in\n # self._dims, we've already subsetted in the 'node' and 'nele' sections above, so doing it again here\n # would fail.\n subset_variable = True\n if 'node' in self._dims or 'nele' in self._dims:\n subset_variable = False\n # Strip off the _center to match the dimension name.\n if short_dim.endswith('_center'):\n short_dim = short_dim.split('_')[0]\n if short_dim in self._dims:\n if short_dim in self.ds.variables[var].dimensions and subset_variable:\n _temp = getattr(self.grid, var)[self._dims[short_dim], ...]\n setattr(self.grid, var, _temp)\n\n # Check ranges and if zero assume we're missing that particular type, so convert from the other accordingly.\n self.grid.lon_range = np.ptp(self.grid.lon)\n self.grid.lat_range = np.ptp(self.grid.lat)\n self.grid.lonc_range = np.ptp(self.grid.lonc)\n self.grid.latc_range = np.ptp(self.grid.latc)\n self.grid.x_range = np.ptp(self.grid.x)\n self.grid.y_range = np.ptp(self.grid.y)\n self.grid.xc_range = np.ptp(self.grid.xc)\n self.grid.yc_range = np.ptp(self.grid.yc)\n\n # Only do the conversions when we have more than a single point since the relevant ranges will be zero with\n # only one position.\n if self.dims.node > 1:\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.lon, self.grid.lat = lonlat_from_utm(self.grid.x, self.grid.y, zone=self._zone)\n if self.grid.lon_range == 0 and self.grid.lat_range == 0:\n self.grid.x, self.grid.y, _ = utm_from_lonlat(self.grid.lon, self.grid.lat)\n if self.dims.nele > 1:\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.lonc, self.grid.latc = lonlat_from_utm(self.grid.xc, self.grid.yc, zone=self._zone)\n if self.grid.lonc_range == 0 and self.grid.latc_range == 0:\n self.grid.xc, self.grid.yc, _ = utm_from_lonlat(self.grid.lonc, self.grid.latc)",
"def fixture_grid():\n return load_earth_relief(registration=\"pixel\")",
"def _prepare_grid(self):\n raise NotImplementedError",
"def initialize_grid(self) -> None:\n for i in range(self.grid_size[0]):\n for j in range(self.grid_size[1]):\n self.set(i, j, self.base_color)",
"def initialize_grid(self):\n self.grid = np.zeros([self.N, self.N, self.N])\n return self.grid",
"def initialize_grid(self):\r\n for i in range(self.height):\r\n for j in range(self.width):\r\n self.grid[i][j] = 0\r\n \r\n # fill up unvisited cells\r\n for r in range(self.height):\r\n for c in range(self.width):\r\n if r % 2 == 0 and c % 2 == 0:\r\n self.unvisited.append((r,c))\r\n\r\n self.visited = []\r\n self.path = dict()\r\n self.generated = False",
"def setUp(self):\r\n self.matrix = array(\r\n [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])\r\n self.cells = [(0, 1), (1, 3)]\r\n self.cells2 = [(0, 2), (2, 3)]",
"def cloudy_grid_map(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n model_number_matrix,grid_table = cloudy_library._restore_grid_table(grid_ext=p.grid_ext)\n # print(len(grid_table))\n # print(len(grid_table)/len(np.unique(grid_table.nH)))\n\n grid_table = grid_table.fillna(-10)\n grid_table['DTM'] = np.round(grid_table['DTM'] * 10.) / 10.\n grid_table['NH'] = np.round(grid_table['NH'] * 10.) / 10.\n\n # print(grid_table.DTM[np.isnan(grid_table['DTM'])])\n # print(grid_table.NH[np.isnan(grid_table['NH'])])\n # print(grid_table.FUV[np.isnan(grid_table['FUV'])])\n # print(grid_table.nH[np.isnan(grid_table.nH)])\n # print(grid_table.Z[np.isnan(grid_table.Z)])\n\n print('nHs: ',np.unique(grid_table.nH))\n print('DTMs: ',np.unique(grid_table.DTM))\n print('FUVs: ',np.unique(grid_table.FUV))\n print('NHs: ',np.unique(grid_table.NH))\n print('Zs: ',np.unique(grid_table.Z))\n\n fig,ax = plt.subplots(figsize=(8,5))\n\n key1, key2, key3 = list(p.cloudy_param.keys())[0],list(p.cloudy_param.keys())[1],list(p.cloudy_param.keys())[2]\n value1, value2, value3 = list(p.cloudy_param.values())[0],list(p.cloudy_param.values())[1],list(p.cloudy_param.values())[2]\n\n # Decide on what goes on x and y axis\n cloudy_parameters = np.array(['NH','FUV','nH','Z','DTM'])\n x_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2) &\\\n (cloudy_parameters != key3)][0]\n y_index = cloudy_parameters[(cloudy_parameters != key1) &\\\n (cloudy_parameters != key2) &\\\n (cloudy_parameters != key3)][1]\n print(x_index,y_index)\n # Cut in grid table\n grid_table_cut = grid_table.iloc[np.where((grid_table[key1].values == value1) & \\\n (grid_table[key2].values == value2) & \\\n (grid_table[key3].values == value3))[0]]\n\n x, y = grid_table_cut[x_index].values, grid_table_cut[y_index].values\n X, Y = np.meshgrid(np.unique(grid_table_cut[x_index].values), np.unique(grid_table_cut[y_index].values))\n\n # Plot line ratio?\n if '_' in p.line:\n L1 = grid_table_cut[p.line.split('_')[0]].values\n L2 = grid_table_cut[p.line.split('_')[1]].values\n L2[L2 == 0] = 1e9\n line_lum = (L1/L2).astype(float)\n vmin = np.min(np.log10(line_lum[L2 < 1e9]))\n\n else:\n line_lum = grid_table_cut[p.line].values.astype(float)\n vmin = np.min(np.log10(line_lum[line_lum > 0]))\n\n\n # ########## Patching the grid !!\n # line_lum[np.isnan(line_lum)] = -1 # what are these?\n # # 0 values: not sure if we have any?\n # # Negative numbers: missing grid point\n # i_missing = np.where(line_lum <= 0)[0]\n # line_lum[line_lum == 0] = np.min(line_lum[line_lum > 0])\n # while len(i_missing) > 0:\n # print(i_missing)\n # lum = np.log10(line_lum)\n # for i in i_missing:\n # # print(lum[i-1],lum[i+1])\n # try: \n # lum[i] = (lum[i-1] + lum[i+1])/ 2\n # except:\n # pass\n # # print('he',np.isnan(lum[i]))\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i-1] \n # except:\n # pass\n # if np.isnan(lum[i]):\n # try:\n # lum[i] = lum[i+1] \n # except:\n # pass \n # line_lum[i] = 10.**lum[i]\n # # print(i,lum[i])\n # i_missing = np.where(line_lum < 0)[0]\n # ########## End of patching\n \n lum = np.log10(line_lum)\n lum = lum.reshape([len(np.unique(x)), len(np.unique(y))]).T\n\n\n # pdb.set_trace()\n print(p.zlim)\n if p.zlim:\n print(p.zlim)\n lum[lum < p.zlim[0]] = p.zlim[0]\n lum[lum > p.zlim[1]] = p.zlim[1]\n cf = ax.contourf(X,Y, lum, cmap=\"jet\", vmin=p.zlim[0], vmax=p.zlim[1], lw=0, rstride=1, cstride=1,alpha=0.8, levels=20)\n else:\n cf = 
ax.contourf(X,Y, lum, cmap=\"jet\", vmin=vmin, lw=0, rstride=1, cstride=1,alpha=0.8)\n # print(lum)\n ax.set_xlabel('\\n\\n' + getlabel('l'+x_index))\n ax.set_ylabel('\\n\\n' + getlabel('l'+y_index))\n\n ax.set_xlim([np.min(X),np.max(X)])\n ax.set_ylim([np.min(Y),np.max(Y)])\n\n plt.colorbar(cf)\n\n plt.tight_layout()\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'look-up/'): os.mkdir(p.d_plot + 'look-up/') \n plt.savefig(p.d_plot + 'look-up/cloudy_grid_map_%s%s%s.%s' % (p.line, p.grid_ext, p.ext, p.format), format=p.format, dpi=300)",
"def define_grid():\n grid_left = np.array([[-13.1000000000000, -35.5000000000000, -48.3000000000000, -60, -16.9000000000000,\n -34.8000000000000, -67.5000000000000, -46.1000000000000, -59.8000000000000,\n -14.2000000000000, -28.3000000000000, -42.3000000000000, -67.6000000000000,\n -50.5000000000000, -14.6000000000000, -60.9000000000000, -31.6000000000000,\n -5.10000000000000, -65.6000000000000, -41.8000000000000, -55.1000000000000,\n -22.7000000000000, -5.80000000000000, -49.2000000000000, -34.5000000000000,\n -61.5500000000000, -63.6000000000000, -40.4000000000000, -48.7000000000000,\n -21.8000000000000, -58.2000000000000, -7, -36.3000000000000, -48.1000000000000,\n -56.8000000000000, -7.30000000000000, -22.2000000000000, -36.8000000000000,\n -46.8000000000000],\n [-67.7000000000000, -60, -55.1000000000000, -51.8000000000000, -51.6000000000000,\n -49.3000000000000, -47.1000000000000, -43.7000000000000, -39.6000000000000,\n -39.1000000000000, -31.2000000000000, -30.7000000000000, -30.1000000000000,\n -24.4000000000000, -22.7000000000000, -18.7000000000000, -16.9000000000000,\n -12.6000000000000, -10.8000000000000, -10.2000000000000, -4.01000000000000, 1.20000000000000,\n 2.80000000000000, 3.70000000000000, 3.90000000000000, 6.20000000000000, 8.30000000000000,\n 11.8000000000000, 14.5000000000000, 16, 18.2000000000000, 18.4000000000000, 19.9000000000000,\n 24.6000000000000, 28.5200000000000, 33.8000000000000, 35, 35.4000000000000,\n 35.6000000000000],\n [69.1000000000000, 66, 58.2000000000000, 48, 78, 71.7000000000000, 31, 61.1000000000000,\n 53.3000000000000, 81.1000000000000, 76, 70.2000000000000, 41.2000000000000, 64.4000000000000,\n 80.2000000000000, 50.9000000000000, 75.2000000000000, 77.3000000000000, 37.8000000000000, 67,\n 53.2000000000000, 72, 74.8000000000000, 54.7000000000000, 66.5000000000000, 35.9000000000000,\n 25.7000000000000, 60.7000000000000, 50.5000000000000, 68.9000000000000, 27.3000000000000,\n 70.3000000000000, 59.6000000000000, 44, 20.8000000000000, 61.7000000000000, 57.2000000000000,\n 47, 36]])\n stn_left = np.array([[-14.6, -13.2, -11.7, -9.10, -11.7, -13.2, -7.90, -10],\n [-15.1, -15.1, -15.1, -12.6, -12.6, -12.6, -9.40, -10.1],\n [-5.40, -7.20, -8.70, -8.70, -7.50, -5.10, -10.3, -7.80]])\n grid_right = np.copy(grid_left)\n grid_right[0, :] = grid_right[0, :] * -1\n stn_right = np.copy(stn_left)\n stn_right[0, :] = stn_right[0, :] * -1\n\n return grid_left, grid_right, stn_left, stn_right",
"def setUp(self):\n self.testdatapath = os.path.join(mkdtemp())\n self.testfilenames = [os.path.join(self.testdatapath, \"0001.nc\")]\n\n self.gpis = [1, 10, 11, 12]\n self.lons = [0, 0, 1, 1]\n self.lats = [1, 1, 0, 0]\n self.cells = [1, 1, 1, 1]\n self.grid = grids.CellGrid(self.lons, self.lats, self.cells, self.gpis)",
"def setUp(self):\n self.testdatapath = os.path.join(mkdtemp())\n self.testfilenames = [\n os.path.join(self.testdatapath, \"0035.nc\"),\n os.path.join(self.testdatapath, \"0107.nc\")\n ]\n\n self.gpis = [1, 10, 11, 12]\n reg_grid = grids.genreg_grid().to_cell_grid()\n self.grid = reg_grid.subgrid_from_gpis(self.gpis)",
"def setUp(self):\n self.gameBoard = Grid((100, 100), Cell)",
"def initGrid( self, name, suffix, n, ni, nj, ifields=[],rfields=[], lsize=10):\n #print \"initGrid: initializing %s\"%(name)\n self.name = name\n self.suffix = suffix\n self.n = n\n self.ni = ni\n self.nj = nj\n self.ifields = ifields\n self.rfields = rfields\n #print \"ifields=%s\\nrfields=%s\\nlsize=%d\"%(temp_ifields, temp_rfields, lsize)\n self.lgrid = attributevector.AttributeVector( ifields, rfields, lsize )\n #print \"allocating a temp array...\"\n temp = Numeric.zeros( lsize, Numeric.Float64 )\n #temp = -9999.0\n #print \"Filling real fields with default values...\"\n for f in rfields:\n #print \"\\tFilling field\",f,\":\",\n self.lgrid.importRAttr( f, temp )\n #print \"... OK!\"\n print \"initGrid: Initialized Grid!\"\n # setup av complete\n return",
"def fixture_grid():\n return load_earth_relief(registration=\"gridline\")",
"def GLDAS025LandGrid():\n return GLDAS025Grids(only_land=True)",
"def grid(self):\r\n dimA = self.dimA ; dimC = self.dimA ; W_grid = self.W_grid\r\n \r\n self.tol = 10e-5\r\n self.Niter = 10000\r\n \r\n a0 = 100 / self.dimA\r\n c0 = 100 / self.dimA\r\n a_grid = np.mgrid[0:(dimA):1] ; a_grid = a0 * a_grid ; self.a_grid = a_grid\r\n c_grid = np.mgrid[0:(dimC):1] ; c_grid = c0 * c_grid ; self.c_grid = c_grid\r\n self.W_grid = W_grid",
"def create_initial_grid():\n\n\tgrid = {(x, y) : ' + ' for x in range(8) for y in range(8)}\n\n\t# Define initial positions \n\tgrid[(3,3)] = colors.RED + \"[I]\" + colors.STOP\n\tgrid[(4,3)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(3,4)] = colors.GREEN + \"[A]\" + colors.STOP\n\tgrid[(4,4)] = colors.RED + \"[I]\" + colors.STOP\n\n\treturn grid",
"def setup_class(self):\n args = {'pdb_path':'/sdf/home/a/apeck/tomoxtal/examples/input/193l.pdb', 'resolution':6.0, 'size':250}\n\n # generate structure factors and retrieve associated cell information\n sf = cctbx_tools.reference_sf(args['pdb_path'], args['resolution'], expand_to_p1=True)\n sf_data = cctbx_tools.reformat_sf(sf)\n sg_symbol, sg_no, self.cell, cs = cctbx_tools.unit_cell_info(args['pdb_path'])\n \n # add random phase shifts\n hklIp1, hklIp2, hklIp3 = sf_data.copy(), sf_data.copy(), sf_data.copy()\n hklIp2[:,-1], self.shifts2 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n hklIp3[:,-1], self.shifts3 = phases_utils.add_random_phase_shift(sf_data[:,:3], sf_data[:,-1])\n\n # retain subset of Millers\n for data in [hklIp1,hklIp2,hklIp3]:\n keep_idx = np.unique(np.random.randint(0, high=data.shape[0], size=args['size']))\n data = data[keep_idx]\n \n self.data1, self.data2, self.data3 = hklIp1, hklIp2, hklIp3\n fshifts_list = np.random.uniform(size=(4,3))\n self.fshifts_list = np.vstack((fshifts_list, 1-self.shifts2, 1-self.shifts3))",
"def __init__(self, row=4, col=4, initial=2):\n self.grid = Grid(row, col, initial)",
"def test_hunger_grid_create(self):\n self.grid = Hunger_Grid.hunger_grid()\n self.grid.newGrid = Hunger_Grid.hunger_grid().create_hunger_grid(M=30, N=30, P_LAVA = 1.0)\n self.assertTrue(self.grid.newGrid.size == 900, \"Grid size is incorrect\")\n self.assertTrue(self.grid.newGrid[2, 2] == 1, \"Lava chance is not acting correctly\")\n self.assertTrue(self.grid.newGrid[-3, -3] == 1, \"Lava chance is not acting correctly\")",
"def create_grid(grid):\r\n for i in range(4):\r\n grid.append([0,0,0,0])",
"def _prepare_grids(self):\n if(self.header['element_infos'][0, 2] == 3):\n print('Triangular grid found')\n self.grid_is_rectangular = False\n\n triangles = self.element_data[3]\n triangles = [x.nodes for x in triangles]\n # python starts arrays with 0, but elem.dat with 1\n triangles = np.array(triangles) - 1\n self.elements = triangles\n tri_x = self.nodes['presort'][triangles, 1]\n tri_z = self.nodes['presort'][triangles, 2]\n self.grid = {}\n self.grid['x'] = tri_x\n self.grid['z'] = tri_z\n\n else:\n print('Rectangular grid found')\n self.grid_is_rectangular = True\n quads_raw = [x.nodes for x in self.element_data[8]]\n quads = np.array(quads_raw) - 1\n self.elements = quads\n quads_x = self.nodes['presort'][quads, 1]\n quads_z = self.nodes['presort'][quads, 2]\n self.grid = {}\n self.grid['x'] = quads_x\n self.grid['z'] = quads_z\n\n # calculate the dimensions of the grid\n try:\n self.calculate_dimensions()\n except Exception as e:\n e\n self.nr_nodes_x = None\n self.nr_nodes_z = None\n self.nr_elements_x = None\n self.nr_elements_z = None\n self.nr_of_elements = self.grid['x'].shape[0]"
] | [
"0.67005414",
"0.6574899",
"0.6446238",
"0.64186054",
"0.6409591",
"0.6403353",
"0.6398128",
"0.63680947",
"0.6358191",
"0.6286116",
"0.6284696",
"0.62710345",
"0.62391335",
"0.62309885",
"0.6200199",
"0.6179789",
"0.6178399",
"0.6168873",
"0.61656123",
"0.61655104",
"0.61533856",
"0.6142123",
"0.60665625",
"0.60558015",
"0.6046855",
"0.60187757",
"0.6010864",
"0.5990301",
"0.5986071",
"0.5971338"
] | 0.6873292 | 0 |
Top-level function to run various models for NM08 stimulation in parallel for all combinations of reservoir parameters, dual macro parameters, and permeability model parameters. | def model_multiprocess(reservoir_dicts, dual_lists, root, run_dict,
perm_tups=None, cores=2, machine='laptop',
parallel=False):
sys.setrecursionlimit(5000000)
if parallel:
Parallel(n_jobs=cores)(
            delayed(NM08_model_loop)(
                root, run_dict, res_dict, dual_list, perm_tup, machine, 100,
                # unique run index so each combination gets its own work_dir
                (j * len(dual_lists) + k) * len(perm_tups) + m)
for j, res_dict in enumerate(reservoir_dicts)
for k, dual_list in enumerate(dual_lists)
for m, perm_tup in enumerate(perm_tups)
)
else:
        i = 0
        for res_dict in reservoir_dicts:
            for dual_list in dual_lists:
                for perm_tup in (perm_tups or [None]):
                    NM08_model_loop(root, run_dict, res_dict, dual_list,
                                    perm_tup, machine, 100, i)
                    i += 1
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)",
"def run_all_models(self):\n #self.process_nitrate()\n try:\n sur_df = self.store.get('/said/{}/iv'.format(self.site['id']))\n con_df = self.store.get('/said/{}/qwdata'.format(self.site['id']))\n\n except KeyError:\n print('site {} not found'.format(site['name']))\n\n\n #determine start and end for plots\n start_date, end_date = get_time_limit(sur_df, con_df)\n\n #update start and end according to user\n user_start = self.site.get('start')\n user_end = self.site.get('end')\n\n if user_start:\n start_date = pd.to_datetime(user_start)\n\n if user_end:\n end_date = pd.to_datetime(user_end)\n\n\n #plot_ssc(ssc_model, filename='plots/{}_ssc.png'.format(site['name']),\n # start_date=start_date, end_date=end_date)\n\n #append the model results to summary\n #summary_table= summary_table.append(model_row_summary(ssc_model))\n\n for directory in ['model_data','report']:\n try:\n os.stat(directory)\n except:\n os.mkdir(directory)\n\n #pp_model_list = import pdb; pdb.set_trace()[\n # ['log(PP)',['log(Turb_HACH)']],\n # ['log(PP)',['log(Turb_YSI)']]\n #]\n\n #self.run_model(pp_model_list, 'PP')\n\n no3_model_list = [\n ['Nitrate',['NitrateSurr']],\n ]\n self.run_model(no3_model_list, 'Nitrate')\n\n ssc_model_list = [\n ['log(SSC)',['log(Turb_HACH)']],\n ['log(SSC)',['log(Turb_YSI)']]\n ]\n self.run_model(ssc_model_list, 'SSC')\n\n tp_model_list = [\n ['log(TP)',['log(OrthoP)','log(Turb_HACH)']],\n ['log(TP)',['log(OrthoP)','log(Turb_YSI)']],\n ['log(TP)',['log(Turb_HACH)']],\n ['log(TP)',['log(Turb_YSI)']]\n ]\n self.run_model(tp_model_list, 'TP')\n\n #write ssc model report\n #reportfile = 'report/{}_ssc_report.txt'.format(site['name'])\n #with open(reportfile, 'w') as f:\n # f.write(ssc_model.get_model_report().as_text())\n #summary_table= summary_table.append(model_row_summary(p_model1))\n #summary_table= summary_table.append(model_row_summary(p_model2))\n #plot_model(ssc_model, filename='plots/{}_ssc_model.png'.format(site['name']))\n #plot_phos(p_model1, p_model2, filename='plots/{}_tp.png'.format(site['name']),\n # start_date=start_date, end_date=end_date)\n #plot_model(p_model1, filename='plots/{}_orthoP_model.png'.format(site['name']))\n #\n ## try to plot phosphate\n #try:\n # phos_plot(con_data, sur_data, filename='plots/{}_p.png'.format(site['name']), title=site['name'],\n # return_model=True)\n #except:\n # print('phospate plot didnt work')\n #\n self.summary_table.to_csv('report/{}_model_summary.csv'.format(self.site['name']),\n index=False)",
"def run(self, X, Y, model):\n\n p0 = X.iloc[0] # read in the input info\n params = lmfit.Parameters() # empty parameter class\n success = True # check for success\n\n if model == 'Medlyn':\n min, max = self.param_space('g1')\n params.add('g1', p0.g1, min=min, max=max)\n min, max = self.param_space('sref')\n params.add('sref', p0.sref, min=min, max=max)\n\n if model == 'Eller':\n min, max = self.param_space('kmax')\n params.add('kmaxS1', p0.kmaxS1, min=min, max=max)\n\n if (model == 'ProfitMax') or (model == 'ProfitMax2'):\n min, max = self.param_space('kmax')\n params.add('kmax', p0.kmax, min=min, max=max)\n\n # the following models all require the Sperry kmax as an input!\n if model == 'Tuzet':\n min, max = self.param_space('g1')\n params.add('g1T', p0.g1T, min=min, max=max)\n\n if 'Tleaf' in X.columns: # vary g1 and kmax\n min, max = self.param_space('kmax')\n params.add('kmaxT', p0.kmax, min=min, max=max)\n\n else: # vary g1 and Pref, sref fixed\n min, max = self.param_space('PrefT', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PrefT):\n params.add('PrefT', p0.PrefT, min=min, max=max)\n\n else:\n params.add('PrefT', -p0.P88, min=min, max=max)\n\n if model == 'WUE-LWP':\n min, max = self.param_space('Lambda')\n params.add('Lambda', p0.Lambda, min=min, max=max)\n\n if model == 'CGain':\n min, max = self.param_space('Kappa')\n params.add('Kappa', p0.Kappa, min=min, max=max)\n\n if model == 'CMax':\n min, max = self.param_space('Alpha')\n params.add('Alpha', p0.Alpha, min=min, max=max)\n min, max = self.param_space('Beta')\n params.add('Beta', p0.Beta, min=min, max=max)\n\n if model == 'SOX-OPT':\n min, max = self.param_space('kmax')\n params.add('kmaxS2', p0.kmaxS2, min=min, max=max)\n\n if model == 'LeastCost':\n min, max = self.param_space('kmax')\n params.add('kmaxLC', p0.kmaxLC, min=min, max=max)\n min, max = self.param_space('Eta')\n params.add('Eta', p0.Eta, min=min, max=max)\n\n if model == 'CAP':\n min, max = self.param_space('krl')\n params.add('krlC', p0.krlC, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritC):\n params.add('PcritC', p0.PcritC, min=min, max=max)\n\n else:\n params.add('PcritC', -p0.P88, min=min, max=max)\n\n if model == 'MES':\n min, max = self.param_space('krl')\n params.add('krlM', p0.krlM, min=min, max=max)\n min, max = self.param_space('Pcrit', P50=p0.P50, P88=p0.P88)\n\n if any(X['Ps_pd'] > p0.PcritM):\n params.add('PcritM', p0.PcritM, min=min, max=max)\n\n else:\n params.add('PcritM', -p0.P88, min=min, max=max)\n\n if not os.path.isdir(self.opath): # create output dir\n os.makedirs(self.opath)\n\n # run the minimizer\n if self.method == 'emcee':\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, steps=self.steps,\n nwalkers=self.nchains, burn=self.burn,\n thin=self.thin, is_weighted=False,\n progress=False, nan_policy='omit')\n\n else:\n out = lmfit.minimize(fres, params, args=(model, X, Y,\n self.inf_gb,),\n method=self.method, nan_policy='omit')\n\n for param in out.params.values():\n\n if np.isclose(param.value, param.init_value):\n params[param.name] = lmfit.Parameter(name=param.name,\n value=1.5 *\n param.init_value)\n out = lmfit.minimize(fres, params,\n args=(model, X, Y, self.inf_gb,),\n method=self.method,\n nan_policy='omit')\n\n if not os.path.isfile(os.path.join(self.opath, '%s.txt' % (model))):\n txt = open(os.path.join(self.opath, '%s.txt' % (model)), 'w+')\n\n else: # append to existing file\n txt = open(os.path.join(self.opath, 
'%s.txt' % (model)), 'a+')\n\n txt.write('\\n')\n txt.write(lmfit.fit_report(out))\n\n if not success:\n txt.write('\\n## Warning: had to fix first parameter value')\n\n txt.write('\\n')\n txt.close() # close text file\n\n return out.params.valuesdict()",
"def main():\n model = sys.argv[1]\n maxfun = int(sys.argv[2])\n n_threads = int(sys.argv[3])\n\n # Validate input.\n assert maxfun >= 0, \"Maximum number of function evaluations cannot be negative.\"\n assert n_threads >= 1 or n_threads == -1, (\n \"Use -1 to impose no restrictions on maximum number of threads or choose a \"\n \"number higher than zero.\"\n )\n\n # Set number of threads\n os.environ[\"NUMBA_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"MKL_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"OMP_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"NUMEXPR_NUM_THREADS\"] = f\"{n_threads}\"\n\n # Late import of respy to ensure that environment variables are read by Numpy, etc..\n import respy as rp\n\n # Get model\n params, options = rp.get_example_model(model, with_data=False)\n\n # Simulate the data\n simulate = rp.get_simulate_func(params, options)\n df = simulate(params)\n\n # Get the criterion function and the parameter vector.\n crit_func = rp.get_log_like_func(params, options, df)\n\n # Run the estimation\n start = dt.datetime.now()\n\n for _ in range(maxfun):\n crit_func(params)\n\n end = dt.datetime.now()\n\n # Aggregate information\n output = {\n \"model\": model,\n \"maxfun\": maxfun,\n \"n_threads\": n_threads,\n \"start\": str(start),\n \"end\": str(end),\n \"duration\": str(end - start),\n }\n\n # Save time to file\n with open(\"scalability_results.txt\", \"a+\") as file:\n file.write(json.dumps(output))\n file.write(\"\\n\")",
"def main():\n dataset_idx = [11]\n network_idx = [0]\n reshape_input = [False]\n output_idxs = [0, 1]\n lrs = [0, 1, 2]\n dataset_ft_idx = [0,1,2,3]\n counter_exp = 0\n freeze = [0]\n percentages = [12]\n for dts in range(len(dataset_idx)):\n for nt in range(len(network_idx)):\n for opt in output_idxs:\n for dft in dataset_ft_idx:\n for pr in percentages:\n for rsi in range(len(reshape_input)):\n for fr in freeze:\n for lr in lrs:\n config = configuration(dataset_idx=dataset_idx[dts],\n network_idx=network_idx[nt],\n output_idx=opt,\n usage_modus_idx=5,\n dataset_fine_tuning_idx=dft,\n reshape_input=reshape_input[rsi],\n learning_rates_idx=lr,\n name_counter=counter_exp,\n freeze=fr,\n percentage_idx=pr,\n fully_convolutional=False)\n\n setup_experiment_logger(logging_level=logging.DEBUG,\n filename=config['folder_exp'] + \"logger.txt\")\n\n logging.info('Finished')\n\n modus = Modus_Selecter(config)\n\n # Starting process\n modus.net_modus()\n counter_exp += 1\n\n\n return",
"def NM08_model_loop(root, run_dict, res_dict, dual_list, perm_tup, machine,\n decimate=100, i=1, verbose=False):\n if machine == 'laptop':\n fz_file_pat = '/home/chet/gmt/data/NZ/wells/feedzones/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/home/chet/data/mrp_data/Steve_Sewell_MRP_PhD_Data/' \\\n 'Natural_State_Temperatures/NM08_profile_pyfehm_comma.txt'\n excel_file = '/home/chet/data/mrp_data/well_data/flow_rates/' \\\n 'July_2017_final/Merc_Ngatamariki.xlsx'\n elif machine == 'server':\n fz_file_pat = '/Users/home/hoppche/data/merc_data/wells/' \\\n 'NM08_feedzones_?.csv'\n T_file = '/Users/home/hoppche/data/merc_data/temps/' \\\n 'NM08_profile_pyfehm_comma.txt'\n excel_file = '/Users/home/hoppche/data/merc_data/flows/' \\\n 'Merc_Ngatamariki.xlsx'\n # Make the directory for this object\n print('Making grid')\n # Extract just floats and exponent from perms\n work_dir = '{}/run_{}'.format(root, i)\n dat = make_NM08_grid(work_dir=work_dir, log_base=3, max_range=15)\n print('Assigning reservoir parameters')\n dat = reservoir_params(dat, temp_file=T_file, reservoir_dict=res_dict,\n show=False)\n print('Defining well nodes')\n dat = define_well_nodes(\n dat, well_file_pattern=fz_file_pat,\n well_name='NM08', type='injection', surf_loc=[1500., 1500.])\n print('Running initial condition')\n dat = run_initial_conditions(dat)\n dat = set_well_boundary(\n dat, excel_file=excel_file, sheet_name='NM08 Stimulation',\n well_name='NM08', dates=[datetime(2012, 6, 7), datetime(2012, 7, 12)],\n t_step='day', decimate=decimate, debug=0)\n dat = set_stress(dat)\n dat = set_dual(dat, zonelist=['tahorakuri'], dual_list=dual_list)\n if perm_tup:\n dat = set_permmodel(dat, zonelist=['tahorakuri'], index=perm_tup[0],\n permmodel_dict=perm_tup[1])\n model_run(dat, run_dict, verbose=verbose)\n return",
"def run(pars, #parameter files\n #directory of scenario files\n scen_dir = r'C:\\LS\\03_TOOLS\\_git\\COVID_01\\scenarios',\n \n #map to scenario files\n scen_d = {\n 'NoNPI':'NPI_Scenario1_None.R',\n 'BI1918':'NPI_Scenario2_Bootsma_1918Influenza.R',\n 'SouthKorea':'NPI_Scenario3_SouthKorea.R',\n 'Reduced':'NPI_Scenario4_ReducedGamma.R', \n }\n ):\n \n \n \n #===========================================================================\n # precheck \n #===========================================================================\n assert len(pars)==4, 'unexpected inputs count'\n print('pars: \\n%s'%pars)\n \n #check the R Environment variables\n assert 'R_USER' in os.environ\n assert 'R_HOME' in os.environ\n \n #print('R_USER=%s \\nR_HOME=%s'%(os.getenv('R_USER'), os.getenv('R_HOME')))\n\n \n \n \n \n #===========================================================================\n # setup\n #===========================================================================\n s = setup.Setup(setup_name = 'mid_utah_'+pars[2],\n spatial_setup = WestCoastSpatialSetup(),\n nsim = int(pars[1]),\n ti = datetime.date(2020, 3, 6),\n tf = datetime.date(2020, 10, 1),\n interactive = False,\n write_csv = True,\n dt = 1/4)\n \n #===========================================================================\n # set the scenario parmaters\n #===========================================================================\n\n \n \n assert pars[2] in scen_d, 'unrecognized scenario: %s'%pars[2]\n \n rfp = os.path.join(scen_dir, scen_d[pars[2]])\n assert os.path.exists(rfp)\n \n s.script_npi = rfp\n \n print('set script_npi=%s'%s.script_npi)\n\n #===========================================================================\n # execute\n #===========================================================================\n\n print()\n print()\n print(f\">>> Starting {s.nsim} model runs on {pars[3]} processes\")\n print(f\">>> Setup *** {s.setup_name} *** from {s.ti} to {s.tf} !\")\n print(f\">>> writing to folder : {s.datadir}{s.setup_name}\")\n print()\n print()\n \n tic = time.time()\n \n res_l = seir.run_parallel(s, int(pars[3]))\n print(f\">>> Runs done in {time.time()-tic} seconds...\")",
"def one_experiment():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'overfit_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 15, 1: 85}]\n # {0:15, 1:85}]#, {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n features_to_use = ['user', 'countries', 'session', 'format', 'token']\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=40)\n set_params(features_to_use=features_to_use)\n\n # save constant parameters to a new \"experiment_..\" filgithx+P@2ub\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**3)), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name,\n new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()",
"def main(parameters_file, no_parameters_file, initialise, iterations, scenario, data_dir, output, output_every_iteration,\n debug, repetitions, lockdown_file, use_cache, opencl, opencl_gui, opencl_gpu):\n\n # If we are running with opencl_gui then set opencl to True, so you only need to pass one flag\n if opencl_gui:\n opencl = True\n\n # First see if we're reading a parameters file or using command-line arguments.\n if no_parameters_file:\n print(\"Not reading a parameters file\")\n else:\n print(f\"Reading parameters file: {parameters_file}. \"\n f\"Any other model-related command-line arguments are being ignored\")\n with open(parameters_file, 'r') as f:\n parameters = load(f, Loader=SafeLoader)\n sim_params = parameters[\"microsim\"] # Parameters for the dynamic microsim (python)\n calibration_params = parameters[\"microsim_calibration\"]\n disease_params = parameters[\"disease\"] # Parameters for the disease model (r)\n # TODO Implement a more elegant way to set the parameters and pass them to the model. E.g.:\n # self.params, self.params_changed = Model._init_kwargs(params, kwargs)\n # [setattr(self, key, value) for key, value in self.params.items()]\n # Utility parameters\n scenario = sim_params[\"scenario\"]\n iterations = sim_params[\"iterations\"]\n data_dir = sim_params[\"data-dir\"]\n output = sim_params[\"output\"]\n output_every_iteration = sim_params[\"output-every-iteration\"]\n debug = sim_params[\"debug\"]\n repetitions = sim_params[\"repetitions\"]\n lockdown_file = sim_params[\"lockdown-file\"]\n\n # Check the parameters are sensible\n if iterations < 1:\n raise ValueError(\"Iterations must be > 1. If you want to just initialise the model and then exit, use\"\n \"the --initialise flag\")\n if repetitions < 1:\n raise ValueError(\"Repetitions must be greater than 0\")\n if (not output) and output_every_iteration:\n raise ValueError(\"Can't choose to not output any data (output=False) but also write the data at every \"\n \"iteration (output_every_iteration=True)\")\n\n print(f\"Running model with the following parameters:\\n\"\n f\"\\tParameters file: {parameters_file}\\n\"\n f\"\\tScenario directory: {scenario}\\n\"\n f\"\\tInitialise (and then exit?): {initialise}\\n\"\n f\"\\tNumber of iterations: {iterations}\\n\"\n f\"\\tData dir: {data_dir}\\n\"\n f\"\\tOutputting results?: {output}\\n\"\n f\"\\tOutputting results at every iteration?: {output_every_iteration}\\n\"\n f\"\\tDebug mode?: {debug}\\n\"\n f\"\\tNumber of repetitions: {repetitions}\\n\"\n f\"\\tLockdown file: {lockdown_file}\\n\",\n f\"\\tUse cache?: {use_cache}\\n\",\n f\"\\tUse OpenCL version?: {opencl}\\n\",\n f\"\\tUse OpenCL GUI?: {opencl_gui}\\n\",\n f\"\\tUse OpenCL GPU for processing?: {opencl_gpu}\\n\",\n f\"\\tCalibration parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(calibration_params)}\\n\",\n f\"\\tDisease parameters: {'N/A (not reading parameters file)' if no_parameters_file else str(disease_params)}\\n\")\n\n # To fix file path issues, use absolute/full path at all times\n # Pick either: get working directory (if user starts this script in place, or set working directory\n # Option A: copy current working directory:\n base_dir = os.getcwd() # get current directory\n data_dir = os.path.join(base_dir, data_dir)\n r_script_dir = os.path.join(base_dir, \"R\", \"py_int\")\n\n ### section for fetching data\n if not os.path.isdir(data_dir):\n\n print(f\"No data directory detected.\")\n\n if os.path.isfile(data_dir + \".tar.gz\"):\n print(f\"An archive file 
matching the name of the data directory has been detected!\")\n print(f\"Unpacking this archive file now.\")\n unpack_data(data_dir + \".tar.gz\")\n \n else:\n print(f\"{data_dir} does not exist. Downloading devon_data.\")\n data_setup()\n\n # Temporarily only want to use Devon MSOAs\n # devon_msoas = pd.read_csv(os.path.join(data_dir, \"devon_msoas.csv\"), header=None,\n # names=[\"x\", \"y\", \"Num\", \"Code\", \"Desc\"])\n\n # Prepare the QUANT api (for estimating school and retail destinations)\n # we only need 1 QuantRampAPI object even if we do multiple iterations\n # the quant_object object will be called by each microsim object\n quant_path = os.path.join(data_dir, \"QUANT_RAMP\")\n if not os.path.isdir(quant_path):\n raise Exception(\"QUANT directory does not exist, please check input\")\n quant_object = QuantRampAPI(quant_path)\n\n # args for population initialisation\n population_args = {\"data_dir\": data_dir, \"debug\": debug,\n \"quant_object\": quant_object}\n\n # args for Python/R Microsim. Use same arguments whether running 1 repetition or many\n msim_args = {\"data_dir\": data_dir, \"r_script_dir\": r_script_dir, \"scen_dir\": scenario, \"output\": output,\n \"output_every_iteration\": output_every_iteration}\n\n if not no_parameters_file: # When using a parameters file, include the calibration parameters\n msim_args.update(**calibration_params) # python calibration parameters are unpacked now\n # Also read the R calibration parameters (this is a separate section in the .yml file)\n if disease_params is not None:\n # (If the 'disease_params' section is included but has no calibration variables then we want to ignore it -\n # it will be turned into an empty dictionary by the Microsim constructor)\n msim_args[\"disease_params\"] = disease_params # R parameters kept as a dictionary and unpacked later\n\n # Temporarily use dummy data for testing\n # data_dir = os.path.join(base_dir, \"dummy_data\")\n # m = Microsim(data_dir=data_dir, testing=True, output=output)\n\n # cache to hold previously calculate population data\n cache = InitialisationCache(cache_dir=os.path.join(data_dir, \"caches\"))\n\n # generate new population dataframes if we aren't using the cache, or if the cache is empty\n if not use_cache or cache.is_empty():\n print(f'Reading population data because {\"caching is disabled\" if not use_cache else \"the cache is empty\"}')\n population = PopulationInitialisation(**population_args)\n individuals = population.individuals\n activity_locations = population.activity_locations\n\n # store in cache so we can load later\n cache.store_in_cache(individuals, activity_locations)\n else: # load from cache\n print(\"Loading data from previous cache\")\n individuals, activity_locations = cache.read_from_cache()\n\n # Calculate the time-activity multiplier (this is for implementing lockdown)\n time_activity_multiplier = None\n if lockdown_file != \"\":\n print(f\"Implementing a lockdown with time activities from {lockdown_file}\")\n time_activity_multiplier: pd.DataFrame = \\\n PopulationInitialisation.read_time_activity_multiplier(os.path.join(data_dir, lockdown_file))\n\n # Select which model implementation to run\n if opencl:\n run_opencl_model(individuals, activity_locations, time_activity_multiplier, iterations, data_dir, base_dir,\n opencl_gui, opencl_gpu, use_cache, initialise, calibration_params, disease_params)\n else:\n # If -init flag set the don't run the model. 
Note for the opencl model this check needs to happen\n # after the snapshots have been created in run_opencl_model\n if initialise:\n print(\"Have finished initialising model. -init flag is set so not running it. Exitting\")\n return\n run_python_model(individuals, activity_locations, time_activity_multiplier, msim_args, iterations,\n repetitions, parameters_file)",
"def run_models(\n weather_fn: str,\n weather_header_row: int,\n start_date: str,\n start_time: str,\n duration: int,\n selected_models: Dict,\n params_grass: Dict,\n params_mk5: Dict,\n params_vesta: Dict,\n params_vesta_fhr: Dict,\n ) -> Dict:\n start = dt.datetime.now()\n weather_df = get_weather(weather_fn, weather_header_row)\n weather_df = trim_weather(weather_df, start_date, start_time, duration)\n \n\n MODELS = {\n # 'GRASS_Cheney_98': ros_grass_cheney(weather_df, grass_state, grass_curing),\n 'GRASS_Cheney_98': ros_grass_cheney(weather_df, params_grass),\n 'FOREST_Mk5': ros_forest_mk5(weather_df, params_mk5),\n 'FOREST_Vesta': ros_forest_vesta(weather_df, params_vesta),\n 'FOREST_Vesta_FHR': ros_forest_vesta_fhr(weather_df, params_vesta_fhr),\n 'FOREST_Vesta_KT': ros_forest_vesta_kt(weather_df, params_vesta),\n }\n\n model_outputs = {} # model name as key, dataframes as val\n\n models_run = 0\n for key, val in selected_models.items():\n if val:\n model_outputs[key] = MODELS[key]\n models_run += 1\n\n time_elapsed = dt.datetime.now()-start\n print(f'{models_run} models run in {time_elapsed}')\n return model_outputs",
"def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n 
resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)",
"def main(model_path='models/Nakakuki_Cell_2010_ODE'):\n n_file = []\n fitparam_files = os.listdir(model_path.strip('/') + '/fitparam')\n for file in fitparam_files:\n if re.match(r'\\d', file):\n n_file.append(int(file))\n for nth_paramset in n_file:\n os.makedirs(\n model_path.strip('/') \n + '/dat2npy/out/{:d}'.format(nth_paramset), exist_ok=True\n )\n nth_fitparam_files = os.listdir(\n model_path.strip('/') + '/fitparam/{:d}'.format(nth_paramset)\n )\n for dat_file in nth_fitparam_files:\n if 'fit' in dat_file:\n \"\"\"\n - fit_param%d.dat -> fit_param%d.npy\n - best_fitness.dat -> best_fitness.npy\n \"\"\"\n try:\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='float'\n )\n except ValueError:\n pass\n else:\n \"\"\"\n - count_num.dat -> count_num.npy\n - generation.dat -> generation.npy\n \"\"\"\n data = np.loadtxt(\n model_path.strip('/') + '/fitparam/{:d}/{}'.format(\n nth_paramset, dat_file\n ), dtype='int'\n )\n np.save(\n model_path.strip('/') + '/dat2npy/out/{:d}/'.format(nth_paramset)\n + dat_file.replace('.dat', '.npy'), data\n )\n if os.path.isfile(\n './logs/{:d}.log'.format(nth_paramset)):\n shutil.copyfile(\n './logs/{:d}.log'.format(nth_paramset),\n model_path.strip('/') \n + '/dat2npy/out/{:d}/optimization.log'.format(nth_paramset)\n )",
"def _runModel(self,params=None,text_display=False,nonuniform_parameter=False):\n\n if params is None:\n params = self.params\n \n check_params(params)\n\n if nonuniform_parameter:\n syn_param,syn_range = nonuniform_parameter\n\n FS = float(params[\"fs\"])\n dt = 1./FS\n no_samples = FS*params[\"sweep_length\"]\n no_trials = params[\"num_trials\"]\n no_stims = params[\"num_stim\"]\n no_syn = params[\"num_syn\"]\n time = np.arange(no_samples)*dt\n\n # array of AP times\n ap_times = np.zeros(math.floor(no_samples))\n ap_inds = np.zeros(no_stims)\n\n for i in range(no_stims):\n curr_stim_time = params[\"stim1_time\"] + i * params[\"stim_int\"]\n curr_ind = math.floor(curr_stim_time*FS)\n try:\n ap_times[curr_ind] = 1\n ap_inds[i] = curr_ind\n except IndexError as err:\n print(\"INDEXERROR: {0}\".format(err))\n print(\"Stimulation parameters may exceed length of sweep\")\n return\n\n ####################################\n # Simulate Calcium Channel Opening #\n ####################################\n if nonuniform_parameter:\n #NOTE: As warned, rounding-up error if no_syn not multiple of len(syn_range)\n r_ves_key = {\n 1 : 15,\n 2 : 23.3,\n 3 : 29.2,\n 4 : 33.8,\n 5 : 37.6,\n }\n syn_set_size = int(no_syn / (len(syn_range))) + 1\n for x in range(len(syn_range)):\n params[syn_param] = syn_range[x]\n params['r_cav_ves'] = r_ves_key[syn_range[x]]\n calcium_temp = _sim_CaV_opening(\n params, no_stims, no_trials, syn_set_size, text_display=text_display)\n if (x == 0):\n if params[\"diffusion_model\"]:\n cav_successes,cav_currents,ca_initial,ca_kernel,Ca_t = calcium_temp\n else:\n cav_successes,cav_currents,ca_kernel,Ca_t = calcium_temp\n else:\n cav_successes = np.dstack((cav_successes,calcium_temp[0]))\n cav_currents = np.dstack((cav_currents,calcium_temp[1]))\n ca_initial = np.dstack((ca_initial,calcium_temp[2]))\n Ca_t = np.dstack((Ca_t,calcium_temp[4]))\n \n else:\n if params[\"diffusion_model\"]:\n cav_successes,cav_currents,ca_initial,ca_kernel,Ca_t = _sim_CaV_opening(\n params, no_stims, no_trials, no_syn, text_display=text_display)\n else:\n cav_successes,cav_currents,ca_kernel,Ca_t = _sim_CaV_opening(\n params, no_stims, no_trials, no_syn, text_display=text_display)\n \n #########################################\n # Simulate Ca-Dependent Vesicle Release #\n #########################################\n p_v,corrected_p,p_v_successes = _sim_vesicle_release(\n params,Ca_t,cav_successes,text_display=text_display)\n\n ################################\n # Simulate Vesicular Depletion #\n ################################\n if(params[\"depletion_on\"]):\n quantal_content_per_syn = _sim_vesicle_depletion(\n params,p_v_successes,no_stims,text_display=text_display)\n else:\n quantal_content_per_syn = p_v_successes\n\n ###########################\n # Simulate AMPA Responses #\n ###########################\n quantal_content,epsc,epsc_per_syn,epsc_ave = _sim_ampa_responses(\n params,quantal_content_per_syn,text_display=text_display)\n\n #####################\n # Packaging Results #\n #####################\n\n if text_display:\n print(\"Packaging Results....\")\n \n if params[\"diffusion_model\"]:\n data = {\n \"time\" : time,\n \"ap_times\" : ap_times,\n \"ap_inds\" : ap_inds,\n \"cav_successes\" : cav_successes,\n \"cav_currents\" : cav_currents,\n \"ca_kernel\" : ca_kernel,\n \"ca_initial\" : ca_initial,\n \"Ca_t\" : Ca_t,\n \"p_v_successes\" : p_v_successes,\n \"quantal_content_per_syn\" : quantal_content_per_syn,\n \"epsc_per_syn\" : epsc_per_syn,\n \"quantal_content\" : quantal_content,\n \"epsc\" : 
epsc,\n \"epsc_ave\" : epsc_ave\n }\n else:\n data = {\n \"time\" : time,\n \"ap_times\" : ap_times,\n \"ap_inds\" : ap_inds,\n \"cav_successes\" : cav_successes,\n \"cav_currents\" : cav_currents,\n \"ca_kernel\" : ca_kernel,\n \"Ca_t\" : Ca_t,\n \"p_v_successes\" : p_v_successes,\n \"quantal_content_per_syn\" : quantal_content_per_syn,\n \"epsc_per_syn\" : epsc_per_syn,\n \"quantal_content\" : quantal_content,\n \"epsc\" : epsc,\n \"epsc_ave\" : epsc_ave\n }\n\n\n sim_run = simulation_run(params,data)\n\n return sim_run",
"def reg_experiment():\n print(\"REG_EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'regularization_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'dropout'\n changing_param_value = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\n # , {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=1)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(dropout = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id = new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)",
"def parameter_tuning(D, param_grid):\n grid = ParameterGrid(param_grid)\n\n for params in grid:\n model_file = 'Theshpairs1_Ind_5' + '_emb_' + str(params['embedding_size']) + '_nr_' + str(\n params['negative_ratio']) + \\\n '_batch_' + str(params['batch_size']) + '_epochs_' \\\n + str(params['nb_epochs']) + '_classification_' + str(params['classification'])\n\n print(model_file)\n\n # Train Model\n Prio = NNEmbeddings(D, embedding_size=params['embedding_size'], negative_ratio=params['negative_ratio'],\n nb_epochs=params['nb_epochs'], batch_size=params['batch_size'],\n classification=params['classification'], save=True,\n model_file='Models/' + model_file + '.h5')\n\n # New Predicitons\n df_metrics = Prio.predict(pickle_file=None)\n plot_single(df_metrics)\n plot_metric(df_metrics, name='Plot_Metrics/' + model_file + '.png')",
"def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', 
sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )",
"def forqs_parallel(configs):\n pool = Pool(21)\n pool.map(forqs_sim, configs)\n pool.close()\n pool.join()",
"def main():\n\n # Create model_dict from arguments\n model_dict = model_dict_create()\n\n # No. of deviations to consider\n no_of_mags = 50\n dev_list = np.linspace(0.1, 5.0, no_of_mags)\n\n # Load dataset specified in model_dict\n print('Loading data...')\n dataset = model_dict['dataset']\n if (dataset == 'MNIST'):\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(model_dict)\n # rd_list = [None, 784, 331, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]\n rd_list = [None, 331, 100, 80, 60, 40, 20]\n # rd_list = [None,784,100]\n elif dataset == 'GTSRB':\n X_train, y_train, X_val, y_val, X_test, y_test = load_dataset(model_dict)\n rd_list = [1024, 338, 200, 100, 90, 80, 70, 60, 50, 40, 33, 30, 20, 10]\n elif dataset == 'HAR':\n X_train, y_train, X_test, y_test = load_dataset(model_dict)\n # rd_list = [561, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10]\n rd_list = [561]\n X_val = None\n y_val = None\n\n mean = np.mean(X_train, axis=0)\n X_train -= mean\n X_test -= mean\n if (dataset == 'MNIST') or (dataset == 'GTSRB'): X_val -= mean\n\n # fig, ax = plt.subplots(nrows=1, ncols=1)\n\n # for rd in rd_list:\n # model_setup_carlini(model_dict, X_train, y_train, X_test, y_test, X_val, y_val, mean, rd, ax)\n\n partial_carlini = partial(model_setup_carlini, model_dict=model_dict, X_train=X_train, y_train=y_train, X_test=X_test, y_test=y_test, X_val=X_val, y_val=y_val,\n mean=mean)\n pool=multiprocessing.Pool(processes=8)\n pool.map(partial_carlini,rd_list,1)\n pool.close()\n pool.join()\n\n # dim_red = model_dict['dim_red']\n # plt.legend()\n # plt.savefig('carlini_l2_hist_'+dim_red+'.png')",
"def run():\n if am_i_root():\n\n print(\"*** initializing...\")\n\n # Print parameters\n print(\"N_DIMS = \" + str(N_DIMS))\n print(\"LAMBDA_OVER_DX = \" + str(LAMBDA_OVER_DX))\n print(\"R_DT = \" + str(R_DT))\n print(\"MU0_POISSON = \" + str(MU0_POISSON))\n print(\"NORM_POISSON = \" + NORM_POISSON)\n print(\"N_GRID = \" + str(N_GRID))\n print(\"N_HITS = \" + str(N_HITS))\n print(\"POLICY = \" + str(POLICY))\n if POLICY == -1:\n print(\"MODEL_PATH = \" + str(MODEL_PATH))\n else:\n print(\"STEPS_AHEAD = \" + str(STEPS_AHEAD))\n print(\"EPSILON = \" + str(EPSILON))\n print(\"STOP_t = \" + str(STOP_t))\n print(\"STOP_p = \" + str(STOP_p))\n print(\"N_PARALLEL = \" + str(N_PARALLEL))\n print(\"WITH_MPI = \" + str(WITH_MPI))\n print(\"ADAPTIVE_N_RUNS = \" + str(ADAPTIVE_N_RUNS))\n print(\"REL_TOL = \" + str(REL_TOL))\n print(\"MAX_N_RUNS = \" + str(MAX_N_RUNS))\n print(\"N_RUNS(input) = \" + str(N_RUNS))\n sys.stdout.flush()\n\n # Perform runs\n if am_i_root():\n print(\"*** generating episodes...\")\n\n N_runs = N_RUNS\n if ADAPTIVE_N_RUNS or WITH_MPI:\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n N_runso = 0\n\n if WITH_MPI:\n cdf_t_tot_loc = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot_loc = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_loc = np.nan * np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n failed_loc = - np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n else:\n cdf_t_tot = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_episodes = np.nan * np.ones(MAX_N_RUNS, dtype=float)\n failed_episodes = - np.ones(MAX_N_RUNS, dtype=float)\n\n while True:\n if WITH_MPI: # MPI\n if N_runs % N_PARALLEL != 0:\n raise Exception(\"N_runs must be multiple of N_PARALLEL with MPI\")\n COMM.Barrier()\n # Decomposition\n Nepisodes = N_runs // N_PARALLEL\n episode_list = range(N_runso + ME, N_runs, N_PARALLEL)\n # Run episodes and reduce locally\n ind = N_runso // N_PARALLEL\n for episode in episode_list:\n cdf_t, cdf_h, mean_t_loc[ind], failed_loc[ind] = Worker(episode)\n cdf_t_tot_loc += cdf_t\n cdf_h_tot_loc += cdf_h\n ind += 1\n\n # Reduce globally the mean_t and failed\n mean_t_episodes = np.empty([N_runs], dtype=float)\n failed_episodes = np.empty([N_runs], dtype=float)\n COMM.Barrier()\n COMM.Allgather([mean_t_loc[:ind], Nepisodes, MPI.DOUBLE], [mean_t_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Allgather([failed_loc[:ind], Nepisodes, MPI.DOUBLE], [failed_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Barrier()\n elif N_PARALLEL > 1: # multiprocessing\n # Run episodes in parallel\n pool = multiprocessing.Pool(N_PARALLEL)\n result = pool.map(Worker, range(N_runso, N_runs))\n pool.close()\n pool.join()\n # Reduce\n ind = N_runso\n for cdf_t, cdf_h, mean_t, failed in result:\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n elif N_PARALLEL == 1: # sequential\n ind = N_runso\n for episode in range(N_runso, N_runs):\n cdf_t, cdf_h, mean_t, failed = Worker(episode)\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n else:\n raise Exception(\"Problem with N_PARALLEL: must be an int >= 1\")\n\n # estimate of the error\n mean_ep = np.mean(mean_t_episodes[:N_runs])\n sigma_ep = np.std(mean_t_episodes[:N_runs])\n std_error_mean = sigma_ep / np.sqrt(N_runs)\n rel_std_error_mean = std_error_mean / mean_ep\n\n # break 
clause\n if not ADAPTIVE_N_RUNS:\n break\n else:\n if rel_std_error_mean < REL_TOL:\n break\n elif N_runs >= MAX_N_RUNS:\n break\n else:\n N_runso = N_runs\n N_runs = int(np.ceil(1.05 * (sigma_ep / mean_ep / REL_TOL) ** 2))\n N_runs = min(N_runs, MAX_N_RUNS)\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n if am_i_root():\n print(\"N_RUNS(performed) = \" + str(N_runs))\n sys.stdout.flush()\n\n # Reduce\n if am_i_root():\n print(\"*** post-processing...\")\n if WITH_MPI:\n # locally\n cdf_t_tot_loc /= N_runs\n cdf_h_tot_loc /= N_runs\n # Reduce globally\n cdf_t_tot = np.empty([LEN_CDF_T], dtype=float)\n cdf_h_tot = np.empty([LEN_CDF_H], dtype=float)\n COMM.Barrier()\n COMM.Allreduce(cdf_t_tot_loc, cdf_t_tot, op=MPI.SUM)\n COMM.Allreduce(cdf_h_tot_loc, cdf_h_tot, op=MPI.SUM)\n COMM.Barrier()\n else:\n cdf_t_tot /= N_runs\n cdf_h_tot /= N_runs\n mean_t_episodes = mean_t_episodes[:N_runs]\n failed_episodes = failed_episodes[:N_runs]\n\n # Further post-processing, save and plot\n if am_i_root():\n\n # from cdf to pdf\n pdf_t_tot = cdf_to_pdf(cdf_t_tot)\n pdf_h_tot = cdf_to_pdf(cdf_h_tot)\n\n # compute stats of number of steps and number of hits\n t_bins = np.arange(BIN_START_T, BIN_END_T, BIN_SIZE_T) + 0.5 * BIN_SIZE_T\n mean_t, sigma_t, skew_t, kurt_t, p_found = stats_from_pdf(t_bins, pdf_t_tot)\n p25_t, p50_t, p75_t, p90_t, p95_t, p99_t, _ = stats_from_cdf(t_bins, cdf_t_tot)\n\n h_bins = np.arange(BIN_START_H, BIN_END_H, BIN_SIZE_H) + 0.5 * BIN_SIZE_H\n mean_h, sigma_h, skew_h, kurt_h, _ = stats_from_pdf(h_bins, pdf_h_tot)\n p25_h, p50_h, p75_h, p90_h, p95_h, p99_h, _ = stats_from_cdf(h_bins, cdf_h_tot)\n\n print(\"probability that the source is never found : %.10f\" % (1.0 - p_found, ))\n print(\"mean number of steps to find the source : %.3f +/- %.3f\" % (mean_t, 1.96 * std_error_mean))\n print(\"number of steps to find the source with 50 %% probability: %.3f\" % p50_t)\n print(\"number of steps to find the source with 99 %% probability: %.3f\" % p99_t)\n nb_failed = np.sum(failed_episodes)\n if np.any(failed_episodes < 0):\n nb_failed = -1\n print(\"problem while recording failures\")\n else:\n print(\"number of failed episodes : %d / %d (%f %%)\"\n % (nb_failed, N_runs, nb_failed / N_runs * 100))\n sys.stdout.flush()\n\n # save all parameters to txt file\n inputs = {\n \"N_DIMS\": N_DIMS,\n \"LAMBDA_OVER_DX\": LAMBDA_OVER_DX,\n \"R_DT\": R_DT,\n \"MU0_POISSON\": MU0_POISSON,\n \"NORM_POISSON\": NORM_POISSON,\n \"N_GRID\": N_GRID,\n \"N_HITS\": N_HITS,\n \"POLICY\": POLICY,\n \"STEPS_AHEAD\": STEPS_AHEAD,\n \"MODEL_PATH\": MODEL_PATH,\n \"STOP_t\": STOP_t,\n \"STOP_p\": STOP_p,\n \"ADAPTIVE_N_RUNS\": ADAPTIVE_N_RUNS,\n \"REL_TOL\": REL_TOL,\n \"MAX_N_RUNS\": MAX_N_RUNS,\n \"N_RUNS_PERFORMED\": N_runs,\n \"BIN_START_T\": BIN_START_T,\n \"BIN_END_T\": BIN_END_T,\n \"BIN_SIZE_T\": BIN_SIZE_T,\n \"BIN_START_H\": BIN_START_H,\n \"BIN_END_H\": BIN_END_H,\n \"BIN_SIZE_H\": BIN_SIZE_H,\n \"EPSILON\": EPSILON,\n }\n param_txt_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_parameters\" + \".txt\"))\n with open(param_txt_file, 'w') as out:\n for key, val in inputs.items():\n print(key + \" = \" + str(val), file=out)\n\n # save stats\n stats_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_statistics\" + \".txt\"))\n with open(stats_file, \"w\") as sfile:\n sfile.write(\"p_not_found\\t%+.4e\\n\" % (1 - p_found,))\n for varname in \\\n ('mean_t', 'sigma_t', 
'skew_t', 'kurt_t', 'p25_t', 'p50_t', 'p75_t', 'p90_t', 'p95_t', 'p99_t'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n for varname in \\\n ('mean_h', 'sigma_h', 'skew_h', 'kurt_h', 'p25_h', 'p50_h', 'p75_h', 'p90_h', 'p95_h', 'p99_h'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n\n # save CDF of number of steps\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nsteps\" + \".npy\"))\n np.save(table_file, np.vstack((t_bins, cdf_t_tot)))\n\n # save CDF of number of hits\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nhits\" + \".npy\"))\n np.save(table_file, np.vstack((h_bins, cdf_h_tot)))\n\n # create and save figures\n if POLICY == -1:\n specifics = \"MODEL = \" + os.path.basename(MODEL_PATH)\n else:\n specifics = \"STEPS_AHEAD = \" + str(STEPS_AHEAD)\n subtitle = (\n \"N_DIMS = \"\n + str(N_DIMS)\n + \", \"\n + \"LAMBDA_OVER_DX = \"\n + str(LAMBDA_OVER_DX)\n + \", \"\n + \"R_DT = \"\n + str(R_DT)\n + \", \"\n + \"POLICY = \"\n + str(POLICY)\n + \", \"\n + specifics\n + \", \"\n + \"N_GRID = \"\n + str(N_GRID)\n + \", \"\n + \"N_HITS = \"\n + str(N_HITS)\n + \", \"\n + \"N_RUNS = \"\n + str(N_runs)\n + \"\\n\"\n )\n\n # plot PDF(nsteps), CDF(nsteps), PDF(nhits), CDF(nhits)\n fig, ax = plt.subplots(2, 2, figsize=(12, 10))\n plt.subplots_adjust(left=0.08, bottom=0.06, right=0.96, top=0.92, hspace=0.35, wspace=0.30)\n kwargs = {'xycoords': 'axes fraction', 'fontsize': 8, 'ha': \"right\"}\n for row, varname in enumerate([\"number of steps\", \"number of hits\"]):\n if varname == \"number of steps\":\n bins = t_bins\n cdf_tot = cdf_t_tot\n pdf_tot = pdf_t_tot\n mean = mean_t\n sigma = sigma_t\n skew = skew_t\n kurt = kurt_t\n p50 = p50_t\n p75 = p75_t\n p90 = p90_t\n p99 = p99_t\n filesuffix = 'nsteps'\n color = \"tab:blue\"\n else:\n bins = h_bins\n cdf_tot = cdf_h_tot\n pdf_tot = pdf_h_tot\n mean = mean_h\n sigma = sigma_h\n skew = skew_h\n kurt = kurt_h\n p50 = p50_h\n p75 = p75_h\n p90 = p90_h\n p99 = p99_h\n filesuffix = 'nhits'\n color = \"tab:orange\"\n max_x = bins[np.nonzero(pdf_tot)[0][-1]]\n for col, fct in enumerate([\"PDF\", \"CDF\"]):\n if fct == \"PDF\":\n ydata = pdf_tot\n ylim = (0.0, 1.02 * np.max(pdf_tot))\n elif fct == \"CDF\":\n ydata = cdf_tot\n ylim = (0.0, 1.0)\n\n ax[row, col].plot(bins, ydata, \"-o\", color=color, markersize=2, linewidth=1)\n ax[row, col].set_title(fct + \" of \" + varname)\n ax[row, col].set_xlabel(varname + \" to find the source\")\n ax[row, col].set_xlim((0, max_x))\n ax[row, col].set_ylim(ylim)\n\n if fct == \"PDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"mean = \" + \"{:.3e}\".format(mean), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"std = \" + \"{:.3e}\".format(sigma), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"skew = \" + \"{:.3e}\".format(skew), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"ex. 
kurt = \" + \"{:.3e}\".format(kurt), xy=(0.98, 0.44), **kwargs)\n elif fct == \"CDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"P50 = \" + \"{:.3e}\".format(p50), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"P75 = \" + \"{:.3e}\".format(p75), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"P90 = \" + \"{:.3e}\".format(p90), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"P99 = \" + \"{:.3e}\".format(p99), xy=(0.98, 0.44), **kwargs)\n plt.grid(False)\n plt.figtext(0.5, 0.985, subtitle, fontsize=7, ha=\"center\", va=\"top\")\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_distributions.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # plot mean nb steps vs number of episodes\n number_episodes = range(1, N_runs + 1)\n cum_mean_t_episodes = np.cumsum(mean_t_episodes) / number_episodes\n if N_runs >= 100:\n number_episodes = number_episodes[20:]\n cum_mean_t_episodes = cum_mean_t_episodes[20:]\n fig, ax = plt.subplots()\n ax.plot(number_episodes, cum_mean_t_episodes, color=\"r\")\n ax.set_title(\"Convergence of the mean number of steps\")\n ax.set_xlabel(\"number of episodes\")\n ax.set_ylabel(\"mean number of steps\")\n plt.figtext(0.5, 0.985, subtitle, fontsize=5, ha=\"center\", va=\"top\")\n plt.grid(False)\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_convergence.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # save monitoring information (concatenate episodes files)\n monitoring_episodes_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_episodes.txt\"))\n filenames = [os.path.join(DIR_TMP, str(\"monitoring_episode_\" + str(episode) + \".txt\")) for episode in range(N_runs)]\n with open(monitoring_episodes_file, \"w\") as mfile:\n mfile.write(\"# episode\\thit_init\\tstop_flag\\tboundary_flag\\t\"\n \"p_not_found\\t\\tmean_nsteps\\t\\ttime_elapsed(sec)\\n\")\n for fname in filenames:\n if os.path.isfile(fname):\n with open(fname) as infile:\n mfile.write(infile.read())\n os.remove(fname)\n else:\n print(\"Unexpected: Missing episode file: \" + str(fname))\n\n # clean up tmp dirs\n if len(os.listdir(DIR_TMP)) != 0:\n print(\"Unexpected: The directory '\" + DIR_TMP\n + \"' is not removed, because it should be empty but is not.\")\n else:\n os.rmdir(DIR_TMP)\n if len(os.listdir(PARENT_DIR_TMP)) == 0:\n os.rmdir(PARENT_DIR_TMP)\n\n # summary\n monitoring_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_summary\" + \".txt\"))\n with open(monitoring_file, \"w\") as mfile:\n mfile.write(\"*** initial hit ***\\n\")\n first_hit = np.loadtxt(monitoring_episodes_file, usecols=1, dtype='int')\n hit_max = np.max(first_hit)\n hit_hist, _ = np.histogram(first_hit, bins=np.arange(0.5, hit_max + 1.5), density=True)\n for h in range(1, hit_max + 1):\n mfile.write(\"hit=%1d: %6.2f %% \\n\" % (h, hit_hist[h - 1] * 100))\n\n mfile.write(\"\\n*** stats convergence ***\\n\")\n mfile.write(\"number of episodes simulated : %d\\n\" % N_runs)\n mfile.write(\"standard error of the mean (estimate): %.4e = %5.2f %%\\n\"\n % (std_error_mean, rel_std_error_mean * 100))\n\n stopping_reason = np.loadtxt(monitoring_episodes_file, usecols=2, dtype='int')\n stop_max = np.max(stopping_reason)\n stopping_hist, _ = np.histogram(stopping_reason, bins=np.arange(0.5, stop_max + 1.5), density=True)\n mfile.write(\"\\n*** reason for stopping (1 is success, anything else is failure) ***\\n\")\n for stop in range(1, 
stop_max + 1):\n mfile.write(\"stop=%1d: %6.2f %% \\n\" % (stop, stopping_hist[stop - 1] * 100))\n\n mfile.write(\"\\n*** probability that the source is not found at the end of the episodes ***\\n\")\n p_not_found = np.loadtxt(monitoring_episodes_file, usecols=4)\n p_gtr_stop = p_not_found[p_not_found > STOP_p]\n p_not_found_max = np.max(p_not_found)\n mfile.write(\"criteria (STOP_p): %.5e\\n\" % STOP_p)\n mfile.write(\"max(p) : %.5e\\n\" % p_not_found_max)\n mfile.write(\"number of episodes where p > STOP_p: %7d (%8.4f %%)\\n\"\n % (len(p_gtr_stop), len(p_gtr_stop) / N_runs * 100))\n\n near_boundaries = np.loadtxt(monitoring_episodes_file, usecols=3, dtype='int')\n near_boundaries = np.count_nonzero(near_boundaries)\n mfile.write(\"\\n*** agent near boundaries ***\\n\")\n mfile.write(\"number of episodes where it happened: %7d (%8.4f %%)\\n\"\n % (near_boundaries, near_boundaries / N_runs * 100))\n\n episode_elapsed = np.loadtxt(monitoring_episodes_file, usecols=5)\n mfile.write(\"\\n*** computational cost per episode ***\\n\")\n mfile.write(\"avg elapsed seconds per episode: %.5e\\n\" % (np.mean(episode_elapsed)))\n mfile.write(\"max elapsed seconds per episode: %.5e\\n\" % (np.max(episode_elapsed)))\n\n elapsed_time_0 = (time.monotonic() - start_time_0) / 3600.0\n mfile.write(\"\\n*** computational cost ***\\n\")\n mfile.write(\"N_PARALLEL = %d\\n\" % N_PARALLEL)\n mfile.write(\"total elapsed hours : %.5e\\n\" % elapsed_time_0)\n mfile.write(\"cost in hours = total elapsed time * N_PARALLEL: %.5e\\n\" % (elapsed_time_0 * N_PARALLEL))\n\n print(\">>> Results saved in the directory: \" + DIR_OUTPUTS)\n\n sys.stdout.flush()",
"def compute_models_parallel(data, varying_parameters=None, constant_parameters=None, n_max_processes=None):\n mp_models = MultiprocModelsRunner(MultiprocModelsWorkerLDA, data, varying_parameters, constant_parameters,\n n_max_processes=n_max_processes)\n\n return mp_models.run()",
"def ensemble_models(input_data: str, test_file=None,models=None,\n models_file=None,\n genome_handler_file=None,\n top_n=10,\n trained=True,\n ensemble_method=\"average\",\n batch_size=64, nb_epoch=100, early_stop=None, mod=None,\n max_x_length=50, min_rt=0, max_rt=120, unit=\"s\", out_dir=\"./\", prefix=\"test\"):\n from AutoSeq import GenomeHandler\n\n # print(\"The number of models:\", len(models))\n\n # test data\n X_test = np.empty(1)\n Y_test = np.empty(1)\n\n y_pr = []\n score = []\n\n model_list = dict()\n\n\n if genome_handler_file is not None:\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=max_x_length,\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir)\n model_list['dp_model'] = dict()\n model_list['max_x_length'] = X_train.shape[1]\n model_list['aa'] = out_dir + \"/aa.tsv\"\n print(\"max_x_length: %s\" % (max_x_length))\n # read models from genetic search result configure file\n optimizer_name = dict()\n if models_file is not None:\n models = dict()\n gn = pd.read_csv(models_file)\n select_models = gn.sort_values('Val Accuracy', ascending=True).head(top_n)\n genome_handler = pickle.load(open(genome_handler_file, \"rb\"))\n genome_handler.input_shape = X_train.shape[1:]\n select_models = np.array(select_models.iloc[:, 0:(select_models.shape[1] - 2)])\n for i in range(0, select_models.shape[0]):\n #models[i], optimizer_name = genome_handler.decodeOneHot(select_models[i],return_optimizer=True)\n models[i], optimizer_name[i] = genome_handler.decodeOneHotPlusLSTM(select_models[i], return_optimizer=True)\n\n trained = False\n else:\n print(\"\")\n\n if not trained:\n print(\"Training ...\")\n # For each model, train the model\n for (name, model) in models.items():\n print(\"Train model:\", name)\n # perform sample specific training\n res_map = train_model(input_data=input_data, test_file=test_file, batch_size=batch_size,\n nb_epoch=nb_epoch, early_stop=early_stop, mod=mod,\n max_x_length=max_x_length, min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir, prefix=str(name), model=model,\n optimizer_name=optimizer_name[name])\n\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n res_map[\"model\"].save(model_file_path)\n\n model_list['dp_model'][name] = model_file_path\n\n del res_map\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n else:\n print(\"The models have been trained!\")\n\n\n else:\n\n ## Transfer learning\n with open(models_file, \"r\") as read_file:\n model_list = json.load(read_file)\n\n model_folder = os.path.dirname(models_file)\n aa_file = os.path.basename(model_list['aa'])\n aa_file = model_folder + \"/\" + aa_file\n X_train, Y_train, X_test, Y_test, min_rt, max_rt = data_processing(input_data=input_data, test_file=test_file,\n mod=mod, max_x_length=model_list['max_x_length'],\n min_rt=min_rt, max_rt=max_rt, unit=unit,\n out_dir=out_dir,aa_file=aa_file)\n\n\n new_model_list = dict()\n new_model_list['dp_model'] = dict()\n for (name, dp_model_file) in model_list['dp_model'].items():\n print(\"\\nDeep learning model:\", name)\n # keras model evaluation: loss and accuracy\n # load model\n model_name = os.path.basename(dp_model_file)\n model_full_path = model_folder + \"/\" + model_name\n\n model = load_model(model_full_path)\n #new_model = change_model(model, X_train.shape[1:])\n new_model = model\n\n print(\"Perform transfer learning ...\")\n n_layers = 
len(new_model.layers)\n print(\"The number of layers: %d\" % (n_layers))\n #for layer in new_model.layers:\n # layer_name = str(layer.name)\n # if layer_name.startswith(\"dense\"):\n # break\n # else:\n # layer.trainable = False\n # print(\"layer (frozen:True): %s\" % (layer_name))\n\n new_model.compile(loss='mean_squared_error',\n ## In this case, we cannot change the learning rate.\n optimizer=model.optimizer,\n #optimizer=Adam(lr=0.0001),\n #optimizer=SGD(lr=1e-3, decay=1e-4, momentum=0.9, nesterov=True),\n metrics=['mse', 'mae'])\n my_callbacks = RegCallback(X_train, X_test, Y_train, Y_test, min_rt=min_rt, max_rt=max_rt)\n # Save model\n model_chk_path = out_dir + \"/best_model.hdf5\"\n mcp = ModelCheckpoint(model_chk_path, monitor=\"val_mean_squared_error\", save_best_only=True,\n save_weights_only=False,\n verbose=1, mode='min')\n\n ## monitor training information\n # tbCallBack = callbacks.TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)\n new_model.fit(X_train, Y_train, batch_size=batch_size, epochs=nb_epoch, validation_data=(X_test, Y_test),\n callbacks=[my_callbacks, mcp])\n\n ## get the best model\n best_model = load_model(model_chk_path)\n ## save the model to a file:\n model_file_name = \"model_\" + str(name) + \".h5\"\n model_file_path = out_dir + \"/\" + model_file_name\n best_model.save(model_file_path)\n\n new_model_list['dp_model'][name] = model_file_path\n\n gc.collect()\n K.clear_session()\n tf.reset_default_graph()\n\n new_model_list['max_x_length'] = model_list['max_x_length']\n new_aa_file = out_dir + \"/\" + os.path.basename(model_list['aa'])\n copyfile(aa_file, new_aa_file)\n new_model_list['aa'] = new_aa_file\n\n ## Useful for new data prediction\n new_model_list['min_rt'] = min_rt\n new_model_list['max_rt'] = max_rt\n\n model_list = new_model_list\n\n\n # save model data\n #file_all_models = open(out_dir + \"/all_models.obj\", 'wb')\n #pickle.dump(models, file_all_models)\n #file_all_models.close()\n\n ####################################################################################################################\n print(\"Ensemble learning ...\")\n\n\n para = dict()\n para['min_rt'] = min_rt\n para['max_rt'] = max_rt\n\n ## save result\n model_json = out_dir + \"/model.json\"\n with open(model_json, 'w') as f:\n json.dump(model_list, f)\n\n ## evaluation\n if test_file is not None:\n ensemble_predict(model_json,x=X_test,y=Y_test,para=para, batch_size=batch_size,method=ensemble_method,\n out_dir=out_dir,\n prefix=\"final_eval\")\n\n ####################################################################################################################",
"def train_multiple_models(X_train, Y_train, X_dev, Y_dev, numOutputNodes, iterations, hyperparams, print_cost = True, is_charge = False):\n \n results = {}\n params = {}\n\n try:\n # extract the hyperparameters from one item in hyperparams\n for h in hyperparams:\n learning_rate = h['learning_rate'] \n layer1 = h['layer1']\n minibatch_size = h['minibatch_size']\n beta = h['beta']\n dropout = h['dropout']\n istanh1 = h['istanh1']\n batchnorm = h['batchnorm']\n\n # train the model with the given hyperparameters\n accs, parameters = model(X_train, Y_train, X_dev, Y_dev, numOutputNodes, learning_rate, iterations, minibatch_size, layer1, beta, dropout, istanh1, batchnorm, print_cost, is_charge)\n \n results[frozenset(h.items())] = accs[3] # store the dev test MAPEs in a dictionary\n params[frozenset(h.items())] = parameters # do the same for the learned parameters, to be retrieved at the end\n \n except KeyboardInterrupt: # allow for exiting the for loop in case we want to stop testing all the hyperparameters; to use, press Ctrl+C in terminal\n pass\n \n best = min(results, key=results.get) # finds what setting of hyperparameters had the lowest MAPE\n\n return results, list(best), params[best]",
"def load_models_and_params(\n num_models: int,\n use_templates: bool,\n num_recycles: Optional[int] = None,\n recycle_early_stop_tolerance: Optional[float] = None,\n num_ensemble: int = 1,\n model_order: Optional[List[int]] = None,\n model_suffix: str = \"_ptm\",\n data_dir: Path = Path(\".\"),\n stop_at_score: float = 100,\n rank_by: str = \"auto\",\n max_seq: Optional[int] = None,\n max_extra_seq: Optional[int] = None,\n use_cluster_profile: bool = True,\n use_fuse: bool = True,\n use_bfloat16: bool = True,\n use_dropout: bool = False,\n save_all: bool = False,\n\n) -> List[Tuple[str, model.RunModel, haiku.Params]]:\n\n # Use only two model and later swap params to avoid recompiling\n model_runner_and_params: [Tuple[str, model.RunModel, haiku.Params]] = []\n\n if model_order is None:\n model_order = [1, 2, 3, 4, 5]\n else:\n model_order.sort()\n\n model_build_order = [3, 4, 5, 1, 2]\n if \"multimer\" in model_suffix:\n models_need_compilation = [3]\n else:\n # only models 1,2 use templates\n models_need_compilation = [1, 3] if use_templates else [3]\n \n model_runner_and_params_build_order: [Tuple[str, model.RunModel, haiku.Params]] = []\n model_runner = None\n for model_number in model_build_order:\n if model_number in models_need_compilation:\n\n # get configurations\n model_config = config.model_config(\"model_\" + str(model_number) + model_suffix)\n model_config.model.stop_at_score = float(stop_at_score)\n model_config.model.rank_by = rank_by\n\n # set dropouts\n model_config.model.global_config.eval_dropout = use_dropout\n\n # set bfloat options\n model_config.model.global_config.bfloat16 = use_bfloat16\n \n # set fuse options\n model_config.model.embeddings_and_evoformer.evoformer.triangle_multiplication_incoming.fuse_projection_weights = use_fuse\n model_config.model.embeddings_and_evoformer.evoformer.triangle_multiplication_outgoing.fuse_projection_weights = use_fuse\n if \"multimer\" in model_suffix or model_number in [1,2]:\n model_config.model.embeddings_and_evoformer.template.template_pair_stack.triangle_multiplication_incoming.fuse_projection_weights = use_fuse\n model_config.model.embeddings_and_evoformer.template.template_pair_stack.triangle_multiplication_outgoing.fuse_projection_weights = use_fuse\n \n # set number of sequences options\n if max_seq is not None:\n if \"multimer\" in model_suffix:\n model_config.model.embeddings_and_evoformer.num_msa = max_seq\n else:\n model_config.data.eval.max_msa_clusters = max_seq\n \n if max_extra_seq is not None:\n if \"multimer\" in model_suffix:\n model_config.model.embeddings_and_evoformer.num_extra_msa = max_extra_seq\n else:\n model_config.data.common.max_extra_msa = max_extra_seq\n\n # disable some outputs if not being saved\n if not save_all:\n model_config.model.heads.distogram.weight = 0.0\n model_config.model.heads.masked_msa.weight = 0.0\n model_config.model.heads.experimentally_resolved.weight = 0.0\n\n # set number of recycles and ensembles \n if \"multimer\" in model_suffix:\n if num_recycles is not None:\n model_config.model.num_recycle = num_recycles\n model_config.model.embeddings_and_evoformer.use_cluster_profile = use_cluster_profile\n model_config.model.num_ensemble_eval = num_ensemble\n else:\n if num_recycles is not None:\n model_config.data.common.num_recycle = num_recycles\n model_config.model.num_recycle = num_recycles\n model_config.data.eval.num_ensemble = num_ensemble\n\n\n if recycle_early_stop_tolerance is not None:\n model_config.model.recycle_early_stop_tolerance = recycle_early_stop_tolerance\n \n # 
get model runner\n params = data.get_model_haiku_params(\n model_name=\"model_\" + str(model_number) + model_suffix,\n data_dir=str(data_dir), fuse=use_fuse)\n model_runner = model.RunModel(\n model_config,\n params,\n )\n \n model_name = f\"model_{model_number}\"\n params = data.get_model_haiku_params(\n model_name=model_name + model_suffix, data_dir=str(data_dir), fuse=use_fuse,\n )\n # keep only parameters of compiled model\n params_subset = {}\n for k in model_runner.params.keys():\n params_subset[k] = params[k]\n\n model_runner_and_params_build_order.append(\n (model_name, model_runner, params_subset)\n )\n # reorder model\n for n, model_number in enumerate(model_order):\n if n == num_models:\n break\n model_name = f\"model_{model_number}\"\n for m in model_runner_and_params_build_order:\n if model_name == m[0]:\n model_runner_and_params.append(m)\n break\n return model_runner_and_params",
"def execParallelReducedGAModel(year, region, qntYears=5, times=10):\n observations = list()\n means = list()\n for i in range(qntYears):\n observation = model.loadModelDB(region+'jmaData', year+i)\n observations.append(observation)\n means.append(observation.bins)\n mean = np.mean(means, axis=0)\n for i in range(times):\n model_ = parallelReducedModel.gaModel(\n NGEN=100,\n CXPB=0.9,\n MUTPB=0.1,\n modelOmega=observations,\n year=year +\n qntYears,\n region=region,\n mean=mean)\n model_.executionNumber=i\n model_.year=year+qntYears\n model_.modelName = region+'parallelreducedModel' \n parallelreducedModel_ = model.loadModelDB(region+'ReducedGAModel', year)\n # if (parallelreducedModel_.definitions==None): \n # model.saveModelDB(model_)",
"def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # 
plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()",
"def run_models(\n self,\n normal=True,\n interrupt=True,\n run_start=None,\n state_builder=\"acis\",\n hrc=False,\n ):\n if hrc:\n loads = hrc_loads\n else:\n loads = test_loads\n if normal and \"normal\" in loads:\n for load in loads[\"normal\"]:\n self.run_model(\n load_week=load,\n run_start=run_start,\n state_builder=state_builder,\n )\n if interrupt and \"interrupt\" in loads:\n for load in loads[\"interrupt\"]:\n self.run_model(\n load_week=load,\n interrupt=True,\n run_start=run_start,\n state_builder=state_builder,\n )",
"def train(self): \n start_time = time()\n\n # reset previous results\n self.best_result = pd.DataFrame()\n self.result = pd.DataFrame()\n\n # Generate dictionaries of all posible parameter permutations\n keys, values = zip(*self.params.items())\n self.permutations_dict = [dict(zip(keys, v)) for v in itertools.product(*values)] \n\n # Run through all models in parallel threads\n with Pool(self.thread_cnt) as p:\n result = p.map(self.analyze_model, self.permutations_dict)\n\n\n # wrap up results\n if self.classes_names: # acts as trigger for computation of cms\n for i, dic in enumerate(result):\n dic[\"id\"] = i\n self.cms = [(dic[\"id\"] ,dic.pop(\"cm\")) for dic in result]\n\n self.result = pd.DataFrame(result)\n self.best_result = self.result.iloc[self.result[\"score\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"f1_score\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"recall\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"precision\"].argmax()] # store row with the best score\n end_time = time()\n print(\"Finished evaluation\")\n print(\"Best parameteters found with:\", self.best_parameter_set())\n print(\"score=\", self.best_score())\n #print(\"f1_score=\", self.best_f1_score())\n #print(\"recall_score=\", self.best_recall_score())\n #print(\"precision_score=\", self.best_precision_score())\n print(\"Total evaluation time = {:.2f}s\".format(end_time-start_time))\n\n return self.best_parameter_set(), self.best_score()",
"def execParallelGA(year, region, qntYears=5, times=10):\n observations = list()\n means = list()\n for i in range(qntYears):\n observation = model.loadModelDB(region+'jmaData', year+i)\n aux = model.loadModelFromFile('../../Zona3/realSCwithP_AVR/'\n + region + 'real' + \"_\" + str(year + i) + '.txt') \n aux.values4poisson = [x+1 for x in aux.values4poisson]\n observation.values4poisson = aux.values4poisson\n del aux\n observation.bins = observation.bins.tolist()\n observations.append(observation)\n means.append(observation.bins)\n mean = np.mean(means, axis=0)\n for i in range(times):\n model_=model.model()\n model_ = parallelGAModelP_AVR.gaModel(\n NGEN=10,\n CXPB=0.9,\n MUTPB=0.1,\n modelOmega=observations,\n year=year +\n qntYears,\n region=region,\n mean=mean, \n n_aval=50)\n model_.executionNumber=i\n model_.year=year+qntYears\n model_.modelName = region+'parallelGA' \n parallelGA_ = model.loadModelDB(region+'parallelGA', year)\n # if (parallelGA_.definitions==None): \n # model.saveModelDB(model_)",
"def train_loop(train_per_list, cut_off_list, C_list,\n factors, non_factors, data_path, executable_path, \n trial_factors_list=None): \n if trial_factors_list is None:\n trial_factors_list=[factors]\n sql_table = 'aggregated_ctr' #Data table\n # remove cross terms\n sql_features = list(set(sum([fs.split('*') for fs in factors], [])))\n# factors+=['campaign_id','ad_account_id','pub_account_id', \n# 'campaign_id*site', 'ad*pub_account_id']\n con_dict_dse={'host':'db.lqm.io','db':'dse',\n 'user':'dse','passwd':'dSe@lQm'}\n con_dict_mad={'host':'db.lqm.io','db':'madvertise_production',\n 'user':'readonly','passwd':'z0q909TVZj'}\n \n rtb_flag=[0,1]\n model_type=0\n has_intercept = True # bias term in LR\n tol = 0.00000001\n # NB these filenames are HARDCODED in write_sparse routines\n weights_file = 'train_ais.txt'\n train_file = 'train_svm.txt'\n test_file = 'test_svm.txt'\n probability_file = 'preds_SummModel_py.txt'\n results = []\n for train_per in train_per_list:\n test_per = ( add_hour(train_per[1], 1), add_hour(train_per[1], 3))\n # DATA RANGE IS INCLUSIVE => 00:00-02:00 = 3 HOURS\n train_df=mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, train_per, sql_features, rtb_flag)\n train_df=mysql_lqm.add_features( train_df)\n test_df= mysql_lqm.MySQL_getdata(con_dict_dse,\n sql_table, test_per, sql_features, rtb_flag)\n test_df = mysql_lqm.add_features(test_df)\n \n sc, click_no_click_df, weights, targets \\\n = libLinear_functions.create_sparse_cat(train_df, factors, non_factors)\n\n \n for cut_off in cut_off_list:\n sparse_train_all = libLinear_functions.create_sparse(sc, cut_off, click_no_click_df)\n sparse_test_all = sc.transform(test_df)\n for trial_factors in trial_factors_list:\n trial_factors=trial_factors[:] # copy\n trial_factors.sort(key=lambda x: sc.factors.index(x))\n # libsvm expects the indices in ascending order\n print (trial_factors) \n sparse_train=sc.select_factors(sparse_train_all, trial_factors)\n sparse_test=sc.select_factors(sparse_test_all, trial_factors)\n libLinear_functions.write_sparse(sc, sparse_train, weights, targets, data_path, len(trial_factors))\n libLinear_functions.write_sparse_test(sc, sparse_test, data_path, n_columns_used= len(trial_factors))\n\n\n for C in C_list:\n model_file = \\\n '{start}_{stop}_cut_{cut_off}_C_{C:0.3}.model'.format(\n start=date_name(train_per[0]),\n stop=date_name(train_per[1]),\n cut_off=cut_off, C=C)\n fit(executable_path, data_path, train_file,\n model_file, weights_file, model_type, reg_param=C, tol=tol,\n has_intercept=has_intercept)\n \n \n pCTR = libLinear_functions.predict(executable_path, data_path, test_file,\n model_file, probability_file)\n if type(pCTR) is pd.Series:\n amounts = pd.DataFrame({\n 'no_clicks':test_df['instances' ]-test_df['clicks'],\n 'clicks':test_df['clicks']})\n mean_log_loss, weighted_log_loss = log_loss_weighted(pCTR, amounts)\n results.append([train_per[:],trial_factors[:],\n cut_off,C,amounts.clicks.sum(),amounts.no_clicks.sum(), mean_log_loss])\n results_df=pd.DataFrame(results,columns=['date','features','cutoff','C','clicks','no_clicks','lloss'])\n results_df.to_csv(data_path+'resultsX.txt',index=False, sep='|')\n # what to do if ERROR?\n return results_df, weighted_log_loss",
"def optimize(self, return_teacher_params_bool = False):\n\n gen_batches = self.DATASET.data_stream(self.BATCH_SIZE)\n \n num_complete_batches, leftover = divmod(self.DATASET.num_example['train'], self.BATCH_SIZE)\n\n # number of minibatches per epoch\n num_mini_batches_per_epochs = num_complete_batches + bool(leftover)\n\n # number of total iterations\n num_total_iters = self.NUM_EPOCHS * num_mini_batches_per_epochs\n\n # number of time that the sparisty levels get updated\n num_sparsity_updates = num_total_iters // self.MASK_UPDATE_FREQ \n \n mask_update_limit = num_total_iters - self.MASK_UPDATE_FREQ\n \n if self.SAVE_BOOL == True:\n # save the transferred results in the desinated directory.\n\n trans_model_dir = self.unique_model_dir\n\n# while os.path.exists(trans_model_dir):\n# trans_model_dir = trans_model_dir + '_0'\n \n if not os.path.exists(trans_model_dir):\n os.makedirs(trans_model_dir)\n\n np.save(trans_model_dir + '/param_dict.npy', self.param_dict) \n \n \n\n nt_trans_params_all_sparsities_all_runs = []\n nt_trans_masks_all_sparsities_all_runs = []\n nt_trans_vali_all_sparsities_all_runs = []\n teacher_params_all_sparsities_all_runs = []\n \n \n num_sparisty_levels = len(self.NN_DENSITY_LEVEL_LIST) \n num_runs = len(range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ))\n all_density_all_run_num_total_iters = num_sparisty_levels * num_runs * num_total_iters\n \n \n for nn_density_level in self.NN_DENSITY_LEVEL_LIST: \n \n \n nt_trans_params_all_runs = []\n nt_trans_masks_all_runs = []\n nt_trans_vali_all_runs = []\n teacher_params_all_runs = []\n\n\n for run_index in range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ):\n\n # do logging\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # a string that summarizes the current ntt experiment\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n if self.SAVE_BOOL == True:\n model_dir_density_run = trans_model_dir + '/' + 'density_' + str(round(nn_density_level, 2) ) + '/' + 'run_' + str(run_index) + '/'\n\n os.makedirs(model_dir_density_run)\n \n logging.basicConfig(filename = model_dir_density_run + \"/\" + model_summary_str + \"_log.log\", format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n\n else: \n logging.basicConfig(filename = model_summary_str + \"_log.log\" , format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n \n \n # for different run indices, randomly draw teacher net's parameters\n _, teacher_net_params = self.init_fun(random.PRNGKey(run_index), tuple(self.batch_input_shape))\n \n # the prediction of the teacher net evaluated on validation samples\n vali_teacher_prediction = self.apply_fn(teacher_net_params, self.vali_samples)\n\n vali_teacher_ntk_mat = self.emp_ntk_fn(self.vali_inputs_1, self.vali_inputs_2, teacher_net_params) \n\n # the initial binary mask\n \n if self.PRUNE_METHOD == 'magnitude': \n masks = get_masks_from_jax_params(teacher_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n elif self.PRUNE_METHOD == 'logit_snip':\n logger.info(\"Use logit snip method to get the initial mask\")\n num_examples_snip = 128\n\n# gen_batches_logit_snip = self.DATASET.data_stream(num_examples_snip)\n \n snip_input = self.DATASET.dataset['train']['input'][:num_examples_snip, :]\n \n if self.GLOBAL_PRUNE_BOOL == False:\n logger.warning(\"layerwise sparse net initialized with logit_snip\") \n masks = get_logit_snip_masks(teacher_net_params, 
nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n else:\n raise NotImplementedError(\"not implemented\")\n \n\n # the initial student parameters\n masked_student_net_params = get_sparse_params_filtered_by_masks(teacher_net_params, masks)\n\n # instantiate the optimizer triple \n opt_init, opt_update, get_params = self.OPTIMIZER_WITH_PARAMS\n\n opt_state = opt_init(teacher_net_params) \n\n # one step of NTK transfer\n @jit\n def nt_transfer_step(i, opt_state, x, masks):\n\n # parameters in the current optimizer state\n student_net_params = get_params(opt_state)\n\n # gradients that flow through the binary masks\n masked_g = grad(self.nt_transfer_loss)(student_net_params, masks, teacher_net_params, x, nn_density_level)\n\n return opt_update(i, masked_g, opt_state)\n\n # a list of validation loss\n vali_loss_list = []\n\n # calculate the initial validation loss. \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level)\n\n vali_loss_list.append(vali_loss)\n\n logger.info(\"Before transfer: trans dist %.3f | ntk dist %.3f | targ dist %.3f | l2 pentalty %.3f | nn density %.2f\", vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level)\n itercount = itertools.count()\n\n t = time.time()\n\n # loop through iterations\n for num_iter in range(1, num_total_iters + 1): \n \n # a batch of input data\n batch_xs, _ = next(gen_batches) \n\n # reshape the input to a proper format (2d array for MLP and 3d for CNN)\n batch_xs = batch_xs.reshape(self.batch_input_shape) \n\n # update the optimizer state\n opt_state = nt_transfer_step(next(itercount), opt_state, batch_xs, masks )\n\n\n if num_iter % 100 == 0:\n elapsed_time = time.time() - t\n \n if (num_iter <= 500) and (run_index == self.INIT_RUN_INDEX) and (nn_density_level == self.NN_DENSITY_LEVEL_LIST[0]): \n # estimate the program end time.\n remaining_iter_num = all_density_all_run_num_total_iters - num_iter\n remaining_seconds = elapsed_time * ( remaining_iter_num / 100 )\n expected_end_time = str(datetime.now() + timedelta(seconds = remaining_seconds))\n\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n\n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n # validation loss\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n\n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. 
dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s] | expected finish time %s', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time, expected_end_time)\n t = time.time()\n\n\n if (num_iter % self.MASK_UPDATE_FREQ == 0) and (num_iter < mask_update_limit):\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n \n # update masks\n masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n \n# if self.PRUNE_METHOD == 'logit_snip':\n# logit_snip_batch_xs, _ = next(gen_batches_logit_snip)\n# masks = get_logit_snip_masks(student_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n# else:\n# masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n\n\n \n elapsed_time = time.time() - t\n \n student_net_params = get_params(opt_state) \n \n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n \n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s]', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time )\n \n vali_loss_array = np.array(vali_loss_list)\n\n nt_trans_params_all_runs.append(masked_student_net_params)\n nt_trans_masks_all_runs.append(masks)\n nt_trans_vali_all_runs.append(vali_loss_array)\n teacher_params_all_runs.append(teacher_net_params )\n\n if self.SAVE_BOOL == True:\n\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n teacher_param_fileName = model_dir_density_run + 'teacher_params_' + model_summary_str\n np.save(teacher_param_fileName, teacher_net_params)\n\n student_param_fileName = model_dir_density_run + 'transferred_params_' + model_summary_str\n np.save(student_param_fileName, masked_student_net_params)\n\n mask_fileName = model_dir_density_run + 'transferred_masks_' + model_summary_str\n np.save(mask_fileName, masks)\n\n loss_array_fileName = model_dir_density_run + 'loss_array_' + model_summary_str\n np.save(loss_array_fileName, vali_loss_array)\n \n\n nt_trans_params_all_sparsities_all_runs.append( nt_trans_params_all_runs )\n nt_trans_masks_all_sparsities_all_runs.append( nt_trans_masks_all_runs )\n nt_trans_vali_all_sparsities_all_runs.append( nt_trans_vali_all_runs )\n teacher_params_all_sparsities_all_runs.append( teacher_params_all_runs )\n \n if return_teacher_params_bool:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs, teacher_params_all_sparsities_all_runs\n\n else:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs"
] | [
"0.69426864",
"0.6767892",
"0.6682021",
"0.66687495",
"0.6619903",
"0.6563379",
"0.6513622",
"0.6498593",
"0.64675415",
"0.64250636",
"0.63996357",
"0.6380449",
"0.63052523",
"0.62816036",
"0.6271001",
"0.6245444",
"0.6239286",
"0.62295204",
"0.6218943",
"0.620826",
"0.61959547",
"0.61949235",
"0.6194836",
"0.6174871",
"0.6172148",
"0.61535215",
"0.6143405",
"0.6131232",
"0.6119945",
"0.6091596"
] | 0.67882544 | 1 |
Return set containing all adjective synonyms of given word. | def getAdjectives(self, word):
adjectives = set()
for synset in wordnet.synsets(word):
if synset.pos == ADJ:
for synonym in synset.lemma_names:
adjectives.add(synonym)
return adjectives | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_synonyms(word):\n synsets = [];\n syns = wn.synsets(word)\n for ss in syns:\n lemmas = []\n for l in ss.lemmas():\n lemma = { \"name\": l.name(), \"related_forms\": [] }\n for x in l.derivationally_related_forms():\n lemma['related_forms'].append(x.name())\n lemmas.append(lemma)\n synsets.append({\n \"lemmas\": lemmas,\n \"d\": ss.definition(),\n \"pos\": ss.pos(),\n \"id\": ss.name()\n })\n return synsets",
"def get_synonyms(word):\n try:\n query = {'word': word}\n cursor = database['Words'].find(query)\n synonym_set = set()\n if cursor is None:\n return None\n for document in cursor:\n if len(document['synsets']) > 0:\n for key, synset in document['synsets'].items():\n synonyms = synset['synonyms'].split(\",\")\n for synonym in synonyms:\n synonym_set.add(synonym.strip())\n if len(synonym_set) == 0:\n return None\n return synonym_set\n except Exception as e:\n print(e)\n return None",
"def get_synonyms(word):\n syns_sets = wordnet.synsets(word)\n if syns_sets:\n # if there's synonyms, take the first set\n desired = syns_sets[0].lemma_names()\n desired = [the_name.replace(\"_\", \" \") for the_name in desired]\n return desired\n\n else:\n return False",
"def getSynonyms(self, wordSet):\n synonyms = {}\n for w in wordSet:\n # find synonyms\n synsets = wn.synsets(w, pos=wn.NOUN)\n if len(synsets) > 0: \n # there are noun senses for this word, get synonyms\n synonyms[w] = set([synset.name for synset in synsets])\n \n return synonyms",
"def weed_out_synonyms(word, potential_synonyms):\n real_synonyms = set()\n for synonym in potential_synonyms:\n max_distance = abs(len(word) - len(synonym))\n abbr_len = min(len(word), len(synonym))\n forgiveness = round(1/7 * abbr_len)\n if lev.distance(word, synonym) <= max_distance + forgiveness:\n # Then it's a synonym!\n real_synonyms.add(synonym)\n return real_synonyms",
"def get_synsets(word: str, category=wn.NOUN) -> List:\n return wn.synsets(word, pos=category)",
"def get_synset(self, word):\n #word = word.decode('utf-8')\n if word in self.SYNSET_CACHE:\n return self.SYNSET_CACHE[word]\n else:\n synset = set(wn.synsets(word))\n self.SYNSET_CACHE[word] = synset\n return synset",
"def meronym(self, sense=0):\n s = self._synset(self.text, sense=sense)\n\n if not s:\n return []\n\n return s.member_meronyms()",
"def get_from_word_edges(self, word: str) -> Set[str]:\n all_edges = set()\n\n for def_dict in self.word_dictionary[word]:\n processed_def = self.get_filtered_set_tokens(\n definition=def_dict[\"definition\"]\n )\n\n if self.drop_self_cycles:\n if word not in processed_def:\n all_edges = all_edges.union(processed_def)\n else:\n all_edges = all_edges.union(processed_def)\n\n return all_edges",
"def get_synsets_rt(word: str) -> List:\n return rt.categories(word)",
"def antonym(self, sense=0):\n s = self._synset(self.text, sense=sense)\n\n if not s:\n return []\n\n lemmas = s.lemmas()\n\n result = list()\n\n for lemma in lemmas:\n if lemma.antonyms():\n result.append(lemma.antonyms()[0].name())\n\n return result if result else []",
"def get_hyponyms(word):\n syn = wn.synsets(word)\n hnyms = []\n for h in syn[0].hyponyms():\n print h\n hnyms.append({\n \"lemmas\": h.lemma_names(),\n \"d\": h.definition(),\n \"pos\": h.pos(),\n \"id\": h.name()\n })\n return hnyms",
"def word_forms(self, word):\n result = set()\n for dic_name in self.dictionaries.keys():\n for vector in self.dictionaries[dic_name].word_forms(word):\n result.add(tuple(vector))\n return filter(lambda x: len(x), result)",
"def get_synsets(words):\n synsets = {}\n for word in words:\n for syn in wn.synsets(word):\n synsets[syn.name] = tuple([lemma.name for lemma in syn.lemmas])\n return synsets",
"def get_synsets(word: str, pos: Optional[str] = None):\n return wn.synsets(word, pos=pos)",
"def get_possible_synsets(self, word, pos=None):\n candidates = wordnet.synsets(word)\n return [self._synset_dict[SWNEntry.make_unique_key(synset.pos, synset.offset)] for synset in\n candidates]",
"def synonyms(self) -> List[str]:\n return self._synonyms",
"def get_speech(self, word):\n posses = ['verb', 'noun', 'adj', 'adv', 'as in', 'conjunction']\n speeches = []\n\n def get_all_synonyms(word1, speech1):\n for w in Word(word1).synonyms('all', partOfSpeech=speech1):\n if not w == []:\n return w\n return []\n\n def empty_tree(input_list):\n # print(input_list)\n if type(input_list) == type([]):\n for l in input_list:\n if not empty_tree(l):\n return False\n return True\n else:\n return False\n\n for poss in posses:\n if not empty_tree(get_all_synonyms(word, poss)):\n speeches.append(poss)\n return speeches",
"def _get_words(self, sentence):\n _uniq_words = set()\n for word in sentence.split():\n word = normed_word(re.sub(\"\\W\", \"\", word)).lower()\n _uniq_words.add(word)\n return _uniq_words",
"def get_words_from_sysets(synset):\n synlist = []\n for s in synset:\n syns = s.lemmas()[0].name()\n synlist.append(syns)\n return synlist",
"def word_base(self, word):\n return list(set(map(lambda x: self.id_base(x), self.id(word))))",
"def generate_wordnet_candidates(self, word):\n candidates = set()\n if self.check_if_replacable(word):\n for synset in wordnet.synsets(word):\n for lemma in synset.lemmas():\n converted = convert(lemma.name().lower(), word)\n if converted != word and converted != None:\n try:\n w1 = wordnet.synsets(word)[0]\n w2 = wordnet.synsets(converted)[0]\n similarity = w1.wup_similarity(w2)\n if isinstance(similarity,float) and w1.wup_similarity(w2) >0.6 :\n candidates.add(converted)\n except:\n pass\n # print(\"candidate\",word,candidates)\n return candidates",
"def get_searched_single_word_synonym(self, content, stop_words):\n content = re.sub(r\"[^\\w\\s]\", \"\", content)\n content = re.sub(r\"[0-9]+\", \"\", content)\n new_sent = [\n Word(word).singularize()\n for word in content.lower().split()\n if Word(word).singularize() not in stop_words\n ]\n new_sent = [\n Word(word).singularize()\n for word in new_sent\n if Word(word).singularize() in set(self.searched_words)\n ]\n\n syn = []\n for w in new_sent:\n for s in wordnet.synsets(w):\n for lemma in s.lemmas():\n if len(syn) == SYNONYM_LIMIT:\n break\n syn.append(lemma.name())\n syn = list(dict.fromkeys(syn)) #\n syn = \" \".join(syn)\n return syn",
"async def find_similar_words(self, word: str) -> List[str]:\n sorted_word = \"\".join(sorted(word))\n query = {'permutation_similarity_index': sorted_word}\n return [doc['word'] async for doc in self._db_client.find(self._db_name,\n self._db_collection_name,\n query)]",
"def synonyms(self) -> List[str]:\n return pulumi.get(self, \"synonyms\")",
"def get_holonyms(synset):\n return set(\n synset.member_holonyms() + synset.substance_holonyms() + synset.part_holonyms()\n )",
"def allWords(self):\n words = set([])\n for token in self.tokens:\n words.add(token.text)\n return words",
"def edit_distanceN(self, word: str) -> set:\n #FIXME\n ret_val = set(word)\n\n for _ in range(self.max_edit_distance):\n for val in ret_val:\n ret_val = ret_val | self.edit_distance1(val)\n\n return list(ret_val)",
"def synonyms(self):\n\n return [synonym[\"name\"] for synonym in self._get_synonym_json()]",
"def words(self):\n # BEGIN Question 2\n x= str(self.text).lower()\n # m = str(x).translate(string.punctuation)\n y= x.split()\n\n y = set([''.join(c for c in s if c not in string.punctuation) for s in y])\n y = [s for s in y if s]\n while(len(y) != 0):\n self.word_set.append(min(y))\n y.remove(min(y))\n\n\n return self.word_set\n # END Question 2"
] | [
"0.80280876",
"0.7371191",
"0.727186",
"0.71608585",
"0.699722",
"0.6978515",
"0.69227",
"0.67186886",
"0.66606176",
"0.658505",
"0.6563347",
"0.65600806",
"0.65338475",
"0.6508433",
"0.64404345",
"0.6423368",
"0.64093894",
"0.6374424",
"0.63694614",
"0.62788695",
"0.6244392",
"0.62255067",
"0.6169588",
"0.61027944",
"0.6092803",
"0.60505325",
"0.6031157",
"0.602635",
"0.6023096",
"0.6002172"
] | 0.78257185 | 1 |
Week count returns the number of calendar weeks in a year. Most years have 52 weeks of course, but if the year begins on a Thursday or a leap year begins on a Wednesday then it has 53. | def WeekCount(year):
weekday = DayOfWeek(year, 1, 1)
if weekday == 4:
return 53
elif weekday == 3 and LeapYear(year):
return 53
else:
return 52 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weeks_per_year(year):\n return week_from_date(date(year, 12, 31))",
"def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.",
"def getWeeks(year):\n url = \"http://www.boxofficemojo.com/weekend/?yr=%d\" % year\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n chart = soup.find(border=\"0\", cellspacing=\"1\", cellpadding=\"5\")\n data = parseTable(chart)\n weeks = [int(row[-1]) for row in data[1:]]\n return weeks",
"def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year",
"def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1",
"def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)",
"def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7",
"def get_week_from_date(date) -> int:\n month, year = date.month, date.year\n if month < 4:\n year -= 1\n ld = _labor_day(year)\n wk1_wed = ld + timedelta(days=2)\n days_since = (date - wk1_wed).days\n weeks_since = days_since / 7.\n week = math.floor(weeks_since) + 1\n return int(week)",
"def getCurrentWeek(self):\n return self.wcount % 48",
"def countSundaysFirstOfMonth(startYear, endYear):\n\tdayOfWeek = 1\n\tnumSundays = 0\n\tfor year in xrange(1900, endYear + 1):\n\t\tfor month in xrange(1, 13):\n\t\t\tif year >= startYear and dayOfWeek == 0:\n\t\t\t\tnumSundays += 1\n\t\t\tdayOfWeek += numDays(month, year)\n\t\t\tdayOfWeek %= 7\n\treturn numSundays",
"def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass",
"def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day",
"def current_week_number(date=datetime.datetime.now()):\n return int(date.strftime(\"%W\"))",
"def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum",
"def CONST_WEEK_TIMESTAMP() -> int:\n return 604800",
"def test_weeks(self):\n d = datetime(2014, 1, 29)\n eq_(week_start(d), datetime(2014, 1, 27, 0, 0, 0))\n eq_(week_end(d), datetime(2014, 2, 2, 23, 59, 59))",
"def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]",
"def GetWeekDay(self):\n if self.day is None:\n if self.week:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n self.week,\n None)\n elif self.month is None:\n if self.year is None:\n return (self.century, None, None, None, None)\n else:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n None,\n None)\n else:\n raise DateTimeError(\"can't get week day with month precision\")\n else:\n century, year, ordinalDay = self.GetOrdinalDay()\n year += century * 100\n if LeapYear(year):\n yearLength = 366\n else:\n yearLength = 365\n weekday = DayOfWeek(year, self.month, self.day)\n thursday = ordinalDay + 4 - weekday\n if thursday < 1:\n # Thursday this week was actually last year, and so we are\n # part of the last calendar week of last year too.\n # may return year==0\n year -= 1\n week = WeekCount(year)\n elif thursday > yearLength:\n # Thursday this week is actually next year, and so we are\n # part of the first calendar week of next year too.\n # may return century=100\n year += 1\n week = 1\n else:\n # We are part of this year, but which week?\t Jan 4th is always\n # part of the first week of the year, so we calculate the ordinal\n # value of the Monay that began that week\n yearBase = 5 - DayOfWeek(year, 1, 4)\n week = (ordinalDay - yearBase) // 7 + 1\n return year // 100, (year % 100) // 10, (year % 10), week, weekday",
"def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1",
"def days_to_weeks(list_of_days):\n all_weeks = []\n for day in list_of_days:\n that_week = day.isocalendar()\n if (\n len(all_weeks) == 0\n or all_weeks[-1].year != that_week.year\n or all_weeks[-1].week != that_week.week\n ):\n all_weeks.append(that_week)\n return list(map(lambda iso: \"{}-{}\".format(iso.year, iso.week), all_weeks))",
"def weeks_of_the_month(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"weeks_of_the_month\")",
"def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]",
"def getWeeksToExpire(self):\n cert = self.getLatestValidCertification()\n if cert == None:\n return ''\n date = cert.getValidTo().asdatetime().date();\n return date - date.today()",
"def gen_weeklyFrequency(self):\n\n if len(self.fields) == 0:\n return None\n\n if self.validator.validate(self.fields) == False:\n return None\n\n weeklyFrequency = 0\n dayFields = ['day1','day2','day3','day4','day5','day6','day7']\n for dayField in dayFields:\n if dayField in self.fields:\n if self.fields[dayField] == True:\n weeklyFrequency += 1\n\n return weeklyFrequency",
"def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]",
"def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1",
"def weeks_of_the_month(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'WeekNumber']]]]]:\n return pulumi.get(self, \"weeks_of_the_month\")",
"def get_num_of_tracks(year):\n track_results = sp.search('year:' + str(year), type='track', limit=1, offset=0)\n return track_results['tracks']['total']",
"def current_year_and_week():\n _update_week_number()\n return _cur_year, _cur_week",
"def num_years():\n years = movies['Year']\n return ('num_years', years.nunique())"
] | [
"0.8074389",
"0.7601402",
"0.6511341",
"0.6500138",
"0.6401162",
"0.63772",
"0.6248234",
"0.61319476",
"0.5956608",
"0.5922138",
"0.5748826",
"0.5710432",
"0.5697062",
"0.56096387",
"0.5576821",
"0.5569819",
"0.55226177",
"0.5512972",
"0.54710263",
"0.5469762",
"0.54560375",
"0.54327005",
"0.5429634",
"0.54134023",
"0.53875846",
"0.5382343",
"0.5374987",
"0.53603184",
"0.53561276",
"0.5354085"
] | 0.8738448 | 0 |
Returns a tuple of (century,year,ordinalDay) | def GetOrdinalDay(self):
if self.day is None:
if self.month is None and self.week is None:
return (self.century, self.year, None)
else:
raise DateTimeError(
"can't get ordinal day with month or week precision")
if self.LeapYear():
mSizes = MONTH_SIZES_LEAPYEAR
else:
mSizes = MONTH_SIZES
ordinalDay = self.day
for m in mSizes[:self.month - 1]:
ordinalDay = ordinalDay + m
return (self.century, self.year, ordinalDay) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_years_eve(year):\n return (year, DEC, 31)",
"def century(year):\r\n century = 0\r\n last_digit = year % 10\r\n if year >= 1 and last_digit == 0:\r\n century = year // 100 \r\n else:\r\n century = year // 100 + 1\r\n return century",
"def dia_independencia(year):\n return year, SEP, 16",
"def decade(year):\r\n # get the first 3 digits of the year\r\n partial = (year[0]//10).item()\r\n # add a 0 to the end, return as decade\r\n return partial * 10",
"def Dooms_day(year):\r\n day = (year % 100 + (year % 100)//4 + Anchor_day(year)) % 7\r\n return day",
"def get_year(self):\n\n # First we get the first 8 bits stored in the yqr register\n year_bcd = self.__read_register(_REGISTER_YEAR)\n\n # Then we extract the digits and the tens\n tens = (year_bcd & 0xF0) >> 4 # 0xF0 = 0b11110000\n digit = (year_bcd & 0x0F) # 0x0F = 0b00001111\n\n # We return year value shifted in range [1970..2129]\n return (10 * (tens) + digit) + 1970",
"def year(self) -> int:\n if self.is_old_style:\n yy = int(self.split('/', 1)[1][0:2])\n else:\n yy = int(self[:2])\n if yy > 90:\n return 1900 + yy\n return 2000 + yy",
"def indigenous_peoples_day(year, country='usa'):\n if country == 'usa':\n return nth_day_of_month(2, MON, OCT, year)\n\n return (year, OCT, 12)",
"def _ord2ymd(n):\n n -= 1\n n400, n = divmod(n, _DI400Y)\n year = n400 * 400 + 1 # ..., -399, 1, 401, ...\n n100, n = divmod(n, _DI100Y)\n n4, n = divmod(n, _DI4Y)\n n1, n = divmod(n, 365)\n year += n100 * 100 + n4 * 4 + n1\n if n1 == 4 or n100 == 4:\n assert n == 0\n return year - 1, 12, 31\n leapyear = n1 == 3 and (n4 != 24 or n100 == 3)\n assert leapyear == _is_leap(year)\n month = (n + 50) >> 5\n preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)\n if preceding > n: # estimate is too large\n month -= 1\n preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)\n n -= preceding\n assert 0 <= n < _days_in_month(year, month)\n return year, month, n + 1",
"def test_20th_century(self):\r\n season = \"1989-90\"\r\n res = get_end_year(season)\r\n assert res == 1990",
"def days_to_years(datum):\n return datum/DAYS_PER_YEAR",
"def get_incidents(year):\n print 'Downloading year: %s' % year\n \n # Build URL from year.\n # If the year is 2007-2011, download the XML straight from ... my S3 account.\n if year in range(2007, 2011):\n url = 'http://wapo-projects.s3.amazonaws.com/techathon/scraperwiki/xml/crime_incidents_%s_plain.xml' % year\n \n # If the year is 2012, get it from the DC government. This is NOT the whole year.\n if year == 2012:\n url = 'http://data.octo.dc.gov/feeds/crime_incidents/crime_incidents_current.xml' \n \n # Request the data using the Requests library.\n request = requests.get(url)\n unzipped_request = request.content\n \n # Parse the XML using lxml's BeautifulSoup parser.\n crime_xml_parsed = fromstring(unzipped_request)\n\n # Return the parsed Element() objects by grabbing the xpath for <entry> tags.\n return crime_xml_parsed.xpath('//entry')",
"def get_incidents(year):\n print 'Downloading year: %s' % year\n \n # Build URL from year.\n # If the year is 2007-2011, download the XML straight from ... my S3 account.\n if year in range(2007, 2011):\n url = 'http://wapo-projects.s3.amazonaws.com/techathon/scraperwiki/xml/crime_incidents_%s_plain.xml' % year\n \n # If the year is 2012, get it from the DC government. This is NOT the whole year.\n if year == 2012:\n url = 'http://data.octo.dc.gov/feeds/crime_incidents/crime_incidents_current.xml' \n \n # Request the data using the Requests library.\n request = requests.get(url)\n unzipped_request = request.content\n \n # Parse the XML using lxml's BeautifulSoup parser.\n crime_xml_parsed = fromstring(unzipped_request)\n\n # Return the parsed Element() objects by grabbing the xpath for <entry> tags.\n return crime_xml_parsed.xpath('//entry')",
"def dia_constitucion(year, observed=True):\n if observed:\n return nth_day_of_month(1, MON, FEB, year)\n\n return (year, FEB, 5)",
"def Anchor_day(year):\r\n day = (5 * ((year // 100) % 4) + 2) % 7\r\n return day",
"def year(self):\n return self._years",
"def build_date():\n def r(x):\n return tuple(ord(i) for i in x)\n return r",
"def day_of_year(month, day):\n try:\n # 2003 is an arbitrary non-leap year.\n return date(2003, month, day).timetuple().tm_yday\n except ValueError:\n if month == 2 and day == 29:\n return 60\n else:\n raise",
"def day_of_year(self):\n return int(self.date.strftime('%j'))",
"def get_year(x):\n return x[\"SALE DATE\"].year",
"def date_year(date):\n return date.year",
"def year_data(self,year):\n idx = [i for i in range(self.dates.shape[0]) if self.dates[i].year == year]\n year_dates = self.dates[idx]\n year_dc = self.dc[idx]\n return year_dates, year_dc",
"def getagefromyear(year=None):\n if year is None:\n print(\"Please enter the year to assign class to them\")\n try:\n t = datetime.datetime.today()\n b = datetime.datetime.strptime(str(year), '%Y')\n a = (t - b).days / 365\n a = int(a)\n if (a < 10) or (a > 80):\n a = None\n except:\n a = None\n return a",
"def test_21st_century(self):\r\n season = \"2019-20\"\r\n res = get_end_year(season)\r\n assert res == 2020",
"def get_year(self, grab):\n return int(\n grab.doc.select(\n '//time[@itemprop=\"releaseDate\"]'\n ).attr('datetime')\n )",
"def independence_day(year, observed=None):\n day = 4\n\n if observed:\n weekday = calendar.weekday(year, JUL, 4)\n if weekday == SAT:\n day = 3\n if weekday == SUN:\n day = 5\n\n return (year, JUL, day)",
"def christmas(year, observed=None):\n day = 25\n if observed:\n weekday = calendar.weekday(year, DEC, 25)\n if weekday == SAT:\n day = 24\n if weekday == SUN:\n day = 26\n return (year, DEC, day)",
"def make_year(res):\n return str(res['issued']['date-parts'][0][0])",
"def day_of_year(date=datetime.datetime.now()):\n return date.strftime(\"Its the %j day of %Y'th year.\")",
"def yearlyDepreciation():\n return .10"
] | [
"0.68773776",
"0.6542961",
"0.6282669",
"0.61359525",
"0.60769606",
"0.6039188",
"0.59386253",
"0.59348583",
"0.5828855",
"0.5789047",
"0.57889295",
"0.57671714",
"0.57671714",
"0.5734408",
"0.5720547",
"0.5702465",
"0.56704795",
"0.56505144",
"0.562306",
"0.5620994",
"0.56198233",
"0.56089956",
"0.5597281",
"0.5578973",
"0.55674195",
"0.5566045",
"0.5556013",
"0.55434084",
"0.5542722",
"0.5532452"
] | 0.71418583 | 0 |
Returns a tuple of (century,decade,year,week,weekday), note that Monday is 1 and Sunday is 7 | def GetWeekDay(self):
if self.day is None:
if self.week:
return (
self.century,
self.year //
10,
self.year %
10,
self.week,
None)
elif self.month is None:
if self.year is None:
return (self.century, None, None, None, None)
else:
return (
self.century,
self.year //
10,
self.year %
10,
None,
None)
else:
raise DateTimeError("can't get week day with month precision")
else:
century, year, ordinalDay = self.GetOrdinalDay()
year += century * 100
if LeapYear(year):
yearLength = 366
else:
yearLength = 365
weekday = DayOfWeek(year, self.month, self.day)
thursday = ordinalDay + 4 - weekday
if thursday < 1:
# Thursday this week was actually last year, and so we are
# part of the last calendar week of last year too.
# may return year==0
year -= 1
week = WeekCount(year)
elif thursday > yearLength:
# Thursday this week is actually next year, and so we are
# part of the first calendar week of next year too.
# may return century=100
year += 1
week = 1
else:
# We are part of this year, but which week? Jan 4th is always
# part of the first week of the year, so we calculate the ordinal
# value of the Monay that began that week
yearBase = 5 - DayOfWeek(year, 1, 4)
week = (ordinalDay - yearBase) // 7 + 1
return year // 100, (year % 100) // 10, (year % 10), week, weekday | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def current_year_and_week():\n _update_week_number()\n return _cur_year, _cur_week",
"def christmas(year, observed=None):\n day = 25\n if observed:\n weekday = calendar.weekday(year, DEC, 25)\n if weekday == SAT:\n day = 24\n if weekday == SUN:\n day = 26\n return (year, DEC, day)",
"def GetOrdinalDay(self):\n if self.day is None:\n if self.month is None and self.week is None:\n return (self.century, self.year, None)\n else:\n raise DateTimeError(\n \"can't get ordinal day with month or week precision\")\n if self.LeapYear():\n mSizes = MONTH_SIZES_LEAPYEAR\n else:\n mSizes = MONTH_SIZES\n ordinalDay = self.day\n for m in mSizes[:self.month - 1]:\n ordinalDay = ordinalDay + m\n return (self.century, self.year, ordinalDay)",
"def get_yearweek(yearweekstr: str) -> tuple:\n return tuple(map(int, yearweekstr.split('-W')))",
"def new_years_eve(year):\n return (year, DEC, 31)",
"def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]",
"def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)",
"def dia_independencia(year):\n return year, SEP, 16",
"def Dooms_day(year):\r\n day = (year % 100 + (year % 100)//4 + Anchor_day(year)) % 7\r\n return day",
"def decade(year):\r\n # get the first 3 digits of the year\r\n partial = (year[0]//10).item()\r\n # add a 0 to the end, return as decade\r\n return partial * 10",
"def indigenous_peoples_day(year, country='usa'):\n if country == 'usa':\n return nth_day_of_month(2, MON, OCT, year)\n\n return (year, OCT, 12)",
"def WeekCount(year):\n weekday = DayOfWeek(year, 1, 1)\n if weekday == 4:\n return 53\n elif weekday == 3 and LeapYear(year):\n return 53\n else:\n return 52",
"def independence_day(year, observed=None):\n day = 4\n\n if observed:\n weekday = calendar.weekday(year, JUL, 4)\n if weekday == SAT:\n day = 3\n if weekday == SUN:\n day = 5\n\n return (year, JUL, day)",
"def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day",
"def dia_constitucion(year, observed=True):\n if observed:\n return nth_day_of_month(1, MON, FEB, year)\n\n return (year, FEB, 5)",
"def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1",
"def century(year):\r\n century = 0\r\n last_digit = year % 10\r\n if year >= 1 and last_digit == 0:\r\n century = year // 100 \r\n else:\r\n century = year // 100 + 1\r\n return century",
"def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))",
"def getWeeks(year):\n url = \"http://www.boxofficemojo.com/weekend/?yr=%d\" % year\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n chart = soup.find(border=\"0\", cellspacing=\"1\", cellpadding=\"5\")\n data = parseTable(chart)\n weeks = [int(row[-1]) for row in data[1:]]\n return weeks",
"def twenty_seventeen():\n return 2017",
"def Count_Friday(century):\r\n year = century * 100\r\n Day_on_13 = [Day_of_week(13, s, t) for s in xrange(1, 13) \\\r\n for t in xrange(year, year + 100)]\r\n return Day_on_13.count(\"Friday\")",
"def getYears():\n url = \"http://www.boxofficemojo.com/weekend/\"\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n year_header = soup.find_all(name = \"b\")[1]\n year_elems = year_header.find_all([\"a\", \"font\"])\n years = [int(year.get_text()) for year in year_elems]\n return years",
"def doomsday(y):",
"def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day",
"def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]",
"def get_hebrew_independence_day(self, jewish_year):\n month = 2\n day = 5\n original_hebrew_independence_date = HebrewDate(jewish_year, month, day)\n if original_hebrew_independence_date.weekday() == 6:\n day = 4\n if original_hebrew_independence_date.weekday() == 7:\n day = 3\n if original_hebrew_independence_date.weekday() == 2:\n day = 6\n return [\n (HebrewDate(jewish_year, month, day - 1), \"Independence Day Eve\"),\n (HebrewDate(jewish_year, month, day), \"Independence Day\")\n ]",
"def sadwara_fromordinal(cls, ordinal):\n return (cls.day_fromordinal(ordinal) % 6) + 1",
"def get_teaching_framework():\n\t# maximum number of hours students spend at the school, including lunch\n\t# break and daycare\n\tmax_hours = 9\n\t# number of days in the week\n\tN_weekdays = 7\n\t# days on which no teaching takes place\n\tweekend_days = [6, 7]\n\n\treturn max_hours, N_weekdays, weekend_days",
"def get_weekday():\n try:\n day = config.getint(\"threadbot\", \"debug_day\")\n except ConfigParser.NoOptionError:\n d = datetime.date.today()\n day = d.weekday()\n sort_by_new = False\n\n # 0 / Monday / Feedback thread\n # 1 / Tuesday / How do I make this sound thread\n # 2 / Wednesday / There are no stupid questions thread\n # 3 / Thursday / Marketplace thread\n dayname = \"waffles\"\n if day == 0:\n dayname = \"monday\"\n sort_by_new = True\n elif day == 1:\n dayname = \"tuesday\"\n sort_by_new = True\n elif day == 2:\n dayname = \"wednesday\"\n sort_by_new = True\n elif day == 3:\n dayname = \"thursday\"\n sort_by_new = False\n else:\n sys.exit(1) # woo inelegance\n\n return dayname, sort_by_new",
"def get_father_days(year=2020):\n days_to_countries = defaultdict(list)\n\n _parse_father_days_per_country(year,days_to_countries)\n _parse_recurring_father_days(days_to_countries)\n\n return days_to_countries"
] | [
"0.6633035",
"0.633678",
"0.62908995",
"0.5879685",
"0.57920355",
"0.5673606",
"0.56580865",
"0.56568646",
"0.5642385",
"0.56214803",
"0.5618055",
"0.56165",
"0.56031454",
"0.5583499",
"0.552413",
"0.5509516",
"0.54637563",
"0.5458758",
"0.54468614",
"0.54213107",
"0.53691626",
"0.53685623",
"0.5353163",
"0.53333354",
"0.5293006",
"0.52788055",
"0.52457356",
"0.5243818",
"0.52415735",
"0.5231092"
] | 0.6420974 | 1 |
UpdateStructTime changes the year, month, date, wday and ydat fields of t, a struct_time, to match the values in this date. | def UpdateStructTime(self, t):
if not self.Complete():
raise DateTimeError("UpdateStructTime requires complete date")
t[0] = self.century * 100 + self.year
t[1] = self.month
t[2] = self.day
t[6] = self.GetWeekDay()[4] - 1
t[7] = self.GetOrdinalDay()[2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def UpdateStructTime(self, t):\n self.date.UpdateStructTime(t)\n self.time.UpdateStructTime(t)",
"def UpdateStructTime(self, t):\n if not self.Complete():\n raise DateTimeError(\"UpdateStructTime requires a complete time\")\n t[3] = self.hour\n t[4] = self.minute\n t[5] = self.second\n t[8] = -1",
"def __init__(self, struct_time):\r\n\t\tself.struct_time = struct_time\r\n\t\tself.year = struct_time[0]\r\n\t\tself.mon = self.set_month(struct_time[1])\r\n\t\tself.day = struct_time[2]\r\n\t\tself.hour = struct_time[3]\r\n\t\tself.min = struct_time[4]\r\n\t\tself.wday = self.set_week_day(struct_time[6])\r\n\t\tself.day_or_night = self.set_day_state(struct_time[8])",
"def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second",
"def _marshal_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour=0,\n tm_min=0,\n tm_sec=0,\n tm_wday=-1,\n tm_yday=-1,\n tm_isdst=-1,\n ):\n _struct_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour,\n tm_min,\n tm_sec,\n tm_wday,\n tm_yday,\n tm_isdst,\n )",
"def _update_time(self):\n if self.time.year != datetime.datetime.now().year or self._this_year is None:\n self._this_year = _data.this_year(self.df, 'case_timestamp')\n if self.time.month != datetime.datetime.now().month or self._this_month is None:\n self._this_month = _data.this_month(self.df, 'case_timestamp')\n if self.time.day != datetime.datetime.now().day or self._today is None:\n self._today = _data.today(self.df, 'case_timestamp')\n self.time = datetime.datetime.now()",
"def test_update_dt(self):\n result = self.test_client.update_dt\n\n assert result == \"2020-02-18 01:54:13\"",
"def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])",
"def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))",
"def _convert_struct_time_to_dt(stime):\n return date.fromtimestamp(mktime(stime))",
"def update(self, dt):\n for obj in self.objects:\n obj.update(dt)",
"def svn_info_t_prop_time_set(svn_info_t_self, apr_time_t_prop_time): # real signature unknown; restored from __doc__\n pass",
"async def put_date_time( # pylint: disable=inconsistent-return-statements\n self, complex_body: JSON, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def update(self, dt):\n pass",
"def _convert_struct_time_to_dt(stime):\n\n dt = datetime.datetime.fromtimestamp(mktime(stime))\n\n return dt.date()",
"def update_timeval(self):\n self.timeval = self.get_timeval()",
"async def put_date_time( # pylint: disable=inconsistent-return-statements\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> None:",
"def setTime(self, timeObj, day=None):\n\n # override day if it's None\n if not day:\n day = getDayFromNum(timeObj.weekday())\n\n self._fileCache[day][\"time-hr\"] = timeObj.hour\n self._fileCache[day][\"time-min\"] = timeObj.minute\n self._updateConfig()",
"def update_time(self, update_time):\n\n self._update_time = update_time",
"def update(self, d_t, **kwargs):\n # '_update' depends on the 'backend'\n self._update(d_t * self.Time_Scale, **kwargs)\n for func in self.callbacks:\n func()",
"def update_structure(self, course_key, structure):\n self._clear_cache(structure['_id'])\n bulk_write_record = self._get_bulk_ops_record(course_key)\n if bulk_write_record.active:\n bulk_write_record.structures[structure['_id']] = structure\n else:\n self.db_connection.insert_structure(structure, course_key)",
"def update_time(self):\n pass # Do nothing",
"def update(self, dt):\n\t\tpass",
"def directive_to_struct_time_item(directive, value):\n if directive == DIRECTIVES.YEAR:\n # Return YEAR as TM_YEAR.\n return STRUCT_TIME.TM_YEAR, value\n elif directive == DIRECTIVES.YEAR_NO_CENTURY:\n # Return YEAR_NO_CENTURY as TM_YEAR.\n # Assume that a two-digit year is relative to the year 2000.\n return STRUCT_TIME.TM_YEAR, value + 2000\n elif directive == DIRECTIVES.MONTH:\n # Return MONTH as TM_MON.\n return STRUCT_TIME.TM_MON, value\n elif directive == DIRECTIVES.ABBREV_MONTH_NAME:\n # Return ABBREV_MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, ABBREVIATED_MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.MONTH_NAME:\n # Return MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_MONTH:\n # Return DAY_OF_MONTH as TM_MDAY\n return STRUCT_TIME.TM_MDAY, value\n elif directive == DIRECTIVES.HOUR_24:\n # Return HOUR_24 as TM_HOUR\n return STRUCT_TIME.TM_HOUR, value\n elif directive == DIRECTIVES.HOUR_12:\n # Return HOUR_12 as 0-based TM_HOUR\n return STRUCT_TIME.TM_HOUR, 0 if value == 12 else value\n elif directive == DIRECTIVES.MINUTE:\n # Return MINUTE as TM_MIN\n return STRUCT_TIME.TM_MIN, value\n elif directive == DIRECTIVES.SECOND:\n # Return SECOND as TM_SEC\n return STRUCT_TIME.TM_SEC, value\n elif directive == DIRECTIVES.DAY_OF_WEEK:\n # Return DAY_OF_WEEK as TM_WDAY\n return STRUCT_TIME.TM_WDAY, value\n elif directive == DIRECTIVES.ABBREV_WEEKDAY_NAME:\n # Return ABBREV_WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, ABBREVIATED_WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.WEEKDAY_NAME:\n # Return WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_YEAR:\n # Return DAY_OF_YEAR as TM_YDAY\n return STRUCT_TIME.TM_YDAY, value\n elif directive == DIRECTIVES.TIME_ZONE:\n # Take no action for TIME_ZONE.\n return None\n elif directive == DIRECTIVES.TIME_ZONE_OFFSET:\n # Return TIME_ZONE_OFFSET as TM_MIN - to be subtracted from any\n # existing minute value to arrive at UTC.\n return STRUCT_TIME.TM_MIN, -value\n elif directive == DIRECTIVES.AM_PM:\n # Return AM_PM as TM_HOUR\n # If value = 'PM' return +12 to update hour value to 24-hour format.\n return STRUCT_TIME.TM_HOUR, 12 if value == 'PM' else 0\n elif directive == DIRECTIVES.PERCENT:\n # Take no action for PERCENT.\n return None\n else:\n raise NotImplementedError(\n 'struct_time conversion not defined for directive: {}'\n .format(directive)\n )",
"def unmarshall_time(tyme):\r\n return datetime.datetime(day=tyme['day'],\r\n month=tyme['month'],\r\n year=tyme['year'],\r\n hour=tyme['hour'],\r\n minute=tyme['minute'],\r\n second=tyme['second'],\r\n microsecond=tyme['microsecond'])",
"def update(self, time):\n raise NotImplementedError",
"def update(self, time):\n raise NotImplementedError",
"def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)",
"def update(self, dt=None): #pylint: disable=invalid-name\n if dt is None:\n dt = datetime.utcnow()\n\n self.update_location(self.old_location, dt - timedelta(seconds=1))\n self.update_location(self.current_location, dt)\n self.update_location(self.future_location, dt + timedelta(seconds=1))",
"def modify_struct(self, struct, is_full_struct):\n return struct"
] | [
"0.8585779",
"0.76568127",
"0.5674095",
"0.53683573",
"0.5276543",
"0.5162595",
"0.51445436",
"0.5093863",
"0.50832886",
"0.50665617",
"0.5031827",
"0.48917872",
"0.48271343",
"0.48101082",
"0.47949743",
"0.4784963",
"0.47647735",
"0.4732103",
"0.46906406",
"0.46847725",
"0.46641716",
"0.4648352",
"0.46434045",
"0.46345758",
"0.46340552",
"0.45865777",
"0.45865777",
"0.45769864",
"0.457456",
"0.45710212"
] | 0.7900712 | 1 |