query: stringlengths (9 to 9.05k)
document: stringlengths (10 to 222k)
metadata: dict
negatives: sequencelengths (30 to 30)
negative_scores: sequencelengths (30 to 30)
document_score: stringlengths (4 to 10)
document_rank: stringclasses (2 values)
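The column summary above describes rows of retrieval triplets: a natural-language query, one positive document (Python test code), and 30 mined negatives with similarity scores. Assuming this preview comes from a dataset hosted on the Hugging Face Hub (the repository id below is a placeholder, not the dataset's real name), a row such as the ones shown below could be loaded and inspected like this:

from datasets import load_dataset

# "user/code-retrieval-triplets" is a placeholder repository id for illustration only.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                              # natural-language description of a test
print(len(row["negatives"]))                     # 30 mined negative documents
print(row["document_score"], row["document_rank"])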
Test constructing an infinite _db_updates feed.
def test_constructor_db_updates(self):
    feed = InfiniteFeed(self.client, chunk_size=1, timeout=100, feed='continuous')
    self.assertEqual(feed._url, '/'.join([self.client.server_url, '_db_updates']))
    self.assertIsInstance(feed._r_session, Session)
    self.assertFalse(feed._raw_data)
    self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})
    self.assertEqual(feed._chunk_size, 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_infinite_db_updates_feed(self):\n feed = InfiniteFeed(self.client, since='now', timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n new_dbs = list()\n try:\n new_dbs.append(self.client.create_database(self.dbname()))\n for change in feed:\n self.assertTrue(all(x in change for x in ('seq', 'type')))\n new_dbs.append(self.client.create_database(self.dbname()))\n if feed._start.called_count >= 3 and len(new_dbs) >= 3:\n feed.stop()\n if len(new_dbs) >= 15:\n # We stop regardless after 15 databases have been created\n feed.stop()\n finally:\n [db.delete() for db in new_dbs]\n # The test is considered a success if feed._start was called 2+ times.\n # If failure occurs it does not necessarily mean that the InfiniteFeed\n # is not functioning as expected, it might also mean that we reached the\n # db limit threshold of 15 before a timeout and restart of the\n # InfiniteFeed could happen.\n self.assertTrue(feed._start.called_count > 1)", "def test_infinite_feed(self):\n self.populate_db_with_documents()\n feed = InfiniteFeed(self.db, timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n changes = list()\n for change in feed:\n self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))\n changes.append(change)\n if len(changes) in (100, 200):\n sleep(1) # 1 second > .1 second (timeout)\n self.populate_db_with_documents(off_set=len(changes))\n elif len(changes) == 300:\n feed.stop()\n expected = set(['julia{0:03d}'.format(i) for i in range(300)])\n self.assertSetEqual(set([x['id'] for x in changes]), expected)\n self.assertIsNone(feed.last_seq)\n # Compare infinite/continuous with normal\n normal = Feed(self.db)\n self.assertSetEqual(\n set([x['id'] for x in changes]), set([n['id'] for n in normal]))\n\n # Ensuring that the feed._start method was called 3 times, verifies that\n # the continuous feed was started/restarted 3 separate times.\n self.assertEqual(feed._start.called_count, 3)", "def test_update_new_no_last_updated(reader):\n parser = Parser()\n reader._parser = parser\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n\n reader.add_feed(feed.url)\n # updated must be None if last_updated is None\n reader._storage.update_feed(\n FeedUpdateIntent(feed.url, None, feed=feed._replace(updated=None))\n )\n\n reader.update_feeds(new=True)\n\n parser.entry(1, 1, datetime(2010, 1, 1))\n reader.update_feeds(new=True)\n\n # the entry isn't added because feed is not new on the second update_feeds\n assert len(list(reader.get_entries(feed=feed.url))) == 0", "def test_invalid_source_couchdb(self):\n with self.assertRaises(CloudantFeedException) as cm:\n invalid_feed = [x for x in InfiniteFeed(self.client)]\n self.assertEqual(str(cm.exception),\n 'Infinite _db_updates feed not supported for CouchDB.')", "def test_feed(self):\n mock_iter = (x for x in FIXTURE_DATA.split('\\n'))\n mock_resp = mock.Mock()\n mock_resp.iter_lines = mock.Mock()\n mock_resp.iter_lines.return_value = mock_iter\n\n self.mock_instance.get.return_value = mock_resp\n\n f = Feed(\n self.mock_instance,\n \"http://bob.cloudant.com/bobsdb/_changes\",\n include_docs=True,\n since=\"SINCE\"\n )\n\n result = [x for x in f]\n # 5 empty lines\n self.assertEqual(result.count({}), 5)\n # six non empty lines\n changes = [\n x['changes'] for x in result if x.get('changes') is not None\n ]\n 
self.assertEqual(len(changes), 6)\n\n errors = [x['error'] for x in result if x.get('error') is not None]\n self.assertEqual(len(errors), 1)", "def test_update_last_updated_entries_updated_feed_not_updated(\n reader, call_update_method\n):\n parser = Parser()\n reader._parser = parser\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n reader.add_feed(feed.url)\n reader._now = lambda: naive_datetime(2010, 1, 1)\n call_update_method(reader, feed.url)\n\n (feed_for_update,) = reader._storage.get_feeds_for_update(url=feed.url)\n assert feed_for_update.last_updated == naive_datetime(2010, 1, 1)\n\n parser.entry(1, 1, datetime(2010, 1, 1))\n reader._now = lambda: naive_datetime(2010, 1, 2)\n call_update_method(reader, feed.url)\n\n (feed_for_update,) = reader._storage.get_feeds_for_update(url=feed.url)\n assert feed_for_update.last_updated == naive_datetime(2010, 1, 2)", "def test_constructor_no_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100)\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_update_new_not_modified(reader):\n parser = NotModifiedParser()\n reader._parser = parser\n\n feed = parser.feed(1, naive_datetime(2010, 1, 1))\n\n reader.add_feed(feed.url)\n reader._storage.update_feed(FeedUpdateIntent(feed.url, None, feed=feed))\n\n reader.update_feeds(new=True)\n\n parser = Parser.from_parser(parser)\n reader._parser = parser\n\n parser.entry(1, 1, naive_datetime(2010, 1, 1))\n reader.update_feeds(new=True)\n\n # the entry isn't added because feed is not new on the second update_feeds\n assert len(list(reader.get_entries(feed=feed.url))) == 0", "def test_constructor_with_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_feed_generator(self):\n moksha.feed_cache = FakeCache()\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()", "def test_update_no_updated(reader, chunk_size, call_update_method):\n reader._storage.chunk_size = chunk_size\n\n parser = Parser()\n reader._parser = parser\n\n feed = parser.feed(1, None, title='old')\n entry_one = parser.entry(1, 1, None, title='old')\n reader._now = lambda: naive_datetime(2010, 1, 1)\n reader.add_feed(feed.url)\n call_update_method(reader, feed)\n feed = feed.as_feed(added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 1))\n\n assert set(reader.get_feeds()) == {feed}\n assert set(reader.get_entries()) == {\n entry_one.as_entry(\n feed=feed, updated=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 1)\n )\n }\n\n feed = parser.feed(1, None, title='new')\n entry_one = parser.entry(1, 1, None, title='new')\n entry_two = parser.entry(1, 2, None)\n reader._now = lambda: naive_datetime(2010, 1, 2)\n call_update_method(reader, feed)\n feed = feed.as_feed(added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 2))\n\n assert set(reader.get_feeds()) == {feed}\n assert set(reader.get_entries()) == {\n entry_one.as_entry(\n feed=feed, updated=datetime(2010, 1, 1), 
last_updated=datetime(2010, 1, 2)\n ),\n entry_two.as_entry(\n feed=feed, updated=datetime(2010, 1, 2), last_updated=datetime(2010, 1, 2)\n ),\n }", "def test_update_stale(reader, call_update_method, entry_updated):\n from utils import utc_datetime as datetime\n\n parser = ParserThatRemembers()\n parser.http_etag = 'etag'\n parser.http_last_modified = 'last-modified'\n reader._parser = parser\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n entry = parser.entry(1, 1, entry_updated)\n\n with pytest.raises(FeedNotFoundError):\n reader._storage.mark_as_stale(feed.url)\n\n reader.add_feed(feed.url)\n\n reader._now = lambda: naive_datetime(2010, 1, 1)\n call_update_method(reader, feed.url)\n\n assert set((f.url, f.title, f.last_updated) for f in reader.get_feeds()) == {\n (feed.url, feed.title, datetime(2010, 1, 1))\n }\n assert set((e.id, e.title, e.last_updated) for e in reader.get_entries()) == {\n (entry.id, entry.title, datetime(2010, 1, 1))\n }\n\n # we can't change feed/entry here because their hash would change,\n # resulting in an update;\n # the only way to check they were updated is through last_updated\n\n # should we deprecate the staleness API? maybe:\n # https://github.com/lemon24/reader/issues/179#issuecomment-663840297\n # OTOH, we may still want an update to happen for other side-effects,\n # even if the hash doesn't change\n\n if entry_updated:\n # nothing changes after update\n reader._now = lambda: naive_datetime(2010, 1, 2)\n call_update_method(reader, feed.url)\n assert set((f.url, f.title, f.last_updated) for f in reader.get_feeds()) == {\n (feed.url, feed.title, datetime(2010, 1, 1))\n }\n assert set((e.id, e.title, e.last_updated) for e in reader.get_entries()) == {\n (entry.id, entry.title, datetime(2010, 1, 1))\n }\n\n # but it does if we mark the feed as stale\n parser.calls[:] = []\n reader._storage.mark_as_stale(feed.url)\n reader._now = lambda: naive_datetime(2010, 1, 3)\n call_update_method(reader, feed.url)\n assert parser.calls == [(feed.url, None, None)]\n assert set((f.url, f.title, f.last_updated) for f in reader.get_feeds()) == {\n (feed.url, feed.title, datetime(2010, 1, 3))\n }\n assert set((e.id, e.title, e.last_updated) for e in reader.get_entries()) == {\n (entry.id, entry.title, datetime(2010, 1, 3))\n }", "def test_update_loop(self):\n self.create_org(provider='qbo')\n old_task_count = 0\n\n while True:\n update_call = self.app.post('/adapter/qbo/test/update')\n self.assertEqual(update_call.status_code, 204)\n\n new_task_count = len(self.taskqueue.get_filtered_tasks())\n\n if new_task_count == old_task_count:\n break\n\n if new_task_count > 100:\n self.fail(\"too many adapter calls, infinite loop maybe???\")\n\n old_task_count = new_task_count\n\n self.assertEqual(new_task_count, 20)", "def test_feed_creation(self):\n items = []\n feed = Feed(items)\n assert isinstance(feed, Feed)\n assert items == feed.items", "async def test_valid_all_updates(database, valid_data,rng):\n \n await test_valid_insert(database,valid_data)\n database = await Database.connect_pool()\n for embeddings in [rng.random(128),None]:\n for batch_id in [random.randint(0,100),None]:\n await database.update(0,0,embeddings,batch_id)\n await database.close_pool()", "def setUp(self):\n super(InfiniteFeedTests, self).setUp()\n self.db_set_up()", "def test_grainbin_updates_latest_get_multiple(flaskclient, auth_headers, dbsession):\n\n grainbin = GrainbinFactory().save()\n\n # create two GrainbinUpdates for each iteration\n for x in range(5):\n grainbin_update = 
GrainbinUpdate(grainbin.id)\n grainbin_update.timestamp = dt.datetime.now()\n grainbin_update.update_index = x\n grainbin_update_2 = GrainbinUpdate(grainbin.id)\n grainbin_update_2.timestamp = dt.datetime.now()\n grainbin_update_2.update_index = x\n grainbin.total_updates = x\n dbsession.add(grainbin_update)\n dbsession.add(grainbin_update_2)\n\n dbsession.commit()\n\n url = url_for(\"grainbin.GrainbinUpdatesLatest\", grainbin_id=grainbin.id)\n rep = flaskclient.get(url, headers=auth_headers)\n fetched_update = rep.get_json()\n\n assert rep.status_code == 200\n assert len(fetched_update) == 2\n assert fetched_update[0][\"update_index\"] == 4", "def test_update_feed_updated(reader, call_update_method, caplog):\n parser = Parser()\n reader._parser = parser\n\n # Initial update.\n old_feed = parser.feed(1, datetime(2010, 1, 1), title='old')\n entry_one = parser.entry(1, 1, datetime(2010, 1, 1))\n\n reader._now = lambda: naive_datetime(2010, 1, 1)\n reader.add_feed(old_feed.url)\n reader._now = lambda: naive_datetime(2010, 1, 2)\n\n with caplog.at_level(logging.DEBUG, 'reader'):\n call_update_method(reader, old_feed.url)\n\n feed = old_feed.as_feed(\n added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 2)\n )\n assert set(reader.get_entries()) == {\n entry_one.as_entry(feed=feed, last_updated=datetime(2010, 1, 2))\n }\n assert \"feed has no last_updated, treating as updated\" in caplog.text\n caplog.clear()\n\n # Entries should be processed anyway.\n entry_two = parser.entry(1, 2, datetime(2010, 2, 1))\n reader._now = lambda: naive_datetime(2010, 1, 3)\n\n with caplog.at_level(logging.DEBUG, logger='reader'):\n call_update_method(reader, old_feed.url)\n\n feed = old_feed.as_feed(\n added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 3)\n )\n assert set(reader.get_entries()) == {\n entry_one.as_entry(feed=feed, last_updated=datetime(2010, 1, 2)),\n entry_two.as_entry(feed=feed, last_updated=datetime(2010, 1, 3)),\n }\n assert \"feed not updated, updating entries anyway\" in caplog.text\n caplog.clear()\n\n # Feed gets updated because content (hash) changed.\n old_feed = parser.feed(1, datetime(2010, 1, 1), title='old-different-title')\n reader._now = lambda: naive_datetime(2010, 1, 3, 12)\n\n with caplog.at_level(logging.DEBUG, logger='reader'):\n call_update_method(reader, old_feed.url)\n\n feed = old_feed.as_feed(\n added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 3, 12)\n )\n assert reader.get_feed(feed) == feed\n assert \"feed hash changed, treating as updated\" in caplog.text\n caplog.clear()\n\n # The feed doesn't change, because .updated is older.\n # Entries get updated regardless.\n old_feed = parser.feed(1, datetime(2009, 1, 1), title='old-different-title')\n entry_three = parser.entry(1, 3, datetime(2010, 2, 1))\n reader._now = lambda: naive_datetime(2010, 1, 4)\n\n with caplog.at_level(logging.DEBUG, logger='reader'):\n call_update_method(reader, old_feed.url)\n\n feed = old_feed.as_feed(\n added=datetime(2010, 1, 1),\n # doesn't change because it's not newer\n updated=datetime(2010, 1, 1),\n # changes because entries changed\n last_updated=datetime(2010, 1, 4),\n )\n assert set(reader.get_entries()) == {\n entry_one.as_entry(feed=feed, last_updated=datetime(2010, 1, 2)),\n entry_two.as_entry(feed=feed, last_updated=datetime(2010, 1, 3)),\n entry_three.as_entry(feed=feed, last_updated=datetime(2010, 1, 4)),\n }\n assert \"feed not updated, updating entries anyway\" in caplog.text\n caplog.clear()\n\n # The feed doesn't change; despite being newer, no entries 
have changed.\n old_feed = parser.feed(1, datetime(2010, 1, 2), title='old-different-title')\n reader._now = lambda: naive_datetime(2010, 1, 4, 12)\n\n with caplog.at_level(logging.DEBUG, logger='reader'):\n call_update_method(reader, old_feed.url)\n\n feed = old_feed.as_feed(\n added=datetime(2010, 1, 1),\n # doesn't change because no entries have changed\n updated=datetime(2010, 1, 1),\n # doesn't change because nothing changed\n last_updated=datetime(2010, 1, 4),\n )\n assert set(reader.get_entries()) == {\n entry_one.as_entry(feed=feed, last_updated=datetime(2010, 1, 2)),\n entry_two.as_entry(feed=feed, last_updated=datetime(2010, 1, 3)),\n entry_three.as_entry(feed=feed, last_updated=datetime(2010, 1, 4)),\n }\n assert \"feed not updated, updating entries anyway\" in caplog.text\n caplog.clear()\n\n # The feeds changes because it is newer *and* entries get updated.\n new_feed = parser.feed(1, datetime(2010, 1, 2), title='new')\n entry_four = parser.entry(1, 4, datetime(2010, 2, 1))\n reader._now = lambda: naive_datetime(2010, 1, 5)\n feed = new_feed.as_feed(\n added=datetime(2010, 1, 1), last_updated=datetime(2010, 1, 5)\n )\n\n with caplog.at_level(logging.DEBUG, logger='reader'):\n call_update_method(reader, old_feed.url)\n\n assert set(reader.get_entries()) == {\n entry_one.as_entry(feed=feed, last_updated=datetime(2010, 1, 2)),\n entry_two.as_entry(feed=feed, last_updated=datetime(2010, 1, 3)),\n entry_three.as_entry(feed=feed, last_updated=datetime(2010, 1, 4)),\n entry_four.as_entry(feed=feed, last_updated=datetime(2010, 1, 5)),\n }\n assert \"feed updated\" in caplog.text\n caplog.clear()", "def test_update_feed_deleted(\n db_path, make_reader, call_update_method, feed_action, entry_action\n):\n\n parser = Parser()\n reader = make_reader(db_path)\n reader._parser = parser\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n reader.add_feed(feed.url)\n reader.update_feeds()\n\n if entry_action is not EntryAction.none:\n parser.entry(1, 1, datetime(2010, 1, 1))\n if entry_action is EntryAction.update:\n reader.update_feeds()\n parser.entry(1, 1, datetime(2010, 1, 2))\n\n if feed_action is FeedAction.update:\n feed = parser.feed(1, datetime(2010, 1, 2), title='new title')\n\n if feed_action is not FeedAction.fail:\n parser_cls = BlockingParser\n else:\n\n class parser_cls(BlockingParser, FailingParser):\n pass\n\n blocking_parser = parser_cls.from_parser(parser)\n\n def target():\n # can't use fixture because it would run close() in a different thread\n from reader import make_reader\n\n blocking_parser.in_parser.wait()\n reader = make_reader(db_path)\n try:\n reader.delete_feed(feed.url)\n finally:\n blocking_parser.can_return_from_parser.set()\n try:\n reader.close()\n except StorageError as e:\n if 'database is locked' in str(e):\n pass # sometimes, it can be; we don't care\n else:\n raise\n\n t = threading.Thread(target=target)\n t.start()\n\n try:\n reader._parser = blocking_parser\n if call_update_method.__name__ == 'call_update_feed':\n with pytest.raises(FeedNotFoundError) as excinfo:\n call_update_method(reader, feed.url)\n assert excinfo.value.url == feed.url\n assert 'no such feed' in excinfo.value.message\n elif call_update_method.__name__.startswith('call_update_feeds'):\n # shouldn't raise an exception\n call_update_method(reader, feed.url)\n else:\n assert False, \"shouldn't happen\"\n finally:\n t.join()", "def test_get_feeds_order_added(reader):\n parser = Parser()\n reader._parser = parser\n\n reader._now = lambda: naive_datetime(2010, 1, 1)\n feed1 = parser.feed(1, 
datetime(2010, 1, 2))\n reader.add_feed(feed1.url)\n\n reader._now = lambda: naive_datetime(2010, 1, 2)\n feed2 = parser.feed(2, datetime(2010, 1, 1))\n reader.add_feed(feed2.url)\n\n reader._now = lambda: naive_datetime(2009, 12, 31)\n feed3 = parser.feed(3, datetime(2010, 1, 3))\n reader.add_feed(feed3.url)\n\n assert list(f.url for f in reader.get_feeds(sort='added')) == '2 1 3'.split()\n\n reader.update_feeds()\n\n assert list(f.url for f in reader.get_feeds(sort='added')) == '2 1 3'.split()", "def update_db_from_rss():\n today = date.today()\n url = 'https://newyork.craigslist.org/search/jjj?query=unpaid&sort=rel&format=rss'\n\n cached_feed = CachedFeed.query.filter_by(rss_url=url, date=today).first()\n if not cached_feed:\n resp = requests.get(url)\n cached_feed = CachedFeed(rss_url=url, text=resp.text)\n db.session.add(cached_feed)\n db.session.commit()\n\n feed = feedparser.parse(cached_feed.text)\n\n for entry in feed.entries:\n link = entry['link']\n\n # Skip postings that already exist when scanning\n posting = Posting.query.filter_by(url=link, rss_url=url).first()\n if posting:\n continue\n\n posting_resp = requests.get(link)\n posting_soup = BeautifulSoup(posting_resp.text)\n\n replylink = posting_soup.find(id=\"replylink\")\n contact_href = replylink.get('href') if replylink else None\n\n contact_url = urljoin(url, contact_href)\n contact_resp = requests.get(contact_url)\n contact_soup = BeautifulSoup(contact_resp.text)\n\n anonemail_el = contact_soup.find(class_=\"anonemail\")\n title = posting_soup.find('title').text\n\n posting = Posting(title=title,\n url=link,\n rss_url=url,\n text=unicode(posting_soup.find(id='postingbody')),\n region='nyc',\n posted_at = datetime.fromtimestamp(mktime(entry.published_parsed)),\n email=anonemail_el.text if anonemail_el else None,\n email_subject=title,\n email_body=current_app.config['EMAIL_DEFAULT_BODY']\n )\n\n db.session.add(posting)\n\n print(u\"finished {}, sleeping\".format(link))\n time.sleep(15)\n\n db.session.commit()", "def test_update_instance_limit1(self):\n pass", "def test_constructor_with_invalid_feed_option(self):\n feed = InfiniteFeed(self.db, feed='longpoll')\n with self.assertRaises(CloudantArgumentError) as cm:\n invalid_feed = [x for x in feed]\n self.assertEqual(\n str(cm.exception),\n 'Invalid infinite feed option: longpoll. 
Must be set to continuous.'\n )", "async def test_updates_no_user(database,valid_data):\n #reset the database and add values with ids [0,10]\n test_valid_insert(database,valid_data)\n\n for _id in range(100,150):\n try:\n await database.update(_id=_id,user_id=_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_polling_loop(self, cursor):\n cursor._poll_interval = 0\n yield cursor.execute('SELECT COUNT(*) FROM many_rows')\n self.assertEqual((yield cursor.fetchone()), [10000])", "def test_feed_subclassing(self):\n moksha.feed_cache = FakeCache()\n class MyFeed(Feed):\n url = 'http://lewk.org/rss'\n feed = MyFeed()\n assert feed.url == 'http://lewk.org/rss'\n assert feed.num_entries() > 0\n for entry in feed.iterentries():\n pass\n for entry in feed.get_entries():\n pass", "def test_new_entries_are_added(db_session):\n for entry in ENTRIES:\n row = Entries(title=entry[\"title\"], creation_date=entry[\"creation_date\"], body=entry[\"body\"])\n db_session.add(row)\n query = db_session.query(Entries).all()\n assert len(query) == len(ENTRIES)", "def test_grainbin_updates_latest_get(flaskclient, auth_headers, dbsession):\n\n grainbin = GrainbinFactory().save()\n\n # create some GrainbinUpdates\n for x in range(25):\n grainbin_update = GrainbinUpdate(grainbin.id)\n grainbin_update.timestamp = dt.datetime.now()\n grainbin_update.update_index = x\n grainbin.total_updates = x\n dbsession.add(grainbin_update)\n\n dbsession.commit()\n\n url = url_for(\"grainbin.GrainbinUpdatesLatest\", grainbin_id=grainbin.id)\n rep = flaskclient.get(url, headers=auth_headers)\n fetched_update = rep.get_json()\n\n assert rep.status_code == 200\n assert fetched_update[0][\"update_index\"] == 24", "def testDataFeed(self):\n\n start_date = '2008-10-01'\n end_date = '2008-10-02'\n metrics = 'ga:visits'\n\n if not conf.options.get_value('runlive') == 'true':\n return\n conf.configure_cache(self.client, 'testDataFeed')\n\n data_query = gdata.analytics.client.DataFeedQuery({\n 'ids': conf.options.get_value('table_id'),\n 'start-date': start_date,\n 'end-date': end_date,\n 'metrics': metrics,\n 'max-results': '1'\n })\n feed = self.client.GetDataFeed(data_query)\n\n self.assertTrue(feed.entry is not None)\n self.assertEqual(feed.start_date.text, start_date)\n self.assertEqual(feed.end_date.text, end_date)\n self.assertEqual(feed.entry[0].GetMetric(metrics).name, metrics)", "def test_update_instance_limit(self):\n pass" ]
[ "0.8253992", "0.72884977", "0.6604631", "0.64941096", "0.64665896", "0.6328994", "0.6175912", "0.6170512", "0.61609447", "0.6133652", "0.6111069", "0.60689044", "0.6057885", "0.60202515", "0.59075385", "0.58818424", "0.5873431", "0.58687603", "0.58345026", "0.58297986", "0.5819456", "0.58013904", "0.57881457", "0.5769007", "0.5760422", "0.57363075", "0.57147336", "0.57046616", "0.570288", "0.5667633" ]
0.8292704
0
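The preview does not say how document_score and document_rank are derived; one plausible reading (an assumption, not documented here) is that the rank counts negatives scored above the positive document. That reading is at least consistent with the row above, where document_score 0.8292704 exceeds the highest negative score 0.8253992 and the rank shown is 0. A minimal sketch under that assumption:

def infer_document_rank(document_score, negative_scores):
    # Number of mined negatives that the scorer ranked above the positive document.
    return sum(1 for score in negative_scores if score > document_score)

# Consistent with the row above: 0.8292704 beats every negative score, so the rank is 0.
assert infer_document_rank(0.8292704, [0.8253992, 0.72884977, 0.6604631]) == 0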
Test that an infinite feed will continue to issue multiple requests until stopped. This check is performed in combination by creating documents 3 separate times and checking that the "_start" method on the InfiniteFeed object was called 3 times as well.
def test_infinite_feed(self):
    self.populate_db_with_documents()
    feed = InfiniteFeed(self.db, timeout=100)

    # Create a proxy for the feed._start method so that we can track how
    # many times it has been called.
    feed._start = MethodCallCount(feed._start)

    changes = list()
    for change in feed:
        self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))
        changes.append(change)
        if len(changes) in (100, 200):
            sleep(1)  # 1 second > .1 second (timeout)
            self.populate_db_with_documents(off_set=len(changes))
        elif len(changes) == 300:
            feed.stop()
    expected = set(['julia{0:03d}'.format(i) for i in range(300)])
    self.assertSetEqual(set([x['id'] for x in changes]), expected)
    self.assertIsNone(feed.last_seq)
    # Compare infinite/continuous with normal
    normal = Feed(self.db)
    self.assertSetEqual(
        set([x['id'] for x in changes]), set([n['id'] for n in normal]))

    # Ensuring that the feed._start method was called 3 times, verifies that
    # the continuous feed was started/restarted 3 separate times.
    self.assertEqual(feed._start.called_count, 3)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_infinite_db_updates_feed(self):\n feed = InfiniteFeed(self.client, since='now', timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n new_dbs = list()\n try:\n new_dbs.append(self.client.create_database(self.dbname()))\n for change in feed:\n self.assertTrue(all(x in change for x in ('seq', 'type')))\n new_dbs.append(self.client.create_database(self.dbname()))\n if feed._start.called_count >= 3 and len(new_dbs) >= 3:\n feed.stop()\n if len(new_dbs) >= 15:\n # We stop regardless after 15 databases have been created\n feed.stop()\n finally:\n [db.delete() for db in new_dbs]\n # The test is considered a success if feed._start was called 2+ times.\n # If failure occurs it does not necessarily mean that the InfiniteFeed\n # is not functioning as expected, it might also mean that we reached the\n # db limit threshold of 15 before a timeout and restart of the\n # InfiniteFeed could happen.\n self.assertTrue(feed._start.called_count > 1)", "def test_constructor_db_updates(self):\n feed = InfiniteFeed(self.client, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.client.server_url, '_db_updates']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_constructor_no_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100)\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_limit_items(self):\n AnnouncementFactory(\n title=\"Not going to be there\",\n expires_at=timezone.now() - datetime.timedelta(days=1),\n )\n for i in range(5):\n AnnouncementFactory()\n\n response = self.get(\"announcements:feed\")\n\n assert \"Not going to be there\" not in response.content.decode()", "def test_update_loop(self):\n self.create_org(provider='qbo')\n old_task_count = 0\n\n while True:\n update_call = self.app.post('/adapter/qbo/test/update')\n self.assertEqual(update_call.status_code, 204)\n\n new_task_count = len(self.taskqueue.get_filtered_tasks())\n\n if new_task_count == old_task_count:\n break\n\n if new_task_count > 100:\n self.fail(\"too many adapter calls, infinite loop maybe???\")\n\n old_task_count = new_task_count\n\n self.assertEqual(new_task_count, 20)", "def test_constructor_with_feed_option(self):\n feed = InfiniteFeed(self.db, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.db.database_url, '_changes']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_withCountIntervalZero(self):\n clock = task.Clock()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n if len(accumulator) > 4:\n loop.stop()\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n deferred = loop.start(0, now=False)\n\n clock.advance(0)\n self.successResultOf(deferred)\n\n self.assertEqual([1, 1, 1, 1, 1], accumulator)", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = 
self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def test_channel_messages_unlimited_pagination():\n clear()\n userOne = auth_register('[email protected]', '123abc!@#', 'First', 'User') \n randChannel = channels_create(userOne['token'], 'randChannel', True)\n for _ in range(149):\n message_send(userOne['token'], randChannel['channel_id'], 'Hello')\n messages = channel_messages(userOne['token'], randChannel['channel_id'], 0)\n assert(messages['start'] == 0)\n assert(messages['end'] == 50) \n messages2 = channel_messages(userOne['token'], randChannel['channel_id'], 50)\n assert(messages2['start'] == 50)\n assert(messages2['end'] == 100) \n messages3 = channel_messages(userOne['token'], randChannel['channel_id'], 100)\n assert(messages3['start'] == 100)\n assert(messages3['end'] == -1) \n assert(len(messages3['messages']) == 49)\n # an error should be raised when start is beyond 149 messages\n with pytest.raises(InputError): \n channel_messages(userOne['token'], randChannel['channel_id'], 150)", "def test_collection_limit(testapp):\n obj1 = {\n 'title': \"Testing1\",\n 'description': \"This is testing object 1\",\n }\n obj2 = {\n 'title': \"Testing2\",\n 'description': \"This is testing object 2\",\n }\n obj3 = {\n 'title': \"Testing3\",\n 'description': \"This is testing object 3\",\n }\n testapp.post_json('/embedding-tests', obj1, status=201)\n testapp.post_json('/embedding-tests', obj2, status=201)\n testapp.post_json('/embedding-tests', obj3, status=201)\n res_all = testapp.get('/embedding-tests/?limit=all', status=200)\n res_2 = testapp.get('/embedding-tests/?limit=2', status=200)\n assert len(res_all.json['@graph']) == 3\n assert len(res_2.json['@graph']) == 2", "def smoke_test_receive_tweets():\n total = 100000\n\n def tweetsource(request):\n while True:\n yield single_tweet + \"\\n\"\n\n def do_test(klass, *args):\n with test_server(handler=tweetsource,\n methods=(\"post\", \"get\"), port=\"random\") as server:\n stream = klass(\"foo\", \"bar\", *args, url=server.baseurl)\n for tweet in stream:\n if stream.count == total:\n break\n\n do_test(TweetStream)\n do_test(FollowStream, [1, 2, 3])\n do_test(TrackStream, [\"foo\", \"bar\"])", "def test_get_stream_too_many_requests(req):\n req.get(ENTREZ_URL, text=u'Whoa, slow down', status_code=429, headers={\"Retry-After\": \"2\"})\n params = dict(id='FAKE')\n with pytest.raises(TooManyRequests):\n core.get_stream(ENTREZ_URL, params)", "def test_issue_335(self):\n num_tasks_in_seq = 5\n seq = MySequentialCollection([\n Application(\n ['echo', 'test1'],\n [], [],\n os.path.join(self.tmpdir, 'test.%d.d' % i))\n for i in range(num_tasks_in_seq)\n ])\n engine = create_engine(self.cfgfile, auto_enable_auth=True)\n engine.add(seq)\n while True:\n engine.progress()\n if (len([task for task in seq.tasks\n if task.execution.state == Run.State.TERMINATED])\n == num_tasks_in_seq):\n engine.progress()\n # check that final SequentialCollection state is TERMINATED\n assert seq.execution.state == Run.State.TERMINATED\n break\n # check that next() has been called once per each task\n assert seq.next_called_n_times == num_tasks_in_seq", "def test_iteration_within_iteration(self):\n\n class Data(Document):\n pass\n\n for i in range(300):\n Data().save()\n\n qs = Data.objects.limit(250)\n for i, doc in enumerate(qs):\n for j, doc2 in enumerate(qs):\n pass\n\n assert i == 249\n assert j == 249", "def 
test_len_during_iteration(self):\n\n class Data(Document):\n pass\n\n for i in range(300):\n Data().save()\n\n records = Data.objects.limit(250)\n\n # This should pull all 250 docs from mongo and populate the result\n # cache\n len(records)\n\n # Assert that iterating over documents in the qs touches every\n # document even if we call len(qs) midway through the iteration.\n for i, r in enumerate(records):\n if i == 58:\n len(records)\n assert i == 249\n\n # Assert the same behavior is true even if we didn't pre-populate the\n # result cache.\n records = Data.objects.limit(250)\n for i, r in enumerate(records):\n if i == 58:\n len(records)\n assert i == 249", "def generate_feeds():\n os.makedirs(Config.FEED_ROOT_PATH, exist_ok=True)\n use_batching = Config.DAILY_DIGEST is not None\n\n while True:\n _generate_feeds_once(use_batching=use_batching)\n interval = _interval_between_generating_feeds(Config.REFRESH_INTERVAL_SECONDS, Config.DAILY_DIGEST)\n logging.info('Sleeping %ss before attempting to generate feeds again.', interval)\n time.sleep(interval)", "def test_monitor_correctly_does_not_process_already_processed_pages(self):\n # Arrange\n # There are two pages: page # 1 and page # 2\n feeds = [fixtures.PROQUEST_FEED_PAGE_1, fixtures.PROQUEST_FEED_PAGE_2]\n # But only the page # 1 will be processed\n expected_calls = [call(fixtures.PROQUEST_FEED_PAGE_1)]\n\n identifier_parser = ProQuestIdentifierParser()\n\n # Create Identifiers for publications # 2, 3, and 4\n publication_2_identifier, _ = identifier, _ = Identifier.parse(\n self._db,\n fixtures.PROQUEST_PUBLICATION_2.metadata.identifier,\n identifier_parser,\n )\n publication_3_identifier, _ = identifier, _ = Identifier.parse(\n self._db,\n fixtures.PROQUEST_PUBLICATION_3.metadata.identifier,\n identifier_parser,\n )\n publication_4_identifier, _ = identifier, _ = Identifier.parse(\n self._db,\n fixtures.PROQUEST_PUBLICATION_4.metadata.identifier,\n identifier_parser,\n )\n\n # Make sure that all the publications # 2, 3, and 4 were already processed\n max_modified_date = max(\n fixtures.PROQUEST_PUBLICATION_2.metadata.modified,\n fixtures.PROQUEST_PUBLICATION_3.metadata.modified,\n fixtures.PROQUEST_PUBLICATION_4.metadata.modified,\n )\n coverage_date = max_modified_date + datetime.timedelta(days=1)\n\n # Create coverage records for publications # 2, 3, and 4\n CoverageRecord.add_for(\n publication_2_identifier,\n self._proquest_data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n timestamp=coverage_date,\n )\n CoverageRecord.add_for(\n publication_3_identifier,\n self._proquest_data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n timestamp=coverage_date,\n )\n CoverageRecord.add_for(\n publication_4_identifier,\n self._proquest_data_source,\n operation=CoverageRecord.IMPORT_OPERATION,\n timestamp=coverage_date,\n )\n\n client = create_autospec(spec=ProQuestAPIClient)\n\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory, self._db, self._proquest_collection, ProQuestOPDS2Importer\n )\n monitor._get_feeds = MagicMock(return_value=list(zip([None] * len(feeds), feeds)))\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n # Act\n monitor.run_once(False)\n\n # Assert\n # Make sure that ProQuestOPDS2ImportMonitor.import_one_feed was called only for the page # 1\n monitor.import_one_feed.assert_has_calls(expected_calls)", "def test_batch_create_occurrences(self):\n pass", "def 
test_monitor_correctly_processes_pages(self, _, feeds, expected_calls):\n # Arrange\n client = create_autospec(spec=ProQuestAPIClient)\n client_factory = create_autospec(spec=ProQuestAPIClientFactory)\n client_factory.create = MagicMock(return_value=client)\n\n monitor = ProQuestOPDS2ImportMonitor(\n client_factory, self._db, self._proquest_collection, ProQuestOPDS2Importer\n )\n monitor._get_feeds = MagicMock(return_value=list(zip([None] * len(feeds), feeds)))\n monitor.import_one_feed = MagicMock(return_value=([], []))\n\n # Act\n monitor.run_once(False)\n\n # Assert\n # Make sure that ProQuestOPDS2ImportMonitor.import_one_feed was called for each paged feed (if any)\n monitor.import_one_feed.assert_has_calls(expected_calls)", "def test_invalid_source_couchdb(self):\n with self.assertRaises(CloudantFeedException) as cm:\n invalid_feed = [x for x in InfiniteFeed(self.client)]\n self.assertEqual(str(cm.exception),\n 'Infinite _db_updates feed not supported for CouchDB.')", "def test_next_token(self) -> None:\n\n # `next_token` does not appear\n # Number of results is the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=20\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does not appear\n # Number of max results is larger than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=21\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n self.assertNotIn(\"next_token\", channel.json_body)\n\n # `next_token` does appear\n # Number of max results is smaller than the number of entries\n channel = self.make_request(\n \"GET\",\n self.url + \"?limit=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 19)\n self.assertEqual(channel.json_body[\"next_token\"], 19)\n\n # Check\n # Set `from` to value of `next_token` for request remaining entries\n # `next_token` does not appear\n channel = self.make_request(\n \"GET\",\n self.url + \"?from=19\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 1)\n self.assertNotIn(\"next_token\", channel.json_body)", "def test_constructor_with_invalid_feed_option(self):\n feed = InfiniteFeed(self.db, feed='longpoll')\n with self.assertRaises(CloudantArgumentError) as cm:\n invalid_feed = [x for x in feed]\n self.assertEqual(\n str(cm.exception),\n 'Invalid infinite feed option: longpoll. 
Must be set to continuous.'\n )", "def test_withCountIntervalZeroDelayThenNonZeroInterval(self):\n clock = task.Clock()\n deferred = defer.Deferred()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n if len(accumulator) == 2:\n return deferred\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n loop.start(0, now=False)\n\n # Even if a lot of time pass, loop will block at the third call.\n clock.advance(10)\n self.assertEqual([1, 1], accumulator)\n\n # When a new interval is set, once the waiting call got a result the\n # loop continues with the new interval.\n loop.interval = 2\n deferred.callback(None)\n\n # It will count skipped steps since the last loop call.\n clock.advance(7)\n self.assertEqual([1, 1, 3], accumulator)\n\n clock.advance(2)\n self.assertEqual([1, 1, 3, 1], accumulator)\n\n clock.advance(4)\n self.assertEqual([1, 1, 3, 1, 2], accumulator)", "def infinite_loop():\n return True", "def test_stream(self):\n with skipping(NotImplementedError):\n self.es = EventStreamsTestClass(streams='recentchange')\n limit = 50\n self.es.set_maximum_items(limit)\n self.assertLength(list(self.es), limit)", "async def checkNewLoop(self):\n pass", "def test_withCountIntervalZeroDelay(self):\n clock = task.Clock()\n deferred = defer.Deferred()\n accumulator = []\n\n def foo(cnt):\n accumulator.append(cnt)\n\n if len(accumulator) == 2:\n return deferred\n\n if len(accumulator) > 4:\n loop.stop()\n\n loop = task.LoopingCall.withCount(foo)\n loop.clock = clock\n loop.start(0, now=False)\n\n clock.advance(0)\n # Loop will block at the third call.\n self.assertEqual([1, 1], accumulator)\n\n # Even if more time pass, the loops is not\n # advanced.\n clock.advance(2)\n self.assertEqual([1, 1], accumulator)\n\n # Once the waiting call got a result the loop continues without\n # observing any delay in countCallable.\n deferred.callback(None)\n clock.advance(0)\n self.assertEqual([1, 1, 1, 1, 1], accumulator)", "def test_listen_for_dweets_from(self):\n dweets_heard = 0\n for dweet in dweepy.listen_for_dweets_from(self.my_thing_id, timeout=5):\n dweets_heard += 1\n check_valid_dweet_response(self, dweet)\n self.assertGreater(dweets_heard, 0)", "def test_feed_generator(self):\n moksha.feed_cache = FakeCache()\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()", "def test_continuous_bulk_parsing(self, aggregator):\n test_data = ensure_bytes(open(NAGIOS_TEST_LOG).read())\n ITERATIONS = 10\n log_file = tempfile.NamedTemporaryFile(mode=\"a+b\")\n\n # Get the config\n config, nagios_cfg = get_config(\"log_file={}\\n\".format(log_file.name), events=True)\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n for _ in range(ITERATIONS):\n log_file.write(test_data)\n log_file.flush()\n nagios.check(config['instances'][0])\n\n log_file.close()\n assert len(aggregator.events) == ITERATIONS * 503" ]
[ "0.72182524", "0.60102415", "0.5983157", "0.58984596", "0.58564985", "0.5765583", "0.574824", "0.56290317", "0.5562241", "0.55338424", "0.55331767", "0.55290794", "0.5521841", "0.55046946", "0.54600227", "0.5453966", "0.54417586", "0.54326344", "0.5425452", "0.54100937", "0.5407624", "0.5401589", "0.5383404", "0.5373058", "0.5371539", "0.533022", "0.53160745", "0.5310447", "0.5308187", "0.53035253" ]
0.8080091
0
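Several of the tests shown in these rows replace feed._start with a MethodCallCount proxy and later assert on its called_count attribute. The real helper is not included in this preview; a minimal sketch of a wrapper exposing that interface, written as an assumption about its behavior:

class MethodCallCount(object):
    """Wrap a callable and count how many times it is invoked (assumed interface)."""

    def __init__(self, method):
        self._method = method
        self.called_count = 0

    def __call__(self, *args, **kwargs):
        self.called_count += 1
        return self._method(*args, **kwargs)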
Test that an _db_updates infinite feed will continue to issue multiple requests until stopped. Since we do not have control over updates happening within the account as we do within a database, this test is stopped after 15 database creations regardless. Within that span of time we expect that the feed would have been restarted at least once.
def test_infinite_db_updates_feed(self):
    feed = InfiniteFeed(self.client, since='now', timeout=100)

    # Create a proxy for the feed._start method so that we can track how
    # many times it has been called.
    feed._start = MethodCallCount(feed._start)

    new_dbs = list()
    try:
        new_dbs.append(self.client.create_database(self.dbname()))
        for change in feed:
            self.assertTrue(all(x in change for x in ('seq', 'type')))
            new_dbs.append(self.client.create_database(self.dbname()))
            if feed._start.called_count >= 3 and len(new_dbs) >= 3:
                feed.stop()
            if len(new_dbs) >= 15:
                # We stop regardless after 15 databases have been created
                feed.stop()
    finally:
        [db.delete() for db in new_dbs]
    # The test is considered a success if feed._start was called 2+ times.
    # If failure occurs it does not necessarily mean that the InfiniteFeed
    # is not functioning as expected, it might also mean that we reached the
    # db limit threshold of 15 before a timeout and restart of the
    # InfiniteFeed could happen.
    self.assertTrue(feed._start.called_count > 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_constructor_db_updates(self):\n feed = InfiniteFeed(self.client, chunk_size=1, timeout=100, feed='continuous')\n self.assertEqual(feed._url, '/'.join([self.client.server_url, '_db_updates']))\n self.assertIsInstance(feed._r_session, Session)\n self.assertFalse(feed._raw_data)\n self.assertDictEqual(feed._options, {'feed': 'continuous', 'timeout': 100})\n self.assertEqual(feed._chunk_size, 1)", "def test_infinite_feed(self):\n self.populate_db_with_documents()\n feed = InfiniteFeed(self.db, timeout=100)\n\n # Create a proxy for the feed._start method so that we can track how\n # many times it has been called.\n feed._start = MethodCallCount(feed._start)\n\n changes = list()\n for change in feed:\n self.assertSetEqual(set(change.keys()), set(['seq', 'changes', 'id']))\n changes.append(change)\n if len(changes) in (100, 200):\n sleep(1) # 1 second > .1 second (timeout)\n self.populate_db_with_documents(off_set=len(changes))\n elif len(changes) == 300:\n feed.stop()\n expected = set(['julia{0:03d}'.format(i) for i in range(300)])\n self.assertSetEqual(set([x['id'] for x in changes]), expected)\n self.assertIsNone(feed.last_seq)\n # Compare infinite/continuous with normal\n normal = Feed(self.db)\n self.assertSetEqual(\n set([x['id'] for x in changes]), set([n['id'] for n in normal]))\n\n # Ensuring that the feed._start method was called 3 times, verifies that\n # the continuous feed was started/restarted 3 separate times.\n self.assertEqual(feed._start.called_count, 3)", "def test_update_loop(self):\n self.create_org(provider='qbo')\n old_task_count = 0\n\n while True:\n update_call = self.app.post('/adapter/qbo/test/update')\n self.assertEqual(update_call.status_code, 204)\n\n new_task_count = len(self.taskqueue.get_filtered_tasks())\n\n if new_task_count == old_task_count:\n break\n\n if new_task_count > 100:\n self.fail(\"too many adapter calls, infinite loop maybe???\")\n\n old_task_count = new_task_count\n\n self.assertEqual(new_task_count, 20)", "def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")", "def test_stop_poll_multiple_users_database_query_count(self):\n self.setup_entitled_users()\n expected = []\n for _ in range(10):\n user, _ = self.create_user()\n user.is_present = True\n user.vote_delegated_to = self.admin\n user.save()\n user.groups.add(self.group)\n expected.append(\n {\n \"user_id\": user.id,\n \"voted\": False,\n \"vote_delegated_to_id\": self.admin.id,\n }\n )\n self.admin.is_present = False\n self.admin.save()\n config[\"users_activate_vote_weight\"] = True\n self.assertEqual(count_queries(self.poll.stop)(), 13)\n 
self.assertEqual(MotionPoll.objects.get().entitled_users_at_stop, expected)", "def test_update_new_no_last_updated(reader):\n parser = Parser()\n reader._parser = parser\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n\n reader.add_feed(feed.url)\n # updated must be None if last_updated is None\n reader._storage.update_feed(\n FeedUpdateIntent(feed.url, None, feed=feed._replace(updated=None))\n )\n\n reader.update_feeds(new=True)\n\n parser.entry(1, 1, datetime(2010, 1, 1))\n reader.update_feeds(new=True)\n\n # the entry isn't added because feed is not new on the second update_feeds\n assert len(list(reader.get_entries(feed=feed.url))) == 0", "async def test_updates_no_user(database,valid_data):\n #reset the database and add values with ids [0,10]\n test_valid_insert(database,valid_data)\n\n for _id in range(100,150):\n try:\n await database.update(_id=_id,user_id=_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_incomplete_stats(self) -> None:\n\n u1 = self.register_user(\"u1\", \"pass\")\n u1token = self.login(\"u1\", \"pass\")\n u2 = self.register_user(\"u2\", \"pass\")\n u2token = self.login(\"u2\", \"pass\")\n u3 = self.register_user(\"u3\", \"pass\")\n r1 = self.helper.create_room_as(u1, tok=u1token, is_public=False)\n\n # preparation stage of the initial background update\n # Ugh, have to reset this flag\n self.store.db_pool.updates._all_done = False\n\n self.get_success(\n self.store.db_pool.simple_delete(\n \"room_stats_current\", {\"1\": 1}, \"test_delete_stats\"\n )\n )\n self.get_success(\n self.store.db_pool.simple_delete(\n \"user_stats_current\", {\"1\": 1}, \"test_delete_stats\"\n )\n )\n\n self.helper.invite(r1, u1, u2, tok=u1token)\n self.helper.join(r1, u2, tok=u2token)\n self.helper.invite(r1, u1, u3, tok=u1token)\n self.helper.send(r1, \"thou shalt yield\", tok=u1token)\n\n # now do the background updates\n\n self.store.db_pool.updates._all_done = False\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n {\n \"update_name\": \"populate_stats_process_rooms\",\n \"progress_json\": \"{}\",\n },\n )\n )\n self.get_success(\n self.store.db_pool.simple_insert(\n \"background_updates\",\n {\n \"update_name\": \"populate_stats_process_users\",\n \"progress_json\": \"{}\",\n \"depends_on\": \"populate_stats_process_rooms\",\n },\n )\n )\n\n self.wait_for_background_updates()\n\n r1stats_complete = self._get_current_stats(\"room\", r1)\n assert r1stats_complete is not None\n u1stats_complete = self._get_current_stats(\"user\", u1)\n assert u1stats_complete is not None\n u2stats_complete = self._get_current_stats(\"user\", u2)\n assert u2stats_complete is not None\n\n # now we make our assertions\n\n # check that _complete rows are complete and correct\n self.assertEqual(r1stats_complete[\"joined_members\"], 2)\n self.assertEqual(r1stats_complete[\"invited_members\"], 1)\n\n self.assertEqual(\n r1stats_complete[\"current_state_events\"],\n 2 + EXPT_NUM_STATE_EVTS_IN_FRESH_PRIVATE_ROOM,\n )\n\n self.assertEqual(u1stats_complete[\"joined_rooms\"], 1)\n self.assertEqual(u2stats_complete[\"joined_rooms\"], 1)", "def test_polling_loop(self, cursor):\n cursor._poll_interval = 0\n yield cursor.execute('SELECT COUNT(*) FROM many_rows')\n self.assertEqual((yield cursor.fetchone()), [10000])", "def test_grainbin_updates_latest_get_multiple(flaskclient, auth_headers, dbsession):\n\n grainbin = GrainbinFactory().save()\n\n # create two GrainbinUpdates for each iteration\n for x in range(5):\n grainbin_update = 
GrainbinUpdate(grainbin.id)\n grainbin_update.timestamp = dt.datetime.now()\n grainbin_update.update_index = x\n grainbin_update_2 = GrainbinUpdate(grainbin.id)\n grainbin_update_2.timestamp = dt.datetime.now()\n grainbin_update_2.update_index = x\n grainbin.total_updates = x\n dbsession.add(grainbin_update)\n dbsession.add(grainbin_update_2)\n\n dbsession.commit()\n\n url = url_for(\"grainbin.GrainbinUpdatesLatest\", grainbin_id=grainbin.id)\n rep = flaskclient.get(url, headers=auth_headers)\n fetched_update = rep.get_json()\n\n assert rep.status_code == 200\n assert len(fetched_update) == 2\n assert fetched_update[0][\"update_index\"] == 4", "def test_wait_for_db(self, ts):\n \"\"\" Here we are checking that the wait_for_db command will try the database 5 times\n and on the sixth time it'll be successful and continue \n \"\"\"\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n gi.side_effect=[OperationalError] * 5 + [True]\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 6)", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def test_wait_for_db(self, ts):\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n gi.side_efffect = [OperationalError] * 5 + [True]\n call_command('wait_for_db')\n self.assetEqual(gi.call_count, 6)", "def test_needs_update(self):\n parsers.reset()\n\n # Every 15 minutes\n self._needs_update(\n timedelta(minutes=15), timedelta(hours=1), timedelta(hours=2, minutes=59)\n )\n\n # Every hour\n self._needs_update(\n timedelta(hours=1), timedelta(hours=3), timedelta(hours=23, minutes=59)\n )\n\n # Every 3 hours\n self._needs_update(\n timedelta(hours=3), timedelta(days=1), timedelta(days=9, hours=23, minutes=59)\n )\n\n # Do not download\n current_time = timezone.now()\n initial_date = current_time - timedelta(days=10) - timedelta(days=365)\n\n article = ArticleFactory.create(\n source='mock.nl',\n last_check=current_time - timedelta(days=360),\n last_update=current_time - timedelta(days=360),\n )\n article.initial_date = initial_date\n article.save()\n self.assertEqual(article.needs_update, False)", "def checkAPICallsTvdb(use=False):\n currentime = time.time()\n lastrun = autosub.APICALLSLASTRESET_TVDB\n interval = autosub.APICALLSRESETINT_TVDB\n \n if currentime - lastrun > interval:\n log.info(\"API TVDB: 24h limit, resetting API calls.\")\n autosub.APICALLS_TVDB = autosub.APICALLSMAX_TVDB\n autosub.APICALLSLASTRESET_TVDB = time.time()\n \n if autosub.APICALLS_TVDB > 0:\n if use==True:\n autosub.APICALLS_TVDB-=1\n return True\n else:\n log.debug('checkAPICallsTvdb: Out of API calls for Tvdb')\n return False", "def test_rate_reached_perf_issue(self):\n for i in range(0, 10):\n event = self.store_transaction(\n environment=None,\n project_id=self.project.id,\n user_id=str(i),\n fingerprint=[f\"{GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES.value}-group1\"],\n )\n perf_group = event.groups[0]\n snooze = GroupSnooze.objects.create(group=perf_group, count=10, window=24 * 60)\n assert not snooze.is_valid(test_rates=True)", "def update_database(self) -> None:\n \n # Simulate that we update a database\n time.sleep(10)", "def test_cron_schedule(self, mock_update_batches, *args):\n self.env['ir.config_parameter'].sudo().set_param('runbot.runbot_update_frequency', 1)\n self.env['ir.config_parameter'].sudo().set_param('runbot.runbot_do_fetch', True)\n self.env['runbot.repo'].search([('id', '!=', self.repo_server.id)]).write({'mode': 'disabled'}) # disable all other existing repo than 
repo_server\n try:\n self.Runbot._cron()\n except SleepException:\n pass # sleep raises an exception to avoid to stay stuck in loop\n mock_update_batches.assert_called()", "def test_multiple_updates_return_old_value(self):\n old_test_plan = self.assessment.test_plan\n response = self.api.put(self.assessment, {\"test_plan\": \"steps\"})\n self.assert200(response)\n\n response = self.api.put(self.assessment, {\"test_plan\": old_test_plan})\n self.assert200(response)\n\n notifs, notif_data = common.get_daily_notifications()\n self.assertEqual(len(notifs), 1)\n self.assertEqual({}, notif_data)", "async def test_valid_all_updates(database, valid_data,rng):\n \n await test_valid_insert(database,valid_data)\n database = await Database.connect_pool()\n for embeddings in [rng.random(128),None]:\n for batch_id in [random.randint(0,100),None]:\n await database.update(0,0,embeddings,batch_id)\n await database.close_pool()", "async def test_setup_failed_update_reauth(\n hass: HomeAssistant, ufp: MockUFPFixture\n) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n assert ufp.entry.state == ConfigEntryState.LOADED\n\n # reauth should not be triggered until there are 10 auth failures in a row\n # to verify it is not transient\n ufp.api.update = AsyncMock(side_effect=NotAuthorized)\n for _ in range(10):\n await time_changed(hass, DEFAULT_SCAN_INTERVAL)\n assert len(hass.config_entries.flow._progress) == 0\n\n assert ufp.api.update.call_count == 10\n assert ufp.entry.state == ConfigEntryState.LOADED\n\n await time_changed(hass, DEFAULT_SCAN_INTERVAL)\n assert ufp.api.update.call_count == 11\n assert len(hass.config_entries.flow._progress) == 1", "async def test_update(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'sleep10'\n interval_schedule.process_name = \"sleep10\"\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule) # Save update on _scheduler\n\n await asyncio.sleep(1)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n # Update 'updated' schedule interval\n interval_schedule.name = 'updated'\n interval_schedule.process_name = \"sleep1\"\n interval_schedule.repeat = datetime.timedelta(seconds=5) # Set time interval to 5 sec\n\n await scheduler.save_schedule(interval_schedule) # Save update on _scheduler\n await asyncio.sleep(6)\n\n # Assert: only 1 task is running\n tasks = await scheduler.get_running_tasks() # list of current running tasks\n assert len(tasks) == 1\n\n interval_schedule.exclusive = False\n await scheduler.save_schedule(interval_schedule)\n\n # Check able to get same schedule after restart\n # Check fields have been modified\n await self.stop_scheduler(scheduler)\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n schedule = await scheduler.get_schedule(interval_schedule.schedule_id)\n\n # Make sure that the values used by schedule are as expected\n assert schedule.process_name == 'sleep1'\n assert schedule.name == 'updated'\n assert schedule.repeat.seconds == 5\n assert not schedule.exclusive\n\n await self.stop_scheduler(scheduler)", "def _watchdog(self):\n while True:\n try:\n # Arno, 2012-07-12: apswtrace detects 7 s commits with yield 5 min, so reduce\n yield 60.0\n\n # flush changes to disk every 1 minutes\n 
self._database.commit()\n\n except Exception:\n # OperationalError: database is locked\n dprint(exception=True, level=\"error\")\n\n except GeneratorExit:\n if __debug__: dprint(\"shutdown\")\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n # unload all communities\n try:\n while True:\n next(self._communities.itervalues()).unload_community()\n except StopIteration:\n pass\n # commit database\n self._database.commit(exiting = True)\n break", "def updateOneFeed(self):\n feeds = backend.Feed.query.order_by(\"check_date\").limit(1).all()\n if feeds:\n feed = feeds[0]\n print feed.check_date\n # Only check if it has not been checked in at least 10 minutes\n if (datetime.datetime.now() - feed.check_date).seconds > 600:\n print \"Scheduled update of: \",feed.xmlurl\n fetcher_in.put(['update', feed.xmlurl, feed.etag, feed.check_date])", "def test_update_instance_limit(self):\n pass", "def test_update_last_updated_entries_updated_feed_not_updated(\n reader, call_update_method\n):\n parser = Parser()\n reader._parser = parser\n\n feed = parser.feed(1, datetime(2010, 1, 1))\n reader.add_feed(feed.url)\n reader._now = lambda: naive_datetime(2010, 1, 1)\n call_update_method(reader, feed.url)\n\n (feed_for_update,) = reader._storage.get_feeds_for_update(url=feed.url)\n assert feed_for_update.last_updated == naive_datetime(2010, 1, 1)\n\n parser.entry(1, 1, datetime(2010, 1, 1))\n reader._now = lambda: naive_datetime(2010, 1, 2)\n call_update_method(reader, feed.url)\n\n (feed_for_update,) = reader._storage.get_feeds_for_update(url=feed.url)\n assert feed_for_update.last_updated == naive_datetime(2010, 1, 2)", "def test_login_10(self):\n player = self._get_player()\n for i in range(14):\n timestamp = datetime.now() + timedelta(days=-i)\n Activity.objects.create(timestamp=timestamp, user_from=player, action='seen', public=False)\n\n self.assertEqual(consecutive_days_seen(player, datetime.now()), 14)", "def test_grainbin_updates_get(flaskclient, auth_headers, dbsession):\n\n grainbin = GrainbinFactory().save()\n\n # create some GrainbinUpdates\n for x in range(25):\n grainbin_update = GrainbinUpdate(grainbin.id)\n grainbin_update.timestamp = dt.datetime.now()\n grainbin_update.update_index = x\n dbsession.add(grainbin_update)\n\n dbsession.commit()\n\n url = url_for(\"grainbin.GrainbinUpdates\", grainbin_id=grainbin.id)\n rep = flaskclient.get(url, headers=auth_headers)\n fetched_updates = rep.get_json()\n\n assert rep.status_code == 200\n assert len(fetched_updates) == 10", "def _test_expired_liveness_with_limit(self, rf, nodes):\n session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)\n node1 = self.cluster.nodelist()[0]\n\n session.execute('USE ks')\n session.execute(\"CREATE TABLE t (k int PRIMARY KEY, a int, b int)\")\n session.execute((\"CREATE MATERIALIZED VIEW mv AS SELECT * FROM t \"\n \"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)\"))\n session.cluster.control_connection.wait_for_schema_agreement()\n\n for k in range(100):\n session.execute(\"INSERT INTO t (k, a, b) VALUES ({}, {}, {})\".format(k, k, k))\n\n # generate view row with expired liveness except for row 50 and 99\n for k in range(100):\n if k == 50 or k == 99:\n continue\n session.execute(\"DELETE a FROM t where k = {};\".format(k))\n\n # there should be 2 live data\n assert_one(session, \"SELECT k,a,b FROM mv limit 1\", [50, 50, 50])\n 
assert_all(session, \"SELECT k,a,b FROM mv limit 2\", [[50, 50, 50], [99, 99, 99]])\n assert_all(session, \"SELECT k,a,b FROM mv\", [[50, 50, 50], [99, 99, 99]])\n\n # verify IN\n keys = range(100)\n assert_one(session, \"SELECT k,a,b FROM mv WHERE k in ({}) limit 1\".format(', '.join(str(x) for x in keys)),\n [50, 50, 50])\n assert_all(session, \"SELECT k,a,b FROM mv WHERE k in ({}) limit 2\".format(', '.join(str(x) for x in keys)),\n [[50, 50, 50], [99, 99, 99]])\n assert_all(session, \"SELECT k,a,b FROM mv WHERE k in ({})\".format(', '.join(str(x) for x in keys)),\n [[50, 50, 50], [99, 99, 99]])\n\n # verify fetch size\n session.default_fetch_size = 1\n assert_one(session, \"SELECT k,a,b FROM mv limit 1\", [50, 50, 50])\n assert_all(session, \"SELECT k,a,b FROM mv limit 2\", [[50, 50, 50], [99, 99, 99]])\n assert_all(session, \"SELECT k,a,b FROM mv\", [[50, 50, 50], [99, 99, 99]])", "def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")" ]
[ "0.7011999", "0.68627167", "0.6791708", "0.6187079", "0.60834736", "0.6051524", "0.597345", "0.5913373", "0.5900723", "0.5835557", "0.57842183", "0.57730925", "0.574309", "0.56848246", "0.5683321", "0.5671484", "0.56435144", "0.5643226", "0.56350946", "0.56171465", "0.56062394", "0.5602492", "0.5592989", "0.55919963", "0.55841", "0.55800414", "0.55776525", "0.5561796", "0.5561668", "0.5557797" ]
0.8167076
0
verify equal between passcode and verify passcode lines
def check_verify(self): if self.ui.to_register.isChecked(): if self.ui.pass_line.text() == self.ui.verify_line.text(): return else: self.set_pwd = False return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_match(password, verify):\n return password == verify", "def verify():", "def verify(self, code) -> bool:\n totp = self.__initialize_totp()\n return totp.verify(code)", "def verify(verification_code):\n verification.verify(verification_code)", "def is_valid_part2(line):\n pos1, pos2, required_char, password = parse_line(line)\n pos1 = pos1 - 1\n pos2 = pos2 - 1\n if required_char in (password[pos1], password[pos2]):\n if not(password[pos1] == password[pos2]):\n return True\n return False", "def password_is_valid_task_2(row):\n # XOR the two positions in the password\n return (row['letter'] == row['password'][row['policy'][0] - 1]) != \\\n (row['letter'] == row['password'][row['policy'][1] - 1])", "def match(self,pwdmarked,password):\n pwd1 = self.cleanPassword(pwdmarked)\n pwd2 = self.cleanPassword(password)\n if not (pwdmarked or '').startswith('plain:{'):\n pwd2 = crypt(password,self.settings.authenSalt,10000)\n return pwd1==pwd2", "def matches_password_verify(password, verify):\n if password and not password == verify:\n return \"Your passwords didn't match.\"\n else:\n return \"\"", "def validate(self, data):\n password = data['password']\n if data['password'] == data['password2'] and re.fullmatch(r'[A-Za-z0-9@#$%^&+=]{8,}', password):\n return data\n raise serializers.ValidationError(\"Password should be match and password must have number,special char,1-capital,1-small and min 8 char\")", "def pass_check(user_found):\n password = ''\n while password != user_found[1]:\n password = stdiomask.getpass(prompt=\"Please enter your password: \", mask='*')\n pass1 = encrypter.encrypt_password(password)\n if user_found[1] == pass1:\n return \"\\nPassword match\\n\"\n else:\n print(\"\\nPassword do not match\\n\")", "def details_not_matching():\n print(\"login details don't match.\")", "def test_user1_method4():\n assert u.verify_password(USER_CREDENTIALS[\"password\"]), \"Password cannot verify properly\"", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def test_validate_login_info(self):\n assert(PatientService().validate_login_info(self.valid_health_card_nb, self.password) > 0)\n assert(-1 == PatientService().validate_login_info(self.valid_health_card_nb, self.password + \"INVALID\"))", "def test_if_pwd_equals_confirmed(self):\n msg = self.user.registration(\"Githeri\", \"[email protected]\",\n \"iwantgitheri\",\n \"iwantsgitheri\")\n self.assertEqual(msg, \"Your passwords should match\")", "def verify_password(self, password):\n return self.PASS == password", "def check_pass(self):\n if self.validated_data['new_password'] != self.validated_data['confirm_password']:\n raise serializers.ValidationError({\"error\":\"Please enter matching passwords\"})\n return True", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def verify(self, code: str):\n payload = {\"type\": \"verify\", \"username\": self.username, \"code\": code}\n self._send_command(payload)", "def SecondPart():\n return passwordChecker(data)", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == 
True", "def PasswordMatch(self, *args):\n pass1 = self.password.get().lstrip().rstrip()\n pass2 = self.confirm_pass.get().lstrip().rstrip()\n \n if (pass1 and pass1 == pass2):\n self.pass_match_label['text'] = 'Passwords match'\n self.pass_match_label['fg'] = 'green'\n return True\n else:\n self.pass_match_label['text'] = 'Password don\\'t match'\n self.pass_match_label['fg'] = 'red'\n return False", "def create_pwd_login_with_all_validation():\r\n msg, status = \"\", False\r\n try:\r\n if g.platform =='android':\r\n \"Empty values for all fields\"\r\n status1 = create_pwd_login_internal('', '')\r\n print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_blank\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print \"one\"\r\n \"short password value\"\r\n status2 = create_pwd_login_internal('1', '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_short\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print \"two\"\r\n \"not matching passwords\"\r\n status3 = create_pwd_login_internal(g.password, '')\r\n \"validate\"\r\n expected_dialogue_title = g.popup_title_error\r\n expected_dialogue_message = g.popup_message_password_do_not_match\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print \"three\"\r\n #\"no hint\"\r\n #status4 = create_pwd_login_internal(g.password, g. 
password, '')\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint\r\n # verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n #\"hint same as password\"\r\n #status5 = create_pwd_login_internal(g.password, g.password)\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint_same_as_passsword\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n \"Values from global_vars g\"\r\n status6 = create_pwd_login_internal(g.password, g.password)\r\n\r\n status = status1 and status2 and status3 and status6\r\n \r\n else: \r\n \"Empty values for all fields\"\r\n status1 = create_pwd_login_internal('', '')\r\n \r\n print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'\r\n \"validate\"\r\n expected_dialogue_title = g.ios_popup_title_error\r\n expected_dialogue_message = g.ios_popup_message_password_blank\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print 'one is completed' \r\n \"short password value\"\r\n status2 = create_pwd_login_internal('1', '')\r\n \"validate\"\r\n expected_dialogue_title = g.ios_popup_title_error\r\n expected_dialogue_message = g.ios_popup_message_password_short\r\n verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n print'two is completed'\r\n \"not matching passwords\"\r\n #status3 = create_pwd_login_internal(g.password, '')\r\n \"validate\"\r\n # expected_dialogue_title = g.ios_popup_title_error\r\n #expected_dialogue_message = g.ios_popup_message_password_do_not_match\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n #print 'three is completed'\r\n\r\n #\"no hint\"\r\n #status4 = create_pwd_login_internal(g.password, g. 
password, '')\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint\r\n # verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n #\"hint same as password\"\r\n #status5 = create_pwd_login_internal(g.password, g.password)\r\n #\"validate\"\r\n #expected_dialogue_title = g.popup_title_error\r\n #expected_dialogue_message = g.popup_message_password_hint_same_as_passsword\r\n #verify_dialogue(title=expected_dialogue_title, message=expected_dialogue_message, name_of_control_to_click='popup_default_button')\r\n\r\n \"Values from global_vars g\"\r\n status6 = create_pwd_login_internal(g.password, g.password)\r\n #status = status1 and status2 \r\n\r\n status = status1 and status2 and status6\r\n\r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n status = False\r\n return status, msg", "def verify(self, phone, code, case):\n key = self.get_key(phone, case)\n tried_count = self.get_tried_count(key)\n if tried_count > self.tried_count:\n return False, 'tried too many times'\n else:\n if tried_count == 0:\n self.set_tried_count(key, 1)\n else:\n self.incr_count(key)\n saved_code = self.get_code(phone, case)\n verified = saved_code == code\n if verified:\n self.rm_code(phone, case)\n return verified, None\n else:\n return verified, '%s code verify failed' % case", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def check_correct_password(status, pwd):\n # generate key from raw password\n key = generate_key_from_password(\n pwd, salt=status.get(\"salt\")\n )\n f = Fernet(key)\n try:\n decrypt_output = f.decrypt(status[\"encrypted_check_phrase\"])\n except InvalidToken:\n return False\n return decrypt_output.decode(\"utf-8\") != settings.CHECK_PHRASE", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def validate_pin():\r\n print(\"Please enter your 4 digits pin\")\r\n pin_list = []\r\n pin_list2 = []\r\n for i in range(4):\r\n pin = int(input())\r\n pin_list.append(pin)\r\n print(\"Please re-enter your pin to confirm\")\r\n for i in range(4):\r\n pin = int(input())\r\n pin_list2.append(pin)\r\n set1 = set(pin_list)\r\n set2 = set(pin_list2)\r\n if set1 == set2:\r\n print(\"Validating pin completed\")\r\n return 1\r\n print(\"Validating pin failed\")\r\n return 0", "def test_user1_method3():\n REGEX_MATCH_BCRYPT_HASH = r\"^\\$2[ayb]\\$.{56}$\"\n hashed_password = u.password.decode()\n assert re.match(REGEX_MATCH_BCRYPT_HASH, hashed_password), 
\"Password was not hashed correctly\"" ]
[ "0.65438277", "0.64028597", "0.6356857", "0.6348567", "0.6281897", "0.6279664", "0.6217392", "0.61520046", "0.6127023", "0.61227757", "0.6044916", "0.6035572", "0.60347754", "0.603319", "0.60201436", "0.59780884", "0.5974098", "0.59488344", "0.5927072", "0.5911942", "0.5908588", "0.5900083", "0.5862237", "0.5852669", "0.5849262", "0.58392996", "0.5810095", "0.5802135", "0.57852066", "0.5781199" ]
0.66863775
0
Gets or sets the count of assessments reported by the solution.
def assessment_count(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "assessment_count")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def get_scenario_count(self):\n return self.count", "def assessment_points(self) -> int:\n return self._assessment_points", "def validation_set_count(self) -> int:\n return pulumi.get(self, \"validation_set_count\")", "def count_indications(self) -> int:\n return self._count_model(Indication)", "def GetNumberOfAnalysisReports(self):\n return self._analysis_report_stream_number - 1", "def count_explorations():\n return exp_models.ExplorationModel.get_exploration_count()", "def semestral_count_submission(self):\n serie_count = self.count(self.__data[\"normal_semestral_groupby\"])\n self.analysis[\"semestral_count_application\"] = serie_count.to_dict()", "def getEmpiricalCounts(self):\n return self.empirical_counts", "def alerts_count(self) -> int:\n return pulumi.get(self, \"alerts_count\")", "def count(self) -> int:\n return pulumi.get(self, \"count\")", "def get_assessments(self):\n if not self.is_assessment_based_activity():\n raise IllegalState()\n else:\n raise Unimplemented()", "def count_statements(self):\n query = read_query('content exploration/count_statements')\n response = self._submit_query(query)\n return response[0]['count']['value']", "def response_count(self) -> int:\n return pulumi.get(self, \"response_count\")", "def count(self):\n return self.properties.get('count')", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[int]:\n return pulumi.get(self, \"count\")", "def count(self):\n return self.get_count()", "def get_counts(self):\n self._update_counts()\n return self.failures, self.warnings, self.infos", "def _get_hit_count(self, database, enquire):\n return self._get_enquire_mset(\n database, enquire, 0, database.get_doccount()\n ).size()", "def get_count(self):\r\n return self.count", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def count(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"count\")", "def get_n_sets(self):\n if not self._refreshed:\n self.refresh()\n return self._nSets" ]
[ "0.655095", "0.6256413", "0.6221543", "0.60291547", "0.59234524", "0.5899697", "0.5858163", "0.5763119", "0.5760012", "0.57559884", "0.5745691", "0.5726684", "0.5713718", "0.56976104", "0.56974524", "0.56958336", "0.56958336", "0.5683292", "0.5681071", "0.5662276", "0.5566641", "0.5558872", "0.5558872", "0.5558872", "0.5558872", "0.5558872", "0.5558872", "0.5558872", "0.5558872", "0.5539303" ]
0.75875336
0
Gets or sets the extended details reported by the solution.
def extended_details(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: return pulumi.get(self, "extended_details")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extended_info(self):\n return self.client.call('GET', self.name + 'extended-info')", "def extend_info(self):\n return self._extend_info", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self) -> ExtensionDetails:\n return ExtensionDetails(\n extension_id=self.get_identifier(),\n extension_name=\"Debug Extension\",\n extension_description=\"Allows testing through debug.\",\n extension_enabled_by_default=False,\n extension_version=ExtensionManagerConstants.EXTENSION_VERSION_NOT_IMPLEMENTED,\n extension_interface_version=ExtensionManagerConstants.EXTENSION_INTERFACE_VERSION_BASIC,\n extension_url=None,\n extension_configuration=None,\n )", "def additional_info(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"additional_info\")", "def details(self):\n return self._details", "def additional_infos(self) -> Sequence['outputs.GetComputeMachineErrorDetailAdditionalInfoResult']:\n return pulumi.get(self, \"additional_infos\")", "def details(self) -> Optional[pulumi.Input['SolutionDetailsArgs']]:\n return pulumi.get(self, \"details\")", "def get_details(self):\n return self.__config_data", "def get(self):\n return self.__expedition", "def additional_data(self):\n return self._additional_data", "def extra(self):\n return self._extra", "def details(self):\n pass", "def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")", "def extended_location(self) -> pulumi.Output['outputs.ExtendedLocationResponse']:\n return pulumi.get(self, \"extended_location\")", "def detail(self):\n info = self.info()\n return info", "def details(self) -> \"dict\":\n return self._attrs.get(\"details\")", "def details(self):\n raise NotImplementedError()", "def get_details(self):\n raise Exception(\"bad details\")", "def extended_location(self) -> pulumi.Output[Optional['outputs.ExtendedLocationResponse']]:\n return pulumi.get(self, \"extended_location\")", "def additional_data(self):\n # type: () -> string_types\n return self._additional_data", "def ui_additional_info(self) -> Optional[str]:\n return None", "def detail(self):\n return self.status[\"health\"][\"detail\"]", "def getEnergyAdded(self):\n return self.json_state.get(\"charging\").get(\"wh_energy\")", "def getDetail(self):\n\t\t\n\t\treturn (super().setParameters(0,self.getDefense(),0))\n\t\t\n\t\t#return \"\\n#########################################################\\n\"+\"\\nItem of Defense, Name of item:\"+self.getName()+\"\\nCapacity of defense:\"+str(self.getDefense())+\"\\nCapacity of attack:0 \\n Capacity of heal:0 \\n\"+\"#########################################################\\n\"", "def get_info(self) -> str:\n return self.info", "def get_info(self) -> str:\n raise NotImplementedError()", "def info(self) -> DesignSpecs:\n return self._info" ]
[ "0.7648492", "0.7092702", "0.6316318", "0.6316318", "0.6316318", "0.626519", "0.6184532", "0.6146312", "0.60449284", "0.601758", "0.59144187", "0.5871447", "0.5868428", "0.57756555", "0.57584494", "0.57446784", "0.57446784", "0.5723758", "0.5712438", "0.5710641", "0.57040286", "0.56911653", "0.56878966", "0.5667722", "0.5633187", "0.56113154", "0.56106216", "0.5601671", "0.5568599", "0.55608195" ]
0.71451074
1
Gets or sets the count of groups reported by the solution.
def group_count(self) -> Optional[pulumi.Input[int]]: return pulumi.get(self, "group_count")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ngroups(self):\n return self._ngroups", "def getNumGroups(self):\n return len(np.unique(self._group_index))", "def getNumGroups(self):\n return _libsbml.ListOfGroups_getNumGroups(self)", "def getNumGroups(self):\n return _libsbml.GroupsModelPlugin_getNumGroups(self)", "def num_node_groups(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"num_node_groups\")", "def getNumEnergyGroups(self):\n return self.lib.numGroups", "def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")", "def num_node_groups(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"num_node_groups\")", "def num_output_group(self):\n if self.handle is None:\n raise AttributeError('Model not loaded yet')\n out = ctypes.c_size_t()\n _check_call(_LIB.TreeliteQueryNumOutputGroups(self.handle, ctypes.byref(out)))\n return out.value", "def getGroupSize(Id):\r\n return \"Number of groups\"", "def groups(self):\n return self.get_data(\"groups\")", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groups(self):\n return self._groups", "def groupnumber(self):\n return self._groupnumber", "def __len__(self):\n return len(self.group_list)", "def get_n_sets(self):\n if not self._refreshed:\n self.refresh()\n return self._nSets", "def count_group(group_id, failures=False, cached=Conf.CACHED):\n if cached:\n return count_group_cached(group_id, failures)\n return None#Task.get_group_count(group_id, failures)", "def groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"groups\")", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersion.CM1.value))", "def get_owner_group_count(base_reg) -> int:\n count: int = len(base_reg.owner_groups)\n for reg in base_reg.change_registrations:\n if reg.owner_groups:\n count += len(reg.owner_groups)\n return count", "def test_set_count(self) -> int:\n return pulumi.get(self, \"test_set_count\")", "def groups(self):\n # type: (...) -> Set[str]\n return self._groups", "def stats(self) -> Sequence['outputs.GetSystemGroupsGroupStatResult']:\n return pulumi.get(self, \"stats\")", "def coarse_groups(self):\n return self._num_coarse_groups( )", "def getGroups():\r\n return Group.getGroups()", "def validation_set_count(self) -> int:\n return pulumi.get(self, \"validation_set_count\")", "def organismsCount(self) -> int:\n return self.group.organismsCount", "def group_size(self):\n return self._gsize", "def getNumMembers(self):\n return _libsbml.Group_getNumMembers(self)" ]
[ "0.7400885", "0.7161271", "0.7075182", "0.6943827", "0.69214857", "0.68017477", "0.64547825", "0.64547825", "0.6173699", "0.61217064", "0.60980016", "0.5956926", "0.5956926", "0.5956926", "0.59355694", "0.5882286", "0.58765113", "0.5858414", "0.5815837", "0.58114374", "0.57926357", "0.5643587", "0.56051797", "0.558352", "0.5570274", "0.555674", "0.5552305", "0.55457246", "0.5533106", "0.55134314" ]
0.721151
1
Gets or sets the cleanup state of the solution.
def cleanup_state(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "cleanup_state")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanup(self):\n if self._status == 0:\n self.flag = 0\n else:\n self.flag = 2\n\n self.final_params = self._popt", "def cleanup_on_fail(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"cleanup_on_fail\")", "def on_cleanup(self, state) -> TransitionCallbackReturn:\n return TransitionCallbackReturn.SUCCESS", "def cleanup(self):\n if self._status == 0:\n self.flag = 0\n elif self._status == 1:\n self.flag = 1\n else:\n self.flag = 2\n\n self.final_params = self._popt", "def cleanup(self):\n raise NotImplementedError", "def cleanup(self) -> None:\n raise NotImplementedError()", "def _cleanup(self):\n pass", "def cleanup(self):\n return True;", "def final_cleanup(self):\n raise NotImplementedError()", "def cleanup_on_fail(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"cleanup_on_fail\")", "def env_cleanup(self):\n pass", "def cleanup(self):\n raise NotImplementedError()", "def cleanup(self):\n raise NotImplementedError()", "def scm_clean(self):\n return self._data.get('scm_clean')", "def cleanup():", "def cleanup(self):\n raise Exception(\"{0} type does not have cleanup implemented\".format(type(self)))", "def cleanup(self):\r\n print(\"Cleanup not implemented\")", "def post_cleanup(self):\n pass", "def _final_cleanup(self):\n # Clean up and remove the temporary gisdbase\n self._cleanup()\n # Remove resource directories\n if \"error\" in self.run_state or \"terminated\" in self.run_state:\n self.storage_interface.remove_resources()", "def cleanup(self):\r\n pass", "def cleanup (self):\n pass", "def cleanupStorage(self, oStorCfg):\n return oStorCfg.cleanup();", "def reset(self):\n return self.env.reset()", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass" ]
[ "0.5761538", "0.5755214", "0.5706903", "0.5703411", "0.55710393", "0.556933", "0.5542009", "0.5537145", "0.55345905", "0.5530671", "0.55218774", "0.55047137", "0.55047137", "0.5491092", "0.548035", "0.547034", "0.54567224", "0.54559046", "0.54367256", "0.5433634", "0.54175705", "0.54159915", "0.5388515", "0.53813267", "0.53813267", "0.53813267", "0.53813267", "0.53813267", "0.53813267", "0.53813267" ]
0.64944875
0
Gets or sets the details of the solution.
def details(self) -> Optional[pulumi.Input['SolutionDetailsArgs']]: return pulumi.get(self, "details")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_details(self):\n return self.details", "def get_solution(self):\r\n return self.solution", "def get_solution(self):\n solution = self.raw_solution\n if solution is not None:\n return {\n \"solution\": self.raw_solution\n }", "def details(self):\n return self._details", "def details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:\n return pulumi.get(self, \"details\")", "def get_details(self):", "def detail(self):\n info = self.info()\n return info", "def get_details(self):\n return self.__config_data", "def details(self):\n pass", "def get_details(self):\n raise Exception(\"bad details\")", "def details(self):\n raise NotImplementedError()", "def get_details(self, psvm):\n return self.get(psvm)", "def printSolution(self):\n print \"----- Solution -----\"\n for feature in self.features:\n print \"Name = \" + feature.name + \" Value = \" + str(feature.value)", "def refresh(self):\n self.details = self.workspace.get_job(self.id).details", "def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': owner.postal_code,\n }", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'gross_floor_area': self.fake.random_number(digits=6),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': owner.postal_code,\n }", "def readSolution(solution):\n g = solution\n __data.g = g\n __data.nsp = g.n_species", "def 
get_project_info(self):\n return self.project_info", "def stats(self):\n return self._solution", "def stats(self):\n return self._solution", "def stats(self):\n return self._solution", "def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))", "def detail(self):\n url = '/question/%d' % self.id\n d = req.get(url)\n return parser.detail(d)", "async def setprobdetails(self, ctx, problem_name, *, arg):\n if not await problem_exists(ctx, problem_name):\n return\n problems[problem_name].details = arg\n await ctx.send('Problem details set.')\n await write_problems()", "def workspaceInfo(self):\n pass", "def get_solution(self):\n return self._generate_solution()", "def get_details():\n if not hasattr(env, \"site_name\"):\n env.site_name = prompt(\"Enter site domain name:\")\n env.site_is_secure = confirm(\"Do you need SSL? (Yes/No)\", default=False)\n env.app_server = prompt(\"Enter app server you wish to use (apache/uwsgi/gunicorn):\")\n if env.site_is_secure:\n env.ip_address = prompt(\"Enter server IP address:\")\n else:\n env.ip_address = \"0.0.0.0\"\n\n # Find out project name\n project_name = env.site_name.split('.')\n try:\n if project_name[1] == 'com':\n # Sample case - abc.com\n env.project_name = project_name[0]\n else:\n # Sample case - shop.abc.com\n env.project_name = project_name[1]\n except IndexError:\n env.project_name = env.site_name" ]
[ "0.65263337", "0.65263337", "0.65263337", "0.62827975", "0.62794477", "0.6158341", "0.5972385", "0.58721447", "0.58504283", "0.5804181", "0.5779834", "0.5756773", "0.56696814", "0.56623816", "0.5615718", "0.5593665", "0.557681", "0.55743575", "0.5570838", "0.55553085", "0.55523545", "0.55295", "0.55295", "0.55295", "0.55258113", "0.5511424", "0.55106336", "0.5496042", "0.5483058", "0.54802674" ]
0.7780491
0
Gets or sets the goal of the solution.
def goal(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "goal")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def goal(self):\n return self._build_goal", "def get_goal(self):\n return self.get_observation(self.env._get_goal())", "def get_goal(self) -> GoalType:\n return self.goal", "def goal(self, goal):\n\n self._goal = goal", "def get_goal(self):\n self._pid_lock.acquire() # Acquire Lock\n rtn = self._goal\n self._pid_lock.release() # Release Lock\n\n return rtn", "def set_goal(self, **kwargs):\n return self.env.set_goal(**kwargs)", "def set_goal(self, goal):\r\n self.goal = goal\r\n self.start_time = self.get_current_time()", "def build_goal(self):\n return self._build_goal", "def set_goal(self, goal: GoalType) -> None:\n self.goal = goal", "def update_goal(self):\n pass", "def getGoalNode(self):\r\n\t\treturn self.goalNode", "def goal_pos(self) -> Pt:\n return self._goal", "def goal(self) -> Goal:\n return MaxReward()", "def getPath(self):\r\n\t\treturn self.pathToGoal", "def get_solution(self):\r\n return self.solution", "def get_goal(self):\n l11 = np.random.uniform(self.l1min, self.l1max)\n l12 = np.random.uniform(self.l1min, self.l1max)\n l13 = np.random.uniform(self.l1min, self.l1max)\n l21 = np.random.uniform(self.l2min, self.l2max)\n l22 = np.random.uniform(self.l2min, self.l2max)\n l23 = np.random.uniform(self.l2min, self.l2max)\n dl21 = l21-l11; dl22 = l22-l12; dl23 = l23-l13\n kappa1, phi1, seg_len1 = self.configuration_space(l11, l12, l13, self.d, self.n)\n kappa2, phi2, seg_len2 = self.configuration_space(dl21, dl22, dl23, self.d, self.n)\n T01 = self.transformation_matrix_bishop(kappa1, phi1, seg_len1)\n T12 = self.transformation_matrix_bishop(kappa2, phi2, seg_len2)\n T02 = np.matmul(T01, T12)\n return np.matmul(T02, self.base)[0:3]", "def _next_goal(self):\n current_state = self.goal_generation.current_state()\n\n return self.goal_generation.next_goal(self._random_state, current_state)", "def set_goal(self, goal):\n self._pid_lock.acquire() # Acquire Lock\n self._goal = goal\n self._pid_lock.release() # Release Lock", "def set_goal(goal_loc):\n BoardPath._goal_loc = goal_loc", "def objective(self) -> Optional[Union[int, float]]:\n if self.solution is not None:\n if isinstance(self.solution, list):\n return getattr(self.solution[-1], \"objective\", None)\n else:\n return getattr(self.solution, \"objective\", None)\n else:\n return None", "def judge_goal(self):\n err_pos = math.sqrt((self.y_des - self.y)**2 +(self.x_des - self.x)**2)\n print(\"t= %s\" % rospy.get_time()+\"-----------\")\n print('destination position=['+str(self.x_des)+','+str(self.y_des)+\"]\")\n print('the current position=['+str(self.x)+','+str(self.y)+\"]\")\n print('the current yaw angle=['+str(self.yaw))\n print('distance to destination='+str(err_pos))\n\n if(err_pos < 0.8):\n print('reach goal!!!!!')\n self.goal_flag=1", "def find_goal(self):\n w, l, h = self.get_pos()\n gw, gl, gh = self.goal\n try:\n angle_deg = angle((w, l), (gw, gl))\n except ZeroDivisionError:\n if w > gw and l > gl:\n return 2\n elif w < gw and l < gl:\n return 5\n if -105 <= angle_deg <= -75:\n return 0\n elif -75 < angle_deg < 15:\n return 1\n elif -15 <= angle_deg <= 15:\n return 2\n elif 15 < angle_deg < 75:\n return 3\n elif 75 <= angle_deg <= 105:\n return 4\n else:\n return 5", "def get_heuristic(self, start, goal):\n assert start in self.nodes, \"No node \"+str(start)+\" in graph \"+str(self)\n assert goal in self.nodes, \"No node \"+str(goal)+\" in graph \"+str(self)\n if goal in self.heuristic:\n if start in self.heuristic[goal]:\n return self.heuristic[goal][start]\n else:\n return 0 # we have checked that everything is 
positive\n else: \n return 0 # we have checked that everything is positive", "def goalTest(node, goal):\r\n if node.state == goal:\r\n return node", "def is_goal(self):\n if self.team1.get_cur_hp() == 0:\n return 1\n elif self.team2.get_cur_hp() == 0:\n return -1\n else:\n return 0", "def set_goal_done(self):\n self.has_goal = False\n self.last_goal_wait = False", "def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)", "def set_goal_pos(self):\n goal_list = np.where(self.value_map == self.value_map.max())\n # assume the first one\n self.goal_pos = (goal_list[0][0], goal_list[1][0])", "def heuristic(current, goal):\r\n distance = getDistance(current, goal)\r\n return distance", "def goal_velocity(self):\n return self._read(MX_GOAL_VELOCITY)" ]
[ "0.7292076", "0.7102423", "0.6604421", "0.65645677", "0.6517414", "0.64766264", "0.6386368", "0.6379113", "0.63391376", "0.6286651", "0.6228413", "0.6221903", "0.6003145", "0.5985015", "0.5963023", "0.59561074", "0.59523135", "0.59348536", "0.5926568", "0.58431983", "0.58356905", "0.5811185", "0.5795577", "0.5725961", "0.57014555", "0.5678781", "0.56723094", "0.56711674", "0.5666814", "0.5628138" ]
0.7343218
0
Gets or sets the purpose of the solution.
def purpose(self) -> Optional[pulumi.Input[str]]: return pulumi.get(self, "purpose")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def purpose(self) -> str:\n return pulumi.get(self, \"purpose\")", "def purpose(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"purpose\")", "def purpose(self):\n return self._purpose", "def purpose(self, purpose):\n\n self._purpose = purpose", "def purpose(self, purpose):\n\n self._purpose = purpose", "def purpose(self) -> Optional[pulumi.Input['CryptoKeyPurpose']]:\n return pulumi.get(self, \"purpose\")", "def introduction(purpose = None):\n # If a purpose has been specified, use that as the start of the file\n if isinstance(purpose, str):\n text = \"* \" + purpose.strip()\n # Otherwise use a generic phrase\n else:\n text = \"* IPRO Suite CHARMM script\"\n # Set options: warning and bomb levels to -2, and print level left at 0 but\n # optioned out (its always been that way, so we leave it for nostaligic\n # reasons)\n text += \"\\n*\\n\\nwrnl 0\\n!prnl -2\\nbomb -2\\n\\n\"\n return text", "def problem(self):\n return self['problem']", "def implementation_effort(self) -> Optional[str]:\n return pulumi.get(self, \"implementation_effort\")", "def implementation_effort(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"implementation_effort\")", "def implementation_effort(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"implementation_effort\")", "def implementation_effort(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"implementation_effort\")", "def problem_type(self):\n return self._problem_type", "def reason(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"reason\")", "def reason(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"reason\")", "def get_difficulte(self):\n return self.difficulte", "def marketing_name(self):\n return \"Custom solution - 2\"", "def name(self):\n return 'SE Rimozione Inquinanti'", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")", "def effect(self) -> Optional[str]:\n return pulumi.get(self, \"effect\")" ]
[ "0.7381015", "0.73635876", "0.73545533", "0.68121064", "0.68121064", "0.6228698", "0.5389133", "0.5388711", "0.51902056", "0.51759166", "0.5102519", "0.5102519", "0.4944245", "0.49320877", "0.49257827", "0.48774207", "0.4871906", "0.4851118", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178", "0.48033178" ]
0.7400996
0
Finds the package a resource lives in
def getPackageFromResource(resource): import sd url = resource.getUrl() pkg_manager = sd.getContext().getSDApplication().getPackageMgr() for p in pkg_manager.getPackages(): for r in p.getChildrenResources(False): if r.getUrl() == url: return p return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_pkg(self, pkg):\n pass", "def get_resource(package_name, resource):\n # Path of the package, relative to the loader root:\n # For package 'foo.bar', this is 'foo/bar'.\n package_path = package_name.replace(\".\", \"/\")\n\n # Path of the resource, relative to the loader root:\n # For package 'foo.bar' and resource 'subdir/data.csv', this is \"foo/bar/subdir/data.csv\".\n resource_path = os.path.join(package_path, resource)\n\n # Iterate over all the contributors for the specified package:\n try:\n package = importlib.import_module(package_name)\n except ImportError:\n # Package or module does not exist:\n logging.debug(\"Resource %r from package %r not found\", resource, package_name)\n return None\n\n for path in package.__path__:\n # 'path' is the absolute package path within a specific loader:\n # For package 'foo.bar' in a loader for a Zip file '/path/to/archive.zip',\n # this is: '/path/to/archive.zip/foo/bar'.\n logging.debug(\"Attempting to load resource %r from %r\", resource, path)\n\n assert path.endswith(package_path), \\\n \"Package path mismatch: {!r} does not include package {!r}\".format(path, package_path)\n loader_path = path[:-(len(package_path) + 1)]\n if os.path.isdir(loader_path):\n # FileFinder importers do not implement get_data().\n # We have to handle direct files explicitly:\n resource_fullpath = os.path.join(loader_path, resource_path)\n if os.path.exists(resource_fullpath):\n with open(resource_fullpath, mode=\"rb\") as f:\n logging.debug(\"Found resource %r in %r\", resource, resource_fullpath)\n return f.read()\n else:\n # Handle ZIP (and other) file loaders:\n # Loaders are expected to implement get_data(resource_path).\n try:\n importer = sys.path_importer_cache[loader_path]\n content = importer.get_data(resource_path)\n logging.debug(\"Found resource %r in %r\",\n resource, os.path.join(loader_path, resource_path))\n return content\n except OSError:\n logging.debug(\"Resource %r not found in loader %r\", resource, loader_path)\n # Ignore error and try next package path.\n\n # Specified resource not found in any loader:\n logging.debug(\"Resource %r from package %r not found\", resource, package_name)\n return None", "def xblock_resource_pkg(block):\n # XModules are a special case because they map to different dirs for\n # sub-modules.\n module_name = block.__module__\n if module_name.startswith('xmodule.'):\n return module_name\n\n return module_name.rsplit('.', 1)[0]", "def find_resource(self, resource_name, package_title=None):\n metadata = self.get_ckan_metadata()\n results = []\n for id, resource in metadata.items():\n if resource['name'] == resource_name:\n if package_title is None or resource['dataset']['title'] == package_title:\n results.append(resource)\n return results[0] if len(results) == 1 else results", "def find_resource_dir(self, dock_image: str, meta: dict) -> str:\n try:\n return self.interrogate_python_package_location(dock_image, meta)\n except CalledProcessError:\n return ''", "def package(cls):\n packages = get_packages()\n return packages.modules.get(cls.__module__)", "def findPkgPath(self, pkg):\r\n try:\r\n return self._rp.get_path(pkg)\r\n except rospkg.ResourceNotFound:\r\n raise ResourceNotFound('Can not find ROS package '\r\n '\"{0}\".'.format(pkg))", "def determinePackage(inFile):\n fileDir= os.path.dirname(inFile)\n files= os.listdir(fileDir)\n \n pkgName= None\n if \"__init__.py\" in files:\n pkgName= os.path.basename(fileDir)\n\n return pkgName", "def find_package(self, package_title):\n metadata = 
self.get_ckan_metadata()\n results = []\n for id, resource in metadata.items():\n if resource['dataset']['title'] == package_title:\n results.append(resource['dataset'])\n return results[0] if len(results) == 1 else results", "def find_standard_package(self, pkgname):\n\n try:\n if pkgname == 'antigravity':\n return ()\n result = find_module(pkgname)\n return result\n except ImportError:\n return ()", "def findModule(name):", "def get_package_path():\n package_name = get_package_name()\n return package_name.replace('.', '/')", "def get_resource(res_name, res_type=\"icons\"):\n own_path = os.path.dirname(__file__)\n resource_path = os.path.abspath(os.path.join(own_path, os.pardir, \"resources\", res_type))\n return os.path.join(resource_path, res_name)", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def _pkg_path(self, pkg):\n r = rospkg.RosPack()\n pkg_path = r.get_path(pkg) \n return pkg_path", "def getPackageName(rootDir=\"python\"):\n dirIter = os.walk(rootDir)\n (dirPath, dirList, fileList) = next(dirIter)\n dirList = [dirName for dirName in dirList if not dirName.startswith(\".\")]\n if len(dirList) != 1:\n raise RuntimeError(\"Found %s instead of 1 directory\" % (dirList,))\n return dirList[0]", "def package_source_space(self, package):\n for pkg_name, pkg in self.packages:\n if pkg_name == package.name:\n pkg_dir = os.path.dirname(pkg.filename)\n # Need to check if the pkg_dir is the source space as it can also be loaded from the metadata\n if os.path.commonpath([self.source_space_abs, pkg_dir]) == self.source_space_abs:\n return pkg_dir\n\n return None", "def get_packages_with_prefixes():\n return get_resources('packages')", "def package_folder(self):\n return self._base_package", "def get_package_name(pkg, rem):\n flavor = rem.os.package_type\n\n try:\n return _PACKAGE_MAP[pkg][flavor]\n except KeyError:\n return None", "def package_space(self, package):\n space_abs = getattr(self, '__%s_space_abs' % space)\n return os.path.join(space_abs, package.name)", "def search_package(package, satellite_connection, satellite_connection_auth):\n package_sat_result = satellite_connection.packages.search.name(satellite_connection_auth, package)\n return package_sat_result", "def _find_project_by_import():\n try:\n import _databand_project\n\n return abs_join(_databand_project.__file__, \"..\")\n except ImportError:\n dbnd_log_init_msg(\"Can't import `_databand_project` marker.\")\n return None", "def lookupmodule(self, filename):\n if os.path.isabs(filename) and os.path.exists(filename):\n return filename\n f = os.path.join(sys.path[0], filename)\n if os.path.exists(f) and self.canonic(f) == self.mainpyfile:\n return f\n root, ext = os.path.splitext(filename)\n if ext == '':\n filename = filename + '.py'\n if os.path.isabs(filename):\n return filename\n for dirname in sys.path:\n while os.path.islink(dirname):\n dirname = os.readlink(dirname)\n fullname = os.path.join(dirname, filename)\n if os.path.exists(fullname):\n return fullname\n return None", "def _look_in_package(tree: dict, module_path: str, name: str, level: Optional[int] = None) -> Union[str, None]:\n parent_path = os.path.dirname(module_path)\n if level is not None:\n for _ in range(level - 1):\n parent_path = os.path.dirname(parent_path)\n parent = find_tree(tree, lambda x, p: x[\"path\"] in [p, os.path.join(p, \"__init__.py\")], args=(parent_path,))\n if parent:\n if parent[\"fullname\"] in [name, \"{}.__init__\".format(name)]:\n return parent[\"path\"]\n for child in 
parent[\"children\"].values():\n if child[\"name\"] == name:\n return child[\"path\"]\n target = find_tree(tree, lambda x, f: x[\"fullname\"] == f, args=(\"{}.{}\".format(parent[\"fullname\"], name),))\n if target:\n return target[\"path\"]\n return None", "def get_resource(self, rsc_path):\n\n\t\ttry:\n\t\t\tfrom pkg_resources import resource_filename\n\t\t\treturn resource_filename(__name__, rsc_path)\n\t\texcept ImportError:\n\t\t\treturn os.path.join(os.path.dirname(__file__), rsc_path)", "def get_resource(resource_path):\n\n return pkg_resources.resource_string(\n cloudify_agent.__name__,\n os.path.join('resources', resource_path)\n )", "def get_package_dir():\n return Path(__file__).parent", "def _package_root(name):\n return name.split('.', 1)[0]", "def pypkgpath(self):\n pkgpath = None\n for parent in self.parts(reverse=True):\n if parent.isdir():\n if not parent.join(\"__init__.py\").exists():\n break\n if not isimportable(parent.basename):\n break\n pkgpath = parent\n return pkgpath" ]
[ "0.68585324", "0.6817595", "0.65799993", "0.65498376", "0.6490739", "0.62694687", "0.6261306", "0.625357", "0.6235414", "0.61686736", "0.61296916", "0.61037827", "0.6083547", "0.6046237", "0.6046237", "0.6038811", "0.60204786", "0.60182536", "0.60166734", "0.6001912", "0.599859", "0.59984946", "0.59691894", "0.59520876", "0.5951364", "0.5950647", "0.59309083", "0.5927737", "0.59226143", "0.5907298" ]
0.7700212
0
is_valid checks a formula and returns True if it's always satisfied within the interval. Otherwise, it returns an example of when it's not satisfied.
def is_valid(self, formula, systemstate=None): msg = "Don't know how to check validity of objects of type {type(check)}" logger.error(msg) raise ValueError(msg) # logger.debug("is_valid {str(formula)}") # if isinstance(formula, check.Check): # directly invert to a check # inverted = check.NotCheck(formula) # else: # inverted = tctl.Not(check_copy) # inverted.interval = copy.copy(formula.interval) # # # returns either the Example example, so we'll return it as counter exmaple # sat = self.is_satisfiable(inverted, systemstate) # if sat is False: # return True # else: # return sat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def isRangeValid(self) -> bool:\n ...", "def check_formula(self, expected, given, samples):\r\n var_dict_list = self.randomize_variables(samples)\r\n student_result = self.tupleize_answers(given, var_dict_list)\r\n instructor_result = self.tupleize_answers(expected, var_dict_list)\r\n\r\n correct = all(compare_with_tolerance(student, instructor, self.tolerance)\r\n for student, instructor in zip(student_result, instructor_result))\r\n if correct:\r\n return \"correct\"\r\n else:\r\n return \"incorrect\"", "def is_contradiction(formula: Formula) -> bool:\n # Task 2.5b\n return not is_satisfiable(formula)", "def is_contradiction(formula: Formula) -> bool:\r\n # Task 2.5b\r\n return not is_satisfiable(formula)", "def is_satisfiable(formula: Formula) -> bool:\n # Task 2.5c\n variables = list(sorted(formula.variables()))\n assignment_dict = all_models(list(variables))\n for val in truth_values(formula, assignment_dict):\n if val:\n return True\n return False", "def validPeriod(period):\r\n try:\r\n i = float(period)\r\n except ValueError:\r\n return False\r\n else:\r\n if i>0:\r\n return True\r\n else:\r\n return False", "def _is_in_range(valid_values):\n\n def f(x):\n if x not in valid_values:\n raise ValueError('{} not in {}'.format(x, valid_values))", "def is_valid_value(self, value):\n if not self.range:\n return False\n\n return value >= self.range[0] and value <= self.range[1]", "def is_valid(self) -> ir.BooleanValue:\n return ops.GeoIsValid(self).to_expr()", "def validate(self):\n return self._validate_variable(self._value_forced) and \\\n self._validate_variable(self._value_calc)", "def is_valid(self, value: Union[float, int]) -> bool:\n if self.min is not None:\n if self.include_min:\n if value < self.min:\n return False\n else:\n if value <= self.min:\n return False\n\n if self.max is not None:\n if self.include_max:\n if value > self.max:\n return False\n else:\n if value >= self.max:\n return False\n\n if self.step is None:\n return True\n\n if self.min is not None:\n value -= self.min\n return (value % self.step) == 0", "def is_valid_number(self):\n for condition in [self.game.getRow(self.pos), self.game.getCol(self.pos), self.game.getSquare(self.pos)]:\n if not self.check_alignement_condition(condition):\n return False\n return True", "def in_period(self, value, lower_bound, upper_bound, func=lambda _x: _x):\n if ((lower_bound==None) and (upper_bound==None)):\n return True\n if (lower_bound==None):\n return func(value) <= upper_bound\n if (upper_bound==None):\n return func(value) >= lower_bound\n return ((func(value) >= lower_bound) and (func(value) <= upper_bound))", "def is_valid(self,):\r\n return self.g > 0 and self.l > 0 and self.m1 > 0 and self.m2 > 0 and self.m3 > 0 and self.r1 > 0 and self.r2 > 0 and self.tau > 0 and self.theta1 > 0 and self.theta2 > 0 and self.theta3 > 0", "def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0", "def _isvalid(self, x):\n return (x <= self.n) & (x > 0)", "def check_validity(self,check_derivative=True,verbose=True):\n is_valid = True\n if self.f0(0.0)!=0.0:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy f0(0)=0 (f(0)=\",self.f0(0.0),\")\")\n if self.f1(1.0)!=1.0:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy 
f0(1)=1 (f1(1)=\",self.f1(1.0),\")\")\n if self.rho_max<self.rho_min:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy rho_max>=rho_min (rho_min,rho_max=\",\n rho_min,rho_max,\")\")\n if check_derivative:\n # Coarse check if the derivative is bounded below by some d>1\n # Note it is sufficient to check the restricted ranges given\n xs0 = np.linspace(0.0,self.rho_max,129)\n xs1 = np.linspace(self.rho_min,1.0,129)\n d = np.min(np.concatenate((self.df0(xs0),self.df1(xs1))))\n if d<=1.0:\n is_valid = False\n if verbose:\n print(\"DynamicalSystem: Warning: The system does \"+\n \"not satisfy: d>1 (d<=\",d,\")\")\n return is_valid", "def is_satisfiable(formula: Formula) -> bool:\r\n # satisfiable - if it gets the value True at least once\r\n # Task 2.5c\r\n all_models_local = all_models(list(formula.variables()))\r\n for bool_val in truth_values(formula, all_models_local):\r\n if bool_val:\r\n return True\r\n return False", "def evaluate_fit_range(predicted, fit_range):\n test1 = (predicted[0] >= fit_range[0])\n test2 = (predicted[0] <= fit_range[1])\n test3 = (predicted[1] >= fit_range[2])\n test4 = (predicted[1] <= fit_range[3])\n return all([test1, test2, test3, test4])", "def in_valid_range(self, string):\n fret_number = self.cursor.get_frets()[string]\n return (\n (self.min_x <= fret_number <= self.max_x) or\n (self.allow_open and fret_number == self.guitar.min_fret)\n )", "def is_valid(self):\n return self.has_valid_values() and self.has_valid_sum()", "def is_valid(self):\n sum_prob_per_var = {}\n for rule in self.rules:\n var, prob = rule.variable, rule.probability\n if prob < 0:\n return False\n sum_prob_per_var[var] = sum_prob_per_var.get(var, 0) + prob\n return all(sum_prob == 1.0 for sum_prob in sum_prob_per_var.values())", "def is_valid(self):\n posit1 = (self.mean_v > 0) & (self.kappa_y > 0) & (self.eta_y > 0)\n posit2 = (self.kappa_s > 0) & (self.eta_s > 0)\n return posit1 & posit2 & self.feller()", "def validate(self):\n try:\n assert self.__age_calculate() is True, Exception('Age is less than expected')\n assert self.__is_user_repeated() is True,Exception(\n 'Recently request received in last 5 days')\n assert self.__is_indian_or_american() is True, Exception(\n 'Nationality should be india or america')\n assert self.__check_state() is True, Exception('State should be valid')\n assert self.__check_salary() is True, Exception(\n 'Salary should be below 90k and above 10k')\n self.__log.write_log(\"All Validation is Successful\")\n self.__response = {'response':'success'}\n return True\n except AssertionError as error:\n self.__response = {'response':f\"{error}\"}\n self.__log.write_log(\"Validation Error...Check the Eligibility Criteria...\")\n return False", "def validate_nbf(self, nbf, exp, *args, **kwargs):\n valid_nbf = True\n if nbf:\n valid_nbf = (nbf < exp and exp > time.time())\n return valid_nbf", "def is_close_eval(pss_score, actual_rating)-> bool:\n return math.isclose(pss_score, actual_rating, abs_tol=0.05)", "def is_valid(self):\n return _property_op(arctern.ST_IsValid, self).astype(bool, copy=False)", "def __validateInput(self, fx: str, ux:str, lx:str) -> bool:\r\n # validate the input fields\r\n if fx == \"\" or ux == \"\" or lx == \"\":\r\n self.errorMessage = self.errorMessageMissingFields\r\n self.__showErrorMessage()\r\n return False\r\n\r\n # validate the limits\r\n self.lowerX = lx\r\n self.upperX = ux\r\n # check if numeric\r\n try:\r\n self.upperX = float(self.upperX)\r\n self.lowerX = 
float(self.lowerX)\r\n except:\r\n self.errorMessage = self.errorMessageLimitsNotNumeric\r\n self.__showErrorMessage()\r\n return False\r\n \r\n # check for inquality\r\n if self.lowerX > self.upperX:\r\n self.errorMessage = self.errorMessageLimitsNotOrdered\r\n self.upperXField.setText(str(self.lowerX))\r\n self.lowerXField.setText(str(self.upperX))\r\n self.lowerX, self.upperX = self.upperX, self.lowerX\r\n ##################################\r\n # validate and process the input function\r\n self.inputFunction = fx\r\n try:\r\n self.inputFunction = self.inputFunction.replace(\" \", \"\").replace(\"^\", \"**\").replace(\"sqrt\", \"np.sqrt\")\r\n self.inputFunction = self.inputFunction.replace(\"e**\", \"np.exp\").replace(\"log\", \"np.log\") \r\n self.inputFunction = self.inputFunction.replace(\"sin\", \"np.sin\").replace(\"cos\", \"np.cos\").replace(\"tan\", \"np.tan\")\r\n\r\n except:\r\n self.errorMessage = self.errorMessageNonValidFunction\r\n self.__showErrorMessage()\r\n return True", "def is_valid_latitude(latitude):\n assert latitude is not None\n\n try:\n if is_valid_float(latitude):\n return MIN_LATITUDE <= float(latitude) <= MAX_LATITUDE\n else:\n return False\n # return -90 <= latitude and latitude <= 90\n except ValueError:\n return False" ]
[ "0.6952767", "0.6359789", "0.61984766", "0.60863614", "0.6065324", "0.6033336", "0.5894179", "0.5823172", "0.5815996", "0.5808585", "0.57865024", "0.5782736", "0.5776578", "0.5729661", "0.5703226", "0.5698514", "0.56970704", "0.56938696", "0.56928974", "0.5641353", "0.5600812", "0.55990094", "0.55897397", "0.55873466", "0.54940057", "0.54909754", "0.54719454", "0.54366654", "0.5432929", "0.542984" ]
0.7043341
0
Convert n bytes into a human readable string based on format. symbols can be either "customary", "customary_ext", "iec" or "iec_ext",
def bytes2human(n, format='%(value).1f %(symbol)s', symbols='customary'):
    n = int(n)
    sign = ''
    if n < 0:
        sign = '-'
        n = -n
    symbols = SYMBOLS[symbols]
    prefix = {}
    for i, s in enumerate(symbols[1:]):
        prefix[s] = 1 << (i + 1) * 10
    for symbol in reversed(symbols[1:]):
        if n >= prefix[symbol]:
            value = float(n) / prefix[symbol]
            return sign + format % locals()
    return sign + format % dict(symbol=symbols[0], value=n)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bytes2human(n, format=\"%(value)i%(symbol)s\"):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n for symbol in reversed(symbols[1:]):\n if n >= prefix[symbol]:\n value = float(n) / prefix[symbol]\n return format % locals()\n return format % dict(symbol=symbols[0], value=n)", "def bytes2human(n):\n symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for i, s in enumerate(symbols):\n prefix[s] = 1 << (i + 1) * 10\n for s in reversed(symbols):\n if n >= prefix[s]:\n value = float(n) / prefix[s]\n return '%.2f %s' % (value, s)\n return '%.2f B' % (n)", "def bytes2human(n):\n symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n prefix = {}\n for i, s in enumerate(symbols):\n prefix[s] = 1 << (i + 1) * 10\n for s in reversed(symbols):\n if n >= prefix[s]:\n value = float(n) / prefix[s]\n #return '%.2f %s' % (value, s)\n return '%.2f' % (value)\n return '%.2f B' % (n)", "def build_ascii_fmtstr(pc_):\n fmtstr = []\n for t, cnt in zip(pc_.type, pc_.count):\n if t == 'F':\n fmtstr.extend(['%.10f'] * cnt)\n elif t == 'I':\n fmtstr.extend(['%d'] * cnt)\n elif t == 'U':\n fmtstr.extend(['%u'] * cnt)\n else:\n raise ValueError(\"don't know about type %s\" % t)\n return fmtstr", "def human2bytes(s):\n init = s\n num = \"\"\n while s and s[0:1].isdigit() or s[0:1] == '.':\n num += s[0]\n s = s[1:]\n num = float(num)\n letter = s.strip()\n for name, sset in SYMBOLS.items():\n if letter in sset:\n break\n else:\n if letter == 'k':\n # treat 'k' as an alias for 'K' as per: http://goo.gl/kTQMs\n sset = SYMBOLS['customary']\n letter = letter.upper()\n else:\n raise ValueError(\"can't interpret %r\" % init)\n prefix = {sset[0]: 1}\n for i, s in enumerate(sset[1:]):\n prefix[s] = 1 << (i + 1) * 10\n return int(num * prefix[letter])", "def hex_string(s, n=32):\n # take first n characters, reverse them and get ascii codes with ord()\n return 'X\"{0:>0{1}}\"'.format(''.join(['{0:x}'.format(ord(c)) for c in s[:n][::-1]]), n * 2)", "def pretty_bytes(num: int) -> str:\n\n # Reject weird stuff\n try:\n if num < 0:\n raise ValueError(\"negative size\")\n except TypeError:\n raise TypeError(\"non-numeric size\")\n\n if num < 2**10:\n return f\"{num} B\"\n\n for x, p in enumerate(\"kMGTPE\"):\n if num < 2 ** ((2 + x) * 10):\n num /= 2 ** ((1 + x) * 10)\n return f\"{num:.1f} {p}iB\"\n\n # overflow or something: in this case lets just go\n # with what we were given and get on with our day.\n return f\"{num} B\"", "def humanize_bytes(value: bytes) -> str:\n if len(value) < INLINE_LENGTH:\n payload = value.hex()\n else:\n hex = value.hex()\n head = hex[:DISPLAY_CHARS]\n tail = hex[-1 * DISPLAY_CHARS :]\n payload = f\"{head}..{tail}\"\n\n return f\"bytes{len(value)}({payload})\"", "def readable_size(n: int) -> str:\n sizes = ['K', 'M', 'G']\n fmt = ''\n size = n\n for i, s in enumerate(sizes):\n nn = n / (1000 ** (i + 1))\n if nn >= 1:\n size = nn\n fmt = sizes[i]\n else:\n break\n return '%.2f%s' % (size, fmt)", "def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)", "def human_readable(bytes, units=[' bytes','kB','MB','GB','TB', 'PB', 'EB']):\n return str(bytes) + units[0] if bytes < 1024 else human_readable(bytes>>10, units[1:])", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.c\n return string", "def visible_octets(data: bytes) -> str:\n hexed = hexlify(data).decode('ascii')\n tuples = [''.join((a, b)) for a, b in zip(hexed[::2], hexed[1::2])]\n line = []\n output = []\n 
ascii_column = []\n for idx, octet in enumerate(tuples):\n line.append(octet)\n # only use printable characters in ascii output\n ascii_column.append(octet if 32 <= int(octet, 16) < 127 else '2e')\n if (idx+1) % 8 == 0:\n line.append('')\n if (idx+1) % 8 == 0 and (idx+1) % 16 == 0:\n raw_ascii = unhexlify(''.join(ascii_column))\n raw_ascii = raw_ascii.replace(b'\\\\n z', b'.')\n ascii_column = []\n output.append('%-50s %s' % (' '.join(line),\n raw_ascii.decode('ascii')))\n line = []\n raw_ascii = unhexlify(''.join(ascii_column))\n raw_ascii = raw_ascii.replace(b'\\\\n z', b'.')\n output.append('%-50s %s' % (' '.join(line), raw_ascii.decode('ascii')))\n line = []\n return '\\n'.join(output)", "def hackerrank_Python_String_print_formatted_decimal_octal_hex_binary():\n def print_formatted(number):\n # your code goes here\n\n padw = len(bin(number).lstrip(\"0b\"))\n for i in range(1, number+1):\n print(str(i).rjust(padw) + \" \" \\\n + str(oct(i).lstrip(\"0\")).rjust(padw) + \" \" \\\n + str(hex(i).lstrip(\"0x\").upper()).rjust(padw) + \" \" \\\n + str(bin(i).lstrip(\"0b\").rjust(padw)))\n\n print_formatted(20)\n # 1 1 1 1\n # 2 2 2 10\n # 3 3 3 11\n # 4 4 4 100 ...", "def format_bytes(numBytes):\n numBytes = float(numBytes)\n labels = [\"\", \"K\", \"M\", \"G\"]\n if numBytes < 1:\n numBits = int(8 * numBytes)\n return \"%d b\" % (numBits)\n lim = 1024\n label = 0\n while lim < numBytes:\n lim *= 1024\n label += 1\n if label >= len(labels):\n label = len(labels)-1\n break\n lim /= 1024\n val = int(numBytes / lim)\n return \"%d %sB\" % (val, labels[label])", "def dump( n ):\n\n s = '%x' % n\n if len(s) & 1:\n s = '0' + s\n return s.decode('hex')", "def int2hex(n: int) -> str:", "def bytes2best_str(bytes, decimals=1):\n sizes = (\n (1<<0, 'b'),\n (1<<10, 'Kb'),\n (1<<20, 'Mb'),\n (1<<30, 'Gb'),\n (sys.maxint, 'Gb')\n )\n # find the best index in sizes array for the given bytes value\n for i in range(len(sizes)-1):\n if bytes < sizes[i+1][0]:\n break\n # bytes always must be displayed without decimals\n if i == 0:\n decimals = 0\n # format the string\n f = float(bytes)/float(sizes[i][0])\n format = '%(value).' 
+ str(decimals) + 'f %(magnitude)s'\n s = format % {'value': f, 'magnitude': sizes[i][1]}\n return s", "def num_to_symb(num):\n\n num = str(num)\n if len(num) == 3:\n group = (num[0], num[1], num[2])\n symbolic = \"\"\n\n for key, value in notation.items():\n for g in group:\n if int(g) > 8 or int(g) < 0:\n symbolic = \"Invalid Numerical Representation!\"\n elif g == value:\n symbolic = symbolic + key\n else:\n symbolic = \"Number input should be of length 3!\"\n\n return symbolic", "def format_symbol(item):\n prefix = item.get(\"containerName\", \"\")\n label = prefix + \".\" + item.get(\"name\") if prefix else item.get(\"name\")\n return [label, format_symbol_kind(item.get(\"kind\"))]", "def human_bytes(n):\n if n < 1024:\n return '%d B' % n\n k = n/1024\n if k < 1024:\n return '%d KB' % round(k)\n m = k/1024\n if m < 1024:\n return '%.1f MB' % m\n g = m/1024\n return '%.2f GB' % g", "def format_bytes(byte_counter):\n if byte_counter is None:\n return 'N/A'\n if type(byte_counter) is str:\n byte_counter = float(byte_counter)\n if byte_counter == 0.0:\n exponent = 0\n else:\n exponent = long(math.log(byte_counter, 1024.0))\n suffix = 'bkMGTPEZY'[exponent]\n converted = float(byte_counter) / float(1024**exponent)\n return '%.2f%s' % (converted, suffix)", "def human2bytes(s):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n letter = s[-1:].strip().upper()\n num = s[:-1]\n #assert num.isdigit() and letter in symbols\n #use below assert statement to handle sizes with decimal places\n assert float(num) and letter in symbols\n num = float(num)\n prefix = {symbols[0]: 1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i +1) *10\n return int(num * prefix[letter])", "def gen_fmt_payload(write_map, write_bits='hn', printed=0, testing=False):\r\n if testing:\r\n payload = ''.join('%{}$p'.format(idx) for value,idx in sorted(write_map))\r\n\r\n else:\r\n payload = ''\r\n # sorted by value in ascending order. 
\r\n for value, idx in sorted(write_map): \r\n n_char = value - printed\r\n assert n_char >= 0, \"printed:%d, value=%d\" % (printed, value)\r\n if n_char > 0:\r\n payload += '%{}c'.format(n_char)\r\n # else n_char == 0, then just without %c\r\n payload += '%{}${}'.format(idx, write_bits)\r\n \r\n printed = value \r\n\r\n print '[+++++++++++] format payload: ' + payload\r\n return payload", "def human2bytes(s):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n letter = s[-1:].strip().upper()\n num = s[:-1]\n assert num.isdigit() and letter in symbols\n num = float(num)\n prefix = {symbols[0]:1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n return int(num * prefix[letter])", "def human2bytes(s):\n symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')\n letter = s[-1:].strip().upper()\n num = s[:-1]\n assert num.isdigit() and letter in symbols\n num = float(num)\n prefix = {symbols[0]:1}\n for i, s in enumerate(symbols[1:]):\n prefix[s] = 1 << (i+1)*10\n return int(num * prefix[letter])", "def human_bytes(num, suffix=\"B\") -> str:\n for unit in [\"\", \"Ki\", \"Mi\", \"Gi\", \"Ti\", \"Pi\", \"Ei\", \"Zi\"]:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, \"Yi\", suffix)", "def humanize_bytes(bytes, precision=1):\n abbrevs = (\n (1<<50L, 'PB'),\n (1<<40L, 'TB'),\n (1<<30L, 'GB'),\n (1<<20L, 'MB'),\n (1<<10L, 'kB'),\n (1, 'bytes')\n )\n if bytes == 1:\n return '1 byte'\n for factor, suffix in abbrevs:\n if bytes >= factor:\n break\n\n if suffix == 'bytes':\n return '%s bytes' % (bytes / factor)\n else:\n return '%.*f %s' % (precision, float(bytes) / factor, suffix)", "def human(byte):\n # Adapted from ranger.ext.human_readable\n if byte <= 0:\n return '0'\n if byte < 2**10:\n return '%dK' % byte\n if byte < 2**10 * 99:\n return '%.2gM' % (byte / 2**10.0)\n if byte < 2**10 * 999:\n return '%.3gM' % (byte / 2**10.0)\n if byte < 2**20:\n return '%.4gM' % (byte / 2**10.0)\n if byte < 2**20 * 99:\n return '%.2gG' % (byte / 2**20.0)\n if byte < 2**20 * 999:\n return '%.3gG' % (byte / 2**20.0)\n if byte < 2**30:\n return '%.4gG' % (byte / 2**20.0)\n if byte < 2**30 * 99:\n return '%.2gT' % (byte / 2**30.0)\n if byte < 2**30 * 999:\n return '%.3gT' % (byte / 2**30.0)\n if byte < 2**40:\n return '%.4gT' % (byte / 2**30.0)\n if byte < 2**30 * 99:\n return '%.2gP' % (byte / 2**40.0)\n if byte < 2**40 * 999:\n return '%.3gP' % (byte / 2**40.0)\n if byte < 2**50:\n return '%.4gP' % (byte / 2**40.0)\n return '>9000'", "def __bytes__(self):\n line1=self.name.encode(\"ascii\").ljust(24,b\" \")\n line2=b\"1 %05dU %02d%03d%-3b %02d%012.8f %c.%08d %c%05d%+01d %c%05d%+01d 0 %04d\" %\\\n (self.id,self.desig[\"year\"]%100,self.desig[\"launch\"],\\\n self.desig[\"object\"].encode(\"ascii\"),self.epoch[\"year\"]%100,\\\n self.epoch[\"day\"],b\"-\" if self.fdmm<0 else b\" \",abs(self.fdmm*1.e8),\\\n b\"-\" if self.sdmm<0 else b\" \",\\\n abs(self.sdmm*pow(10,5-(ceil(log(abs(self.sdmm),10)) if \\\n abs(self.sdmm)>0 else 0))),\\\n (ceil(log(abs(self.sdmm),10)) if abs(self.sdmm)>0 else 0),\\\n b\"-\" if self.bstar<0 else b\" \",\\\n abs(self.bstar*pow(10,5-(ceil(log(abs(self.bstar),10)) if \\\n abs(self.bstar)>0 else 0))),\\\n (ceil(log(abs(self.bstar),10)) if abs(self.bstar)>0 else 0),\\\n self.nr,)\n line3=b\"2 %05d %08.4f %08.4f %07d %08.4f %08.4f %011.8f%05d\" %\\\n (self.id,self.inc,self.raan,self.ecc*1.e7,self.aop,\\\n self.ma,self.mm,self.revol,)\n l2cs=0\n for c in line2:\n bc=bytes([c])\n if bc.isdigit():\n 
l2cs+=int(bc.decode(\"ascii\"))\n elif bc==b\"-\":\n l2cs+=1\n l2cs%=10\n\n l3cs=0\n for c in line3:\n bc=bytes([c])\n if bc.isdigit():\n l3cs+=int(bc.decode(\"ascii\"))\n elif bc==b\"-\":\n l3cs+=1\n l3cs%=10\n return line1+b\"\\r\\n\"+line2+str(l2cs).encode(\"ascii\")+b\"\\r\\n\"+line3+\\\n str(l3cs).encode(\"ascii\")+b\"\\r\\n\"" ]
[ "0.65021646", "0.62997663", "0.62691927", "0.59078735", "0.57972115", "0.5664556", "0.56188285", "0.5568806", "0.5559517", "0.5547597", "0.5538554", "0.55122787", "0.5508553", "0.546931", "0.5457602", "0.54516983", "0.543876", "0.53954804", "0.5391302", "0.53787154", "0.5351915", "0.5340204", "0.532416", "0.5300822", "0.5298044", "0.5298044", "0.52972525", "0.52719074", "0.52711606", "0.5250788" ]
0.6520492
0
Set the view limits to the nearest multiples of base that contain the data
def view_limits(self, dmin, dmax):
    base = self._select_base(dmin, dmax)
    if mpl.rcParams['axes.autolimit_mode'] == 'round_numbers':
        vmin = base.le(dmin)
        vmax = base.ge(dmax)
        if vmin == vmax:
            vmin -= 1
            vmax += 1
    else:
        vmin = dmin
        vmax = dmax
    return mtransforms.nonsingular(vmin, vmax)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def view_limits(self, vmin, vmax):\n return vmin, vmax\n # return nonsingular(vmin, vmax)", "def set_view_max(self, view_max):\n try:\n view_max = float(view_max)\n self._view_max = view_max\n self.update_rgba()\n if self._cross_pos:\n self.update_orth_rgba()\n except ValueError:\n print\"view_max must be a number.\"", "def limit(self, lim: float):\n if self.mag() > lim:\n self.values = tuple(self.norm()*lim)\n return self", "def recalc_view_lonlat_limits(self):\n\n self.view_llon = self.map_llon + self.view_offset_x / self.ppd_x\n self.view_rlon = self.view_llon + self.view_width / self.ppd_x\n\n self.view_tlat = self.map_tlat - self.view_offset_y / self.ppd_y\n self.view_blat = self.view_tlat - self.view_height / self.ppd_y", "def set_limits(self):\n S = self.structure\n self.limit_edges = []\n for u in S:\n # direct successors are fully known\n for v in (suc for suc in S.successors(u) if suc):\n S[u][v][\"upper_limit\"] = S[u][v][DIST]\n S[u][v][\"lower_limit\"] = S[u][v][DIST]\n for v in (des for des in level2_descendants(S, u) if des):\n ids = self.kinematic_map[u][v] # TODO generate this at init\n l1 = self.a[ids[1]]\n l2 = self.a[ids[2]]\n lb = self.lb[ids[2]] # symmetric limit\n ub = self.ub[ids[2]] # symmetric limit\n lim = max(abs(ub), abs(lb))\n S.add_edge(u, v)\n S[u][v][\"upper_limit\"] = l1 + l2\n S[u][v][\"lower_limit\"] = sqrt(\n l1 ** 2 + l2 ** 2 - 2 * l1 * l2 * cos(pi - lim)\n )\n S[u][v][BOUNDED] = \"below\"\n self.limit_edges += [[u, v]] # TODO remove/fix", "def _update_limits(self):\n if self.pos_x > self.max_x:\n self.max_x = self.pos_x\n if self.pos_y > self.max_y:\n self.max_y = self.pos_y\n if self.pos_x < self.min_x:\n self.min_x = self.pos_x\n if self.pos_y < self.min_y:\n self.min_y = self.pos_y", "def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)", "def SetLimit(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetLimit(self, *args)", "def set_limits(self):\n K = self.parents\n S = self.structure\n T = self.T_zero\n kinematic_map = self.kinematic_map\n T_axis = trans_axis(self.axis_length, \"z\")\n for u in K:\n for v in (des for des in K.successors(u) if des):\n S[u][v][LOWER] = S[u][v][DIST]\n S[u][v][UPPER] = S[u][v][DIST]\n for v in (des for des in level2_descendants(K, u) if des):\n names = [\n (f\"p{u[1:]}\", f\"p{v[1:]}\"),\n (f\"p{u[1:]}\", f\"q{v[1:]}\"),\n (f\"q{u[1:]}\", f\"p{v[1:]}\"),\n (f\"q{u[1:]}\", f\"q{v[1:]}\"),\n ]\n\n for ids in names:\n path = kinematic_map[u][v]\n T0, T1, T2 = [T[path[0]], T[path[1]], T[path[2]]]\n\n if \"q\" in ids[0]:\n T0 = T0.dot(T_axis)\n if \"q\" in ids[1]:\n T2 = T2.dot(T_axis)\n\n d_max, d_min, limit = self.max_min_distance(T0, T1, T2)\n\n if limit:\n\n rot_limit = rot_axis(self.ub[v], \"z\")\n\n T_rel = T1.inv().dot(T2)\n\n d_limit = norm(T1.dot(rot_limit).dot(T_rel).trans - T0.trans)\n\n if limit == \"above\":\n d_max = d_limit\n else:\n d_min = d_limit\n\n self.limited_joints += [v]\n self.limit_edges += [[ids[0], ids[1]]] # TODO remove/fix\n\n S.add_edge(ids[0], ids[1])\n if d_max == d_min:\n S[ids[0]][ids[1]][DIST] = d_max\n S[ids[0]][ids[1]][UPPER] = d_max\n S[ids[0]][ids[1]][LOWER] = d_min\n S[ids[0]][ids[1]][BOUNDED] = limit", "def set_limits(self):\n S = self.structure\n self.limit_edges = []\n for u in S:\n # 
direct successors are fully known\n for v in (suc for suc in S.successors(u) if suc):\n S[u][v][UPPER] = S[u][v][DIST]\n S[u][v][LOWER] = S[u][v][DIST]\n for v in (des for des in level2_descendants(S, u) if des):\n ids = self.kinematic_map[u][v]\n l1 = self.d[ids[1]]\n l2 = self.d[ids[2]]\n lb = self.lb[ids[2]]\n ub = self.ub[ids[2]]\n lim = max(abs(ub), abs(lb))\n S.add_edge(u, v)\n S[u][v][UPPER] = l1 + l2\n S[u][v][LOWER] = sqrt(l1 ** 2 + l2 ** 2 - 2 * l1 * l2 * cos(pi - lim))\n S[u][v][BOUNDED] = \"below\"\n self.limit_edges += [[u, v]] # TODO remove/fix", "def extend_to_grid(self, resolution):\n return Bounds(\n min_value = math.floor(self.min/resolution)*resolution,\n max_value = math.ceil(self.max/resolution)*resolution\n )", "def set_lim(values, scale):\n\n v_min, v_max = min(values), max(values)\n margin = (v_max - v_min) * scale\n v_min, v_max = v_min - margin, v_max + margin\n\n return v_min, v_max", "def _set_bet_limit(self) -> None:\n for i, ratio in enumerate(BET_LIMIT_RATIOS):\n self._bet_limits[i] = self._treasury_min.get() // ratio", "def update_limits(self):\n if len(self) == 0:\n self.limits = np.array([[0.0, 0.0], [0.0, 0.0]])\n else:\n x_min, x_max = self.buf[self.rear][0], self.buf[self.front][0]\n y_min, y_max = self.slmm.get_minmax()\n self.limits = np.array([[x_min, y_min], [x_max, y_max]])", "def apply_bound(x, var_min, var_max):\n x.position = np.maximum(x.position, var_min)\n x.position = np.minimum(x.position, var_max)", "def set_lim(self, new_lim: int):\n self.__max_loop = new_lim\n self.__check_interpreter()", "def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...", "def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...", "def lower_bound(self) -> float:\n ...", "def __init__(self, start, home, left_limit, right_limit):\n\n super().__init__(start, home)\n self.left_limit = left_limit\n self.right_limit = right_limit\n self.x = self.x", "def set_mapping(self, value_min, value_min_raw, value_max, value_max_raw):\n assert value_min <= value_max\n # prevent division by zero.\n if value_min == value_max:\n value_max += 1.\n if value_min_raw == value_max_raw:\n value_max_raw += 1.\n self.value_min = value_min\n self.value_max = value_max\n self.value_min_raw = value_min_raw\n self.value_max_raw = value_max_raw\n self._value_scale = (self.value_max - self.value_min) / (self.value_max_raw - self.value_min_raw)", "def ctrl_limit_changed(self, which, new_limit):\n super(PyDMSpinbox, self).ctrl_limit_changed(which, new_limit)\n if not self.userDefinedLimits:\n if which == \"UPPER\":\n self.setMaximum(new_limit)\n else:\n self.setMinimum(new_limit)", "def determinePlotLimits(self):\n max_str = \"up99\"\n min_str = \"dn99\"\n if self.keywords.get(\"limit_type\",\"99per\") == \"minmax\":\n max_str = \"max\"\n min_str = \"min\"\n \n # Determine the min/max of variables over all models\n limits = {}\n prune = False\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n if \"MeanState\" not in dataset.groups: continue\n group = dataset.groups[\"MeanState\"]\n variables = [v for v in group.variables.keys() if v not in group.dimensions.keys()]\n for vname in variables:\n var = group.variables[vname]\n pname = vname.split(\"_\")[0]\n region = vname.split(\"_\")[-1]\n if var[...].size <= 1: continue\n if space_opts.has_key(pname):\n if not limits.has_key(pname):\n limits[pname] = {}\n limits[pname][\"min\"] = +1e20\n limits[pname][\"max\"] = -1e20\n 
limits[pname][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][\"min\"] = min(limits[pname][\"min\"],var.getncattr(min_str))\n limits[pname][\"max\"] = max(limits[pname][\"max\"],var.getncattr(max_str))\n elif time_opts.has_key(pname):\n if not limits.has_key(pname): limits[pname] = {}\n if not limits[pname].has_key(region):\n limits[pname][region] = {}\n limits[pname][region][\"min\"] = +1e20\n limits[pname][region][\"max\"] = -1e20\n limits[pname][region][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][region][\"min\"] = min(limits[pname][region][\"min\"],var.getncattr(\"min\"))\n limits[pname][region][\"max\"] = max(limits[pname][region][\"max\"],var.getncattr(\"max\"))\n if not prune and \"Benchmark\" in fname and pname == \"timeint\":\n prune = True\n self.pruneRegions(Variable(filename = fname,\n variable_name = vname,\n groupname = \"MeanState\"))\n \n # Second pass to plot legends (FIX: only for master?)\n for pname in limits.keys():\n\n try:\n opts = space_opts[pname]\n except:\n continue\n \n # Determine plot limits and colormap\n if opts[\"sym\"]:\n vabs = max(abs(limits[pname][\"min\"]),abs(limits[pname][\"min\"]))\n limits[pname][\"min\"] = -vabs\n limits[pname][\"max\"] = vabs\n\n # if a score, force to be [0,1]\n if \"score\" in pname:\n limits[pname][\"min\"] = 0\n limits[pname][\"max\"] = 1\n\n limits[pname][\"cmap\"] = opts[\"cmap\"]\n if limits[pname][\"cmap\"] == \"choose\": limits[pname][\"cmap\"] = self.cmap\n\n # Plot a legend for each key\n if opts[\"haslegend\"]:\n fig,ax = plt.subplots(figsize=(6.8,1.0),tight_layout=True)\n label = opts[\"label\"]\n if label == \"unit\": label = limits[pname][\"unit\"]\n post.ColorBar(ax,\n vmin = limits[pname][\"min\"],\n vmax = limits[pname][\"max\"],\n cmap = limits[pname][\"cmap\"],\n ticks = opts[\"ticks\"],\n ticklabels = opts[\"ticklabels\"],\n label = label)\n fig.savefig(os.path.join(self.output_path,\"legend_%s.png\" % (pname))) \n plt.close()\n\n # Determine min/max of relationship variables\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n for g in dataset.groups.keys():\n if \"relationship\" not in g: continue\n grp = dataset.groups[g]\n if not limits.has_key(g):\n limits[g] = {}\n limits[g][\"xmin\"] = +1e20\n limits[g][\"xmax\"] = -1e20\n limits[g][\"ymin\"] = +1e20\n limits[g][\"ymax\"] = -1e20\n limits[g][\"xmin\"] = min(limits[g][\"xmin\"],grp.variables[\"ind_bnd\"][ 0, 0])\n limits[g][\"xmax\"] = max(limits[g][\"xmax\"],grp.variables[\"ind_bnd\"][-1,-1])\n limits[g][\"ymin\"] = min(limits[g][\"ymin\"],grp.variables[\"dep_bnd\"][ 0, 0])\n limits[g][\"ymax\"] = max(limits[g][\"ymax\"],grp.variables[\"dep_bnd\"][-1,-1])\n\n \n self.limits = limits", "def upper_bound(self) -> float:\n ...", "def setMinMax(self):\n currentIndustryNum = self.myParent.myIndustry[self.myIndustryData.id]\n oldIndustryNum = self.myParent.myOldIndustry[self.myIndustryData.id]\n self.setMinValue(-currentIndustryNum)\n if oldIndustryNum > currentIndustryNum:\n self.setMaxValue(oldIndustryNum-currentIndustryNum)\n elif self.isIndustryResearched() == 0:\n self.setMaxValue(0)\n else:\n max = self.getMaxFromFundsAvail()\n cityNum = (self.myParent.cities-self.myParent.citiesUsed)/self.myIndustryData.cities\n if max < cityNum:\n self.setMaxValue(max)\n else:\n self.setMaxValue(cityNum)", "def setlimits(self, Xlim=[], Ylim=[]):\n self.data['Xmin'] = Xlim[0]\n self.data['Xmax'] = Xlim[1]\n self.data['Ymin'] = Ylim[0]\n 
self.data['Ymax'] = Ylim[1]", "def zoom(self, xmin, xmax, xlen, ymin, ymax, ylen):\n self.xmax = xmax\n self.xmin = xmin\n self.xmax = xmax\n self.xlen = xlen\n self.ymin = ymin\n self.ymax = ymax\n self.ylen = ylen\n self.refresh()", "def set_limits_minmax(self, zmin, zmax):\n self.camera.set_clim(zmin, zmax)\n self.autoscale = False", "def _hold_bounds(self):\n adc_channel = self.graph_renderer.channels[0]\n if self.sx2 > adc_channel.size():\n self.anchored = True\n\n if self.anchored:\n # anchor right side of the window to the last graph sample. so the graph always animates, grows out from\n # the right side of the window. (anchor sx2 to adc_channel.size())\n dx = self.sx2 - adc_channel.size()\n dxw = self.wsx2 - adc_channel.size()\n self.sx1 -= dx\n self.sx2 -= dx\n self.wsx1 -= dxw\n self.wsx2 -= dxw\n\n # eliminate integer overflow problems. only allow indices smaller than a 32bit integer value. and then divide\n # it by four just to be sure.. maybe it's not necessary, but maybe there are some other tricks used in the\n # graph rendering..\n bound = 0xffffffff / 4\n # hmm. this allows only 12 days of data with ~960Hz. time to go 64bit?\n self.sx1 = max(self.sx1, -bound)\n self.sy1 = max(self.sy1, -bound)\n self.sx1 = min(self.sx1, bound)\n self.sy1 = min(self.sy1, bound)\n self.sx2 = max(self.sx2, -bound)\n self.sy2 = max(self.sy2, -bound)\n self.sx2 = min(self.sx2, bound)\n self.sy2 = min(self.sy2, bound)\n self.wsx1 = max(self.wsx1, -bound)\n self.wsy1 = max(self.wsy1, -bound)\n self.wsx1 = min(self.wsx1, bound)\n self.wsy1 = min(self.wsy1, bound)\n self.wsx2 = max(self.wsx2, -bound)\n self.wsy2 = max(self.wsy2, -bound)\n self.wsx2 = min(self.wsx2, bound)\n self.wsy2 = min(self.wsy2, bound)\n\n # limit horizontal zoom to 2 samples. can't zoom in anymore if less than one sample stays on screen.\n # don't have time to implement and test line segment cutting, if one sample is outside the window, and another\n # is inside.\n if self.wsx2 - self.wsx1 < 2.:\n self.wsx2 = self.wsx1 + 2.\n if self.sx2 - self.sx1 < 2.:\n self.sx2 = self.sx1 + 2.\n\n #\n # limit vertical movement and vertical zoom\n #\n\n val_min = adc_channel.value_min\n val_max = adc_channel.value_max\n\n # allow offset of this percent/100 of the screen\n overlap = .30\n\n # top of the screen has smaller sample values than bottom of the screen. inverted graph.\n # sy1 is top pixel, sy2 bottom. bottom-left coordinat is (0, 0)\n if self.sy1 < self.sy2:\n val_top = val_min + (self.wsy1 - self.wsy2) * overlap\n val_bottom = val_max - (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 < val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 < val_top:\n self.wsy1 = val_top\n if self.wsy2 > val_bottom:\n self.wsy2 = val_bottom\n else:\n val_bottom = val_min - (self.wsy1 - self.wsy2) * overlap\n val_top = val_max + (self.wsy1 - self.wsy2) * overlap\n if self.wsy1 > val_top:\n self.wsy2 -= self.wsy1 - val_top\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy1 += val_bottom - self.wsy2\n self.wsy2 = val_bottom\n if self.wsy1 > val_top:\n self.wsy1 = val_top\n if self.wsy2 < val_bottom:\n self.wsy2 = val_bottom", "def __init__(self, v=30, *args, **kwargs):\n super(Dumb, self).__init__(*args, **kwargs)\n self.vmax=v" ]
[ "0.61097175", "0.6046753", "0.5984086", "0.5902828", "0.58193773", "0.581779", "0.5771895", "0.5711445", "0.5687423", "0.5683919", "0.5634394", "0.5563799", "0.55608094", "0.5543304", "0.5472012", "0.54453385", "0.5431932", "0.5431932", "0.54241353", "0.5407296", "0.5398765", "0.53760505", "0.53672194", "0.53661627", "0.53525126", "0.5349131", "0.53437704", "0.53340894", "0.5323971", "0.5321989" ]
0.7059033
0
Convert lists to JSON encoded strings, and correctly handle any unicode URL parameters.
def unicode_urlencode(self, params):
    if isinstance(params, dict):
        params = params.items()
    for i, param in enumerate(params):
        if isinstance(param[1], list):
            params[i] = (param[0], json.dumps(param[1]),)
    return urllib.urlencode(
        [(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params]
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unicode_urlencode(params):\n if isinstance(params, dict):\n params = list(params.items())\n for i, param in enumerate(params):\n if isinstance(param[1], list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n result = urllib.parse.urlencode([(k, isinstance(v, str) and v.encode('utf-8') or v) for k, v in params])\n return result", "def to_json_string(list_dictionaries):\n if list_dictionaries and list_dictionaries is not None:\n return json.dumps(list_dictionaries)\n else:\n return \"[]\"", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or list_dictionaries == []:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or list_dictionaries == []:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)", "def get_prep_value(self, value):\n if isinstance(value, list):\n return json.dumps([str(d) for d in value])\n\n return value", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return(\"[]\")\n json_string = json.dumps(list_dictionaries)\n return(json_string)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or not list_dictionaries:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n return \"[]\"\n else:\n string = json.dumps(list_dictionaries)\n return string", "def to_json_string(list_dictionaries):\n if not list_dictionaries or list_dictionaries is None:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or len(list_dictionaries) < 1:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def unicode_urlencode(self, params):\n if isinstance(params , dict):\n params = params.items()\n for i, param in enumerate(params):\n if isinstance(param[1] , list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n if maj_ver == 2:\n return urllib.urlencode(\n [(k , isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params]\n )\n\n if maj_ver == 3:\n return urllib.parse.urlencode(\n [(k, isinstance(v, str) and v.encode('utf-8') or v) for k, v in params]\n )\n\n return", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n if list_dictionaries is None:\n list_dictionaries = []\n return json.dumps(list_dictionaries)", "def json_friendly(self):", "def json_str(item):\n\n if isinstance(item, dict):\n #return {json_str(key): json_str(value) for key, value in item.iteritems()}\n return dict((json_str(key), json_str(value)) for key, value in item.iteritems())\n elif isinstance(item, list):\n return [json_str(element) for element in item]\n elif isinstance(item, unicode):\n return item.encode('utf-8')\n else:\n return item", "def unicode_urlencode(self, params):\n if isinstance(params, dict):\n params = params.items()\n for i, param in enumerate(params):\n if isinstance(param[1] , list):\n params[i] = (param[0], json.dumps(param[1]),)\n\n if maj_ver == 2:\n return urllib.urlencode(\n [(k, isinstance(v, unicode) and v.encode('utf-8') or v) for k, v in params]\n )\n\n if maj_ver == 3:\n return 
urllib.parse.urlencode(\n [(k, isinstance(v, str) and v.encode('utf-8') or v) for k, v in params]\n )\n\n return", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or len(list_dictionaries) is 0:\n return \"[]\"\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or list_dictionaries == []:\n return (\"[]\")\n\n else:\n return (json.dumps(list_dictionaries))", "def to_json_string(list_dictionaries):\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return (\"[]\")\n else:\n return json.dumps(list_dictionaries)", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return \"[]\"\n else:\n return json.dumps(list_dictionaries)", "def encode_strings(o):\n\tif isinstance(o, list):\n\t\treturn [encode_strings(x) for x in o]\n\tif isinstance(o, dict):\n\t\treturn {k.encode('utf-8'): encode_strings(v) for k, v in o.items()}\n\tif isinstance(o, unicode):\n\t\treturn o.encode('utf-8')\n\treturn o", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or len(list_dictionaries) <= 0:\n return '[]'\n\n return json.dumps(list_dictionaries)", "def list_to_json(items):\n return json.dumps(to_dict_list(items))", "def encode_params(params, **kwargs):\n cleaned = clean_params(params, **kwargs)\n return json.dumps(cleaned)", "def stringify(input):\n if isinstance(input, dict):\n return dict([(stringify(key), stringify(value)) for key, value in input.iteritems()])\n elif isinstance(input, list):\n return [stringify(element) for element in input]\n elif isinstance(input, unicode):\n return input.encode('utf-8')\n else:\n return input", "def to_json_string(list_dictionaries):\n\n if list_dictionaries is None or list_dictionaries == []:\n return \"[]\"\n if (type(list_dictionaries) != list or\n not all(type(dicts) == dict for dicts in list_dictionaries)):\n raise TypeError(\"list_dictionaries must be a list of dictionaries\")\n return json.dumps(list_dictionaries)", "def list_to_str(value, encode=None):\n result = []\n for index, v in enumerate(value):\n if isinstance(v, dict):\n result.append(dict_to_str(v, encode))\n continue\n\n if isinstance(v, list):\n result.append(list_to_str(v, encode))\n continue\n\n if encode:\n result.append(encode(v))\n else:\n result.append(v)\n\n return result", "def to_json_string(list_dictionaries):\n if not list_dictionaries:\n return \"[]\"\n if (type(list_dictionaries) != list or\n not all(type(x) == dict for x in list_dictionaries)):\n raise TypeError(\"list_dictionaries must be a list of dictionaries\")\n return json.dumps(list_dictionaries)" ]
[ "0.6864774", "0.66397476", "0.66172516", "0.66086245", "0.6606865", "0.65962803", "0.65903056", "0.65903056", "0.6582515", "0.6557054", "0.65298176", "0.6521824", "0.65212786", "0.65129966", "0.65129966", "0.65117544", "0.65017253", "0.64936596", "0.64865845", "0.6458174", "0.64512134", "0.6449487", "0.6438063", "0.64376414", "0.63302946", "0.6325667", "0.6176393", "0.61394393", "0.6092484", "0.60737884" ]
0.66988754
1
Hashes arguments by joining key=value pairs, appending a secret, and then taking the MD5 hex digest.
def hash_args(self, args, secret=None): for a in args: if isinstance(args[a], list): args[a] = json.dumps(args[a]) args_joined = '' for a in sorted(args.keys()): if isinstance(a, unicode): args_joined += a.encode('utf-8') else: args_joined += str(a) args_joined += '=' if isinstance(args[a], unicode): args_joined += args[a].encode('utf-8') else: args_joined += str(args[a]) hash = hashlib.md5(args_joined) if secret: hash.update(secret) elif self.api_secret: hash.update(self.api_secret) return hash.hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()", "def checksum(**kwargs):\n\n # remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def checksum(self, **kwargs):\n try:\n # if a secretkey is in **kwargs, use it, and remove it\n secretkey = kwargs['secretkey']\n del kwargs['secretkey']\n except KeyError:\n # if the kwargs lookup fails, get secretkey elsewhere\n secretkey = self.secretkey or resolve_secretkey()\n args = kwargs.items()\n args.sort()\n\n param_string = ''\n for key, value in args:\n param_string += str(key)\n param_string += str(value)\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def signature(self, params):\n string = ''.join(key + params[key] for key in sorted(params.keys()))\n return md5(string + self.cfg('secret'))", "def seed_hash(*args):\n args_str = str(args)\n return int(hashlib.md5(args_str.encode(\"utf-8\")).hexdigest(), 16) % (2**31)", "def secret_hash(data):\n\n passwords_hash = hashlib.md5(data.encode(\"UTF-8\")).hexdigest()\n \n return passwords_hash", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def hexdigest(self, *args, **kwargs): # real signature unknown\n pass", "def list_digest(inp_list):\n if type(inp_list) is not list:\n raise TypeError(\"list_digest only works on lists!\")\n if not inp_list:\n raise ValueError(\"input must be a non-empty list!\")\n # If we can rely on python >= 3.8, shlex.join is better\n return hashlib.sha256(\" \".join(inp_list).encode(\"utf-8\")).hexdigest()", "def get_hash(input, method='md5', salt=settings.SECRET_KEY):\n h = hashlib.new(method)\n h.update(str(input))\n h.update(salt)\n return h.hexdigest()", "def add(self, *args):\n for a in args:\n if not isinstance(a, str):\n raise TypeError(f\"Expected {a} to be str, got {type(a)}\")\n self.explored.add(hashlib.md5(a.encode()).hexdigest())", "def get_hash(dictionary):\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()", "def generate_password_hash(event=None, user_id=None):\n\n suffix_key = f'password{event}'\n hexkey = str.encode(f'{user_id}{suffix_key}')\n\n # md5 value[1:10] + 1\n passwd = '{0}{1}'.format(hashlib.md5(hexkey).hexdigest()[1:10], 1)\n\n return passwd", "def _md5(input):\n m = hashlib.md5()\n m.update(input)\n return m.hexdigest()", "def create_config_hash(config):\n value_str = \"\"\n for section in config.sections:\n for key in section.keys():\n value_str += str(config[section][key])\n value_hash = hashlib.md5(value_str.encode('utf-8')).hexdigest()\n\n return value_hash", "def default_md5(key: KeyT, *args, **kwargs) -> bytes:\n return md5(key).digest() # type: ignore", "def 
create_hash_for_post_payment(*args, **kwargs):\n KEYS = ('command', 'var1')\n hash = sha512(settings.PAYU_INFO[\"merchant_key\"])\n for key in KEYS:\n try:\n hash.update(\"%s%s\" % ('|', kwargs[key]))\n except KeyError:\n pass\n hash.update(\"%s%s\" % ('|', settings.PAYU_INFO[\"merchant_salt\"]))\n return hash.hexdigest().lower()", "def buildAuthParams():\n\tauthHash = hashlib.md5();\n\t#time.time() gets the current time since the epoch (1970) with decimals seconds\n\ttemp = str.encode(developers['apikey'] + developers['secret'] + repr(int(time.time())))\n\tauthHash.update(temp)\n\treturn authHash.hexdigest()", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def digest(self, *args, **kwargs): # real signature unknown\n pass", "def main():\n inputStr = input(\"Enter string to be hashed: \")\n print(f\"Hashed string: {hash(inputStr.replace(' ', ''))}\")" ]
[ "0.7316442", "0.71318525", "0.7110362", "0.63037807", "0.6302396", "0.6230085", "0.6221342", "0.6154309", "0.6154309", "0.6154309", "0.6154309", "0.6154309", "0.6154309", "0.60746914", "0.6072491", "0.5993483", "0.5954927", "0.59384996", "0.5916692", "0.58866465", "0.58668876", "0.5844368", "0.5836285", "0.5825734", "0.5825734", "0.5825734", "0.5825734", "0.5825734", "0.5825734", "0.5812817" ]
0.7924275
0
given an opened email file f, parse out all text below the metadata block at the top (in Part 2, you will also add stemming capabilities) and return a string that contains all the words in the email (spaceseparated)
def parseOutText(f): f.seek(0) ### go back to beginning of file (annoying) all_text = f.read() ### split off metadata content = re.split("X-FileName:.*$", all_text, flags=re.MULTILINE, maxsplit=1) words = "" if len(content) > 1: text_string = content[1] ## remove mails that are forwarded or to which are responded # e.g. ---------------------- Forwarded" text_string = re.split("-*\sForwarded", text_string, maxsplit=1)[0] # -----Original Message----- text_string = re.split("-*\Original\sMessage", text_string, maxsplit=1)[0] # Vince J Kaminski@ECT # 04/30/2001 02:28 PM # To: Stanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron # cc: Vince J Kaminski/HOU/ECT@ECT # or # Vince J Kaminski@ECT # 04/30/2001 02:28 PM # to: Stanley Horton/Corp/Enron@Enron, Danny McCarty/ET&S/Enron@Enron # cc: Vince J Kaminski/HOU/ECT@ECT text_string = re.split("((.*\n){2})[Tt]o:\s", text_string, maxsplit=1)[0] ### remove punctuation # should be autopmatically by scikit learn #text_string = text_string.translate(string.maketrans("", ""), string.punctuation) ### project part 2: comment out the line below #words = text_string ### split the text string into individual words, stem each word, ### and append the stemmed word to words (make sure there's a single ### space between each stemmed word) from nltk.stem.snowball import SnowballStemmer stemmer = SnowballStemmer("english") words = [stemmer.stem(word) for word in text_string.split()] return " ".join(words)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n\n ### split off metadata\n content = all_text.split(\"X-FileName:\")\n words = \"\"\n if len(content) > 1:\n ### remove punctuation\n text_string = content[1].translate(str.maketrans(\"\", \"\", string.punctuation))\n\n ### split the text string into individual words\n words = text_string.split()\n\n return words", "def parseOutText(f):\n\n\n f.seek(0) ### go back to beginning of file (annoying)\n all_text = f.read()\n\n ### split off metadata\n content = all_text.split(\"X-FileName:\")\n words = \"\"\n stemmed=\"\"\n if len(content) > 1:\n ### remove punctuation\n text_string = content[1].translate(maketrans(\"\", \"\", string.punctuation))\n \n ### project part 2: comment out the line below\n# words = text_string\n\n ### split the text string into individual words, stem each word, \n ### and append the stemmed word to words (make sure there's a single\n ### space between each stemmed word)\n ps = SnowballStemmer(\"english\") \n \n words = text_string.split() \n for w in words: \n# print(w, \" : \", ps.stem(w)) \n stemmed= stemmed+ \" \"+ ps.stem(w)\n stemmed= stemmed.lstrip()\n \n\n return stemmed", "def process_email(email_contents):\n\n # Load Vocabulary\n vocab_list = get_vocab_list();\n\n # Init return value\n word_indices = [];\n\n # ========================== Preprocess Email ===========================\n\n # Find the Headers ( \\n\\n and remove )\n # Uncomment the following lines if you are working with raw emails with the\n # full headers\n\n # hdrstart = strfind(email_contents, ([char(10) char(10)]));\n # email_contents = email_contents(hdrstart(1):end);\n\n # Lower case\n email_contents = email_contents.lower()\n\n # Strip all HTML\n # Looks for any expression that starts with < and ends with > and replace\n # and does not have any < or > in the tag it with a space\n email_contents = re.sub(\"<[^<>]+>\", \" \", email_contents)\n\n # Handle Numbers\n # Look for one or more characters between 0-9\n email_contents = re.sub(\"[0-9]+\", \"number\", email_contents)\n\n # Handle URLS\n # Look for strings starting with http:// or https://\n email_contents = re.sub(\"(http|https)://[^\\s]*\", \"httpaddr\", email_contents)\n\n # Handle Email Addresses\n # Look for strings with @ in the middle\n email_contents = re.sub(\"[^\\s]+@[^\\s]+\", \"emailaddr\", email_contents)\n\n # Handle $ sign\n email_contents = re.sub(\"[$]+\", \"dollar\", email_contents)\n\n # ========================== Tokenize Email ===========================\n\n # Output the email to screen as well\n print(\"\\n==== Processed Email ====\\n\");\n\n # Process file\n l = 0;\n\n # Tokenize and also get rid of any punctuation\n stemmer = PorterStemmer()\n email_contents = re.split(r'[@$/#.-:&\\*\\+=\\[\\]?!(){},\\'\\'\\\">_<;%\\s\\n\\r\\t]+', email_contents)\n for s in email_contents:\n\n # Remove any non alphanumeric characters\n s = re.sub(\"[^a-zA-Z0-9]\", \"\", s)\n\n # Stem the word \n # (the porter_stemmer sometimes has issues, so we use a try catch block)\n #try:\n s = stemmer.stem(s.strip())\n #except:\n # s = \"\"\n # continue\n\n # Skip the word if it is too short\n if len(s) < 1:\n continue\n\n # Look up the word in the dictionary and add to word_indices if\n # found\n # ====================== YOUR CODE HERE ======================\n # Instructions: Fill in this function to add the index of s to\n # word_indices if it is in the vocabulary. 
At this point\n # of the code, you have a stemmed word from the email in\n # the variable s. You should look up s in the\n # vocabulary list (vocabList). If a match exists, you\n # should add the index of the word to the word_indices\n # vector. Concretely, if s = 'action', then you should\n # look up the vocabulary list to find where in vocabList\n # 'action' appears. For example, if vocabList{18} =\n # 'action', then, you should add 18 to the word_indices \n # vector (e.g., word_indices = [word_indices ; 18]; ).\n # \n # Note: vocabList[idx] returns a the word with index idx in the\n # vocabulary list.\n # \n # Note: You can use s1 == s2 to compare two strings (s1 and\n # s2). It will return True only if the two strings are equivalent.\n #\n\n\n\n # =============================================================\n\n # Print to screen, ensuring that the output lines are not too long\n if (l + len(s)) > 78:\n print()\n l = 0\n print(f\"{s} \", end=\"\")\n l = l + len(s) + 1\n\n # Print footer\n print('\\n\\n=========================')\n return word_indices", "def extract_metadata(file_name, token_type, create_labels, remove_stop_words):\n metadata = {'file': file_name}\n\n with open(file_name, 'r') as file:\n rows = file.readlines()\n\n rules = [\n ['Message-ID:', 'id'],\n ['Subject:', 'subject'],\n ]\n\n for (index, row) in enumerate(rows):\n row = row.lstrip('> \\t')\n for (pattern, prop) in rules:\n if row.startswith(pattern):\n metadata[prop] = row.replace(pattern,'')\n\n if 'body' not in metadata:\n if row.startswith('\\n'):\n metadata['body'] = '\\n'.join(rows[index:])\n\n elif '-----Original Message-----' in row:\n del metadata['body']\n\n if 'body' in metadata:\n if create_labels:\n metadata['label'] = create_label(metadata['body'])\n metadata['original_body'] = metadata['body']\n metadata['body'] = clean_text(metadata['body'], token_type, remove_stop_words)\n clean_subject = cleanse(metadata['subject'])\n metadata['subject'] = '' if clean_subject.count(' ') == len(clean_subject) else clean_subject\n\n return metadata", "def extract_from_full_text(input_file, output_file):\n fw = open(output_file, 'w')\n with open(input_file, 'r') as f:\n st = f.readline()\n while st:\n word = st.split('\\t')[0] + \"\\n\"\n if word.startswith(\"to \") or word.startswith(\"an \") or word.startswith(\"at \") or word.startswith(\"in \"):\n word = word[3:]\n if word.startswith(\"a \"):\n word = word[2:]\n if word.strip().endswith(\" of\"):\n word = word[:-3]\n fw.writelines(word)\n st = f.readline()\n fw.close()", "def process_file(path):\r\n\ttokenset = {}\r\n\r\n\tfp = open(path, 'r')\r\n\temailMsg = email.message_from_file(fp)\r\n\tfp.close()\r\n\r\n\ttokenset = parse_body(emailMsg.get_payload().lower())\r\n\r\n\treturn tokenset", "def transform(self, email_path):\n mail = open(email_path, 'r')\n content = mail.read(self.max_read_len)\n i = 0\n while not(content[i] == '\\n' and content[i + 1] == '\\n') and i < len(content) - self.ngram:\n i += 1\n header = content[:i]\n # TODO find a smarter way deal with the header-body problem\n body = content[i + 2:]\n if len(body) + len(header) > self.max_read_len:\n body = body[:max(1000, self.max_read_len - len(header))]\n header_set = self.tokenize(header)\n body_set = self.tokenize(body)\n mail.close()\n return (header_set, body_set)", "def processEmail(email_contents):\n # Lower case\n email_contents = email_contents.lower()\n # Strip all HTML\n email_contents = re.sub('<[^<>]+>', ' ', email_contents)\n # Handle Numbers\n email_contents = re.sub('[0-9]+', 
'number', email_contents)\n # Handle URLS\n email_contents = re.sub('(http|https)://[^\\s]*', 'httpaddr', email_contents)\n # Handle Email Addresses\n email_contents = re.sub('[^\\s]+@[^\\s]+', 'emailaddr', email_contents)\n # Handle $ sign\n email_contents = re.sub('[$]+', 'dollar', email_contents)\n # Remove any non alphanumeric characters\n email_contents = re.sub('[^a-zA-Z]', ' ', email_contents)\n # Tokenize ane remove single characters\n ps = PorterStemmer()\n email_contents = [ps.stem(token) for token\n in email_contents.split(\" \") if len(token) > 1]\n\n vocabList = getVocabList()\n word_indices = []\n for word in email_contents:\n ind = vocabList[vocabList.vocab == word].index\n if ind.any():\n word_indices.append(ind[0])\n print(word, '\\t', ind[0])\n\n return email_contents, word_indices", "def process(Email):\n # convert to lower case\n email = Email.read().lower()\n # strip any HTML\n temp = regx.sub(\"<.*?>\", \" \", email)\n # replace numbers for 0-9 with \"number\"\n temp = regx.sub(\"[0-9]+\", \"number\", temp)\n # replace Http adress to \"httpaddr\"\n temp = regx.sub(\"(http|https)://[^\\s]*\", \"httpaddr\", temp)\n # replace email adress with \"emailaddr\"\n temp = regx.sub(\"[^\\s]+@.*?\\s+\", \"emailaddr\", temp)\n # replace currency sign\n temp = regx.sub(\"[$]+\", \"dollar\", temp)\n temp = regx.sub(\"[']\", \" \", temp)\n # ========================== Tokenize Email ===========================\n # temp = regx.sub(\">+|:+|#+|[$]+|[.]+|@+|/+|-+|&+|[*]+|[+]+|=+|[]]+|[?]+|[()]+|[{}]+|,+|[']+|<+|_+|;+|%+\", \"\", temp)\n\n # remove punctuation\n temp = temp.translate(str.maketrans('', '', string.punctuation))\n\n # split the string in list of words\n tokenized_list = temp.split()\n stemmer = PorterStemmer()\n a = []\n vocab = VocabArray.getVocab()\n extracted_features = mat.zeros((1, len(vocab)))\n\n i = 0\n print(\"========================== Processed Email =========================\")\n for w in range(len(tokenized_list)):\n if len(tokenized_list[w]) < 1:\n continue\n\n # stem the word\n word = stemmer.stem(tokenized_list[w])\n print(word, end=\" \")\n if i > 20:\n i = 0\n print(\"\\n\")\n # get index of the word from vocab list\n indices = mat.where(vocab == word)[0]\n i += 1\n if len(indices) == 0:\n continue\n\n a.append(indices)\n extracted_features[:, indices] = 1\n\n word_indices = mat.c_[mat.array(a)]\n print(\"\\n\")\n return word_indices, extracted_features", "def processEmail(email_contents):\n # % Load Vocabulary\n vocabList = getVocabList()\n\n # % Init return value\n word_indices = []\n\n # % ========================== Preprocess Email ===========================\n # % Find the Headers ( \\n\\n and remove )\n # % Uncomment the following lines if you are working with raw emails with the\n # % full headers\n # %\n # % hdrstart = strfind(email_contents, ([char(10) char(10)]));\n # % email_contents = email_contents(hdrstart(1):end);\n\n # % Lower case\n email_contents = email_contents.lower()\n\n # % Strip all HTML\n # % Looks for any expression that starts with < and ends with > and replace\n # % and does not have any < or > in the tag it with a space\n email_contents = re.sub(r'<[^<>]+>', ' ', email_contents)\n\n # % Handle Numbers\n # % Look for one or more characters between 0-9\n email_contents = re.sub(r'[0-9]+', 'number', email_contents)\n\n # % Handle URLS\n # % Look for strings starting with http:// or https://\n email_contents = re.sub(r'(http|https)://[^\\s]*', 'httpaddr', email_contents)\n\n # % Handle Email Addresses\n # % Look for strings with @ 
in the middle\n email_contents = re.sub(r'[^\\s]+@[^\\s]+', 'emailaddr', email_contents)\n\n # % Handle $ sign\n email_contents = re.sub(r'[$]+', 'dollar ', email_contents)\n\n # Pick words-like strings\n email_contents_list = re.findall(r'[\\w]+', email_contents)\n email_contents = ' '.join(email_contents_list)\n\n # % ========================== Tokenize Email ===========================\n #\n # % Output the email to screen as well\n print('\\n==== Processed Email ====\\n')\n\n # % Tokenize and also get rid of any punctuation\n porter_stemmer = PorterStemmer()\n words = word_tokenize(email_contents)\n email_contents_list = []\n for index, word in enumerate(words):\n stemmed_word = porter_stemmer.stem(word)\n email_contents_list.append(stemmed_word)\n try:\n index = vocabList.index(stemmed_word)\n except ValueError:\n continue\n else:\n word_indices.append(index)\n\n email = ' '.join(email_contents_list)\n print('Email contents:\\n', email)\n return word_indices", "def thesaurus(self, message):\n read_pointer = open('Thesaurus.txt')\n\n for line in read_pointer:\n split_line = line.split(':', 1)\n if split_line[0] == message:\n return split_line[1]", "def read_in_file(filename):\n with open(filename, 'r') as file:\n lines = file.readlines() # reads all the file lines into one list\n text = \"\"\n for item in lines:\n text += str(item)\n text = text.replace('\\n', \" \")\n for c in string.punctuation:\n text = text.replace(c, \" \")\n text = text.replace(\" \", \" \")\n if \"START OF THIS PROJECT GUTENBERG EBOOK POOR BLOSSOM\" in text: # This starts the text after the \"***START OF THIS PROJECT GUTENBERG EBOOK POOR BLOSSOM***\" text\n index = text.index(\"START OF THIS PROJECT GUTENBERG EBOOK POOR BLOSSOM \")\n text = text[index + 51:]\n print(text)\n else:\n print(\"FYI, this is not a Gutenberg Book\")\n print(text)\n return text", "def get_mentions(fname):\n capture = re.compile('NPRI in ?.* ?(news|print)', re.I)\n with open(fname) as fp:\n for line in fp:\n line = line.strip()\n if capture.search(line):\n yield get_info(line)", "def get_text_from_email(msg):\n parts = []\n for part in msg.walk():\n if part.get_content_type() == 'text/plain':\n parts.append(part.get_payload())\n return ''.join(parts)", "def extract (msgfile, key):\n m = email.message_from_file(msgfile)\n From, To, Subject, Date = caption(m)\n #Text, Html, Files, Parts = pullout(m, key)\n Text = Text.strip(); Html = Html.strip()\n msg = {\"subject\": Subject, \"from\": From, \"to\": To, \"date\": Date,\n \"text\": Text, \"html\": Html, \"parts\": Parts}\n if Files: msg[\"files\"] = Files\n return msg", "def read_file(fname):\n f = open(fname,'rb')\n raw_file = f.read()\n f.close()\n raw_file = raw_file.replace(b'\\r\\n',b'\\n')\n emails =raw_file.split(b\"\\n\\n\\nFrom\")\n emails = [emails[0]]+ [b\"From\"+x for x in emails[1:] ]\n return emails", "def read_file(fname):\n f = open(fname,'rb')\n raw_file = f.read()\n f.close()\n raw_file = raw_file.replace(b'\\r\\n',b'\\n')\n emails =raw_file.split(b\"\\n\\n\\nFrom\")\n emails = [emails[0]]+ [b\"From\"+x for x in emails[1:] ]\n return emails", "def parse_body(body):\n for line in body.lower().split(\"\\n\"):\n words = line.split()\n try:\n idx = words.index(\"re-run\")\n except ValueError:\n continue\n if words[idx + 1] == \"full\":\n yield words[idx : idx + 3]\n else:\n yield words[idx : idx + 2]", "def get_processed_content(self, fn):\n fin = open(os.path.join(self.wiki_path, fn), 'rb')\n text = fin.read()\n fin.close()\n return (x for x in gensim.utils.tokenize(text, 
lowercase=True, deacc=True, errors=\"ignore\") if x not in STOPLIST)", "def read_message(message_file):\n words = []\n f = open(message_file, \"r\")\n for line in f:\n for word in line.split():\n if not stopword(word):\n words.append(word)\n return words", "def read_article_2(filename):\n file = open(filename, \"r\")\n filedata = file.readlines()\n sentences = sent_tokenize(filedata[0])\n return sentences", "def analyze_message(messageIn):\n\n\tcontent = set()\n\tword_count = Counter()\n\temail = None\n\n\t#Iterate through file contents\n\t#for line in messageIn['body']:\n\tline_tokens = messageIn['body'].split()\n\n\tif len(line_tokens) > 1:\n\t\t#Discard links and strip characters. Add it to the list\n\t\tnew_content, new_count = clean_line(line_tokens)\n\t\tcontent = content.union(new_content)\n\t\tword_count += new_count\n\n\t# Add our fields\n\tcontrol.update_one(\n\t\t{\n\t\t\t'_id': messageIn['_id']\n\t\t},\n\t\t{\n\t\t\t\"$set\":{\n\t\t\t\t\"email\":messageIn['headers']['From'],\n\t\t\t\t\"words\":list(content),\n\t\t\t\t\"wordCount\":word_count,\n\t\t\t\t\"year\": messageIn['headers']['Date'].split()[3],\n\t\t\t\t\"version\":1\n\t\t\t}\n\t\t}\n\t)", "def process_extract_name_organization_details(device_index, mp3_filename, text, record):\n\n flag = 0\n text = process_speak_listen(device_index, mp3_filename, text, record, flag)\n if text is None:\n details = None\n else:\n sentences = process_token_sentence(text)\n entity_names = []\n\n for tree in sentences:\n entity_names.extend(process_extract_entity_names(tree))\n\n print(set(entity_names))\n try:\n details = entity_names[0]\n except IndexError:\n details = None\n\n return details", "def extractPersonalInfo():\n if 'Issued' in clean_lic:\n index = clean_lic.find('Issued')\n new_lic = clean_lic[2:index]\n new_lic_list = new_lic.split('\\\\n')\n while len(new_lic_list[len(new_lic_list)-1]) < 5:\n new_lic_list.pop()\n return(new_lic_list[-4:])\n else:\n return ''", "def _process(self, file: bytes) -> List[Tuple[str]]:\n decoded_text = file.decode('utf-8')\n # Replace end of line tokens\n if self.eol is not None and not self.split_by_sentence:\n decoded_text = decoded_text.replace('\\n', self.eol)\n\n # Split by sentence or unroll\n if self.split_by_sentence:\n nltk.download('punkt', quiet=True)\n text = [(sent.strip(),) for sent in nltk.tokenize.sent_tokenize(decoded_text)]\n else:\n text = [(decoded_text,)]\n\n return text", "def get_bare_file(filename):\n \"\"\" for a given entry, finds all of the info we want to display \"\"\"\n f = open(filename, 'r')\n str = f.read()\n str = str.decode('utf-8')\n e = {}\n try: e['title'] = re.search('(?<=title:)(.)*', str).group()\n except: pass\n try: e['slug'] = re.search('(?<=slug:)(.)*', str).group()\n except: pass\n try: e['summary'] = re.search('(?<=summary:)(.)*', str).group()\n except: pass\n try:\n e['content'] =re.search('(?<=content:)((?!category:)(?!published:)(.)|(\\n))*', str).group()\n if e['content'] == None:\n e['content'] = re.search('(?<=content:)((.)|(\\n))*$', str).group()\n except:\n pass\n try:\n e['published'] = re.search('(?<=published:)(.)*', str).group()\n except: pass\n try: e['author'] = re.search('(?<=author:)(.)*', str).group()\n except: pass\n try: e['category'] = re.search('(?<=category:)(.)*', str).group()\n except: pass\n try: e['url'] = re.search('(?<=url:)(.)*', str).group()\n except: pass\n try:\n e['uid'] = re.search('(?<=u-uid:)(.)*', str)\n if e['uid']:\n e['uid'] = e['uid'].group()\n else:\n e['uid'] = re.search('(?<=u-uid)(.)*', str).group()\n 
except: pass\n try: e['time-zone'] = re.search('(?<=time-zone:)(.)*', str).group()\n except: pass\n try: e['location'] = re.search('(?<=location:)(.)*', str).group()\n except: pass\n try: e['syndication'] = re.search('(?<=syndication:)(.)*', str).group()\n except: pass\n try: e['location_name'] = re.search('(?<=location-name:)(.)*', str).group()\n except: pass\n try: e['in_reply_to'] = re.search('(?<=in-reply-to:)(.)*', str).group()\n except:pass\n return e", "def createWordList(emailids, emaildata): #creates word list of all the words used in email bodies\n with open('res/dictionary.txt', 'w') as f:\n words = set([])\n for emailid in emailids:\n email = e.Email(emailid)\n subject = set(email.parsedsubject)\n body = set(email.body)\n try:\n emailcontent = body.union(subject)\n for word in emailcontent:\n if not word in words:\n words.add(word)\n f.write(word + '\\n')\n except AttributeError:\n print(body)", "def get_synopsis(file_data):\n for individual in file_data:\n if 'synopsis' in individual:\n synopsis = individual.split('\"')\n return synopsis[1]", "def extract_contents(enml):\n return re.search(r'<en-note[^>]*>(.*?)</en-note>', enml, re.S).group(1)", "def extract_text_from_pdf(file):\n\n return RegexpTokenizer(r'\\w+').tokenize(parser.from_file(file)['content'])" ]
[ "0.71798396", "0.66124284", "0.6192006", "0.6141319", "0.6066048", "0.60461986", "0.60439855", "0.6027324", "0.6021339", "0.59490126", "0.58234227", "0.57764715", "0.573566", "0.5679234", "0.5660324", "0.5651464", "0.5651464", "0.5589413", "0.55189335", "0.5435613", "0.53896666", "0.5380712", "0.5367632", "0.53486085", "0.5347853", "0.5345258", "0.53242224", "0.5315737", "0.52871025", "0.528403" ]
0.75676155
0
Recursively search for linked calendars, generating iteration of those found
def get_linked_cals(folders):
    for folder in folders:
        if 'link' in folder:
            for linked_folder in folder['link']:
                yield linked_folder
        if 'folder' in folder:
            subfolders = folder['folder']
            for linked_folder in get_linked_cals(subfolders):
                yield linked_folder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __addCalendars(self, tree, key=\"dates/calendars\"):\n \n calendars = self.__getStore(self.__data, \"calendar\")\n \n for element in tree.findall(\"./%s/*\" % key):\n if not element.get(\"draft\"):\n self.__addCalendar(calendars, element)", "def test_calendarsUpgradeWithNestedCollections(self):\n\n beforeUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \"nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n afterUIDContents = {\n \"64\": {\n \"23\": {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\": {\n \"calendar\": {\n db_basename: {\n \"@contents\": \"\",\n },\n },\n \".collection.nested1\": {\n \"nested2\": {},\n },\n }\n }\n },\n \".DS_Store\": {\n \"@contents\": \"\",\n }\n }\n\n before = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": beforeUIDContents,\n },\n \"principals\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \".DS_Store\":\n {\n \"@contents\": \"\",\n },\n \"__uids__\": afterUIDContents,\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))", "def calendars(self):\n cals = []\n\n data = self.children(cdav.Calendar.tag)\n for c_url, c_type, c_name in data:\n try:\n cal_id = c_url.split(\"/\")[-2]\n except:\n log.error(f\"Calendar {c_name} has unexpected url {c_url}\")\n cal_id = None\n cals.append(\n Calendar(self.client, id=cal_id, url=c_url, parent=self, name=c_name)\n )\n\n return cals", "def _find_cycle(subtypes: Dict[str, List[str]]) -> None:\n\n found_cycles = []\n\n def iterate(current_id, find_id):\n for t_entry in subtypes.get(current_id, []):\n if t_entry == find_id:\n found_cycles.append((find_id, current_id))\n iterate(t_entry, find_id)\n\n for the_id in subtypes['']:\n iterate(the_id, the_id)\n if len(found_cycles) > 0:\n for entry in found_cycles:\n logger.error(\n 'Cycle found with ids {} and {}'.format(entry[0], entry[1]))\n raise ValueError('cycles found in graph information')", "def calendars(self):\r\n return c.Calendars(self)", "def calendars(self):\r\n return c.Calendars(self)", "def calendars(self):\n return self.calendar_home_set.calendars()", "def parse(self, response):\n for link in response.css(\"#mainColumn a\"):\n if \"Calendar\" in \" \".join(link.css(\"*::text\").extract()):\n yield response.follow(\n link.attrib[\"href\"], callback=self._parse_calendar, dont_filter=True\n )", "def base_depth_for_period(resort_name, start_date, end_date):\n\n start_date_year = int(start_date[0:4])\n start_date_month = int(start_date[4:6])\n start_date_day = int(start_date[6:8])\n\n end_date_year = int(end_date[0:4])\n end_date_month = int(end_date[4:6])\n end_date_day = int(end_date[6:8])\n\n resort_table = resort_table_dict[resort_name]\n\n query = \"SELECT status_date FROM %s\" %(resort_table)\n connection = get_connection()\n\n period_date_list = []\n base_depth_list = []\n\n if connection is not None:\n try:\n for row in get_select_query_results(connection, query):\n row_year = int(row[0].strftime('%Y'))\n row_month = int(row[0].strftime('%m'))\n row_day = int(row[0].strftime('%d'))\n\n if row_year < start_date_year or row_year 
> end_date_year:\n continue\n if start_date_year == row_year:\n if start_date_month > row_month:\n continue\n if start_date_year == row_year:\n if start_date_month == row_month:\n if start_date_day > row_day:\n continue\n if end_date_year == row_year:\n if end_date_month < row_month:\n continue\n if end_date_year == row_year:\n if end_date_month == row_month:\n if end_date_day < row_day:\n continue\n\n date_to_add = (row[0].strftime('%Y') + row[0].strftime('%m') + row[0].strftime('%d'))\n period_date_list.append(date_to_add)\n\n except Exception as e:\n print(e, file=sys.stderr)\n\n for date in period_date_list:\n base_depth_for_list = base_depth_for_date(resort_name, date)\n base_depth_list.append(base_depth_for_list)\n\n return json.dumps(base_depth_list)", "def available_calendars(self):\n\n calendars = [filename for filename in listdir(self.calendar_directory)\n if isfile(join(self.calendar_directory, filename))]\n\n # Filter the calendar.judaic.2018 files and so on,\n # because we use the symbolic link calendar.judaic\n calendars = [calendar for calendar in calendars\n if not calendar.startswith(\"calendar.judaic.\")]\n return sorted([calendar[9:] for calendar in calendars])", "def test_calendarsUpgradeWithDuplicateOrphans(self):\n\n before = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n \"unknownuser.1\":\n {\n },\n \"unknowngroup.1\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))", "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars with service\")\n calendar_list = service.calendarList().list().execute()[\"items\"]\n app.logger.debug(\"Got calendar list\")\n result = []\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal:\n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n\n result.append(\n {\"kind\": kind, \"id\": id, \"summary\": summary, \"selected\": selected,\n \"primary\": primary})\n app.logger.debug(\"About to return from list_calendars with: \", result)\n return sorted(result, key=cal_sort_key)", "def test_calendarsUpgradeWithOrphans(self):\n\n before = {\n \"calendars\":\n {\n \"users\":\n {\n \"unknownuser\":\n {\n },\n },\n \"groups\":\n {\n \"unknowngroup\":\n {\n },\n },\n },\n \"principals\":\n {\n OLDPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n }\n\n after = {\n \"archived\":\n {\n \"unknownuser\":\n {\n },\n \"unknowngroup\":\n {\n },\n },\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))", "def _iterate(self, start=None, use_repetitions=True):\r\n path = start and start.path or [] # path to start\r\n multiple = start and start.multiple or [-1] # multiple to 
start\r\n\r\n elements = [] # a stack of elements\r\n cur = self.root # current element\r\n index = 0 # the next child element to examine\r\n \r\n def get_repetitions(element):\r\n \"\"\"Return the number of times an element is repeated.\"\"\"\r\n return int(element.attrib.get('repeat', 1))\r\n \r\n # go to start\r\n for i in path:\r\n elements.append(cur)\r\n cur = cur[i]\r\n multiple[-1] += 1\r\n\r\n try:\r\n while True:\r\n repetitions = get_repetitions(cur)\r\n if multiple[-1] >= repetitions and repetitions != 0:\r\n index = path.pop() + 1\r\n multiple.pop()\r\n cur = elements.pop()\r\n elif len(cur) == 0:\r\n yield Moment(cur.get('name'), cur.get('descr'), copy(path), copy(multiple))\r\n multiple[-1] += 1\r\n elif index < len(cur):\r\n path.append(index)\r\n multiple.append(0)\r\n elements.append(cur)\r\n cur = cur[index]\r\n index = 0\r\n if not use_repetitions: multiple[-1] = get_repetitions(cur) - 1\r\n else:\r\n multiple[-1] += 1\r\n index = 0\r\n except IndexError:\r\n pass # iteration is done\r", "def __iter__(self):\n for arc in self.agenda:\n yield arc", "def resolve_bridge_days(self, verbose=0):\n for i in range(0, self.D):\n if self.is_bridge_day(i):\n if verbose > 0:\n print(\"Found a bridge day:\", i)\n self.print_calendar()\n self.add_holiday(i) # should be 1 afterwards in every case", "def calendars(self):\n if \"calendars\" in self._prop_dict:\n return CalendarsCollectionPage(self._prop_dict[\"calendars\"])\n else:\n return None", "def list_calendars(service):\n app.logger.debug(\"Entering list_calendars\") \n calendar_list = service.calendarList().list().execute()[\"items\"]\n result = [ ]\n for cal in calendar_list:\n kind = cal[\"kind\"]\n id = cal[\"id\"]\n if \"description\" in cal: \n desc = cal[\"description\"]\n else:\n desc = \"(no description)\"\n summary = cal[\"summary\"]\n # Optional binary attributes with False as default\n selected = (\"selected\" in cal) and cal[\"selected\"]\n primary = (\"primary\" in cal) and cal[\"primary\"]\n \n\n result.append(\n { \"kind\": kind,\n \"id\": id,\n \"summary\": summary,\n \"selected\": selected,\n \"primary\": primary\n })\n return sorted(result, key=cal_sort_key)", "def _get_links(self, from_year):\n self.links = []\n self.titles = []\n self.speakers = []\n self.dates = []\n\n r = requests.get(self.calendar_url)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n if self.verbose:\n print(\"Getting links for press conference scripts...\")\n presconfs = soup.find_all(\n \"a\", href=re.compile(\"^/monetarypolicy/fomcpresconf\\d{8}.htm\")\n )\n presconf_urls = [\n self.base_url + presconf.attrs[\"href\"] for presconf in presconfs\n ]\n for presconf_url in presconf_urls:\n r_presconf = requests.get(presconf_url)\n soup_presconf = BeautifulSoup(r_presconf.text, \"html.parser\")\n contents = soup_presconf.find_all(\n \"a\", href=re.compile(\"^/mediacenter/files/FOMCpresconf\\d{8}.pdf\")\n )\n for content in contents:\n # print(content)\n self.links.append(content.attrs[\"href\"])\n self.speakers.append(\n self._speaker_from_date(self._date_from_link(content.attrs[\"href\"]))\n )\n self.titles.append(\"FOMC Press Conference Transcript\")\n self.dates.append(\n datetime.strptime(\n self._date_from_link(content.attrs[\"href\"]), \"%Y-%m-%d\"\n )\n )\n if self.verbose:\n print(\"{} links found in current page.\".format(len(self.links)))\n\n # Archived before 2015\n if from_year <= 2014:\n print(\"Getting links from archive pages...\")\n for year in range(from_year, 2015):\n yearly_contents = []\n fomc_yearly_url = (\n 
self.base_url\n + \"/monetarypolicy/fomchistorical\"\n + str(year)\n + \".htm\"\n )\n r_year = requests.get(fomc_yearly_url)\n soup_yearly = BeautifulSoup(r_year.text, \"html.parser\")\n\n presconf_hists = soup_yearly.find_all(\n \"a\", href=re.compile(\"^/monetarypolicy/fomcpresconf\\d{8}.htm\")\n )\n presconf_hist_urls = [\n self.base_url + presconf_hist.attrs[\"href\"]\n for presconf_hist in presconf_hists\n ]\n for presconf_hist_url in presconf_hist_urls:\n # print(presconf_hist_url)\n r_presconf_hist = requests.get(presconf_hist_url)\n soup_presconf_hist = BeautifulSoup(\n r_presconf_hist.text, \"html.parser\"\n )\n yearly_contents = soup_presconf_hist.find_all(\n \"a\",\n href=re.compile(\"^/mediacenter/files/FOMCpresconf\\d{8}.pdf\"),\n )\n for yearly_content in yearly_contents:\n # print(yearly_content)\n self.links.append(yearly_content.attrs[\"href\"])\n self.speakers.append(\n self._speaker_from_date(\n self._date_from_link(yearly_content.attrs[\"href\"])\n )\n )\n self.titles.append(\"FOMC Press Conference Transcript\")\n self.dates.append(\n datetime.strptime(\n self._date_from_link(yearly_content.attrs[\"href\"]),\n \"%Y-%m-%d\",\n )\n )\n if self.verbose:\n print(\n \"YEAR: {} - {} links found.\".format(\n year, len(presconf_hist_urls)\n )\n )\n print(\"There are total \", len(self.links), \" links for \", self.content_type)", "def calendar_lists(self):\r\n return CalendarLists(self)", "def __setup_recursion(folder_found, links_titles):\n urls = []\n if folder_found:\n for element_x, _, element_z in links_titles:\n if element_z == 'folder':\n # fill urls with sub-links to recursively call crawl function on them\n urls.append(element_x)\n return urls", "async def All_orgs():\n\n links_13 = []\n links_14 = []\n valid_url = \"/?archive/?gsoc/\\d+[0-9]/orgs/[a-zA-Z]+\"\n for year in range(2009, 2016):\n year_url = melange + \"/archive/gsoc/{}\".format(year)\n soup = await get_page(year_url)\n\n for url in soup.find_all('a'):\n if re.match(valid_url, url.get(\"href\")):\n if year <= 2013:\n links_13.append(join(melange, url.get(\"href\")[1:]))\n else:\n links_14.append(join(melange, url.get(\"href\")[1:]))\n return links_13, links_14", "def scrap_calendar_available_date(self):\n\n self.driver.find_element_by_xpath(\"//div[@class='publication-info telechargements replie']/h3[contains(., 'Téléchargement')]\").click()\n self.driver.find_element_by_id('datepicker').click()\n\n json_date = defaultdict(lambda : defaultdict(list))\n\n dropdown_year = Select(self.driver.find_element_by_xpath(\"//select[@class='ui-datepicker-year']\"))\n available_year = [year_option.get_attribute('value') for year_option in dropdown_year.options]\n for year in available_year:\n print(year)\n self.driver.find_element_by_xpath(f\"//select[@class='ui-datepicker-year']/option[@value='{year}']\").click()\n dropdown_month = Select(self.driver.find_element_by_xpath(\"//select[@class='ui-datepicker-month']\"))\n available_month = [month_option.get_attribute('value') for month_option in dropdown_month.options] \n \n available_month = list(set(available_month)-set(['5', '6', '7', '8', '9', '10']))\n \n for month in available_month:\n print(month)\n self.driver.find_element_by_xpath(f\"//select[@class='ui-datepicker-month']/option[@value='{month}']\").click()\n available_days = [day.text for day in self.driver.find_elements_by_xpath(\"//td[@data-handler='selectDay']\")]\n for day in available_days:\n if day != '':\n json_date[year][month].append(day)\n \n element = 
self.driver.find_element_by_xpath(\"//div[@class='publication-info telechargements']/h3[contains(., 'Téléchargement')]\")\n self.driver.execute_script(\"arguments[0].click();\", element)\n\n return json_date", "def get_cal_events(user, calservice):\r\n cal_page_token = None\r\n while True:\r\n try:\r\n #the next for loop retrives the calendar events\r\n #list to be checked for matching criteria\r\n prieml = user['primaryEmail']\r\n creator_to_del = '[email protected]'\r\n event_to_del = 'Digital Directorate Team Meeting'\r\n events = calservice.events().list(calendarId=prieml,\r\n pageToken=cal_page_token).execute()\r\n for event in events['items']:\r\n if event['status'] != 'cancelled':\r\n try:\r\n #this is the criteri to be checked against\r\n organiser = event['organizer']['email']\r\n summary = event['summary']\r\n if organiser == creator_to_del \\\r\n and summary == event_to_del:\r\n try:\r\n #checking for specific start date \r\n #in the event some events have different\r\n #dateTime\\date keywords\r\n if event['start']['dateTime']:\r\n evdate = event['start']['dateTime']\r\n startDate = datetime.strptime(evdate[0:10],\r\n '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate[0:10]))\r\n except KeyError:\r\n #if the keyword is not dateTime \r\n #then fetch date keyword\r\n evdate = event['start']['date']\r\n startDate = datetime.strptime(evdate, '%Y-%m-%d')\r\n today = datetime.today()\r\n if startDate > today:\r\n print('{0} ({1}) {2} {3}'.format(prieml,\r\n event['summary'],\r\n event['organizer']['email'],\r\n evdate))\r\n except KeyError:\r\n continue\r\n cal_page_token = events.get('nextPageToken')\r\n if not cal_page_token:\r\n break\r\n except ValueError:\r\n print('Oops! Thhe last event has an error. 
Try again...')", "def _test_find_day(self, days):\n msg = \"Find day in list of %d elements\" % len(days)\n for d in range(0, len(days)):\n self._test_giod(days, days[d], 0,\n d, msg)\n self._test_giod(days, days[d], 1,\n d, msg + \" (next = 1)\")\n self._test_giod(days, days[d], \"next = -1\",\n d, msg)", "def func_calendar_list():\r\n creds = None\r\n global page_token\r\n #global new_calendar_list=[]\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n\r\n service = build('calendar', 'v3', credentials=creds)\r\n\r\n calendar_list = service.calendarList().list(pageToken=page_token).execute()\r\n new_calendar_list = []\r\n for calendar_list_entry in calendar_list['items']:\r\n new_calendar_list.append(calendar_list_entry['summary'])\r\n page_token = calendar_list.get('nextPageToken')\r\n return (new_calendar_list)", "def test_calendarsUpgradeWithError(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935E\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C73.ics\":\n {\n \"@contents\": event02_broken,\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n root = self.createHierarchy(before)\n\n config.DocumentRoot = root\n config.DataRoot = root\n\n (yield self.doUpgrade(config))\n\n self.assertTrue(self.verifyHierarchy(root, after))", "def test_calendarsUpgradeWithUIDsMultilevel(self):\n\n before = {\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_before,\n \"@xattrs\":\n {\n md5Attr: \"12345\",\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: \"12345\",\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n # Zlib compressed XML\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href 
xmlns='DAV:'>/calendars/users/wsanchez/calendar</href>\\r\\n</calendar-free-busy-set>\\r\\n\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": \"\",\n }\n }\n\n after = {\n \".calendarserver_version\":\n {\n \"@contents\": \"2\",\n },\n \"calendars\":\n {\n \"__uids__\":\n {\n \"64\":\n {\n \"23\":\n {\n \"6423F94A-6B76-4A3A-815B-D52CFD77935D\":\n {\n \"calendar\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"1E238CA1-3C95-4468-B8CD-C8A399F78C72.ics\":\n {\n \"@contents\": event01_after,\n \"@xattrs\":\n {\n md5Attr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\r\\n<getcontentmd5 xmlns='http://twistedmatrix.com/xml_namespace/dav/'>%s</getcontentmd5>\\r\\n\" % (event01_after_md5,)),\n },\n },\n \"@xattrs\":\n {\n xattrname(\"ignore\"): \"extra\",\n cTagAttr: isValidCTag, # method below\n },\n },\n \"inbox\":\n {\n db_basename: {\n \"@contents\": \"\",\n },\n \"@xattrs\":\n {\n freeBusyAttr: zlib.compress(\"<?xml version='1.0' encoding='UTF-8'?>\\n<calendar-free-busy-set xmlns='urn:ietf:params:xml:ns:caldav'>\\r\\n <href xmlns='DAV:'>/calendars/__uids__/6423F94A-6B76-4A3A-815B-D52CFD77935D/calendar/</href>\\r\\n</calendar-free-busy-set>\"),\n },\n },\n },\n },\n },\n },\n },\n NEWPROXYFILE:\n {\n \"@contents\": None,\n },\n }\n\n (yield self.verifyDirectoryComparison(before, after, reverify=True))", "def find_all_cycles(candidates, new_elem, path=[]):\n \n def have_cycle(candidates, path):\n \"\"\" Checks that when we have no more candidates, that our path\n 'endpoints' are cyclical. \"\"\"\n return (not candidates and path[0].prefix == path[-1].suffix)\n \n def have_dead_end(candidates, new_elem):\n \"\"\" Checks that we have at least one candidate whose prefix is\n cyclical with the new element's suffix. \"\"\"\n return new_elem.suffix not in map(lambda x: x.prefix, candidates)\n \n def remove_sgons(s_value, candidates):\n \"\"\" Returns a new list where all s-gonal candidates have been\n removed. \"\"\"\n return list(filter(lambda x: x.s != s_value,\n candidates))\n # Append new_elem to our working path, and test for our two exit criteria:\n # 1. A complete cycle -- There are no more candidates to extend our path\n # with and our ends wrap around prefix-suffix-cyclically\n # 2. 
A dead end -- There are no new candidates whose prefix match our\n # new element's suffix\n path = path + [new_elem]\n if have_cycle(candidates, path):\n return [path]\n if have_dead_end(candidates, new_elem):\n return []\n # Now go through every candidate and find the handful of ones whose prefix\n # match our new element's suffix.\n cycles = []\n for candidate in candidates:\n if new_elem.suffix == candidate.prefix:\n # When we find a valid candidate, we remove all candidates of the\n # same figurate type as our valid candidate.\n new_candidates = remove_sgons(candidate.s, candidates)\n # We then go down the path of finding all cycles with our valid\n # candidate as the new last-element\n new_cycles = find_all_cycles(new_candidates, candidate, list(path))\n for new_cycle in new_cycles:\n cycles.append(new_cycle)\n return cycles", "def get_outlook_calendar_entries(days = 1):\r\n outlook = win32.Dispatch('outlook.application')\r\n\r\n ns = outlook.GetNamespace(\"MAPI\")\r\n appointments = ns.GetDefaultFolder(9).Items\r\n appointments.Sort(\"[Start]\")\r\n appointments.IncludeRecurrences = \"True\"\r\n\r\n date_from = datetime.datetime.today()\r\n begin = date_from.date().strftime(\"%x\")\r\n\r\n date_to = datetime.timedelta(days=(days+1)) + date_from\r\n end = date_to.date().strftime(\"%x\")\r\n\r\n date_filter = \"[Start] >= '\" + begin + \"' AND [END] <= '\" + end + \"'\"\r\n\r\n print(date_filter)\r\n\r\n appointments = appointments.Restrict(date_filter)\r\n events_list = []\r\n\r\n for a in appointments:\r\n #print(\"from appointment \" + str(a.Start))\r\n event_date = a.Start.replace(tzinfo=timezone(datetime.timedelta(seconds=time.localtime().tm_gmtoff)))\r\n events_list.append([event_date, a.Subject, a.Duration, a.Location])\r\n\r\n return events_list" ]
[ "0.63429284", "0.57949895", "0.5711854", "0.5607237", "0.55794144", "0.55794144", "0.5459125", "0.5458623", "0.5429311", "0.5379322", "0.531464", "0.52693355", "0.52175975", "0.5208984", "0.51823956", "0.5172256", "0.5160157", "0.51547325", "0.5136413", "0.51360726", "0.513471", "0.5132009", "0.51258737", "0.50915056", "0.5085535", "0.5074666", "0.5071255", "0.50604683", "0.505642", "0.50374794" ]
0.6902938
0
Add a dotted line representing a change
def addDottedLine(self, sourceTreeLabelPrefix, sourceLabel, targetTreeLabelPrefix, targetLabel):
    color = "green"
    if sourceLabel != targetLabel:
        color = "gray"
    self.dotRepresentation.append(
        '{} -> {} [style=dotted color="{}" constraint=false]'.format(
            sourceTreeLabelPrefix + sourceLabel,
            targetTreeLabelPrefix + targetLabel,
            color))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addLineStyle(dist, focus, axis, pupil):\n r = 0 #focus / 2\n g = 0 #np.log10(dist) / (25 / 3)\n b = 0 #axis / 20\n a = 0.4\n rgb = [r, g, b, a]\n line = {'style': '-', 'color': rgb}\n return line", "def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')", "def update(self, line):", "def update_line(self):\n self._draw_line_text()\n self._draw_status()\n self._line_listbox.set_focus(self.model.l_index)", "def line(self, arg, fill):\n pass", "def increment_lines(self, d):\n self.lines += d\n styled_set_label_text(self.lines_display, \"Lines: \"+str(self.lines))", "def add(self, line):\n self.cull()\n self.lines.append(line)", "def addChange(change):", "def addChange(change):", "def _(event):\n if line.is_multiline:\n line.newline()\n else:\n if line.validate():\n cli_ref().line.add_to_history()\n cli_ref().set_return_value(line.document)", "def _append_line_color_update_expression(self) -> None:\r\n from apysc.expression import expression_file_util\r\n expression: str = (\r\n f'{self.variable_name}.stroke(\"{self.line_color}\");'\r\n )\r\n expression_file_util.append_js_expression(expression=expression)", "def line(value):\r\n return '({}, {}), ({}, {})'.format(value.x1(), value.y1(), value.x2(), value.y2())", "def addLegendLine(line,n):\n dislin.leglin(' ',line,n)", "def line(self, x, y):\n self.call('line', x, y)", "def newLineEvent(self, line):\n self.newLine_callback(line)", "def __draw_line(display, color, ball_pos, dx, dy):\n pygame.draw.line(display, color, ball_pos, (ball_pos[0] + dx, ball_pos[1] + dy), 2)", "def _draw_line_text(self):\n self._line_text.set_text(self.model.get_current_line())", "def do_draw_network(self, line):\n self.fibbing.root.lsdb.graph.draw(line)", "def _draw_line(plot, hori, vert, color, text):\n plot.plot(hori, vert, '-o'+color)\n plot.text(hori[-1]-3, vert[-1]+2, text, color=color)", "def DrawDottedLine(self, dc, point, length, vertical):\r\n\r\n for i in xrange(0, length, 2):\r\n dc.DrawPoint(point.x, point.y)\r\n if vertical:\r\n point.y += 2\r\n else:\r\n point.x += 2", "def line() -> str:\n return f\"---\"", "def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)", "def _b_line_changed(self):\n self.bSpin.setValue(self.bLine.value())\n self._update_image()", "def line_color(self, value: String) -> None:\r\n self._update_line_color_and_skip_appending_exp(value=value)\r\n self._append_line_color_update_expression()", "def draw(x,y,x1,y1,d,color=1):\n d.add(dxf.line((x,y),(x1,y1),color=color, layer='LINES',thickness=0.01))", "def _g_line_changed(self):\n self.gSpin.setValue(self.gLine.value())\n self._update_image()", "def add_to_plot(self, line_name, points):\n points = [x * 100 for x in points]\n plt.plot(points, label=line_name)", "def previous_line():\r\n set_point(point().previous_line())", "def _draw_line(self, event):\n if not self.obstacle_creation_mode:\n return\n\n if self.previous_coordinates is None:\n self.previous_coordinates = event.x, event.y\n self.new_obstacle.append([event.x, event.y])\n return\n\n x1, y1 = event.x, event.y\n\n if self._is_closing_shape(x1, y1, self.new_obstacle):\n x1, y1 = self.new_obstacle[0]\n else:\n self.new_obstacle.append([x1, y1])\n\n x0, y0 = self.previous_coordinates\n self.canvas.create_line(x0, y0, x1, y1, **self.LINE_OPTIONS)\n self.previous_coordinates = x1, y1", "def update_lines(self):\n self._checkfigure()\n for ld in self.lines:\n line = ld['line']\n\n color = ld['color']\n 
line.set_color(color)\n\n lw = ld['linewidth']\n hlf = ld['highlight factor']\n highlight = hlf if ld['highlighted'] else 1.0\n lw = lw*highlight\n line.set_linewidth(lw)\n\n for vline in ld['vlines']:\n vline.set_color(color)\n vline.set_linestyle('--')\n vline.set_linewidth(lw)\n\n for hline in ld['vlines']:\n hline.set_color(color)\n hline.set_linestyle('--')\n hline.set_linewidth(lw)" ]
[ "0.64584184", "0.6071593", "0.60253584", "0.59168375", "0.5902079", "0.58755267", "0.5826162", "0.5811116", "0.5811116", "0.579447", "0.578479", "0.57716346", "0.57066214", "0.5697388", "0.5693667", "0.56484234", "0.56478703", "0.56259626", "0.56172717", "0.56061083", "0.55969644", "0.5573388", "0.55642015", "0.5563676", "0.55618423", "0.5560819", "0.55608106", "0.55372626", "0.55356026", "0.55302745" ]
0.6653383
0
Read Double(8 bytes) from RAM
def read_double(self, process_handle: int, address: int):
    self.__bufferSize = 8
    value = self.__read_bytes(process_handle, address)
    return None if value is None else unpack('<d', bytearray(value))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_double(self):\n return self._packers[\"d\"].unpack(self.read(8))[0]", "def read_double(data):\n s_type = \"=%s\" % get_type(\"double\")\n return struct.unpack(s_type, data.read(8))[0]", "def ReadDouble(self, endian=\"<\"):\n return self.unpack(\"%sd\" % endian, 8)", "def read_double(stream, writer_schema=None, reader_schema=None): # noqa\n return unpack('<d', stream.read(8))[0]", "def toDouble(self, buff):\n right, left = struct.unpack(\"<Ii\", struct.pack(\"B\" * 8, *buff[0:8]))\n return float(left) + float(right)/(2**32)", "def _decode_double(fp):\n return struct.unpack('>d', fp.read(8))[0]", "def read_doubles(self, count=1, location=None):\n return_vals = []\n byteorder = {'little': '<d', 'big': '>d'}[self._byteorder]\n if self._tiff is not None:\n off = self._offset\n if location is not None:\n off = location\n for c in range(count):\n return_vals.append(unpack_from(byteorder, self._tiff[off:off + 8])[0])\n off += 8 # size\n if location is None:\n self._offset += (count * 8) # size)\n return return_vals", "def get_double(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_double()\n rc = grib_get_double(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value", "def get_double(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_double()\n rc = grib_get_double(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value", "def read_reals(self, dtype='f8'):\n return self.read_record(dtype)", "def decode_double(self, buf, pos):\n return self.decode_struct(self._double_fmt, buf, pos)", "def memory_read(self, addr: str) -> Byte:\n print(f\"memory read {addr}\")\n _parsed_addr = self._parse_addr(addr)\n if _parsed_addr:\n return _parsed_addr.read(addr)\n data = self.memory.read(addr)\n return data", "def read(self, length):\n return self.meas.read(length)", "def read_drt(self):\n data = Array('B')\n data = self.read(0, 0, 8)\n num_of_devices = drt_controller.get_number_of_devices(data)\n len_to_read = num_of_devices * 8\n\n data = self.read(0, 0, len_to_read + 8)\n self.drt_manager.set_drt(data)", "def _read24(self, register):\r\n ret = 0.0\r\n for b in self._read_register(register, 3):\r\n ret *= 256.0\r\n ret += float(b & 0xFF)\r\n return ret", "def read_raw(self, offset, size, return_raw = False):\n raw_data = self.reader(offset, size)\n if raw_data is None:\n return None\n if return_raw:\n return raw_data\n else:\n if size == 1:\n data = struct.unpack(\"%dB\" %size, raw_data)[0]\n else:\n data = struct.unpack(\"%dB\" %size, raw_data)\n return data", "def read(reader: BitStreamReader, _index: int) -> float:\n\n return reader.readFloat64()", "def _get_data(self, read_size):\n return self._character_device.read(read_size)", "def _read_num(self, pos):\n self._file.seek(pos)\n return u64(self._file.read(8))", "def read_memory(self, address, size):\n return self.read(0, address, size, mem_device=True)", "def getDouble(self, address: ghidra.program.model.address.Address) -> float:\n ...", "def getData(self):\n return struct.unpack(\"!f\",self.data)[0]", "def memory_read64(self, addr, num_long_words):\n buf_size = num_long_words\n buf = (ctypes.c_ulonglong * buf_size)()\n units_read = self._dll.JLINKARM_ReadMemU64(addr, buf_size, buf, 0)\n if units_read < 0:\n raise errors.JLinkException(units_read)\n\n return 
buf[:units_read]", "def get_data(self):\n self.dev.write(1, 'A0')\n digit1, digit2 = self.dev.read(0x81, 64)[:2]\n # Save the data as voltage between 0.0 and 5.0\n self.data0.append((digit1 + 256*digit2)*5.0/1024)", "def readMemory(memory): # static method\n\t\t# Read value from shared memory\n\t\tmemoryValue = memory.read()\n\t\t# Find the 'end' of the string and strip\n\t\ti = memoryValue.find(ord('\\0'))\n\t\tif i != -1:\n\t\t\tmemoryValue = memoryValue[:i]\n\t\telse:\n\t\t\terrorMessage = \"i: \" + str(i) + \" should be -1 to have read \\0 in memory location\"\n\t\t\traise ValueError(errorMessage)\n\t\treturn str(memoryValue.decode('ascii'))", "def read_vref(self) -> float:", "def read_float(self, process_handle: int, address: int):\n self.__bufferSize = 4\n value = self.__read_bytes(process_handle, address)\n return None if value is None else unpack('<f', bytearray(value))", "def read_floats(filepointer):\n\tdata = read_strings(filepointer)\n\tif not data:\n\t\treturn None\n\ttry:\n\t\tdata = [float(x) for x in data]\n\t\treturn data\n\texcept:\n\t\t# try the next line\n\t\treturn read_floats(filepointer)", "def ReadFloat(self, endian=\"<\"):\n return self.unpack(\"%sf\" % endian, 4)", "def read_raw8(self):\n raise NotImplementedError" ]
[ "0.73760796", "0.7177214", "0.7096821", "0.674607", "0.648972", "0.6238688", "0.6091336", "0.6064237", "0.6064237", "0.6027614", "0.6014518", "0.5953449", "0.5883696", "0.5883034", "0.58459276", "0.5834244", "0.5813193", "0.5796779", "0.57841295", "0.5764492", "0.5742572", "0.5661948", "0.5658944", "0.5649725", "0.56287134", "0.5627149", "0.5623371", "0.55946016", "0.5582269", "0.5551515" ]
0.73079646
1
Write Long Integer to RAM
def write_long_integer(self, process_handle: int, address: int, value):
    self.__bufferSize = 8
    is_write = self.__write_bytes(process_handle, address, value)
    return True if is_write else False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_long(self, l):\n if not isinstance(l, six.integer_types):\n raise TypeError(\"expected an int, got %r\" % (type(l),))\n\n if not -2147483648 <= l <= 2147483647:\n raise OverflowError(\"Not in range, %d\" % l)\n\n self.write(self._packers[\"l\"].pack(l))", "def memory_write64(self, addr, data, zone=None):\n words = []\n bitmask = 0xFFFFFFFF\n for long_word in data:\n words.append(long_word & bitmask) # Last 32-bits\n words.append((long_word >> 32) & bitmask) # First 32-bits\n return self.memory_write32(addr, words, zone=zone)", "def serialize_long(self, obj):\n return self.serialize_int(obj)", "def write_ulong(self, l):\n if not isinstance(l, six.integer_types):\n raise TypeError(\"expected an int, got %r\" % (type(l),))\n\n if not 0 <= l <= 4294967295:\n raise OverflowError(\"Not in range, %d\" % l)\n\n self.write(self._packers[\"L\"].pack(l))", "def write(writer: BitStreamWriter, value: int) -> None:\n\n writer.writeVarUInt64(value)", "def setLong(self, addr: ghidra.program.model.address.Address, value: long, bigEndian: bool) -> None:\n ...", "def wLong(self, value):\n self.w(struct.pack(self.endian + \"q\", value))", "def write_long(self, registeraddress, value, signed=False):\n MAX_VALUE_LONG = 4294967295 # Unsigned INT32\n MIN_VALUE_LONG = -2147483648 # INT32\n\n _checkInt(value, minvalue=MIN_VALUE_LONG, maxvalue=MAX_VALUE_LONG, description='input value')\n _checkBool(signed, description='signed')\n self._genericCommand(16, registeraddress, value, numberOfRegisters=2, signed=signed, payloadformat='long')", "def write_u32(self):\n pass", "def read_long(self):\n a, b, c, d = self.read_list(4)\n return a << 24 | b << 16 | c << 8 | d", "def add_int64(self, value):\n self._check_int_type(value, _INT_8BYTE_UPPERLIMIT)\n self._data += value.to_bytes(8, byteorder=\"little\")", "def pack_uint64(data: int) -> bytes:\n return struct.pack(\">Q\", data)", "def pack_uint64(data: int) -> bytes:\n return struct.pack(\">Q\", data)", "def write(writer: BitStreamWriter, value: int) -> None:\n\n writer.writeVarInt64(value)", "def setLong(self, addr: ghidra.program.model.address.Address, value: long) -> None:\n ...", "def __long__(self):\n return long(self.micros() // 1000000) # pragma: PY2", "def write_int32(self, val, timeout = 0):\n self.write(struct.pack(\"!i\", val), timeout)", "def write(self, value: int, /) -> None:", "def memory_write32(self, addr, data, zone=None):\n return self.memory_write(addr, data, zone, 32)", "def getLong(self, int: int, int2: int) -> int:\n ...", "def _write_uint(self, bits, number):\n assert bits % 8 == 0 and 0 < bits <= 64, bits\n\n result = _UintInByteCode(\n self.byte_array, len(self.byte_array), bits // 8)\n result.set(number) # this makes self.byte_array longer\n return result", "def setLong(self, address: ghidra.program.model.address.Address, value: long) -> None:\n ...", "def Write(buf: IO[bytes], chunk: bytes) -> None:\n buf.write(_UINT64.pack(len(chunk)))\n buf.write(chunk)", "def test_ulong_int(self):\n self.failUnlessEqual(self.callFunc('encode_long', self.const_integer), self.const_integer_long_encoded, 'long encoding FAILED...')", "def write_int32(self, i: int) -> None:\n self.buffer += struct.pack(\"<i\", i)", "def write_uint32(self, val, timeout = 0):\n self.write(struct.pack(\"!I\", val), timeout)", "def write_timestamp_micros_long(self, dt: datetime) -> None:\n self.write_int(datetime_to_micros(dt))", "def test_ulong_long_int(self):\n self.failUnlessEqual(self.callFunc('encode_longlong', self.const_integer), 
self.const_integer_long_long_encoded, 'long long encoding FAILED...')", "def write(self, value: int):\n self.data[self.pointer] = value", "def int64_t(n):\n return int(n).to_bytes(8, byteorder='little', signed=True)" ]
[ "0.6978887", "0.6569718", "0.6544994", "0.6524525", "0.6470489", "0.64520514", "0.6406458", "0.63829195", "0.6229732", "0.6182572", "0.617011", "0.61541975", "0.61541975", "0.6016069", "0.60110754", "0.59766585", "0.5974542", "0.5972365", "0.59481144", "0.59329474", "0.59187764", "0.5903135", "0.5847793", "0.5828866", "0.5813798", "0.5808899", "0.5790846", "0.5790122", "0.57800174", "0.5773187" ]
0.690406
1
Write Double to RAM
def write_double(self, process_handle: int, address: int, value):
    self.__bufferSize = 8
    is_write = self.__write_bytes(process_handle, address, value)
    return True if is_write else False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_double(self, d):\n if not isinstance(d, float):\n raise TypeError(\"expected a float, got %r\" % (type(d),))\n\n self.write(self._packers[\"d\"].pack(d))", "def write_double(self, f: float) -> None:\n self.write(STRUCT_DOUBLE.pack(f))", "def write_memory(self, address, data):\n self.write(0, address, data, mem_device = True)", "def setDouble(self, address: ghidra.program.model.address.Address, value: float) -> None:\n ...", "def createDouble(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Data:\n ...", "def write_memory(self, address, value):\n self.memory[Vm.filter_mem_address(address)] = value", "def _write(self, v, w):\n if self.overwrite_mode:\n if w > 0.5:\n self.memory[self.head_pos] = np.copy(v)\n if self.history is not None:\n self.history[\"adds\"][-1] = self._read()\n else:\n if self.history is not None:\n self.history[\"adds\"][-1] = (w * (v - self._read()))\n self.memory[self.head_pos] = (1 - w) * self._read() + v * w", "def read_double(self, process_handle: int, address: int):\n self.__bufferSize = 8\n value = self.__read_bytes(process_handle, address)\n return None if value is None else unpack('<d', bytearray(value))", "def write_data():", "def writeHAL_refTemp(self, val):\r\n self.hal['ref-temp-out'] = val", "def write(writer: BitStreamWriter, value: float) -> None:\n\n writer.writeFloat64(value)", "def read_double(self):\n return self._packers[\"d\"].unpack(self.read(8))[0]", "def write_memory(self, address, value):\n\n self.memory[address] = CPU.MASKS.word_mask(value)\n\n return", "def getDouble(self, address: ghidra.program.model.address.Address) -> float:\n ...", "def write(self, value: int, /) -> None:", "def write_float(self, process_handle: int, address: int, value):\n self.__bufferSize = 4\n is_write = self.__write_bytes(process_handle, address, value)\n return True if is_write else False", "def save_float16_npy(data, path):\n np.save(path, data.astype(np.float16))", "def setd(self, node, new_double):\n\n self.daq.syncSetDouble(f'/{self.device_id}/{node}', new_double)", "def write(self):", "def write(self):", "def write(value):\n return value", "def double(self):\n return self._double", "def write_mem(self):\n request = 'commands/writemem'\n return self._post(request)", "def write(self, fileW):\n fileW.wFloat(self.x)\n fileW.wFloat(self.y)\n fileW.wFloat(self.z)", "def __write_bytes(self, process_handle: int, address: int, value: [int, float]):\n if type(value) is int:\n value = c_long(value)\n elif type(value) is float:\n value = c_float(value)\n self.__bytes_written = (c_ubyte * (self.__bufferSize // 256 + 1))()\n is_write = self.WriteProcessMemory(process_handle, address, byref(value),\n self.__bufferSize, byref(self.__bytes_written))\n return True if is_write else False", "def dbWrite(dbPoint, formatedValue):\n raise NotImplementedError('dbWrite in simu mode')", "def set_fig(cls, obj, quad):\n\t\taddr = quad.result\n\t\ttype = abs(addr) // 1000 # integer division\n\t\trelative_address = abs(addr) - (type * 1000)\n\t\tprint \"> Rel = {} - {}\".format(abs(addr), (type * 1000))\n\t\tprint \"> Set New Fig mem value: type = {}, addr = {}\".format(type, relative_address)\n\n\t\tif addr < 0:\n\t\t\tcls.heap.memory[type][abs(relative_address)] = obj\n\t\t\tprint \"> Heap memory: {}\".format(cls.heap.memory)\n\t\telse:\n\t\t\tcls.stack.peek().memory[type][relative_address] = obj\n\t\t\tprint \"> Stack memory: {}\".format(cls.stack.peek().memory)", "def do_math_double(cls, quad, type):\n\t\tdata1 = 
cls.get_address_value(quad.left_operand)\n\t\tdata2 = cls.get_address_value(quad.right_operand)\n\t\tval = 0.0\n\t\tif(type == \"pow\"):\n\t\t\tval = math.pow(data1, data2)\n\n\t\tcls.set_address_value(quad.result, val)", "def save_memory(self, filename):\n \n\n with open(filename + '/obses.npy', 'wb') as f:\n np.save(f, self.obses)\n \n with open(filename + '/actions.npy', 'wb') as f:\n np.save(f, self.actions)\n\n with open(filename + '/next_obses.npy', 'wb') as f:\n np.save(f, self.next_obses)\n \n with open(filename + '/rewards.npy', 'wb') as f:\n np.save(f, self.rewards)\n \n with open(filename + '/not_dones.npy', 'wb') as f:\n np.save(f, self.not_dones)\n \n with open(filename + '/not_dones_no_max.npy', 'wb') as f:\n np.save(f, self.not_dones_no_max)\n\n with open(filename + '/index.txt', 'w') as f:\n f.write(\"{}\".format(self.idx))\n\n print(\"save buffer to {}\".format(filename))", "def set_memory(self, address, value):\n #self.memory[address] = value & 0xFF\n if address in self.bwrites:\n self.paused = True\n self.pause_reason = 'Write at ' + hex(address)\n return self.memory.SetMemory(self, address, value)" ]
[ "0.6733481", "0.65203", "0.6181993", "0.5956225", "0.5944526", "0.5870351", "0.58195794", "0.5721083", "0.5606261", "0.559216", "0.553943", "0.553408", "0.54819244", "0.545933", "0.54461956", "0.5419049", "0.5410827", "0.53655046", "0.5362042", "0.5362042", "0.5360366", "0.53493786", "0.53486764", "0.5296751", "0.5273005", "0.52029985", "0.5199054", "0.51863474", "0.518384", "0.5178074" ]
0.67981523
0
Creates a new instance of the class extending Definition. After creation any dependencies defined in the class are looked up in mod_reg and added as fields to the newly created instance.
def _new_instance(cls: Type[T], mod_dir: str, home_dir: str, state_dir: str,
                  mod_reg: Dict[str, "Definition"]) -> T:
    assert cls.name
    mod = cls(mod_dir, home_dir, state.load_state(state_dir, cls.name),
              logging.get_logger(cls.name))
    for dep_name in cls.required:
        dep = mod_reg[dep_name]
        setattr(mod, dep_name, _Protector(dep))
    for dep_name in cls.optional:
        opt_dep = mod_reg.get(dep_name)
        if not opt_dep:
            _LOG.info(
                f"optional dependency {dep_name} of {cls.name} not available"
            )
            setattr(mod, dep_name, None)
            continue
        setattr(mod, dep_name, _Protector(opt_dep))
    return mod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def __init__(self):\n \n cls = self.__class__\n \n # check if the class doesn't override mod_info\n if cls.mod_info == Mod.mod_info:\n # check mandatory attributes\n if not hasattr(cls, \"name\"):\n raise Exception(\"Mod must specify the class attribute `name`.\")\n if not hasattr(cls, \"version\"):\n raise Exception(\"Mod must specify the class attribute `version`.\")\n if not hasattr(cls, \"author\"):\n raise Exception(\"Mod must specify the class attribute `author`.\")\n if not hasattr(cls, \"nsfw\"):\n cls.nsfw = False\n if not hasattr(cls, \"dependencies\"):\n cls.dependencies = []\n else:\n # cannot have both mod_info and class attributes\n if hasattr(cls, \"name\"):\n raise Exception(\"Mod name can only be defined either by class attribute or mod_info function, not both.\")\n if hasattr(cls, \"version\"):\n raise Exception(\"Mod version can only be defined either by class attribute or mod_info function, not both.\")\n if hasattr(cls, \"author\"):\n raise Exception(\"Mod author can only be defined either by class attribute or mod_info function, not both.\")\n if hasattr(cls, \"nsfw\"):\n raise Exception(\"Mod nsfw tag can only be defined either by class attribute or mod_info function, not both.\")\n \n # set class attributes from mod_info\n mi = self.mod_info()\n cls.name = mi[0]\n cls.version = mi[1]\n cls.author = mi[2]\n cls.nsfw = mi[3] if len(mi) >= 4 else False\n cls.dependencies = []\n \n # check if class attributes have valid types\n assert isinstance(cls.name, (str, unicode))\n assert isinstance(cls.version, (str, unicode))\n assert isinstance(cls.author, (str, unicode))\n assert isinstance(cls.nsfw, bool)\n assert isinstance(cls.dependencies, (list, tuple))", "def make_module_instance(self, *args, **kwargs):\r\n\r\n # Function to go through member lists and dictionaries recursively,\r\n # to look for submodules on which make_module_instance needs to be called\r\n def recurse(v):\r\n if isinstance(v,list):\r\n iterv = enumerate(v)\r\n else:\r\n iterv = v.iteritems()\r\n #backport\r\n #iter = enumerate(v) if isinstance(v,list) else v.iteritems()\r\n for sk,sv in iterv:\r\n if isinstance(sv,(list,dict)):\r\n sv = recurse(sv)\r\n elif isinstance(sv,Module):\r\n sv = sv.make_module_instance(args,kwargs)\r\n v[sk] = sv\r\n return v\r\n\r\n for k,v in self.local_attr.iteritems():\r\n if isinstance(v,Module):\r\n v = v.make_module_instance(args,kwargs)\r\n self[k] = self.__wrapper__(v)\r\n elif isinstance(v,Method):\r\n self.__setitem__(k,v)\r\n else:\r\n # iterate through lists and dictionaries to wrap submodules\r\n if isinstance(v,(list,dict)):\r\n self[k] = self.__wrapper__(recurse(v))\r\n try:\r\n self[k] = self.__wrapper__(v)\r\n except Exception:\r\n if isinstance(v, Component):\r\n raise\r\n else:\r\n self.__dict__[k] = v\r\n return self", "def contribute_to_class(self, *args: Any, **kwargs: Any) -> None:\n super().contribute_to_class(*args, **kwargs)\n self.add_base_fields()\n self.add_sub_factories()\n self.add_related_factories()\n self.add_m2m_factories()\n # Reevaluated declarations:\n for k, v in vars(self.factory).items():\n if self._is_declaration(k, v):\n self.base_declarations[k] = v\n self.pre_declarations, self.post_declarations = (\n factory.builder.parse_declarations(self.declarations))", "def _createModuleObj(self):\n ModuleTimeWeakening.__init__(self)\n return", "def _new_instance(self):\n return 
self.__class__(self._vmodule)", "def _createModuleObj(self):\n raise NotImplementedError(\"Implement in derived class.\")", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def create_module(cls, *args, **kwargs): # real signature unknown\n pass", "def _createModuleObj(self):\n ModuleTimeHistory.__init__(self)", "def _setup_engine(class_definition, params):\n\n cls = load_from_module(class_definition)\n return cls(params)", "def _new_instance(self):\n return self.__class__(self._fmodule, self._tensor_rank)", "def _createModuleObj(self):\n ModuleOutputSolnDomain.__init__(self)", "def new(self):\n\n self.obj = self.factory()\n\n if self.textproperty is None:\n self.attributes = ElementHandler.load_definitions(self, self.obj)", "def __init__(cls, name, bases, dct):\n #Create a registry for *this* class\n cls._Registry = {}\n #Insert a reference to this class in it's *base class'* registry\n cls._super(bases)._Registry[name] = cls", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def new(self, _def, _dict=None, **kwargs):\n\n _def = self.get_def(_def)\n obj = AnodeObjectBase(_def, _dict, **kwargs)\n self.instances.add(obj)\n self.instances_by_name[_def.type.name] = obj\n return obj", "def __init__(self):\n super().__init__()\n self.name = '' # name of this istance (alias)\n self.type = type(self).__name__ # specific type within this class\n self.verbosity = None # verbosity level (see message handler)\n self.globalAttributes = {} # this is a dictionary that contains parameters that are set at the level of the base classes defining the types\n self._knownAttribute = [] # this is a list of strings representing the allowed attribute in the xml input for the class\n self._knownAttribute += ['name','verbosity'] # attributes that are known\n self.printTag = 'BaseType' # the tag that refers to this class in all the specific printing\n self.variableGroups = {} # the variables this class needs to be aware of\n self.metadataKeys = set() # list of registered metadata keys to expect from this entity\n self.metadataParams = {} # dictionary of registered metadata keys with repect to their indexes", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def create_submodule(self, *args: Any, **kwargs: Any) -> Submodule:\n return Submodule.add(self, *args, **kwargs)", "def __init__(self, mod: str = None, **kwargs):\n \n self.mod = mod\n self.mod_name = '_factory'\n self.mod_args = {}\n self.kwargs = kwargs\n \n if self.mod is not None:\n self._parse_mod(self.mod)\n #elif self.kwargs['fn'] is not None:\n # self._parse_mod(self.kwargs['fn'])", "def _createModuleObj(self):\n ModuleFaultCohesiveKin.__init__(self)\n return", "def __init__(self, name):\r\n super(Module, self).__init__()\r\n self.name = name", "def __init__(self, descriptor, *args, **kwargs):\r\n # Set the descriptor first so that we can proxy to it\r\n self.descriptor = descriptor\r\n super(XModule, self).__init__(*args, **kwargs)\r\n self._loaded_children = None\r\n self.runtime.xmodule_instance = self", "def __init__(self, *args, **kwargs):\r\n super(CombinedOpenEndedModule, self).__init__(*args, **kwargs)\r\n\r\n self.system.set('location', self.location)\r\n\r\n if self.task_states is None:\r\n self.task_states = []\r\n\r\n if self.old_task_states is None:\r\n self.old_task_states = []\r\n\r\n version_tuple = VERSION_TUPLES[self.version]\r\n\r\n self.student_attributes = version_tuple.student_attributes\r\n self.settings_attributes = 
version_tuple.settings_attributes\r\n\r\n attributes = self.student_attributes + self.settings_attributes\r\n\r\n static_data = {}\r\n instance_state = {k: getattr(self, k) for k in attributes}\r\n self.child_descriptor = version_tuple.descriptor(self.system)\r\n self.child_definition = version_tuple.descriptor.definition_from_xml(etree.fromstring(self.data), self.system)\r\n self.child_module = version_tuple.module(self.system, self.location, self.child_definition, self.child_descriptor,\r\n instance_state=instance_state, static_data=static_data,\r\n attributes=attributes)\r\n self.save_instance_data()", "def __init__(self):\n super(Modules, self).__init__()\n \n global superclasses\n superclasses['universe'] = []\n superclasses['actions'] = ['universe']\n superclasses['booleans'] = ['universe']\n\n global instances\n instances['universe'] = set()\n instances['actions'] = set()\n instances['booleans'] = set()", "def setup_class(self):\n\n class SubFLRW(FLRW):\n def w(self, z):\n return super().w(z)\n\n self.cls = SubFLRW\n # H0, Om0, Ode0\n self.cls_args = (70 * u.km / u.s / u.Mpc, 0.27 * u.one, 0.689 * u.one)\n self.cls_kwargs = dict(Tcmb0=3.0 * u.K, name=self.__class__.__name__, meta={\"a\": \"b\"})", "def __init_subclass__(cls):\n # All Flax Modules are dataclasses. We force this convention since\n # it encourages the stateless behavior needed to clone module instances for\n # functional transformation. Instead of using a python metaclass, we\n # automatically transform Modules into dataclasses at subclass creation\n # time, and we set the last dataclass arguments to `parent` and `name`.\n cls._customized_dataclass_transform()\n # We wrap user-defined methods including setup and __call__ to enforce\n # a number of different checks and to provide clear error messages.\n cls._verify_single_or_no_compact()\n cls._wrap_module_methods()\n # Set empty class defaults.\n cls._state = _uninitialized_module_internal_state\n cls.scope = None", "def create_modules(self):\n self.bitcell = self.replica_bitcell = self.mod_replica_bitcell()\n self.add_mod(self.bitcell)\n\n # This is the replica bitline load column that is the height of our array\n self.rbl = bitcell_array(name=\"bitline_load\", cols=1, rows=self.bitcell_loads)\n self.add_mod(self.rbl)\n\n # FIXME: The FO and depth of this should be tuned\n self.delay_chain = self.mod_delay_chain([self.delay_fanout]*self.delay_stages)\n self.add_mod(self.delay_chain)\n\n self.inv = pinv()\n self.add_mod(self.inv)\n\n self.access_tx = ptx(tx_type=\"pmos\")\n self.add_mod(self.access_tx)" ]
[ "0.66630805", "0.66630805", "0.6438465", "0.63896257", "0.6250889", "0.61951476", "0.6176598", "0.61626345", "0.61407685", "0.61407685", "0.60893005", "0.6053891", "0.6015417", "0.5839071", "0.5776912", "0.575494", "0.5753405", "0.56903225", "0.56891537", "0.5682703", "0.5663051", "0.5655745", "0.5641892", "0.5636069", "0.56113774", "0.56010294", "0.5598641", "0.5598591", "0.55952424", "0.5594787" ]
0.71358895
0
Filter mod_info for modules that are enabled on the current host.
def enabled_on_host() -> Callable[[Loader.ModInfo], bool]:
    def filter_fn(mod_info: Loader.ModInfo) -> bool:
        if len(mod_info.mod_def.hostnames) == 0:
            return True
        if HOSTNAME not in mod_info.mod_def.hostnames:
            _LOG.info(
                f"{mod_info.mod_def.name} is not enabled for host {HOSTNAME}"
            )
            return False
        return True
    return filter_fn
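A minimal usage sketch of the filter above. The ModDef/ModInfo dataclasses, HOSTNAME, and _LOG below are stand-ins for the real Loader types and module globals, which are not shown in this record; names and values are illustrative only:

import logging
import socket
from dataclasses import dataclass, field
from typing import Callable, List

_LOG = logging.getLogger(__name__)
HOSTNAME = socket.gethostname()

@dataclass
class ModDef:
    name: str
    hostnames: List[str] = field(default_factory=list)  # empty -> enabled everywhere

@dataclass
class ModInfo:
    mod_def: ModDef

def enabled_on_host() -> Callable[[ModInfo], bool]:
    # Same logic as the document above, restated against the stand-in types.
    def filter_fn(mod_info: ModInfo) -> bool:
        if len(mod_info.mod_def.hostnames) == 0:
            return True
        if HOSTNAME not in mod_info.mod_def.hostnames:
            _LOG.info(f"{mod_info.mod_def.name} is not enabled for host {HOSTNAME}")
            return False
        return True
    return filter_fn

mods = [
    ModInfo(ModDef("everywhere")),                  # no hostnames -> always enabled
    ModInfo(ModDef("laptop-only", ["my-laptop"])),  # enabled only on host "my-laptop"
]
enabled = list(filter(enabled_on_host(), mods))
print([m.mod_def.name for m in enabled])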
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def modules_enabled(self, c):\n\n modules = []\n for name, module in self.modules.iteritems():\n modules.append( (name, module.__class__.__name__) )\n\n return modules", "def enabled_modules(self):\n return [scomp for scomp in self.modules()\n if getattr(scomp, 'enabled', True)]", "def module_info():\n pass", "def get_module_info_list(self):\n self._get_module_info_list = pa_module_info_cb_t(self._module_info_cb)\n pa_context_get_module_info_list(self._context,\n self._get_module_info_list,\n None)", "def available_modules(self, user):\n return [sitecomp for sitecomp in self.enabled_modules() if sitecomp.has_perm(user)]", "def process_module_list(self, modules):", "def update_modules(self) -> None:\n\n matches = apache_util.parse_modules(self.configurator.options.get_modules_cmd)\n for mod in matches:\n self.add_mod(mod.strip())", "def moduleInfo(*args, definition: bool=True, listModules: bool=True, moduleName: AnyStr=\"\",\n path: bool=True, version: bool=True, **kwargs)->List[AnyStr]:\n pass", "def get_module_info(self):\n self._log_msg_start(\"Poll basic module info\")\n return self._ubx.poll(\"NAV-SVINFO\")", "def _filter_modules(self, plugins, names):\n if self.module_plugin_filters:\n # check to make sure the number of plugins isn't changing\n original_length_plugins = len(plugins)\n module_plugins = set()\n for module_filter in self.module_plugin_filters:\n module_plugins.update(module_filter(plugins, names))\n if len(plugins) < original_length_plugins:\n warning = \"\"\"Module Filter removing plugins from original\n data member! Suggest creating a new list in each module\n filter and returning new list instead of modifying the\n original data member so subsequent module filters can have\n access to all the possible plugins.\\n {}\"\"\"\n\n self._log.info(warning.format(module_filter))\n\n plugins = module_plugins\n return plugins", "async def get_module_data(request):\n hw = hw_from_req(request)\n requested_serial = request.match_info['serial']\n res = None\n\n for module in hw.attached_modules:\n is_serial_match = module.device_info.get('serial') == requested_serial\n if is_serial_match and hasattr(module, 'live_data'):\n res = module.live_data\n\n if res:\n return web.json_response(res, status=200)\n else:\n return web.json_response({\"message\": \"Module not found\"}, status=404)", "def get_enabled_modules(self):\n return self._gconf.get_enabled_modules()", "def do_list_modules(self, arg):\n for module in self.reader.module_list.modules:\n if arg:\n name = GetModuleName(self.reader, module).lower()\n if name.find(arg.lower()) >= 0:\n PrintModuleDetails(self.reader, module)\n else:\n PrintModuleDetails(self.reader, module)\n print()", "def MODULES(self):\n pass", "def mod_info(self):\n return (self.name, self.version, self.author, getattr(self.__class__, \"nsfw\", False))", "def on_modules_command(sender, command, label, args):\n plugin_header(sender, \"Modules\")\n msg(sender, \", \".join([((\"&a\" if mod in shared[\"modules\"] else \"&c\") + mod) for mod in shared[\"load_modules\"]]))", "def scrub_from_sys_modules():\n for k, m in sys.modules.items():\n if k in sys_modules_whitelist:\n continue\n\n if hasattr(m, '__file__') and m.__file__ is not None:\n mp = pathlib.Path(m.__file__)\n if pex_root in mp.parents:\n yield k", "def present_module_info():\n writer()\n print_heading(\"Module Info\")\n writer(f\"GWT Version: {GWT_VERSION}\")\n writer(f\"Content-Type: {CONTENT_TYPE}\")\n writer(f\"X-GWT-Module-Base: {BASE_URL}\")\n writer(f\"X-GWT-Permutation: {GWT_PERMUTATION}\")\n 
if RPC_MODE:\n writer(f\"RPC Version: {RPC_VERSION}\")\n writer(f\"RPC Flags: {RPC_FLAGS}\")\n writer()", "def modules(self):\n return self._modules.keys()", "def ImportsTest(recipe, allowed_modules):\n\n for _, val in sorted(recipe.global_symbols.iteritems()):\n if isinstance(val, types.ModuleType):\n module_name = val.__name__\n for pattern in allowed_modules:\n if pattern.match(val.__name__):\n break\n else:\n yield ('In %s:\\n'\n ' Non-whitelisted import of %s' % (recipe.path, module_name))", "def modules(self):\n return self._modules", "def modules_available(self, c):\n\n modules = []\n module = YModule.FirstModule()\n while module is not None:\n modules.append( (module.get_productName(), module.get_serialNumber()) )\n module = module.nextModule()\n\n return modules", "def get_all_d_module_info():\n a_local_var = 'this is local variable'\n zzz = 5", "def module_info(self) -> FilebaseApiModuleInfo:\n return self._module_info", "def modules(self):\n for desc in self._mappings.values():\n if hasattr(desc, 'module'):\n yield desc.module\n else:\n continue", "def get_required_module_descriptors(self):\r\n return []", "def exploits(self):\n return self.rpc.call(MsfRpcMethod.ModuleExploits)['modules']", "def _list_modi_ports() -> List[ListPortInfo]:\n def __is_modi_port(port):\n return (\n port.manufacturer == \"LUXROBO\"\n or port.product == \"MODI Network Module\"\n or port.description == \"MODI Network Module\"\n or (port.vid == 12254 and port.pid == 2))\n\n return [port for port in stl.comports() if __is_modi_port(port)]", "def __contains__(self, module):\n\n for enabled_module in self.modules:\n if enabled_module.ID == module:\n return True\n return False", "def fusion_api_get_interconnect_pluggable_module_info(self, uri=None, api=None, param='', headers=None):\n param = '/pluggableModuleInformation/%s' % param\n return self.ic.get(uri=uri, api=api, headers=headers, param=param)" ]
[ "0.62593675", "0.5974044", "0.5942882", "0.5807972", "0.5670104", "0.5630162", "0.544685", "0.5350841", "0.5337559", "0.5322708", "0.5253789", "0.52141494", "0.5195664", "0.5185848", "0.5140138", "0.5120659", "0.5107055", "0.5052914", "0.5033366", "0.50035936", "0.49902838", "0.49887124", "0.49801075", "0.4963274", "0.4955916", "0.4944333", "0.49433202", "0.49156883", "0.4911943", "0.48822442" ]
0.64883715
0
Topologically sorts mod_infos by the module definitions' list of required dependencies. Topological sort is done using Kahn's algorithm.
def sort_by_dependencies(
        mod_infos: List[Loader.ModInfo]) -> List[Loader.ModInfo]:
    # First compile a list of module infos with no dependencies and a mapping
    # from modules with at least one dependency to their respective
    # dependencies.
    no_deps: List[Loader.ModInfo] = []
    with_deps: Dict[str, Set[str]] = {}
    by_name: Dict[str, Loader.ModInfo] = {}
    for mod_info in mod_infos:
        assert mod_info.mod_def.name  # Set, or inferred during loading
        by_name[mod_info.mod_def.name] = mod_info
    for mod_info in mod_infos:
        assert mod_info.mod_def.name  # Set, or inferred during loading
        name = mod_info.mod_def.name
        opt_deps = {opt for opt in mod_info.mod_def.optional if opt in by_name}
        if not mod_info.mod_def.required and not opt_deps:
            no_deps.append(mod_info)
            continue
        with_deps[name] = set(mod_info.mod_def.required).union(opt_deps)
    sorted_infos = []
    while no_deps:
        # Remove the first element without dependencies from no_deps and
        # add it to the list of sorted infos.
        info = no_deps.pop()
        sorted_infos.append(info)
        # Then remove the info from all module infos that list it as a
        # dependency. If an entry in with_deps points to an empty set, remove
        # it from with_deps and add the module info to no_deps.
        # Copy with_deps.keys into a list to allow modifying with_deps during
        # iteration.
        for name in list(with_deps.keys()):
            # load_modules set this name if it was None
            assert info.mod_def.name is not None
            if info.mod_def.name not in with_deps[name]:
                continue
            with_deps[name].remove(info.mod_def.name)
            if not with_deps[name]:
                no_deps.append(by_name[name])
                del with_deps[name]
    # If with_deps is not empty by now, the contained modules have cyclic
    # dependencies.
    if with_deps:
        unmet = [f"{n}: {' -> '.join(vs)}" for n, vs in with_deps.items()]
        msg = "\n\t".join(unmet)
        msg = f"Unmet or cyclic module dependencies:\n\n{msg}"
        raise DependencyError(msg)
    return sorted_infos
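For reference, the same Kahn-style elimination can be sketched on a plain name -> dependency-set mapping, independent of the Loader.ModInfo types assumed above (illustrative only, not the project's API):

from typing import Dict, List, Set

def kahn_sort(deps: Dict[str, Set[str]]) -> List[str]:
    # deps maps each node to the set of nodes it depends on.
    remaining = {n: set(d) for n, d in deps.items()}
    no_deps = [n for n, d in remaining.items() if not d]
    for n in no_deps:
        del remaining[n]
    ordered: List[str] = []
    while no_deps:
        node = no_deps.pop()
        ordered.append(node)
        for other in list(remaining):
            remaining[other].discard(node)
            if not remaining[other]:
                no_deps.append(other)
                del remaining[other]
    if remaining:
        raise ValueError(f"Unmet or cyclic dependencies: {remaining}")
    return ordered

print(kahn_sort({"a": {"b"}, "b": {"c"}, "c": set()}))  # ['c', 'b', 'a']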
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dag_topology_sort(self):\n mlist = []\n mod_wrapper = self.mod_wrapper.copy()\n while mod_wrapper:\n temp_list = []\n for mod, wrapper in mod_wrapper.items():\n if wrapper.is_root_mod():\n temp_list.append(mod)\n wrapper.remove_self_from_bindings()\n\n for mod in temp_list:\n mod_wrapper.pop(mod, None)\n\n mlist += temp_list\n\n mod_wrapper_sort = {}\n for mod, i in zip(mlist, range(len(mlist))):\n self.mod_wrapper[mod].set_idx_name(i)\n mod_wrapper_sort[mod] = self.mod_wrapper[mod]\n\n self.mod_wrapper = mod_wrapper_sort", "def _sort_dependencies(self):\n def sort_hier(node):\n if node is None:\n return None\n task = self.get_task_by_mapper(node.item)\n if node.cycles is not None:\n tasks = []\n for n in node.cycles:\n tasks.append(self.get_task_by_mapper(n.item))\n task.circular = task._sort_circular_dependencies(self, tasks)\n for child in node.children:\n t = sort_hier(child)\n if t is not None:\n task.childtasks.append(t)\n return task\n \n mappers = self._get_noninheriting_mappers()\n head = DependencySorter(self.dependencies, list(mappers)).sort(allow_all_cycles=True)\n #print \"-------------------------\"\n #print str(head)\n #print \"---------------------------\"\n task = sort_hier(head)\n return task", "def toposorted(infos):\n key_to_info = {}\n depends = {}\n for info in infos:\n key_to_info[info.key] = info\n depends[info.key] = []\n for info in infos:\n for after in info.after:\n after_info = key_to_info[after]\n depends[info.key].append(after_info)\n for before in info.before:\n before_info = key_to_info[before]\n depends[before_info.key].append(info)\n return topological_sort(infos, lambda info: depends[info.key])", "def getLoadOrder(self,modNames,asTuple=True):\n data = self.data\n modNames = list(modNames) #--Don't do an in-place sort.\n modNames.sort()\n modNames.sort(key=lambda a: (a in data) and data[a].mtime) #--Sort on modified\n modNames.sort(key=lambda a: a[-1].lower()) #--Sort on esm/esp\n #--Match Bethesda's esm sort order\n # - Start with masters in chronological order.\n # - For each master, if it's masters (mm's) are not already in list, \n # then place them ahead of master... but in REVERSE order. E.g., last\n # grandmaster will be first to be added.\n def preMaster(modName,modDex):\n \"\"\"If necessary, move grandmasters in front of master -- but in \n reverse order.\"\"\"\n if self.data.has_key(modName):\n mmNames = list(self.data[modName].masterNames[:])\n mmNames.reverse()\n for mmName in mmNames:\n if mmName in modNames:\n mmDex = modNames.index(mmName)\n #--Move master in front and pre-master it too.\n if mmDex > modDex:\n del modNames[mmDex]\n modNames.insert(modDex,mmName)\n modDex = 1 + preMaster(mmName,modDex)\n return modDex\n #--Read through modNames.\n modDex = 1\n while modDex < len(modNames):\n modName = modNames[modDex]\n if modName[-1].lower() != 'm': break\n if self.circularMasters([modName]):\n modDex += 1\n else:\n modDex = 1 + preMaster(modName,modDex)\n #--Convert? and return\n if asTuple:\n return tuple(modNames)\n else:\n return modNames", "def preProcess(self):\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n votes = 0\n winner = ''\n for voter in self.module[moduleName].keys():\n if self.module[moduleName][voter] > votes:\n votes = self.module[moduleName][voter]\n winner = voter\n self.module[moduleName] = winner\n\n # quick and dirty algorithm O(n^2). 
Can be done in O(n*lg(n))\n moduleLength = {}\n # find module lengths first\n for moduleName in self.module.keys():\n parts = moduleName.split('/')\n if len(parts) not in moduleLength:\n moduleLength[len(parts)] = []\n moduleLength[len(parts)].append(moduleName)\n lengths = moduleLength.keys()\n lengths.sort(reverse=True)\n\n for length in lengths:\n # FIXME: needs to be configurable.\n if length > 2:\n parents = {}\n for moduleName in self.module.keys():\n parts = moduleName.split('/')\n # group all parts of same length.\n if len(parts) == length:\n parent = moduleName.rsplit('/', 1)[0]\n if parent not in parents:\n parents[parent] = []\n parents[parent].append([moduleName, self.module[moduleName]])\n # check if all the children have the same developer as parent. If so remove the children.\n for parent in parents.keys():\n same = True\n parentDeveloper = self.module[parent]\n for moduleName, developer in parents[parent]:\n if developer != parentDeveloper:\n same = False\n if same:\n for moduleName, developer in parents[parent]:\n del self.module[moduleName]", "def get_required_mods(self):\r\n mods = []\r\n unknowntags = []\r\n for key, value in self.dependencies.items():\r\n if value.required_by:\r\n if value.provided_by:\r\n mods.append(list(value.provided_by)[0]) #Pick random'ish if more than one.\r\n else:\r\n unknowntags.append((key, value))\r\n return {\"mods\":sorted(mods, key= lambda x: x.mod.name), \"unknown\": unknowntags}", "def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result", "def process_module_list(self, modules):", "def order(self):\n pairs = [(w['source'][0], w['target'][0]) for w in self.wires]\n return processing_order(len(self.modules), pairs)", "def sortInfos(self):\n #--Build infosById\n infosById = {}\n for info in self.infos:\n if info.id == None: raise Tes3Error(self.inName,_('Dialog %s: info with missing id.') % (self.id,))\n infosById[info.id] = info\n #--Heads\n heads = []\n for info in self.infos:\n if info.prevId not in infosById:\n heads.append(info)\n #--Heads plus their next chains\n newInfos = []\n for head in heads:\n nextInfo = head\n while nextInfo:\n newInfos.append(nextInfo)\n nextInfo = infosById.get(nextInfo.nextId)\n #--Anything left?\n for info in self.infos:\n if info not in newInfos:\n newInfos.append(info)\n #--Replace existing list\n self.infos = newInfos", "def modules(self):\n return sorted([module for module in self._registry.values()],\n key=lambda scomp: (scomp.order, scomp.label))", "def do_list_modules(self, long_output=None,sort_order=None):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\t# list of module ids and other details\n\t\t# will also contain column headers\n\t\ttable_list = []\n\t\tif long_output is None:\n\t\t\tlong_output = self.list_modules['long']\n\t\tif sort_order is None:\n\t\t\tsort_order = self.list_modules['sort']\n\t\tif long_output:\n\t\t\t# --long table: sort modules by run order\n\t\t\ttable_list.append([\"Order\",\"Module ID\",\"Description\",\"Run Order\",\"Built\",\"Compatible\"])\n\t\t\t#table_list.append([\"Order\",\"Module ID\",\"Description\",\"Run Order\",\"Built\"])\n\t\telse:\n\t\t\t# \"short\" table ==> sort module by module_id\n\t\t\t#table_list.append([\"Module ID\",\"Description\",\"Built\"])\n\t\t\ttable_list.append([\"Module 
ID\",\"Description\",\"Built\",\"Compatible\"])\n\n\t\tif sort_order == 'run_order':\n\t\t\td = {}\n\t\t\tfor m in self.shutit_modules:\n\t\t\t\td.update({m.module_id:m.run_order})\n\t\t\t# sort dict by run_order; see http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value\n\t\t\tb = sorted(d.items(), key=operator.itemgetter(1))\n\t\t\tcount = 0\n\t\t\t# now b is a list of tuples (module_id, run_order)\n\t\t\tfor pair in b:\n\t\t\t\t# module_id is the first item of the tuple\n\t\t\t\tk = pair[0]\n\t\t\t\tfor m in self.shutit_modules:\n\t\t\t\t\tif m.module_id == k:\n\t\t\t\t\t\tcount += 1\n\t\t\t\t\t\tcompatible = True\n\t\t\t\t\t\tif not cfg[m.module_id]['shutit.core.module.build']:\n\t\t\t\t\t\t\tcfg[m.module_id]['shutit.core.module.build'] = True\n\t\t\t\t\t\t\tcompatible = self.determine_compatibility(m.module_id) == 0\n\t\t\t\t\t\t\tcfg[m.module_id]['shutit.core.module.build'] = False\n\t\t\t\t\t\tif long_output:\n\t\t\t\t\t\t\ttable_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])\n\t\t\t\t\t\t\t#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttable_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])\n\t\telif sort_order == 'id':\n\t\t\tl = []\n\t\t\tfor m in self.shutit_modules:\n\t\t\t\tl.append(m.module_id)\n\t\t\tl.sort()\n\t\t\tfor k in l:\n\t\t\t\tfor m in self.shutit_modules:\n\t\t\t\t\tif m.module_id == k:\n\t\t\t\t\t\tcount = 1\n\t\t\t\t\t\tcompatible = True\n\t\t\t\t\t\tif not cfg[m.module_id]['shutit.core.module.build']:\n\t\t\t\t\t\t\tcfg[m.module_id]['shutit.core.module.build'] = True\n\t\t\t\t\t\t\tcompatible = self.determine_compatibility(m.module_id) == 0\n\t\t\t\t\t\tif long_output:\n\t\t\t\t\t\t\ttable_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])\n\t\t\t\t\t\t\t#table_list.append([str(count),m.module_id,m.description,str(m.run_order),str(cfg[m.module_id]['shutit.core.module.build'])])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t#table_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build'])])\n\t\t\t\t\t\t\ttable_list.append([m.module_id,m.description,str(cfg[m.module_id]['shutit.core.module.build']),str(compatible)])\n\n\t\t# format table for display\n\t\ttable = texttable.Texttable()\n\t\ttable.add_rows(table_list)\n\t\t# Base length of table on length of strings\n\t\tcolwidths = []\n\t\tfor item in table_list:\n\t\t\tfor n in range(0,len(item)):\n\t\t\t\t# default to 10 chars\n\t\t\t\tcolwidths.append(10)\n\t\t\tbreak\n\t\tfor item in table_list:\n\t\t\tfor n in range(0,len(item)-1):\n\t\t\t\tif len(str(item[n])) > colwidths[n]:\n\t\t\t\t\tcolwidths[n] = len(str(item[n]))\n\t\ttable.set_cols_width(colwidths)\n\t\tmsg = table.draw()\n\t\tshutit_global.shutit_global_object.shutit_print('\\n' + msg)", "def _rearrange_codes_by_dependencies(self, nl):\n\n def parse_lists(_nl):\n \"\"\"\n dont judge me\n \"\"\"\n for l in _nl:\n for code in l:\n if code.depends_on_runs:\n t = code.depends_on_runs\n if t not in l:\n for ol in _nl:\n if t in ol:\n ol.append(code)\n l.remove(code)\n return False\n return True\n\n done = False\n while not done:\n done = parse_lists(nl)", "def _sort_circular_dependencies(self, trans, cycles):\n allobjects = []\n for task in cycles:\n allobjects += [e.obj for e in 
task.get_elements(polymorphic=True)]\n tuples = []\n \n cycles = util.Set(cycles)\n \n #print \"BEGIN CIRC SORT-------\"\n #print \"PRE-CIRC:\"\n #print list(cycles)[0].dump()\n \n # dependency processors that arent part of the cyclical thing\n # get put here\n extradeplist = []\n \n # organizes a set of new UOWTasks that will be assembled into\n # the final tree, for the purposes of holding new UOWDependencyProcessors\n # which process small sub-sections of dependent parent/child operations\n dependencies = {}\n def get_dependency_task(obj, depprocessor):\n try:\n dp = dependencies[obj]\n except KeyError:\n dp = dependencies.setdefault(obj, {})\n try:\n l = dp[depprocessor]\n except KeyError:\n l = UOWTask(self.uowtransaction, depprocessor.targettask.mapper, circular_parent=self)\n dp[depprocessor] = l\n return l\n\n def dependency_in_cycles(dep):\n # TODO: make a simpler way to get at the \"root inheritance\" mapper\n proctask = trans.get_task_by_mapper(dep.processor.mapper.primary_mapper().base_mapper(), True)\n targettask = trans.get_task_by_mapper(dep.targettask.mapper.base_mapper(), True)\n return targettask in cycles and (proctask is not None and proctask in cycles)\n \n # organize all original UOWDependencyProcessors by their target task\n deps_by_targettask = {}\n for t in cycles:\n for task in t.polymorphic_tasks():\n for dep in task.dependencies:\n if not dependency_in_cycles(dep):\n extradeplist.append(dep)\n for t in dep.targettask.polymorphic_tasks():\n l = deps_by_targettask.setdefault(t, [])\n l.append(dep)\n\n object_to_original_task = {}\n \n for t in cycles:\n for task in t.polymorphic_tasks():\n for taskelement in task.get_elements(polymorphic=False):\n obj = taskelement.obj\n object_to_original_task[obj] = task\n #print \"OBJ\", repr(obj), \"TASK\", repr(task)\n \n for dep in deps_by_targettask.get(task, []):\n # is this dependency involved in one of the cycles ?\n #print \"DEP iterate\", dep.processor.key, dep.processor.parent, dep.processor.mapper\n if not dependency_in_cycles(dep):\n #print \"NOT IN CYCLE\"\n continue\n #print \"DEP\", dep.processor.key \n (processor, targettask) = (dep.processor, dep.targettask)\n isdelete = taskelement.isdelete\n \n # list of dependent objects from this object\n childlist = dep.get_object_dependencies(obj, trans, passive=True)\n if childlist is None:\n continue\n # the task corresponding to saving/deleting of those dependent objects\n childtask = trans.get_task_by_mapper(processor.mapper.primary_mapper())\n \n childlist = childlist.added_items() + childlist.unchanged_items() + childlist.deleted_items()\n \n for o in childlist:\n if o is None or not childtask.contains_object(o, polymorphic=True):\n continue\n #print \"parent/child\", obj, o\n whosdep = dep.whose_dependent_on_who(obj, o)\n #print \"WHOSEDEP\", dep.processor.key, dep.processor.direction, whosdep\n if whosdep is not None:\n tuples.append(whosdep)\n # create a UOWDependencyProcessor representing this pair of objects.\n # append it to a UOWTask\n if whosdep[0] is obj:\n get_dependency_task(whosdep[0], dep).append(whosdep[0], isdelete=isdelete)\n else:\n get_dependency_task(whosdep[0], dep).append(whosdep[1], isdelete=isdelete)\n else:\n get_dependency_task(obj, dep).append(obj, isdelete=isdelete)\n \n #print \"TUPLES\", tuples\n head = DependencySorter(tuples, allobjects).sort()\n if head is None:\n return None\n\n #print str(head)\n\n # create a tree of UOWTasks corresponding to the tree of object instances\n # created by the DependencySorter\n def make_task_tree(node, 
parenttask, nexttasks):\n #print \"MAKETASKTREE\", node.item, parenttask\n originating_task = object_to_original_task[node.item]\n t = nexttasks.get(originating_task, None)\n if t is None:\n t = UOWTask(self.uowtransaction, originating_task.mapper, circular_parent=self)\n nexttasks[originating_task] = t\n parenttask.append(None, listonly=False, isdelete=originating_task.objects[node.item].isdelete, childtask=t)\n t.append(node.item, originating_task.objects[node.item].listonly, isdelete=originating_task.objects[node.item].isdelete)\n \n if dependencies.has_key(node.item):\n for depprocessor, deptask in dependencies[node.item].iteritems():\n t.cyclical_dependencies.add(depprocessor.branch(deptask))\n nd = {}\n for n in node.children:\n t2 = make_task_tree(n, t, nd)\n return t\n\n # this is the new \"circular\" UOWTask which will execute in place of \"self\"\n t = UOWTask(self.uowtransaction, self.mapper, circular_parent=self)\n\n # stick the non-circular dependencies and child tasks onto the new\n # circular UOWTask\n [t.dependencies.add(d) for d in extradeplist]\n t.childtasks = self.childtasks\n make_task_tree(head, t, {})\n #print t.dump()\n return t", "def modules():", "def sort(self):\n sorted_entries = [] # type: list[MSBModel]\n for entry_subtype in MSBModelSubtype:\n sorted_entries += list(sorted(self.get_entries(entry_subtype), key=lambda m: m.name))\n self._entries = sorted_entries", "def test_collect_generic_module_dependencies(self, module_repo):\n expected_result = {(\"pack_with_definition\", True), (\"pack_4\", True)}\n\n test_input = [\n {\n \"dummy generic module\": {\n \"name\": \"dummy generic module\",\n \"file_path\": \"path.json\",\n \"fromversion\": \"6.5.0\",\n \"pack\": \"dummy pack\",\n \"definitionIds\": [\"assets\"],\n \"views\": {\n \"Vulnerability Management\": {\n \"title\": \"Risk Base Vulnerability Management\",\n \"dashboards\": [\"pack_4 - dashboard\"],\n }\n },\n }\n }\n ]\n\n found_result = PackDependencies._collect_generic_modules_dependencies(\n pack_generic_modules=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n assert set(found_result) == set(expected_result)", "def reorder_module_calls(lines):\n\n code_len = len(lines)\n module_calls = []\n module_start = 0\n module_call = []\n output_io = 0\n boundary = 0\n new_module = 0\n prev_module_name = \"\"\n first_line = -1\n last_line = -1\n reset = 0\n\n for pos in range(code_len):\n line = lines[pos]\n if line.find(\"/* Module Call */\") != -1:\n if module_start == 0:\n module_start = 1\n else:\n module_start = 0\n\n if module_start:\n # Examine if the module is an output I/O module\n nxt_line = lines[pos + 1]\n if nxt_line.find(\"IO\") != -1 and nxt_line.find(\"out\") != -1:\n output_io = 1\n # Examine if the module is an boundary module\n if nxt_line.find(\"boundary\") != -1:\n boundary = 1\n # Extract the module name\n module_name = nxt_line.strip()[:-9]\n if boundary:\n module_name = module_name[:-9]\n if prev_module_name == \"\":\n prev_module_name = module_name\n first_line = pos\n else:\n if prev_module_name != module_name:\n new_module = 1\n prev_module_name = module_name\n first_line = pos\n reset = 0\n else:\n if reset:\n first_line = pos\n reset = 0\n new_module = 0\n\n if not module_start:\n if output_io:\n last_line = pos\n module_call.append(line)\n module_calls.append(module_call.copy())\n module_call.clear()\n if boundary:\n # Reverse the list\n module_calls.reverse()\n # Insert it back\n left_lines = lines[last_line + 1:]\n lines = lines[:first_line]\n first = 1\n for c 
in module_calls:\n if not first:\n lines.append(\"\\n\")\n lines = lines + c\n first = 0\n lines = lines + left_lines\n # Clean up\n module_calls.clear()\n boundary = 0\n output_io = 0\n reset = 1\n if new_module:\n # Pop out the previous module calls except the last one\n module_calls = module_calls[-1:]\n\n if module_start and output_io:\n module_call.append(line)\n\n return lines", "def _build_order(c, ignore=False, update=False):\n\n fs_bo = [d.parent.resolve().name for d in Path('.').glob('*/metadata.csv')]\n\n if c.metapack.build_order is None or ignore is not False:\n return [Path('.').joinpath(d) for d in fs_bo]\n else:\n\n bo = c.metapack.build_order\n\n if update:\n bo += list(set(fs_bo) - set(bo))\n\n return [Path('.').joinpath(d) for d in bo]", "def sort_by_version(compiled_re, names):\n annotated_names = [([int(n) for n in compiled_re.match(name).groups()], name) for name in names]\n annotated_names.sort()\n return [annotated_name[1] for annotated_name in reversed(annotated_names)]", "def test_dependency_order(self):\n sections = self.old_manifest.formula_sections()\n assert sections.index(\"git\") < sections.index(\n \"sub\"\n ), \"Dependency is out of order! git comes after sub\"", "def parse_deps():\n Files = []\n Dependencies = []\n TimeBins = ['recover_parameters', 'startup', 'wragh', 'paramcheck',\n 'preregridinitial', 'postregridinitial', 'basegrid', \n 'initial', 'postinitial', 'postrestrictinitial', \n 'postpostinitial', 'recover_variables', \n 'post_recover_variables', 'cpinitial', 'checkpoint', \n 'preregrid', 'postregrid', 'prestep', 'evol', 'postrestrict', \n 'poststep', 'analysis', 'terminate', 'shutdown']\n\n implement_re = re.compile('implements:\\s*(\\w+)', re.I)\n inherit_re = re.compile('inherits:\\s*(.+)', re.I)\n provides_function_re = re.compile('PROVIDES\\s+FUNCTION\\s+(\\w+)', re.I)\n uses_function_re = re.compile('USES\\s+FUNCTION\\s+(\\w+)', re.I)\n requires_function_re = re.compile('REQUIRES\\s+FUNCTION\\s+(\\w+)', re.I)\n shares_re = re.compile('shares:\\s*(\\w+)', re.I)\n requires_thorn_re = re.compile('REQUIRES\\s+(?!FUNCTION\\s*)(\\w+)', re.I)\n schedules_function_re = re.compile('schedule\\s+(?:group\\s+)?(\\w+)\\s+(?:in|at)\\s+(\\w+)', re.I)\n\n # find all interface.ccl and param.ccl files in cwd\n Cactus_Path = os.path.expanduser('~/Cactus/')\n for dirpath, dirnames, filenames in os.walk(Cactus_Path + 'arrangements', followlinks=True):\n for file in filenames:\n if file == 'interface.ccl':\n Files.append(os.path.join(dirpath, file))\n\n for file in Files:\n # first parse interface.ccl\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines = fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then parse param.ccl\n file = re.sub('interface.ccl', 'param.ccl', file)\n\n try:\n fptr = open(file, 'r')\n except IOError:\n print(\"Could not open %s\" % file) \n\n lines += fptr.readlines()\n\n try:\n fptr.close()\n except IOError:\n print(\"Could not close %s\" % file) \n\n # then configuration.ccl\n file = re.sub('param.ccl', 'configuration.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # then schedule.ccl\n file = re.sub('configuration.ccl', 'schedule.ccl', file)\n\n try:\n fptr = open(file, 'r')\n lines += fptr.readlines()\n fptr.close()\n except IOError:\n pass\n\n # get the thorn dir and its parent\n thornname = os.path.basename(os.path.dirname(file))\n parentdir = 
os.path.basename(os.path.dirname(os.path.dirname(file)))\n thornname = os.path.join(parentdir, thornname)\n file_dict = {'name' : thornname.lower()}\n for line in lines:\n line = line.strip()\n m = re.match(implement_re, line)\n if m:\n file_dict['implements'] = m.group(1).lower()\n\n m = re.match(inherit_re, line)\n if m:\n inheritance = re.split('\\W+', m.group(1).lower())\n file_dict['inherits'] = inheritance\n\n m = re.match(provides_function_re, line)\n if m:\n try:\n file_dict['provides_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['provides_function'] = [m.group(1).lower()]\n\n m = re.match(uses_function_re, line)\n if m:\n try:\n file_dict['uses_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['uses_function'] = [m.group(1).lower()]\n\n m = re.match(requires_function_re, line)\n if m:\n try:\n file_dict['requires_function'].append(m.group(1).lower())\n except KeyError:\n file_dict['requires_function'] = [m.group(1).lower()]\n\n m = re.match(requires_thorn_re, line)\n if m:\n requires = re.split('\\W+', m.group(1).lower())\n # sometimes we have 'REQUIRES THORNS' instead of 'REQUIRES'\n if requires[0].lower() == 'thorns':\n del requires[0]\n file_dict['requires_thorn'] = requires\n\n m = re.match(shares_re, line)\n if m:\n try:\n file_dict['shares'].append(m.group(1).lower())\n except KeyError:\n file_dict['shares'] = [m.group(1).lower()]\n\n m = re.match(schedules_function_re, line)\n if m:\n bin, func = m.group(2).lower(), m.group(1).lower()\n if bin in TimeBins:\n bin = 'cctk_' + bin\n func_dict = {bin : func}\n try:\n file_dict['schedules_function'].append(func_dict)\n except KeyError:\n file_dict['schedules_function'] = [func_dict]\n\n\n Dependencies.append(file_dict)\n\n return Dependencies", "def sort_decls(self):\n\n #import simplecssbuilder\n #SHORTHAND_REL = simplecssbuilder.SHORTHAND_REL\n\n def decl_key(decl):\n \"\"\"key for sorting declarations\"\"\"\n prop = decl.split(':')[0] # get property name\n if str(prop) in SHORTHAND_REL_inv:\n return SHORTHAND_REL_inv[str(prop)]\n else:\n return str(prop)\n\n def sort_decls_clique(clique):\n \"\"\"Sort the declarations in clique\"\"\"\n (_,ps) = clique\n ps.sort(key=decl_key)\n\n # compute the inverse of SHORTHAND_REL\n SHORTHAND_REL_inv = dict()\n for k,vs in SHORTHAND_REL.iteritems():\n for v in vs:\n SHORTHAND_REL_inv[v] = k\n\n #print 'PRINTING CLIQUES'\n for clique in self.cliques:\n #print clique\n sort_decls_clique(clique)\n #print clique", "def get_dep_map(kerneldir):\n\n\tf = open(os.path.join(kerneldir, 'modules.dep'))\n\tdeps = {}\n\tfor l in f:\n\t\t#print repr(l)\n\t\tmod, dep_list_str = l.strip().split(':', 1)\n\t\tassert mod not in deps\n\n\t\tkmod = KModuleName(mod)\n\t\tdep_list = [KModuleName(x) for x in dep_list_str.strip().split()]\n\t\tdep_list.insert(0, kmod)\t# prepend ourself as a dependency\n\n\t\tdeps[kmod] = dep_list\n\n\tf.close()\n\treturn deps", "def sort(self):\n self.model_list.sort()\n for model in self.model_list:\n model.sort()", "def getDepList(self, dict):\n \n if( dict.has_key( self.name) ):\n return\n else:\n dict[ self.name ] = self.installPath\n\n if( len( dict ) > 1 ):\n mods = self.reqmodules + self.optmodules\n else:\n mods = self.reqmodules + self.optmodules + self.reqmodules_buildonly\n \n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).getDepList( dict )", "def sort_hgf_fields(config,doctype,inst):\n\torder = config[\"order\"] # get all field defined under config['order'] (1.)\n\torder_index 
= 2\n\tdefault_set = config[\"default_form\"]\n\tdefault_order = {} # dict with fields from default_form and proper order (2.)\n\tfor k in (default_set.keys()):\n\t\tif k in order.keys(): #field in config[\"order\"]\n\t\t\tdefault_order[k] = order[k]\n\t\tif k in default_set.keys() and default_set[k][order_index] !=\"-\" : #field in config[\"default_form\"] and not \"-\"\n\t\t\tdefault_order[k] = default_set[k][order_index] \n\t\t\n\tif doctype in config[inst].keys(): #get the institutional changes (3.)\n\t\tinst_changes = config[inst][doctype]\n\telse:\n\t\tinst_changes = {}\n\t\n\t\n\tinst_order = {}\n\tfor key in inst_changes.keys():\n\t\tif inst_changes[key] == \"None\":\n\t\t\tif key in default_order.keys(): #delete fields from institutional changes which are set \"None\" and in default_form\n\t\t\t\tdel default_order[key]\n\t\t\tcontinue\n\t\tif inst_changes[key][order_index] == \"-\": #we take the default\n\t\t\tif key in default_order.keys(): pass #already assigned by default_order\n\t\t\telse: \n\t\t\t\tif key in order.keys(): #get the order from config['order']\n\t\t\t\t\tinst_order[key] = order[key]\n\t\t\t\telse: warning(\"Please define the order (config['order']) for field %s in doctype: %s\" %(key,doctype))\n\t\t\tcontinue\n\t\tinst_order[key] = inst_changes[key][order_index] #institutional changes\t\t\t\n\t\t\n\tfinal_order = {}\n\t#get institutional changes in order\n\tmax_score = max(map(int,default_order.values() + inst_order.values())) #get all order values as string, convert strings to int and get the max value\n\tfor k in (default_order.keys() + inst_changes.keys()): \t\n\t\tif k in inst_changes.keys():\n\t\t\tif inst_changes[k] == \"None\": \n\t\t\t\tcontinue\n\t\t\tif inst_changes[k][order_index] == \"-\":\n\t\t\t\tif k in default_order.keys(): #take the default_order\n\t\t\t\t\tfinal_order[k] = default_order[k]\n\t\t\t\telse: \n\t\t\t\t\tif k in order.keys():\n\t\t\t\t\t\tfinal_order[k] = order[k]\n\t\t\t\t\telse: #no default. sort this field to the end\n\t\t\t\t\t\twarning(\"The field %s in doctype: %s is sorted to the end of the form\" %(k,doctype))\n\t\t\t\t\t\tfinal_order[k] = max_score\n\t\t\t\t\t\tmax_score +=1\n\t\t\telse: final_order[k] = inst_changes[k][order_index] #take order from institutional changes\n\t\t\t\n\t\telse: \n\t\t\tfinal_order[k] = default_order[k] # take order from default_form\n\t\n\tfinal_order[\"hgf_end\"] = max_score\n\t\n\tnew_order = sorted(final_order.items(),key=lambda x: int(x[1])) #create list with tuples sorted by value\n\thidden_fields = get_hidden_fields(config) #get hidden fields\n\t\n\tsorted_hgf_fields = []\t\t\n\tfor i in new_order:\n\t\tsorted_hgf_fields.append(i[0])\n\t\n\t# add all hidden fields\n\tfor i in hidden_fields:\n\t\tif i in sorted_hgf_fields: continue\n\t\tsorted_hgf_fields.append(i)\n\treturn sorted_hgf_fields", "def sortby(self):\n ...", "def _sort_compounds(self):\n self.sorted_molecules = sorted(self.values(), key=operator.attrgetter('criterion'))", "def sort(self, by='dependency'):\n\n nodes_ordered = []\n if by == \"dependency\":\n for node in self.nodes:\n insert_idx = len(nodes_ordered)\n for node_dependency in node.dependencies:\n for idx, node_ordered in enumerate(nodes_ordered):\n if (idx <= insert_idx) and (node_dependency.id == node_ordered.id):\n insert_idx = idx + 1 # place the node after the dependency\n nodes_ordered.insert(insert_idx, node)\n else:\n err_msg = \"Sorting strategy '{}' unknown \".format(by)\n raise ValueError(err_msg)\n\n return Graph.from_list(nodes_ordered)" ]
[ "0.71461624", "0.64357716", "0.6193327", "0.6173135", "0.61355674", "0.604872", "0.6001152", "0.5978634", "0.5959674", "0.5802672", "0.57968855", "0.57469845", "0.5734828", "0.5680638", "0.5603417", "0.56024396", "0.5596359", "0.5579106", "0.5559897", "0.55554754", "0.5545035", "0.5488935", "0.5479852", "0.5456126", "0.542796", "0.5423967", "0.54233295", "0.542178", "0.5417093", "0.5403929" ]
0.8192629
0
Compute Allan variance (AVAR). Consider an underlying measurement y(t). Our sensors output integrals of y(t) over successive time intervals of length dt. These measurements x(k dt) form the input to this function. Allan variance is defined for different averaging times tau = m dt as
def allan_variance(x, dt=1, min_cluster_size=1, min_cluster_count='auto',
                   n_clusters=100, input_type="increment"):
    if input_type not in ('increment', 'mean'):
        raise ValueError("`input_type` must be either 'increment' or 'mean'.")
    x = np.asarray(x, dtype=float)
    n = x.shape[0]
    X = np.cumsum(x, axis=0)
    if min_cluster_count == 'auto':
        min_cluster_count = min(1000, n - 2)
    log_min = np.log2(min_cluster_size)
    log_max = np.log2((n - min_cluster_count) // 2)
    cluster_sizes = np.logspace(log_min, log_max, n_clusters, base=2)
    cluster_sizes = np.unique(np.round(cluster_sizes)).astype(np.int64)
    avar = np.empty(cluster_sizes.shape + X.shape[1:])
    for i, k in enumerate(cluster_sizes):
        c = X[2*k:] - 2 * X[k:-k] + X[:-2*k]
        avar[i] = np.mean(c**2, axis=0) / k / k
    if input_type == 'increment':
        avar *= 0.5 / dt**2
    elif input_type == 'mean':
        avar *= 0.5
    return cluster_sizes * dt, avar
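A usage sketch, assuming the allan_variance function above is in scope: it computes the Allan deviation of simulated white rate noise sampled at 100 Hz. The noise level, sample count, and seed are arbitrary illustrative choices:

import numpy as np

dt = 0.01                                       # 100 Hz sampling
rng = np.random.default_rng(0)
rate = 0.1 * rng.standard_normal(100_000)       # simulated rate samples ("mean" input type)

tau, avar = allan_variance(rate, dt=dt, input_type="mean")
adev = np.sqrt(avar)                            # Allan deviation sigma(tau)
print(tau[:3], adev[:3])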
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allanvariance(data, dt=1):\n # 2008-07-30 10:20 IJC: Created\n # 2011-04-08 11:48 IJC: Moved to analysis.py\n\n newdata = array(data, subok=True, copy=True)\n dsh = newdata.shape\n\n newdata = newdata.ravel()\n\n nsh = newdata.shape\n\n alvar = zeros(nsh[0]-1, float)\n\n for lag in range(1, nsh[0]):\n alvar[lag-1] = mean( (newdata[0:-lag] - newdata[lag:])**2 )\n\n return (alvar*0.5)", "def plot_avar(time, sigma):\n pylab.figure()\n pylab.loglog(time, sigma,'-o')\n pylab.xlabel('$time (s)$')\n pylab.ylabel('$\\sigma(\\\\tau)$')\n pylab.title('Allan deviation')\n pylab.grid(True)\n pylab.show()", "def avar (inarray, dimension=None,keepdims=0):\r\n if dimension == None:\r\n inarray = N.ravel(inarray)\r\n dimension = 0\r\n mn = amean(inarray,dimension,1)\r\n deviations = inarray - mn\r\n if type(dimension) == ListType:\r\n n = 1\r\n for d in dimension:\r\n n = n*inarray.shape[d]\r\n else:\r\n n = inarray.shape[dimension]\r\n var = ass(deviations,dimension,keepdims)/float(n-1)\r\n return var", "def ComputeAveY(data):\n NBINSY=data.shape[0]\n NBINSX=data.shape[1]\n the_averY=np.zeros(NBINSX)\n the_y=np.zeros(NBINSY)\n for ix in np.arange(NBINSX):\n the_ysum=np.sum(data[:,ix])\n for iy in np.arange(NBINSY):\n the_y[iy]=iy*data[iy,ix]\n if(the_ysum>0):\n med=np.sum(the_y)/the_ysum\n the_averY[ix]=med\n return the_averY", "def _ARMAvar(self, q, a): \n if q>0:\n theta = 1./(q+1.)\n psi_jm1 = 1./(q+1.)\n var = psi_jm1**2\n for j in range(q):\n psi_j = a*psi_jm1 + theta\n var += psi_j**2\n psi_jm1 = psi_j\n else:\n var = 1\n # Now, sum up terms from q to infinity\n var += 1./(1.-a**2) - (1. - a**(2*(q+1)))/(1.-a**2)\n # var_ratio is ratio of variance to that of AR(1) model\n var_ratio = var*(1.-a**2)\n return (var, var_ratio)", "def autocovariance(magnetisation):\r\n \r\n #length of input vector\r\n length = len(magnetisation)\r\n \r\n #maximum degree of retardation tau\r\n taulength = length // 20\r\n \r\n #linespace\r\n tau = np.array(range(taulength))\r\n \r\n #average magnetisation\r\n averagemag = np.average(magnetisation)\r\n \r\n #late snap of the magnetisation vector\r\n lateM = magnetisation[taulength:]\r\n \r\n #M'(t+tau)\r\n lateMprime = lateM - averagemag\r\n \r\n #collect A values\r\n A = np.zeros(taulength)\r\n \r\n #<M'(t)M'(t)>\r\n A[0] = np.mean(np.square(lateMprime))\r\n \r\n for i in range(1,taulength):\r\n #M'(t)\r\n Mprime = magnetisation[taulength - tau[i]: -tau[i]] - averagemag\r\n \r\n #<M'(t+tau)M'(t)>\r\n A[i] = np.mean(np.multiply(Mprime, lateMprime))\r\n \r\n #obtain autocovariance\r\n a = A / A[0]\r\n \r\n #return autocovariance\r\n return a", "def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):\n if dtype is None:\n dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))\n\n p = len(ar) - 1\n q = len(ma) - 1\n m = max(p, q) + 1\n\n if sigma2.real < 0:\n raise ValueError(\"Must have positive innovation variance.\")\n\n # Short-circuit for trivial corner-case\n if p == q == 0:\n out = np.zeros(nobs, dtype=dtype)\n out[0] = sigma2\n return out\n elif p > 0 and np.max(np.abs(np.roots(ar))) >= 1:\n raise ValueError(NONSTATIONARY_ERROR)\n\n # Get the moving average representation coefficients that we need\n ma_coeffs = arma2ma(ar, ma, lags=m)\n\n # Solve for the first m autocovariances via the linear system\n # described by (BD, eq. 
3.3.8)\n A = np.zeros((m, m), dtype=dtype)\n b = np.zeros((m, 1), dtype=dtype)\n # We need a zero-right-padded version of ar params\n tmp_ar = np.zeros(m, dtype=dtype)\n tmp_ar[: p + 1] = ar\n for k in range(m):\n A[k, : (k + 1)] = tmp_ar[: (k + 1)][::-1]\n A[k, 1 : m - k] += tmp_ar[(k + 1) : m]\n b[k] = sigma2 * np.dot(ma[k : q + 1], ma_coeffs[: max((q + 1 - k), 0)])\n acovf = np.zeros(max(nobs, m), dtype=dtype)\n try:\n acovf[:m] = np.linalg.solve(A, b)[:, 0]\n except np.linalg.LinAlgError:\n raise ValueError(NONSTATIONARY_ERROR)\n\n # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances\n if nobs > m:\n zi = signal.lfiltic([1], ar, acovf[:m:][::-1])\n acovf[m:] = signal.lfilter(\n [1], ar, np.zeros(nobs - m, dtype=dtype), zi=zi\n )[0]\n\n return acovf[:nobs]", "def calculate_variance(beta):\n n, T = beta.shape\n betadot = gradient(beta, 1. / (T - 1))\n betadot = betadot[1]\n normbetadot = zeros(T)\n centroid = calculatecentroid(beta)\n integrand = zeros((n, n, T))\n t = linspace(0, 1, T)\n for i in range(0, T):\n normbetadot[i] = norm(betadot[:, i])\n a1 = (beta[:, i] - centroid)\n a1 = a1.reshape((n, 1))\n integrand[:, :, i] = a1 @ a1.T * normbetadot[i]\n\n l = trapz(normbetadot, t)\n variance = trapz(integrand, t, axis=2)\n variance /= l\n\n return (variance)", "def AllFreAverageV(self,):\n \t\tv_array = self.data\n \t\taaverage_v = np.average(v_array[:,1])\n \t\tprint('Whole frequency average group velocity:\\nVw=',aaverage_v/1000,'km/s')\n \t\treturn", "def q_mean_variance(self, x_start, t):\n x_start_shape = tf.shape(x_start)\n mean = self._extract(self.sqrt_alphas_cumprod, t, x_start_shape) * x_start\n variance = self._extract(1.0 - self.alphas_cumprod, t, x_start_shape)\n log_variance = self._extract(self.log_one_minus_alphas_cumprod, t, x_start_shape)\n return mean, variance, log_variance", "def trans_elv_alav(self, akav=0.112):\n if self.type != 'elv':\n warnings.warn(\"attempt to normalize a non-elv curve with av\",\n UserWarning)\n else:\n # determine the index for the B band\n dwaves = np.absolute(self.waves['BAND'] - 2.19)\n sindxs = np.argsort(dwaves)\n kindx = sindxs[0]\n if dwaves[kindx] > 0.02:\n warnings.warn(\"no K band mesurement in E(l-V)\",\n UserWarning)\n else:\n # normalize each portion of the extinction curve\n ekv = self.exts['BAND'][kindx]\n av = ekv/(akav - 1)\n for curname in self.exts.keys():\n self.exts[curname] /= av\n self.exts[curname] += 1.0\n self.uncs[curname] /= av\n self.type = 'alav'", "def _em_variance(self, result, endog, exog, betas, tmp=None):\n k_exog = 0 if exog is None else exog.shape[1]\n\n if self.switching_variance:\n variance = np.zeros(self.k_regimes)\n for i in range(self.k_regimes):\n if k_exog > 0:\n resid = endog - np.dot(exog, betas[i])\n else:\n resid = endog\n variance[i] = (\n np.sum(resid ** 2 *\n result.smoothed_marginal_probabilities[i]) /\n np.sum(result.smoothed_marginal_probabilities[i]))\n else:\n variance = 0\n if tmp is None:\n tmp = np.sqrt(result.smoothed_marginal_probabilities)\n for i in range(self.k_regimes):\n tmp_endog = tmp[i] * endog\n if k_exog > 0:\n tmp_exog = tmp[i][:, np.newaxis] * exog\n resid = tmp_endog - np.dot(tmp_exog, betas[i])\n else:\n resid = tmp_endog\n variance += np.sum(resid ** 2)\n variance /= self.nobs\n return variance", "def dirichlet_variance(alpha):\n sum = np.sum(alpha)\n var = (alpha*(sum-alpha))/((1+sum)*sum**2)\n return var", "def _CloudVar(self): \n # q is MA order of ARMA(1,q)\n q = int(round(self.lambda_avg/self.lambda_s))\n a = exp(-self.lambda_s / 
self.lambda_p) \n (var, var_ratio) = self._ARMAvar(q, a)\n # This variance is a multiple of the variance of the noise driving the\n # AR(1) model. This variance, in turn, is a multiple of the underlying\n # measurement variance, with the relationship given in Gillespie 96\n var = var * (1. - exp(-2*self.lambda_s / self.lambda_p))/2\n # print q, a\n return var", "def acovariance(X):\r\n if len(X.shape) <> 2:\r\n raise TypeError, \"acovariance requires 2D matrices\"\r\n n = X.shape[0]\r\n mX = amean(X,0)\r\n return N.dot(N.transpose(X),X) / float(n) - N.multiply.outer(mX,mX)", "def avariation(a,dimension=None):\r\n return 100.0*asamplestdev(a,dimension)/amean(a,dimension)", "def mov_av(t,x,l,type_av=\"t\"):\n\t\t\t\n\t# check inputs\n\ttry:\n\t\tassert np.issubsctype(t,float)\n\texcept:\n\t\tprint \"Error at input 't': must be a numpy array of 'float' type\"\n\t\treturn\n\ttry:\n\t\tassert np.issubsctype(x,float)\n\texcept:\n\t\tprint \"Error at input 'x': must be a numpy array of 'float' type\"\n\t\treturn\n\ttry:\n\t\tassert (type(type_av) is str) and ((type_av.lower()==\"n\") or (type_av.lower()==\"t\"))\n\texcept AssertionError:\n\t\tprint \"Error at input 'type_av': must be 'n' or 't'\"\n\t\treturn\n\ttype_av=type_av.lower()\n\tif type_av==\"n\":\n\t\ttry:\n\t\t\tassert (type(l) is int) and l>0\n\t\texcept AssertionError:\n\t\t\tprint \"Error at input 'l': must be of 'int' type and >0\"\n\t\t\treturn\n\telif type_av==\"t\":\n\t\ttry:\n\t\t\tassert (type(l) is float) and l>0.\n\t\texcept AssertionError:\n\t\t\tprint \"Error at input 'l': must be of 'float' type and >0.\"\n\t\t\treturn\n\t# Main code\n\tn=x.size\n\tmov_av_x=np.zeros(n)\n\tif type_av==\"n\":\n\t\tif l%2==0:\n\t\t\tlbis=l+1 # if even number => +1\n\t\telse:\n\t\t\tlbis=l\n\t\tm=(lbis-1)/2\n\t\t# on the borders of the time series\n\t\tfor k in range(m):\n\t\t\tmov_av_x[k]=np.sum(x[:(k+m+1)])/float(k+1+m)\n\t\t\tmov_av_x[n-1-k]=np.sum(x[(n-1-k-m):])/float(k+1+m)\n\t\t# everywhere except on the border\n\t\tfor k in range(m,n-m):\n\t\t\tmov_av_x[k]=np.sum(x[(k-m):(k+m+1)])/lbis\n\telif type_av==\"t\":\n\t\tt_start=t[0]\n\t\tt_end=t[-1]\n\t\tm=l/2.0\n\t\tind_left=0\n\t\tind_right=0\n\t\tfor k in range(n):\n\t\t\tt_left=max(t_start,t[k]-m)\n\t\t\tind_left=ind_left+np.argmin(np.absolute(t[ind_left:]-t_left))\n\t\t\tt_right=min(t_end,t[k]+m)\n\t\t\tind_right=ind_right+np.argmin(np.absolute(t[ind_right:]-t_right))\n\t\t\tmov_av_x[k]=np.sum(x[ind_left:ind_right+1])/float(ind_right+1-ind_left)\n\treturn mov_av_x", "def get_vama(data):\n if data is None:\n raise EmptyDataError('[!] 
Invalid data value')\n\n result = TA.VAMA(data)\n if result is None:\n raise IndicatorException\n return result", "def estimate_arpu(x):\n arpu = 0\n if x['mean_luminosity_km2'] > 5:\n # #10 year time horizon\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (20*12) / (1 + 0.03) ** i\n # )\n return 20 * 12 * 10#arpu\n elif x['mean_luminosity_km2'] > 1:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (5*12) / (1 + 0.03) ** i\n # )\n return 5 * 12 * 10#arpu\n else:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (2*12) / (1 + 0.03) ** i\n # )\n return 2 * 12 * 10#arpu", "def spectral_variance(data, fft_data):\n return np.var(np.abs(fft_data))", "def _analytic_forecast(\n self,\n parameters: NDArray,\n resids: NDArray,\n backcast: Union[float, NDArray],\n var_bounds: NDArray,\n start: int,\n horizon: int,\n ) -> VarianceForecast:", "def _add_avar(font, axes, mappings, axisTags):\n\n assert axes\n assert isinstance(axes, OrderedDict)\n\n log.info(\"Generating avar\")\n\n avar = newTable(\"avar\")\n\n interesting = False\n vals_triples = {}\n for axis in axes.values():\n # Currently, some rasterizers require that the default value maps\n # (-1 to -1, 0 to 0, and 1 to 1) be present for all the segment\n # maps, even when the default normalization mapping for the axis\n # was not modified.\n # https://github.com/googlei18n/fontmake/issues/295\n # https://github.com/fonttools/fonttools/issues/1011\n # TODO(anthrotype) revert this (and 19c4b37) when issue is fixed\n curve = avar.segments[axis.tag] = {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}\n\n keys_triple = (axis.minimum, axis.default, axis.maximum)\n vals_triple = tuple(axis.map_forward(v) for v in keys_triple)\n vals_triples[axis.tag] = vals_triple\n\n if not axis.map:\n continue\n\n items = sorted(axis.map)\n keys = [item[0] for item in items]\n vals = [item[1] for item in items]\n\n # Current avar requirements. We don't have to enforce\n # these on the designer and can deduce some ourselves,\n # but for now just enforce them.\n if axis.minimum != min(keys):\n raise VarLibValidationError(\n f\"Axis '{axis.name}': there must be a mapping for the axis minimum \"\n f\"value {axis.minimum} and it must be the lowest input mapping value.\"\n )\n if axis.maximum != max(keys):\n raise VarLibValidationError(\n f\"Axis '{axis.name}': there must be a mapping for the axis maximum \"\n f\"value {axis.maximum} and it must be the highest input mapping value.\"\n )\n if axis.default not in keys:\n raise VarLibValidationError(\n f\"Axis '{axis.name}': there must be a mapping for the axis default \"\n f\"value {axis.default}.\"\n )\n # No duplicate input values (output values can be >= their preceeding value).\n if len(set(keys)) != len(keys):\n raise VarLibValidationError(\n f\"Axis '{axis.name}': All axis mapping input='...' 
values must be \"\n \"unique, but we found duplicates.\"\n )\n # Ascending values\n if sorted(vals) != vals:\n raise VarLibValidationError(\n f\"Axis '{axis.name}': mapping output values must be in ascending order.\"\n )\n\n keys = [models.normalizeValue(v, keys_triple) for v in keys]\n vals = [models.normalizeValue(v, vals_triple) for v in vals]\n\n if all(k == v for k, v in zip(keys, vals)):\n continue\n interesting = True\n\n curve.update(zip(keys, vals))\n\n assert 0.0 in curve and curve[0.0] == 0.0\n assert -1.0 not in curve or curve[-1.0] == -1.0\n assert +1.0 not in curve or curve[+1.0] == +1.0\n # curve.update({-1.0: -1.0, 0.0: 0.0, 1.0: 1.0})\n\n if mappings:\n interesting = True\n\n hiddenAxes = [axis for axis in axes.values() if axis.hidden]\n\n inputLocations = [\n {\n axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])\n for name, v in mapping.inputLocation.items()\n }\n for mapping in mappings\n ]\n outputLocations = [\n {\n axes[name].tag: models.normalizeValue(v, vals_triples[axes[name].tag])\n for name, v in mapping.outputLocation.items()\n }\n for mapping in mappings\n ]\n assert len(inputLocations) == len(outputLocations)\n\n # If base-master is missing, insert it at zero location.\n if not any(all(v == 0 for k, v in loc.items()) for loc in inputLocations):\n inputLocations.insert(0, {})\n outputLocations.insert(0, {})\n\n model = models.VariationModel(inputLocations, axisTags)\n storeBuilder = varStore.OnlineVarStoreBuilder(axisTags)\n storeBuilder.setModel(model)\n varIdxes = {}\n for tag in axisTags:\n masterValues = []\n for vo, vi in zip(outputLocations, inputLocations):\n if tag not in vo:\n masterValues.append(0)\n continue\n v = vo[tag] - vi.get(tag, 0)\n masterValues.append(fl2fi(v, 14))\n varIdxes[tag] = storeBuilder.storeMasters(masterValues)[1]\n\n store = storeBuilder.finish()\n optimized = store.optimize()\n varIdxes = {axis: optimized[value] for axis, value in varIdxes.items()}\n\n varIdxMap = builder.buildDeltaSetIndexMap(varIdxes[t] for t in axisTags)\n\n avar.majorVersion = 2\n avar.table = ot.avar()\n avar.table.VarIdxMap = varIdxMap\n avar.table.VarStore = store\n\n assert \"avar\" not in font\n if not interesting:\n log.info(\"No need for avar\")\n avar = None\n else:\n font[\"avar\"] = avar\n\n return avar", "def analytic_value_VaR(x):\n mu_H = -15 * x + 10 * x ** 2\n # z = 0.67448975 # VaR 0.75\n # z = .7978845608028654 # CVaR 0.5\n z = 1.27111 # CVaR 0.75\n sigma_H = np.sqrt(16 * x ** 2 + 4 * x ** 4)\n return mu_H + z * sigma_H", "def variance_moving_average_time_series(series, length):\n \n # just in case the index isn't already datetime type\n series.index = pd.to_datetime(series.index)\n\n variance = series.rolling(length).var()\n\n variance.name = series.name+\"_var\"\n\n return variance", "def variance(self, mean=None):\n raise NotImplementedError", "def ANOVA_one_way(a):\r\n #a = array(a)\r\n group_means = []\r\n group_variances = []\r\n num_cases = 0 # total observations in all groups\r\n all_vals = []\r\n for i in a:\r\n num_cases += len(i)\r\n group_means.append(mean(i))\r\n group_variances.append(i.var(ddof=1) * (len(i) - 1))\r\n all_vals.extend(i)\r\n\r\n # Get within Group variances (denominator)\r\n dfd = num_cases - len(group_means)\r\n # need to add a check -- if the sum of the group variances is zero it will\r\n # error, but only if the between_Groups value is not zero\r\n within_Groups = sum(group_variances) / dfd\r\n if within_Groups == 0.:\r\n return nan, nan\r\n # Get between Group variances (numerator)\r\n 
all_vals = array(all_vals)\r\n grand_mean = all_vals.mean()\r\n between_Groups = 0\r\n for i in a:\r\n diff = i.mean() - grand_mean\r\n diff_sq = diff * diff\r\n x = diff_sq * len(i)\r\n between_Groups += x\r\n\r\n dfn = len(group_means) - 1\r\n between_Groups = between_Groups / dfn\r\n F = between_Groups / within_Groups\r\n return F, f_high(dfn, dfd, F)", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(self, avg=False):\n if not self.fp_init:\n if not avg:\n return self._calc_var(self.f, self.a, self.b, self.Z)\n else:\n return self._calc_var(self.f_avg, self.a_avg, self.b_avg,\n self.Z_avg)\n return self._var if not avg else self._var_avg", "def variance(x):\n \"\"\" note - why n-1?: since we are likely looking at a sample, x_bar is only an\n estimate of the actual mean, which means that on average (x_i - x_bar) ** 2\n is an underestimate of x_i's squared deviation from the mean, which is why\n we divide by n-1 instead of n (see bit.ly/lL2EapI)\"\"\"\n n = len(x)\n deviations = deviations_from_mean(x)\n return sum_of_squares(deviations) / (n - 1)" ]
[ "0.77815217", "0.6229782", "0.6185854", "0.6170043", "0.601502", "0.5954434", "0.5937822", "0.5889841", "0.57054853", "0.56572413", "0.5547725", "0.550151", "0.5489578", "0.54802334", "0.5443163", "0.5424169", "0.5422288", "0.54091", "0.53961694", "0.5382453", "0.53801197", "0.5377847", "0.5360351", "0.53407115", "0.5328095", "0.5323981", "0.5318844", "0.5318844", "0.5315968", "0.5295924" ]
0.6378373
1
Downloads a cookbook tar.gz, unzips it, and writes the directory structure to the zip file.
def download_cookbook(fileurl, download_dir): # download and save file to download_dir logger.info('Downloading cookbook: %s' % fileurl) # get filename tarname = fileurl.split('/')[-1].split('?')[0] tarfilepath = download_dir + tarname logger.info('Writing cookbook file to %s' % tarfilepath) with open(tarfilepath, 'w') as tmpfile: res = requests.get(fileurl) for chunk in res.iter_content(256): tmpfile.write(chunk) logger.info('Extracting contents of %s to %s' % (tarfilepath, download_dir)) # extract file to download_dir with tarfile.open(tarfilepath) as tar: try: tar.extractall(download_dir) except Exception as e: logger.error('Error extracting tarfile %s. Ex: %s' % (tarfilepath, e)) # delete the downloaded archive logger.info('Deleting %s' % tarfilepath) os.remove(tarfilepath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_and_unzip(url, zip_path, csv_path, data_folder):\n\n download_from_url(url, zip_path)\n\n unzip(zip_path, csv_path, data_folder)\n\n print('Done.')", "def _download_compressed_dir(self, tar_gz_path):\n self._log.debug(\"Downloading compressed directory at {!r}\".format(tar_gz_path))\n\n tar_gz = self._git_show(tar_gz_path)\n dest_tar_path = os.path.join(self._code_dir, tar_gz_path.replace(\"/\", os.path.sep))\n self._ensure_directory(os.path.dirname(dest_tar_path))\n\n self._save_file(dest_tar_path, tar_gz)\n self._extract(dest_tar_path)", "def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()", "def unzip() -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # define the destination\n destination = project_dir / 'data' / 'raw'\n\n # extract zip\n zip_file = ZipFile(destination / \"original.zip\")\n zip_file.extractall(destination)", "def download_data():\n\n if not os.path.exists(zipfile_path):\n print(f'Downloading {config.download_url} to {zipfile_path}')\n urlretrieve(config.download_url, zipfile_path)\n print(f'Successfully downloaded {zipfile_path}')\n\n zip_ref = ZipFile(zipfile_path, 'r')\n zip_ref.extractall(config.raw_data_dir)\n zip_ref.close()\n\n os.rename(f\"{config.raw_data_dir}/cornell movie-dialogs corpus\", extracted_dir)", "def unzip_citibike_data(zip_dir):\n# zip_dir = \"data/citibike-tripdata-nyc/\"\n# csv_dir = \"data/citibike-tripdata-nyc/csv\"\n extension = \".zip\"\n\n # for each zip file in zip_dir extract data\n for item in os.listdir(zip_dir):\n if item.endswith(extension):\n\n # create zipfile object and extract\n file_name = zip_dir + item\n with zipfile.ZipFile(file_name, \"r\") as zip_ref:\n zip_ref.extractall(zip_dir)\n print(item + \" done\")", "def download_and_unzip(url, extract_to='.'):\n http_response = urlopen(url)\n zipfile = ZipFile(BytesIO(http_response.read()))\n zipfile.extractall(path=extract_to)", "def getzip(url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if file_exists(done_file):\n print('{} already downloaded and extracted; skipping. 
To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n print('Downloading {} as {}.'.format(url, zipfile))\n urlretrieve(url, zipfile)\n print('Extracting {} into {}.'.format(zipfile, unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass", "def unzipper(data_address, target_directory):\n import zipfile\n data = \"/home/sharoonsaxena/Datasets/dogs-vs-cats.zip\"\n zip_ref = zipfile.ZipFile(data, \"r\")\n zip_ref.extractall(\"/home/sharoonsaxena/Datasets/extracted/\")\n zip_ref.close()", "def download_zip_file(zip_remote, save_dir, force_overwrite, cleanup=False):\n zip_download_path = download_from_remote(zip_remote, save_dir, force_overwrite)\n unzip(zip_download_path, cleanup=cleanup)", "def dir_2_cbz(dir_pth):\r\n shutil.make_archive(dir_pth, 'zip', dir_pth)\r\n shutil.rmtree(dir_pth)\r\n os.rename(dir_pth+'.zip', dir_pth+'.cbz')\r\n pass", "def download_and_unzip_dataset(url, path):\n dl = urllib.urlretrieve(url)\n zf = zipfile.ZipFile(dl[0])\n zf.extractall(path)\n return zf", "def download(self, cloud_path):\n zip_file = os.path.join(self.root, ZIPFILE)\n unzip_dir = os.path.join(self.root, UNZIP_NAME)\n\n if os.path.isfile(zip_file):\n logger.debug(f\"File {zip_file} exists. Skip download.\")\n else:\n client = GCSClient()\n object_key = os.path.join(NYU_GCS_PATH, ZIPFILE)\n\n logger.debug(\n f\"Downloading file {zip_file} from gs://{const.GCS_BUCKET}/\"\n f\"{object_key}\"\n )\n client.download(const.GCS_BUCKET, object_key, zip_file)\n\n if os.path.isdir(unzip_dir):\n logger.debug(f\"File {unzip_dir} exists. Skip unzip.\")\n else:\n # unzip the file\n with ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(self.root)\n logger.debug(f\"Unzip file from {zip_file}\")", "def download_and_unzip_data(\n url=\"https://storage.googleapis.com/simpeg/em_examples/tdem_groundedsource/tdem_groundedsource.tar\",\n):\n # download the data\n downloads = utils.download(url)\n\n # directory where the downloaded files are\n directory = downloads.split(\".\")[0]\n\n # unzip the tarfile\n tar = tarfile.open(downloads, \"r\")\n tar.extractall()\n tar.close()\n\n return downloads, directory", "def download_one_zip(data_url, data_dir):\r\n\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n r = requests.get(data_url, stream=True)\r\n with open(zipfile_path, \"wb\") as py_file:\r\n for chunk in r.iter_content(chunk_size=1024): # 1024 bytes\r\n if chunk:\r\n py_file.write(chunk)\r\n unzip_nested_zip(zipfile_path, unzip_dir), download_small_file", "def download_small_zip(data_url, data_dir):\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.mkdir(unzip_dir)\r\n zipfile_path, _ = urllib.request.urlretrieve(data_url, zipfile_path)\r\n unzip_nested_zip(zipfile_path, unzip_dir)", "def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])", "def download_and_unzip_data(url, destination, prefix='state-'):\n # make sure destination exists or create a temporary directory\n if not destination:\n destination = tempfile.mkdtemp(prefix=prefix)\n logger.debug(\"Created temp directory 
{}\".format(destination))\n else:\n if not os.path.exists(destination):\n os.makedirs(destination)\n logger.info(\"Created {}\".format(destination))\n zip_filename = get_zipfile_path(url, destination)\n # don't re-download data if raw data file already exists\n if os.path.exists(zip_filename):\n logger.debug(\"{} exists, skipping download\".format(zip_filename))\n else:\n logger.debug(\"Downloading data to {}\".format(zip_filename))\n response = requests.get(url, stream=True)\n # XXX check status code here; e.g., if permissions haven't been granted\n # for a file being downloaded from S3 a 403 will be returned\n content_length = int(response.headers.get('content-length'))\n start = time.clock()\n downloaded = 0\n with open(zip_filename, 'wb') as f:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n downloaded += len(chunk)\n now = time.clock()\n if (now - start) >= 5:\n logger.debug('{0:.2g}% downloaded'.format(downloaded/content_length*100))\n start = now\n f.write(chunk)\n f.flush()\n logger.debug('100% downloaded')\n\n unzip_data(destination, url=url)\n return destination", "def download_data():\r\n print('Downloading cifar-10 data...')\r\n request.urlretrieve(dataurl)\r\n print('Done')\r\n print('Please unzip files. command is:')\r\n print('gzip -d cifar-10-python.tar.gz')\r\n print('tar -xf cifar-10-python.tar')\r\n exit()", "def download_untar(url, download_path, extract_path=None):\n file_name = url.split('/')[-1]\n if extract_path is None:\n extract_path = download_path\n tar_file_path = os.path.join(download_path, file_name)\n download(tar_file_path, url)\n sys.stdout.flush()\n print('Extracting {} archive into {}'.format(tar_file_path, extract_path))\n untar(tar_file_path, extract_path)\n os.remove(tar_file_path)", "def maybe_download_and_extract():\n dest_directory = MODEL_DIR\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def download_dataset(dataset):\n\n if dataset not in URLS:\n print(f\"unknown dataset {dataset}\")\n sys.exit(0)\n\n filename = f'{dataset}.tar.gz'\n url = URLS[dataset]\n\n if not os.path.exists(filename):\n print(f'downloading dataset \"{dataset}\"')\n os.system(f'curl \"{url}\" -o {filename}')\n else:\n print(f'zipfile \"{filename}\" already exists, remove it if you want to re-download.')\n\n if not os.path.exists(dataset):\n print(f'extracting \"{filename}\"')\n os.system(f'tar -xvf {filename}')\n else:\n print(f'folder \"{dataset}\" already exists, remove it if you want to re-create.')\n\n image_chips = f'{dataset}/image-chips'\n label_chips = f'{dataset}/label-chips'\n if not os.path.exists(image_chips) and not os.path.exists(label_chips):\n print(\"creating chips\")\n libs.images2chips.run(dataset)\n else:\n print(f'chip folders \"{image_chips}\" and \"{label_chips}\" already exist, remove them to recreate chips.')", "def unzip_nested_zip(dataset_zip, path_unzip):\r\n\r\n with zipfile.ZipFile(dataset_zip, \"r\") as zfile:\r\n try:\r\n 
zfile.extractall(path=path_unzip)\r\n except OSError as e:\r\n logging.warning(\r\n \"Please check the unzipped files manually. There may be some missed important files.\"\r\n )\r\n logging.warning(\"The directory is: \" + path_unzip)\r\n for root, dirs, files in os.walk(path_unzip):\r\n for filename in files:\r\n if re.search(r\"\\.zip$\", filename):\r\n file_spec = os.path.join(root, filename)\r\n new_dir = os.path.join(root, filename[0:-4])\r\n unzip_nested_zip(file_spec, new_dir)", "def _download_and_uncompress_dataset(dataset_dir):\n filename = _DATA_URL.split('/')[-1]\n filepath = os.path.join(dataset_dir, filename)\n\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dataset_dir)", "def download(self) -> None:\n os.makedirs(self.root, exist_ok=True)\n\n for subset in self.subsets:\n if self._check_subset_integrity(subset):\n print(f\"{subset} already downloaded and verified\")\n continue\n path = os.path.join(self.root, subset + \".tar.gz\")\n\n already_present = os.path.isfile(path)\n if not already_present:\n subset_url = self.openslr_url + subset + \".tar.gz\"\n with requests.get(subset_url, stream=True) as r:\n r.raise_for_status()\n with open(path, \"wb\") as f:\n shutil.copyfileobj(r.raw, f)\n\n archive_md5 = self.data_files[subset][\"archive_md5\"]\n if utils.checksum_file(path, \"md5\") != archive_md5:\n raise utils.DownloadError(f\"invalid checksum for {path}\")\n\n with tarfile.open(path, mode=\"r|gz\") as tar:\n tar.extractall(self.root)\n\n if not already_present:\n os.remove(path)", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def zipfiles (downloadable, name):\n\n print \"compressing files. 
almost done.\"\n import zipfile\n for book in downloadable:\n if (os.path.exists(os.path.join(name, book[1]))):\n files = os.listdir(os.path.join(name, book[1]))\n cbz = zipfile.ZipFile(os.path.join(name, name + '-' + book[1] + '.cbz'), 'w')\n for file in files:\n cbz.write(os.path.join(name, book[1],file))\n cbz.close()", "def maybe_download_and_extract():\n dest_directory = FLAGS['model_dir']\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath,\n reporthook=_progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)", "def maybe_download_and_extract():\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (\n filename, float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)\n print()\n statinfo = os.stat(filepath)\n print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)" ]
[ "0.6516769", "0.6404197", "0.6369594", "0.620349", "0.62003124", "0.6194112", "0.61609334", "0.6144687", "0.6142736", "0.6118757", "0.6090507", "0.6075871", "0.6071649", "0.60694623", "0.60556203", "0.60411805", "0.6037752", "0.6025028", "0.60244155", "0.6015607", "0.60121644", "0.60079163", "0.6005784", "0.59454566", "0.5941157", "0.5932637", "0.59301895", "0.5920356", "0.58794945", "0.587395" ]
0.6953494
0
Search for all cookbooks matching search_term
def cookbook_search(search_term): return db.boxcar_cookbooks.find({'name': {'$regex':'^'+search_term}})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def book_search(self, term):\n\n try:\n cur = self._db.cursor()\n search = f'%{term.upper()}%'\n cur.execute('SELECT rowid, * FROM books WHERE UPPER(title) like ? OR UPPER(author) like ?', (search, search))\n return self._cursor_to_booklist(cur)\n except sqlite3.Error as e:\n raise BookError(f'Error searching for books with search term {term}') from e", "def search(self, term):\n term = u'%' + term.decode('utf8') + u'%'\n cursor = self._dbcon.cursor()\n t = (term, term, term)\n sql = u\"\"\"select rowid, * from books where (title like ?) or\n (author like ?) or (filename like ?)\"\"\"\n cursor.execute(sql, t)\n result = cursor.fetchall()\n cursor.close()\n return [self._book_from_query_result(x) for x in result]", "def search(self, term):", "def search(collection_of_books: tuple, search_tag: str, search_keyword: str) -> list:\r\n found_books = []\r\n\r\n if search_tag == \"Shelf\" and search_keyword.isnumeric():\r\n found_books = [book for book in collection_of_books if search_keyword == book[\"Shelf\"]]\r\n\r\n else:\r\n for book in collection_of_books:\r\n if search_keyword.lower() in book[search_tag].lower():\r\n found_books.append(book)\r\n\r\n return found_books", "def find_books(self):\n search_query = unicode(self.search_input.data)\n q = u'%{}%'.format(search_query)\n\n # used for dummy emulation of caseinsensetive search\n qC = u'%{}%'.format(capfirst(search_query))\n\n books = Book.query.filter(db.or_(\n Book.authors.any(db.or_(\n Author.name.like(q),\n Author.name.like(qC))),\n Book.title.like(q),\n Book.title.like(qC)),)\n\n return books", "def search_recipes(request):\n\n string_to_find = request.GET.get(\"term\", None)\n\n if string_to_find is None:\n return HttpResponse(status=400)\n\n matching_recipes = Recipe.objects.filter(title__icontains=string_to_find)\n\n context = {}\n for r in matching_recipes:\n context[r.title] = reverse('recipes:recipe', kwargs={'recipe_slug': r.slug})\n\n return HttpResponse(json.dumps(context), content_type='application/json')", "def search():\n query = request.form.get(\"query\")\n # pylint: disable=redefined-outer-name\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def search():\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def search():\n query = request.form.get(\"query\", None)\n recipes = mongo.db.recipes.find({\"$text\": {\"$search\": query}})\n return render_template(\"recipes/list.html\", recipes=recipes)", "def search_recipes(\n *,\n keyword: Optional[str] = Query(None, min_length=3, example=\"chicken\"),\n max_results: Optional[int] = 10,\n) -> dict:\n if not keyword:\n # we use Python list slicing to limit results\n # based on the max_results query parameter\n return {\"results\": RECIPES[:max_results]}\n\n results = filter(lambda recipe: keyword.lower() in recipe[\"label\"].lower(), RECIPES)\n return {\"results\": list(results)[:max_results]}", "def search(self, **kwargs):\n return keyword_search(self._rq_list, **kwargs)", "def search_from_terms(api, term, **kwargs):\n tweets=api.GetSearch(term=term)\n return {\"tweets\":tweets}", "def search(self, term):\n return self._search(self._root, term, 0)", "def book_search(library: list) -> None:\n options = ['Author', 'Title', 'Publisher', 'Shelf', 'Category', 'Subject']\n prompt = '\\nWhat option would you like to search by?'\n choice = get_user_choice(options, 
prompt)\n if choice == '1':\n search_by_chosen_option(library, options[0])\n elif choice == '2':\n search_by_chosen_option(library, options[1])\n elif choice == '3':\n search_by_chosen_option(library, options[2])\n elif choice == '4':\n search_by_shelf(library)\n elif choice == '5':\n search_by_chosen_option(library, options[4])\n elif choice == '6':\n search_by_chosen_option(library, options[5])", "def search_book():\n\n title = request.form.get(\"search\")\n books = book_search_results(GR_KEY, title)\n acct = get_current_account(session['acct'])\n search = True\n\n return render_template(\"index.html\", books=books, acct=acct, search=search)", "def search(self, term):\n data = self.__get_data_from_db(term)\n\n if not data:\n data = self.__get_data_from_store(term)\n self.__set_data_to_db(term, data)\n print(data)\n return data", "def search(api_key, term, location):\n\n\n\n url_params = {\n\n 'term': term.replace(' ', '+'),\n\n 'location': location.replace(' ', '+'),\n\n 'limit': SEARCH_LIMIT\n\n }\n\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "def search(api_key, term, location):\r\n\r\n url_params = {\r\n 'term': term.replace(' ', '+'),\r\n 'location': location.replace(' ', '+'),\r\n 'limit': SEARCH_LIMIT\r\n }\r\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "def search_recipes():\r\n cuisine, course, allergens = Helpers.dropdowns(coll_cuisines, coll_courses, coll_allergens)\r\n args = request.args.get\r\n args_list = request.args.getlist\r\n\r\n # Get Search and Pagination arguments from URL\r\n keyword_args = (\r\n args(\"search_keys\") if args(\"search_keys\") is not None else \"\")\r\n cuisineFilter_args = (\r\n args(\"cuisine_filter\") if args(\"cuisine_filter\") is not None else \"\")\r\n courseFilter_args = (\r\n args(\"course_filter\") if args(\"course_filter\") is not None else \"\")\r\n allergenFilter_args = (\r\n args_list(\"allergen_filter\") if args_list(\r\n \"allergen_filter\") is not None else [])\r\n page_args = int(args(\"page\")) if args(\"page\") is not None else 1\r\n\r\n # Set search variables\r\n search_keywords = (\r\n keyword_args.split() if keyword_args is not None else \"\")\r\n search_cuisine = (\r\n cuisineFilter_args if cuisineFilter_args is not None else \"\")\r\n search_course = (\r\n courseFilter_args if courseFilter_args is not None else \"\")\r\n search_allergens = (\r\n allergenFilter_args if allergenFilter_args != [] else \"\")\r\n\r\n # Join search variables and perform search\r\n search = (\r\n '\"' + '\" \"'.join(search_keywords) +\r\n '\" \"' + ''.join(search_cuisine) +\r\n '\" \"' + ''.join(search_course) +\r\n '\"' + ' -' + ' -'.join(search_allergens))\r\n search_results = coll_recipes.find(\r\n {\"$text\": {\"$search\": search}}).skip((page_args * 8) - 8)\\\r\n .limit(8).sort([(\"views\", -1)])\r\n\r\n # Pagination\r\n (\r\n pages, previous_page, next_page, count,\r\n total_recipes, results_count) = Helpers.pagination(\r\n search_results, page_args, coll_recipes)\r\n\r\n return render_template(\r\n \"searchrecipes.html\",\r\n recipes=search_results,\r\n cuisine=sorted(cuisine),\r\n course=course,\r\n allergens=allergens,\r\n keywords=keyword_args,\r\n f_cuisine=cuisineFilter_args,\r\n f_course=courseFilter_args,\r\n f_allergen=allergenFilter_args,\r\n pages=pages,\r\n results_count=results_count,\r\n total_recipes=total_recipes,\r\n count=count,\r\n page=page_args,\r\n next_page=next_page,\r\n previous_page=previous_page)", "def search(api_key, term, location):\n\n url_params = {\n 
'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)", "def search():\n search = request.form.get(\"search\")\n results = mongo.db.recipes.find({\"$text\": {\"$search\": search}}).limit(2)\n result_count = mongo.db.recipes.find(\n {\"$text\": {\"$search\": search}}).count()\n if result_count > 0:\n return render_template(\"pages/search.html\", results=results, search=search, isFooter=True)\n else:\n flash(\"No results found.\")\n return render_template(\"pages/search.html\", results=results, search=search, isFooter=True)", "def search(api_key, term, location, categories, offset, price):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': int(params['limit']),\n 'offset': offset,\n 'categories': categories,\n 'price':price\n }\n \n find_locs = request(API_HOST, SEARCH_PATH, api_key, url_params=url_params)\n \n return json_normalize(find_locs['businesses'])", "def search_sd(terms):\n\n print('-- Search ScienceDirect:', len(terms), 'terms.')\n\n books = defaultdict(set)\n for result in pool.imap(search_sd_helper, chunks(terms, 200)):\n for book in result:\n books[book] |= result[book]\n\n return books", "def search():\n import booksearch as bs\n\n opt = var.get()\n term = searchBox.get()\n term2 = dateBox.get()\n\n # Case statement (substitute) for different search areas\n # Each key is an option in the OptionMenu\n searchBy = {\n \"Title & Author\" : bs.search(term),\n \"ID\" : bs.bookID(term),\n \"Date\" : bs.dateRange(term, term2),\n }\n query = searchBy[opt] # Make & stores a query (2D list)\n\n # Repopulates table\n if term != \"\":\n populate(query)", "def search_catalogue(search_term):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock FROM catalogue WHERE productname = ?\"\"\",\n (search_term, )).fetchall()\n\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Cost\", \"Stock\"]))", "def search_cupcake():\n\n search_term = request.json['search']\n result = Cupcake.query.filter(Cupcake.flavor.like(search_term)).all()\n serialized = search_serialize(result)\n return jsonify(serialized)", "def search(bearer_token, term, location):\n\n url_params = {\n 'term': term.replace(' ', '+'),\n 'location': location.replace(' ', '+'),\n 'limit': SEARCH_LIMIT\n }\n return request_from_yelp(API_HOST, SEARCH_PATH, bearer_token, url_params=url_params)", "def find(self, search_terms, _keywords=None):\n objects = super().get_queryset().order_by(\"name\")\n term_query = Q()\n for t in search_terms:\n term_query.add(Q(name__iexact=t), Q.OR)\n term_query.add(Q(search_tokens__icontains=t), Q.OR)\n return objects.filter(term_query)", "def library_searched():\n\n searched_result = []\n \n updated_books = duplicated_code()\n\n if request.method == 'POST':\n if request.form['type_search'] == 'book':\n book_title = request.form['search']\n for book in updated_books:\n if book['title'] == book_title:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'genre':\n book_genre = request.form['search']\n for book in updated_books:\n if book['genre'] == book_genre:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n elif request.form['type_search'] == 'author':\n book_author = request.form['search']\n for book in updated_books:\n if book['author_name'] == 
book_author:\n searched_result.append(book)\n return render_template(\"library_searched.html\", result = searched_result)\n else:\n return render_template(\"library_searched.html\")", "def search():\n query = request.args['query']\n # find instances of the entered word in title, tags or ingredients\n results = mongo.db.places.find({\n '$or': [\n {'name': {'$regex': query, '$options': 'i'}},\n {'tags': {'$regex': query, '$options': 'i'}},\n {'city': {'$regex': query, '$options': 'i'}},\n ]\n })\n return render_template('search.html', query=query, results=results)" ]
[ "0.72791237", "0.71715647", "0.69993097", "0.68785614", "0.66799706", "0.6646577", "0.6640332", "0.66243654", "0.65412086", "0.64594185", "0.64096427", "0.6388486", "0.63285005", "0.63256395", "0.6321836", "0.6234597", "0.62287444", "0.6206532", "0.61862636", "0.61501443", "0.61109245", "0.6088614", "0.6085403", "0.6066463", "0.6049756", "0.60446453", "0.60282564", "0.6013084", "0.6000662", "0.59917253" ]
0.78580964
0
Search for a single cookbook of the specified name
def find_one_cookbook(name): return db.boxcar_cookbooks.find_one({'name': name})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cookbook_search(search_term):\n return db.boxcar_cookbooks.find({'name': {'$regex':'^'+search_term}})", "def get_recipe_by_name(self, name):\n for key in self.recipe_list.keys():\n if name in self.get_recipes_by_types(key):\n for recipe in self.recipe_list[key]:\n if recipe.name == name:\n print(recipe.__str__())\n return recipe\n sys.stderr.write(UKN_RCP.format(name, self.name))\n raise KeyError", "def search_for_name(self, name):\n for p in self.books_all:\n if p['name'] == name:\n return p", "def get_recipe_by_name(self, name):\n for rec_list in self.recipe_list.values():\n for item in rec_list:\n if (item.name == name):\n return(item)", "def get_recipe_by_name(self, name):\n\t\tfor key, val in self.recipes_list.items():\n\t\t\tfor a, b in val.items():\n\t\t\t\tif name == a:\n\t\t\t\t\tprint(str(b))", "def search_food(cls, name):\n obj = cls.objects(name=name).first()\n return obj", "def find(self, name):\n return Search(self.request).find(name)", "def find_recipe(self, recipe_id):\n return self.find_doc('recipe', 'name', self.get_unique_recipe_name(recipe_id))", "def find_by_name(name):\n return repository.find_by_name(name)", "def find_by_name ( self, name, **kw ):\n try:\n return next ( self.find_all_by_name ( name, **kw ) )\n except StopIteration:\n return None", "def find_resource(self, resource_name, package_title=None):\n metadata = self.get_ckan_metadata()\n results = []\n for id, resource in metadata.items():\n if resource['name'] == resource_name:\n if package_title is None or resource['dataset']['title'] == package_title:\n results.append(resource)\n return results[0] if len(results) == 1 else results", "def search_for_customer(self, name):\n customers_list = self.get_customers()\n return next((customer for customer in customers_list if customer.get('name') == name), {'name': None, 'parent':None, 'active': None, 'link': None })", "def get_by_name(self, name, isCaseSensitive=None):\n # log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_by_name()\")\n\n if isCaseSensitive is None or isCaseSensitive is True:\n for obj in self.get_list():\n if obj.options['name'] == name:\n return obj\n else:\n for obj in self.get_list():\n if obj.options['name'].lower() == name.lower():\n return obj\n return None", "def search_product_by_name(name, filters):\n return store_handler.search_product_by_name(name, filters)", "def dataFind(self, collectionName, catagory, data):\n result = collectionName.find_one({catagory: data})\n return result", "def _get_kit_by_component(self, comp_name, comp_version=None):\n kit_list = self._kit_db_api.getKitList()\n kits = [\n kit\n for kit in kit_list\n for component in kit.getComponentList()\n if component.getName() == comp_name and\n (comp_version is None or\n component.getVersion() == comp_version)\n ]\n if not kits:\n raise KitNotFound(\n 'Kit containing component [%s] not found' % (comp_name))\n\n if len(kits) > 1:\n raise ComponentNotFound(\n 'Kit name must be specified, multiple kits contain '\n 'component: {}'.format(comp_name)\n )\n\n return kits[0]", "def get_volume_by_name(self, name):\n for vol in self.conn.volumes:\n if vol.name == name:\n return vol\n raise KeyError(\"Volume with NAME \" + name + \" not found\")", "def search_by_shelf(library: list) -> None:\n user_input = input(f'What is the number/name of the shelf you want to search for?')\n found_books = []\n for book in library:\n if user_input.lower() == str(getattr(book, 'shelf')).lower():\n found_books.append(book)\n print(f'We found {len(found_books)} book(s) that matched this 
search in your library.\\n')\n for num, book in enumerate(found_books, 1):\n print(f'{num} - {book.__repr__()}')\n if len(found_books) > 0 and not return_to_main_menu():\n move_book(library, found_books)", "def search(isamAppliance, name, force=False, check_mode=False):\n ret_obj = get_all(isamAppliance)\n return_obj = isamAppliance.create_return_object()\n\n for obj in ret_obj['data']:\n if obj['name'] == name:\n logger.info(\"Found STS Chain {0} id: {1}\".format(name, obj['id']))\n return_obj['data'] = obj['id']\n return_obj['rc'] = 0\n\n return return_obj", "def search(name):\n try:print(f'Searching for {name}...');os.system(f'python -m pip search {name}')\n except Exception as e:print(\"something went wrong\\n{e}\")", "def search(self, name: str) -> \"Navaids\":\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )", "def search():\n query = request.form.get(\"query\")\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants", "def search(collection_of_books: tuple, search_tag: str, search_keyword: str) -> list:\r\n found_books = []\r\n\r\n if search_tag == \"Shelf\" and search_keyword.isnumeric():\r\n found_books = [book for book in collection_of_books if search_keyword == book[\"Shelf\"]]\r\n\r\n else:\r\n for book in collection_of_books:\r\n if search_keyword.lower() in book[search_tag].lower():\r\n found_books.append(book)\r\n\r\n return found_books", "def search():\n query = request.form.get(\"query\")\n # pylint: disable=redefined-outer-name\n recipes = list(mongo.db.recipes.find({\"$text\": {\"$search\": query}}))\n return render_template(\"recipes.html\", recipes=recipes)", "def find(owner_name, resource_name):\n resource = find_node(owner_name, resource_name)\n if not isinstance(resource, Resource):\n raise NotFound('Not a resource: %s / %s' % (owner_name, \n resource_name))\n require.resource.read(resource)\n return resource", "def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None", "def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None", "def findFood(self,name):\n\t\tname = name.lower()\n\t\treturn dictfood.has_key(name)", "def get_recipe_chef(soup_recipe):\n chef_name = soup_recipe.find(\"span\", {\"itemprop\": \"author\"})\n if not chef_name:\n return None\n return chef_name.get_text().strip()" ]
[ "0.7266315", "0.66256815", "0.6528147", "0.64047", "0.63835543", "0.61755985", "0.60229975", "0.59094644", "0.57915264", "0.573889", "0.56801695", "0.5435157", "0.5398273", "0.53891546", "0.5387135", "0.5348948", "0.5345011", "0.5339703", "0.52805656", "0.5240057", "0.5239863", "0.5215055", "0.5214187", "0.52104855", "0.5207545", "0.5202028", "0.5169945", "0.5169945", "0.51643103", "0.51431924" ]
0.8132757
0
Check if credit limit for partner was exceeded.
def check_limit(self): self.ensure_one() partner = self.partner_id moveline_obj = self.env['account.move.line'] movelines = moveline_obj.\ search([('partner_id', '=', partner.id), ('account_id.user_type_id.type', 'in', ['receivable', 'payable']), ('full_reconcile_id', '=', False)]) debit, credit = 0.0, 0.0 today_dt = datetime.strftime(datetime.now().date(), DF) for line in movelines: if line.date_maturity < today_dt: credit += line.debit debit += line.credit if (credit - debit + self.amount_total) > partner.credit_limit: # Consider partners who are under a company. if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit): partner.write({ 'credit_limit': credit - debit + self.amount_total}) return True else: msg = '%s Can not confirm Sale Order,Total mature due Amount ' \ '%s as on %s !\nCheck Partner Accounts or Credit ' \ 'Limits !' % (partner.over_credit,credit - debit, today_dt) raise UserError(_('Credit Over Limits !\n' + msg)) else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def _check_rate_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n # TODO distinct up/down rates\n # check limiting rate for resource flow in/out, if any\n if self._rate:\n request = {res: None}\n inputs = {'request': request,\n 'meta': meta,\n 'raven_vars': raven_vars,\n 'dispatch': dispatch,\n 't': t}\n max_rate = self._rate.evaluate(inputs, target_var=res)[0][res]\n delta = np.sign(amt) * min(max_rate, abs(amt))\n print('max_rate in _check_rate_limit',max_rate, 'delta (min of maxrate and abs(amt)',delta)\n return {res: delta}, meta\n return {res: amt}, meta", "def _safe_limit_check(self):\n if self.rem == 40:\n self.time_start = time.time()\n elif time.time() - self.time_start >= 11:\n self.rem = 40\n self.time_start = time.time()\n elif self.rem <= 0:\n t = 11 - (time.time() - self.time_start)\n\n if t <= 0:\n self.rem = 40\n self.time_start = time.time()\n else:\n if self.policy == Limit.Sleep:\n time.sleep(t)\n elif self.policy == Limit.Ignore:\n return False\n\n self.rem -= 1\n return True", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n _(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "async def cclimit(self, ctx, limit_amount: int = None):\n if limit_amount is None:\n return await ctx.send_help()\n if limit_amount < 0:\n return await ctx.send(\"You need to use a number larger than 0.\")\n await self.config.limit.set(limit_amount)\n await ctx.send(f\"Chatchart is now limited to {limit_amount} messages.\")", "def check_rate_limit(self):\n # Already received 429 from server\n if self.rate_limit_sleep:\n msg = f\"请求过于频繁,已被BitMEX限制,请等待{self.rate_limit_sleep}秒后再试\"\n self.gateway.write_log(msg)\n return False\n # Just local request limit is reached\n elif not self.rate_limit_remaining:\n msg = \"请求频率太高,有触发BitMEX流控的风险,请稍候再试\"\n self.gateway.write_log(msg)\n return False\n else:\n self.rate_limit_remaining -= 1\n return True", "def check(self):\n 
self.__check_request_limit()", "def has_reached_limit(domain, limit=RATE_LIMIT):\n count = count_domain_certs_since(domain)\n return count >= limit", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def allowedLimit(self, number, msg=None):\n return allowed_limit(number, msg)", "async def check_total(self, guild: discord.Guild):\n\n max_allowed = await self.config.guild(guild).total_players()\n signed = await self.config.guild(guild).signed()\n\n if signed < max_allowed:\n return True", "def is_few_remaining(self) -> bool:\n return self.on_hand <= self.warn_limit", "def exceeded_max(self):\n return self.total_max is not None and self.counter > self.total_max", "def check_min_value(self, tracked_price):\n if tracked_price < self.min_value and self.warning_calls <= 2:\n print(f'Warning! Price dropeed under {self.min_value} pln {tracked_price}')\n self.make_phone_call()\n self.warning_calls += 1\n elif tracked_price < self.min_value and self.warning_calls == 3:\n self.send_a_message(\n f'This is a warning message. Price of EUR/PLN dropped under critical value!'\n f' {self.min_value} pln')\n print(f'Called 3 times! Price dropeed under {self.min_value} pln {tracked_price}')\n self.warning_calls = 0\n else:\n print(f\"Current price for Euro in PLN is {tracked_price}\")", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def is_power_limited(self):\n status = self.get_status_response()\n return ((status[1] & 0x10) == 0x10)\n #end is_power_limited()", "def is_rate_limit_exceeded(self, request):\r\n counts = self.get_counters(request)\r\n return sum(counts.values()) >= self.requests", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def charge(self,price):\n\n if price + self._balance> self._limit:\n return False\n else:\n self._balance+=price\n return True", "def check_constrained(self, limit=None):\n\n # Set the 'well-constrained' limit at 10% (arbitrary) if not provided.\n limit = (Decimal(0.1) if not limit else Decimal(limit))\n\n if is_empty(self.value) or is_empty(self.uncertainty):\n return False\n elif self.uncertainty > (Decimal(self.value) * Decimal(limit)):\n self.well_constrained = False\n else:\n self.well_constrained = True", "def test_custom_per_project_upper_limit(self):\n data = {'payment_amount': '50.00'}\n account = Account(goal=8000, current=3001)\n form = DonationAmountForm(data=data, account=account)\n self.assertFalse(form.is_valid())\n errors = form.errors.as_data()\n self.assertEqual('max_value', errors['payment_amount'][0].code)\n self.assertTrue('$49.99' in errors['payment_amount'][0].message)\n\n account.current = 3000\n form = DonationAmountForm(data=data, account=account)\n self.assertTrue(form.is_valid())", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "def check_resource_limit(self, selection_count, population_count):\n p = 
self.ctx.policy\n max_resource_limits = MaxResourceLimit(p, selection_count, population_count)\n return max_resource_limits.check_resource_limits()", "def can_accept_credit(self, value):\n return value >= 0", "def check(self):\n logging.info(\"rate limit remaining %s\" % self.remaining)\n while self.remaining <= 1:\n now = time.time()\n logging.debug(\"rate limit < 1, now=%s and reset=%s\", now,\n self.reset)\n if self.reset and now < self.reset:\n # padded with 5 seconds just to be on the safe side\n secs = self.reset - now + 5\n logging.info(\"sleeping %s seconds for rate limiting\" % secs)\n time.sleep(secs)\n else:\n # sleep a second before checking again for new rate limit\n time.sleep(1)\n # get the latest limit\n self.ping()\n self.remaining -= 1", "def limit_reached(self):\n if len(self.selected) >= self.limit:\n return True\n return False", "def check_gc_min_max(self):\n if not self.allow_open_amount:\n return\n\n if self.gc_min < 0 or self.gc_max < 0:\n self.raise_user_error(\"negative_amount_not_allowed\")\n\n if self.gc_min > self.gc_max:\n self.raise_user_error(\"invalid_amount\")", "def is_limited(self) -> bool:\n return self.__times > ActionState.UNLIMITED", "def retry_allowed(self, total_sleep, num_retries):\n if self.max_cumulative_retry is None:\n return num_retries <= self.max_retries\n else:\n return total_sleep <= self.max_cumulative_retry" ]
[ "0.7017334", "0.67190903", "0.6673682", "0.6619289", "0.6474647", "0.6447294", "0.6442348", "0.6328854", "0.6313708", "0.628994", "0.628804", "0.6241019", "0.6215823", "0.6123995", "0.61043304", "0.60935956", "0.6043182", "0.6026192", "0.6019101", "0.5975524", "0.5956821", "0.59448576", "0.59441096", "0.59225196", "0.59179187", "0.5916446", "0.5888424", "0.58736026", "0.58655554", "0.58650625" ]
0.83692616
0
Extend to check credit limit before confirming sale order.
def action_confirm(self): for order in self: order.check_limit() return super(SaleOrder, self).action_confirm()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_limit(self):\n self.ensure_one()\n partner = self.partner_id\n moveline_obj = self.env['account.move.line']\n movelines = moveline_obj.\\\n search([('partner_id', '=', partner.id),\n ('account_id.user_type_id.type', 'in',\n ['receivable', 'payable']),\n ('full_reconcile_id', '=', False)])\n\n debit, credit = 0.0, 0.0\n today_dt = datetime.strftime(datetime.now().date(), DF)\n for line in movelines:\n if line.date_maturity < today_dt:\n credit += line.debit\n debit += line.credit\n\n if (credit - debit + self.amount_total) > partner.credit_limit:\n # Consider partners who are under a company.\n if partner.over_credit or (partner.parent_id and partner.parent_id.over_credit):\n partner.write({\n 'credit_limit': credit - debit + self.amount_total})\n return True\n else:\n msg = '%s Can not confirm Sale Order,Total mature due Amount ' \\\n '%s as on %s !\\nCheck Partner Accounts or Credit ' \\\n 'Limits !' % (partner.over_credit,credit - debit, today_dt)\n raise UserError(_('Credit Over Limits !\\n' + msg))\n else:\n return True", "def use(self):\n if self.credit < self.price_of_trip:\n print(\"Your credit is not enough, please increase your credit\")\n else:\n self.credit -= self.price_of_trip\n print(\"Done\")", "def limit_chase(self, oq, max_chase=3.0, failsafe=False, double_check=False):\n ret = self.send_order(oq=oq, ot='limit', price=None)\n order_id = ret[0]['orderID']\n last_price = ret[0]['price']\n side = ret[0]['side']\n max_chase_buy = float(last_price) + float(max_chase)\n max_chase_sell = float(last_price) - float(max_chase)\n avg = last_price\n time.sleep(1)\n self.logger.info(\n f'Chasing {side} order {order_id}, order_price: {avg}, last_price: {last_price}, '\n f'current price: {last_price} max chase: {max_chase_buy}')\n count = 0\n while True:\n count += 1\n o = self.ws_orders(order_id)\n if o:\n if side == 'Buy':\n if double_check:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['buy']\n else:\n _price = self.ws.get_ticker()['buy']\n if float(_price) <= float(max_chase_buy):\n if float(last_price) < float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price}')\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n self.logger.info(ret)\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if double_check:\n time.sleep(0.5)\n\n else:\n if failsafe:\n self.logger.info(f'Order {order_id} exceeded max chase. 
Placing a market order.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n self.send_order(oq, 'market', text='OrderChase Market Failsafe')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n break\n elif side == 'Sell':\n if double_check:\n quote = self.get_quote()\n self.logger.info('Bid: {} Ask: {}'.format(quote['bidPrice'], quote['askPrice']))\n _price = quote['sell']\n else:\n _price = self.ws.get_ticker()['sell']\n if float(_price) >= float(max_chase_sell):\n if float(last_price) > float(_price):\n self.logger.info(f'Amending order {order_id} to price {_price} ')\n ret = self.client.Order.Order_amend(orderID=order_id, price=_price).result()\n self.logger.info(ret)\n last_price = _price\n else:\n self.logger.debug(f'Sleeping, order_price: {last_price}, current price: {_price}')\n if double_check:\n time.sleep(0.5)\n\n else:\n if failsafe:\n self.logger.info(f'Order {order_id} exceeded max chase. Placing a market order.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n self.send_order(oq, 'market', text='OrderChase Market Failsafe')\n else:\n self.logger.info(f'Price {_price} exceeded max chase {max_chase_buy}, giving up.')\n self.client.Order.Order_cancel(orderID=order_id).result()\n break\n else:\n time.sleep(0.5)\n if o:\n self.logger.info(f'{side} Order manually Canceled!')\n self.logger.info('Order Filled')\n break", "def charge(self,price):\n\n if price + self._balance> self._limit:\n return False\n else:\n self._balance+=price\n return True", "async def cclimit(self, ctx, limit_amount: int = None):\n if limit_amount is None:\n return await ctx.send_help()\n if limit_amount < 0:\n return await ctx.send(\"You need to use a number larger than 0.\")\n await self.config.limit.set(limit_amount)\n await ctx.send(f\"Chatchart is now limited to {limit_amount} messages.\")", "def test_cancel_amount_more_than_msc_balance(self):\n entity_a1 = self.entities[1]\n\n # 1. A1 starts with 50.0 MSC, 0.0 MDiv1\n self.check_balance(entity_a1.address, MSC, '50.00000000', '0.00000000') # SP 1\n self.check_balance(entity_a1.address, MDiv1, '0.00000000', '0.00000000') # SP 4\n\n # 2. A1 offers 50.0 MSC for 111.5 MDiv1\n entity_a1.trade('50.00000000', MSC, '111.5', MDiv1, ADD_1)\n self.generate_block()\n self.check_balance(entity_a1.address, MSC, '0.00000000', '50.00000000') # SP 1\n self.check_balance(entity_a1.address, MDiv1, '0.00000000', '0.00000000') # SP 4\n\n # 3. 
A1 cancels 50.0 MSC for 111.5 MDiv1 (cancel-at-price)\n entity_a1.trade('50.00000000', MSC, '111.5', MDiv1, CANCEL_2)\n self.generate_block()\n self.check_balance(entity_a1.address, MSC, '50.00000000', '0.00000000') # SP 1\n self.check_balance(entity_a1.address, MDiv1, '0.00000000', '0.00000000') # SP 4", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def _check_cost(self, cr, uid, ids, context=None):\n for enrich in self.browse(cr, uid, ids, context=context):\n if enrich.amount <= 0:\n raise osv.except_osv(_('ValidateError'), _('The Cost Must Be Greater Than Zero!'))\n return True", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError()\n \n if self._balance + price <= self._limit:\n self._balance += price\n return True\n else: return False", "def check_credit(self):\n self.ensure_one()\n getattr(self, '%s_check_credit' % self.provider, lambda: None)()", "def testInsufficientCash(self):\n\n bid_move = self._move()\n context = self._context()\n context.players[0].cash = 200\n bfpc = BiddingForPrivateCompany()\n\n self.assertFalse(bfpc.run(bid_move, context), bfpc.errors())", "def charge(self, price):\n '''try:\n type(price) == int or type(price) == float\n except ValueError: \n print 'Not a number!'\n \n if type(price) != int or type(price) != float:\n raise ValueError(\"Not a number!\")\n '''\n if price < 0:\n return False\n elif price + self._balance > self._limit:\n return False\n else:\n self._balance += price\n return True", "def complete_purchase(self, customer_credit=0):\r\n \r\n #take the products first, then tell customer how many tickets to take\r\n #requires IChat interface to be passed to tell customers how many tickets to take\r\n \r\n #switch to list view in the collection window\r\n print(\"YES\")\r\n self._slow_click(target=self._images.get_trade(\"list_view_collection_window\"))\r\n print(\"NO\")\r\n \r\n running_total = self.search_for_products()\r\n running_total -= customer_credit\r\n \r\n print(\"running total is \" + str(running_total))\r\n if running_total == 0 or not running_total:\r\n self.cancel_trade()\r\n return False\r\n \r\n total_tickets_notice = 'Please take %i tickets.' 
% running_total\r\n self.Ichat.type_msg(total_tickets_notice)\r\n \r\n #wait for the customer to get the tickets, then click confirm\r\n if not self.preconfirm_scan_purchase(running_total): \r\n self.cancel_trade()\r\n \r\n self.go_to_confirmation()\r\n print(\"starting confirmation scan\")\r\n #run a final confirmation scan to check the products and tickets taken\r\n products_bought = self.confirmation_scan(tickets_to_give=running_total, credit=customer_credit)\r\n \r\n self.Ichat.close_current_chat()\r\n \r\n if products_bought:\r\n self._slow_click(target=self._images.get_trade(\"confirm_button\", \"confirm\"))\r\n wait(Pattern(self._images.get_ok_button()), 600)\r\n self._slow_click(target=self._images.get_ok_button())\r\n products_bought[\"total_tickets\"] = running_total\r\n \r\n return products_bought\r\n \r\n else:\r\n self.cancel_trade()\r\n return False", "def test_adjusted_payment_still_below_invoice(self):\n debit_jobs([(self.job, A(600), Entry.WORK_DEBIT)])\n credit_jobs([(self.job, A(480), A(0), A(20))], D(480))\n self.assert_balances(\n bank=A(480, 0, 0),\n balance=A(100), # debited (600) + credited (-500) = balance (100)\n debited=A(600),\n invoiced=A(580), # debited (600) + adjustment (-20) = invoiced (580)\n paid=A(-480),\n credited=A(-500), # payment (-480) + adjustment (-20) = credited (-500)\n promised=A(100),\n partial=A(480).net_amount,\n tax=A(480).tax_amount,\n )", "def charge(self, price):\n if not isinstance(price, (int, float)):\n raise TypeError('Price must be numeric')\n if price + self._balance > self._limit: # if charge would exceed limit\n return False # cannot accept charge\n self._balance += price\n return True", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def _on_order_amount_too_low(self, _msg):\r\n self.debug(\"### Server said: 'Order amount is too low'\")\r\n self.count_submitted -= 1", "def check_costs(self):\r\n if self.cost > self.owner.player.char_ob.currency:\r\n self.add_error(\r\n \"celebration_tier\",\r\n \"You cannot afford to pay the cost of %s.\" % self.cost,\r\n )", "def confirmed(self, cr, uid, ids, context=None):\n\tallow_archive_line_obj = self.pool.get('services.contracts.allowances.lines')\n for record in self.browse(cr, uid, ids, context=context):\n\t\tif not record.allowances_lines_before :\n \traise osv.except_osv(_('Partner Lines !'), _('Sorry no partner Lines!'))\n\n\t \tlines_ids = [line.id for line in record.allowances_lines_after]\n \tallow_archive_line_obj.unlink(cr,uid,lines_ids,context=context)\n\n\t\tfor lines in record.allowances_lines_before:\n\t\t\tif lines.percentage_rating < 0 or lines.percentage_rating > 100 :\n \t\traise osv.except_osv(_('Rate Error !'), _('Sorry you insert wrong rate ... 
rate is between (0,100)!'))\n \t\tamount_after_rate_id = allow_archive_line_obj.create(cr, uid, {\n \t\t\t\t'cost_of_rent':lines.cost_of_rent,\n \t\t\t\t'amount_untaxed':round (lines.amount_untaxed*lines.percentage_rating/100,2),\n \t\t\t\t'amount_tax':round(lines.amount_tax*lines.percentage_rating/100,2),\n \t\t\t\t'amount_total':round(lines.amount_total*lines.percentage_rating/100,2),\n \t\t\t\t'deduct_days':lines.deduct_days,\n \t\t\t\t'deduct_amount':lines.deduct_amount,\n \t\t\t\t'contract_id':lines.contract_id.id,\n\t\t\t\t\t'env_allow_id_after_rate':record.id,\n\t\t\t\t\t'type': 'after',\n 'category_id':lines.category_id.id,\n\t\t\t\t\t'percentage_rating':lines.percentage_rating,\n\n })\n\t\t\n \n self.write(cr, uid, ids, {'state':'confirmed'})\n return True", "def make_payment(self, cost):\n self.process_coins()\n if self.money_received >= cost:\n change = round(self.money_received - cost, 2)\n print(f\"Here is {self.CURRENCY}{change} in change.\")\n self.profit += cost\n self.money_received = 0\n return True\n else:\n print(\"Sorry that's not enough money. Money refunded.\")\n self.money_received = 0\n return False", "def update_customer_credit(cust_id, credit_limit):\n update_query = Customer.update(credit_limit=credit_limit) \\\n .where(Customer.customer_id == cust_id)\n if not update_query.execute():\n raise ValueError(\"Record does not exist\")\n return True", "def _check_amount_with_priority(self):\n\t\tfor slc in self:\n\t\t\tif slc.max_amount and self.search([('priority', '<', slc.priority), ('max_amount', '>=', slc.max_amount)]):\n\t\t\t\traise Warning(_(\"There are below slides [Priority less than %s] with bigger amount from [%s]\"\n\t\t\t\t \" which against the logic!!!\\n You can increase amount or handel priority\")\n\t\t\t\t % (slc.priority, slc.max_amount))", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! 
Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def update_willorder_limit(\n sender, instance, created, using, update_fields, *args, **kwargs\n):\n # only triggers when 'been_paid' is passed to the kwarg update field when calling save method on invoice\n if update_fields and \"been_paid\" in update_fields:\n order_details = InvoiceService(instance.order).limit_details\n # goes through all the current order_details and sets limits on them when the invoice is paid\n for order_detail, order_numbers in order_details.items():\n try:\n willorder_limit = OrderLimit.objects.get(\n invoice=instance, detail=order_detail\n )\n if order_numbers > willorder_limit.limit:\n willorder_limit.limit = order_numbers\n willorder_limit.save()\n except OrderLimit.DoesNotExist:\n OrderLimit.objects.create(\n invoice=instance, detail=order_detail, limit=order_numbers\n )\n\n # update discounts as redeemed when billed\n discounts = instance.discounts.all()\n # sets record on discounts when paid\n if discounts.exists():\n for discount in discounts:\n discount.redeemed += 1\n discount.save()\n discount.redeemed_by.add(instance.order.user)", "def update_customer_credit(customer_id, credit_limit):\n try:\n customer = cm.Customers.get(cm.Customers.customer_id == customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except cm.DoesNotExist:\n raise ValueError", "def update_customer_credit(customer_id, credit_limit):\n try:\n with database.transaction():\n customer = Customer.get_by_id(customer_id)\n customer.credit_limit = credit_limit\n customer.save()\n except Exception as unknown_error:\n print(f'Error. Cutomer {customer_id} does not exist. {unknown_error}')\n raise ValueError", "def _check_amount_with_priority(self):\n\t\tfor line in self:\n\t\t\tif line.tax_slide_id and (line.amount_from > line.tax_slide_id.max_amount\n\t\t\t or line.amount_to > line.tax_slide_id.max_amount):\n\t\t\t\traise Warning(_(\"Line Amount couldn't exceed te slide max amount [%s]\" % line.tax_slide_id.max_amount))", "def payment_approval(self, house_cost: (int, float)):\n if self.money_available >= house_cost: # Person has enough available money to make a deal with Realtor\n self.money_available -= house_cost\n print(f'Payment from {self.name} was approved')\n return True\n print(f'{self.name} doesn\\'t have enough money to buy this house')\n return False", "def action_confirm(self):\n if any(not l.is_available for l in self.mapped('order_line')):\n raise UserError(_('Some of your products in order does not have enough quantity available'))\n res = super(SaleOrder, self).action_confirm()\n return res", "def test_cancel_amount_more_than_tmsc_balance(self):\n entity_a1 = self.entities[1]\n\n # 1. A1 starts with 50.0 TMSC, 0 TIndiv1\n self.check_balance(entity_a1.address, TMSC, '50.00000000', '0.00000000') # SP 2\n self.check_balance(entity_a1.address, TIndiv1, '0', '0') # SP 2147483651\n\n # 2. A1 offers 50.0 TMSC for 100 TIndiv1\n entity_a1.trade('50.00000000', TMSC, '100', TIndiv1, ADD_1)\n self.generate_block()\n self.check_balance(entity_a1.address, TMSC, '0.00000000', '50.00000000') # SP 2\n self.check_balance(entity_a1.address, TIndiv1, '0', '0') # SP 2147483651\n\n # 3. A1 cancels 50.0 TMSC for 100 TIndiv1 (cancel-at-price)\n entity_a1.trade('50.00000000', TMSC, '100', TIndiv1, CANCEL_2)\n self.generate_block()\n self.check_balance(entity_a1.address, TMSC, '50.00000000', '0.00000000') # SP 2\n self.check_balance(entity_a1.address, TIndiv1, '0', '0') # SP 2147483651" ]
[ "0.7913511", "0.6271354", "0.61509377", "0.61213034", "0.59694165", "0.59436303", "0.59382004", "0.5932659", "0.5925501", "0.5905597", "0.5774578", "0.57630503", "0.5758884", "0.5758847", "0.57344717", "0.5730981", "0.57309234", "0.5721947", "0.5711063", "0.56954175", "0.5691851", "0.56849176", "0.5655139", "0.56522626", "0.56495017", "0.5641131", "0.5628216", "0.5603934", "0.55949616", "0.5571064" ]
0.7099402
1
This method saves microburst data to a CSV file.
def saveData(self, fPath=None): keys = ['dateTime', 'dos1rate', 'peak_std', 'Lm_OPQ', 'MLT_OPQ', 'lat', 'lon', 'alt', 'Dist_In_Track', 'Lag_In_Track', 'Dist_Total','Loss_Cone_Type', 'flag'] # headerl1 = ['Microburst catalogue created on {}'.format( # datetime.now())] headerl2 = copy.copy(keys) #headerl2[0] = '# {}'.format(headerl2[0]) if fPath is None: saveDir = os.path.abspath('./../data/z_daily_microburst_catalogues/') saveName = 'AC6{}_{}_microbursts.txt'.format(self.sc_id, self.date.date()) fPath = os.path.join(saveDir, saveName) if len(self.peakInd) == 0: print('No microbursts detected. Not saving the file {}.'.format(saveName)) return with open(fPath, 'w', newline='') as f: writer = csv.writer(f) #writer.writerow(headerl1) writer.writerow(headerl2) row_arr = [None]*len(keys) for row, peakInd in enumerate(self.peakInd): for ic, key in enumerate(keys): # Save data to file. if statement checks to make sure we # are saving data from the 10Hz data or derived data. if key != 'peak_std': row_arr[ic] = self.d[key][peakInd] else: row_arr[ic] = self.peak_std[row] writer.writerow(row_arr) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_microtrips(self):\n for data in self.microtrip_data:\n file_name = \"../data/microtrips/\" + data.file.name + \"_m\" + \".csv\"\n data.save_csv(file_name)", "def save(self, data, outpath):\n data.to_csv(outpath)", "def save_csv(self, filename): # DONE\n self.data.to_csv(filename)", "def save_as_csv(time_series, data, path_and_file_name):\n\n parent_name = \"test\"\n parent_uqid = uuid.uuid4()\n\n file_obj = open(path_and_file_name, 'w')\n file_obj.write('version,'+str(2)+'\\n')\n file_obj.write('numOfCH,'+str(1)+'\\n')\n file_obj.write('type, scan\\n')\n file_obj.write('ch_type,'+str(0)+'\\n')\n\n file_obj.write('carpet pos,'+str(0)+'\\n')\n file_obj.write('parent_name,'+str(parent_name)+'\\n')\n file_obj.write('parent_uqid,'+str(parent_uqid)+'\\n')\n file_obj.write('parent_filename,'+str(path_and_file_name)+'\\n')\n\n file_obj.write('pc, 0\\n')\n file_obj.write('Time (ns), CH0 Auto-Correlation\\n')\n for time_step in range(0, time_series.shape[0]):\n file_obj.write(str(float(time_series[time_step]))+','+str(data[time_step])+ '\\n')\n file_obj.write('end\\n')\n\n file_obj.close()", "def _save_data(self):\n self.data.to_csv('data/c&le/{}'.format(self.name))", "def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def save_csv(data): \n bank_data = data\n\n #Creating headers for the csv file\n header = [\"Lender\", \"Max Loan Amount\", \"Max LTV\", \"Max DTI\", \"Max Credit Score\", \"Interest Rate\"]\n\n #Creating output path of the CSV file\n csvpath = Path(\"save_file.csv\")\n\n #Opening the csv file in csvpath by using the open() method\n with open(csvpath, \"w\", newline='') as csvfile:\n\n csvwriter = csv.writer(csvfile, delimiter = \",\")\n csvwriter.writerow(header)\n for row in bank_data:\n csvwriter.writerow(row)\n\n return data", "def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)", "def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))", "def save_csv(self, save_path=''):\n if not save_path:\n time = datetime.now()\n time = datetime.strftime(time, '%Y-%m-%d_%H:%M:%S')\n filename = time + '.csv'\n save_path = os.path.join(os.path.abspath(os.curdir), filename)\n data = self._get_data()\n with open(save_path, 'wb') as f:\n for line in data:\n f.write(line + '\\n')", "def dataSave():\n # NR5G = gui_reader()\n try: #Python3\n f = open(__file__ + \".csv\",'wt', encoding='utf-8')\n except:\n f = open(__file__ + \".csv\",'wb')\n f.write('%s,'%(entryCol.entry0.get()))\n f.write('%s,'%(entryCol.entry1.get()))\n f.write('%s,'%(entryCol.entry2.get()))\n f.write('%s,'%(entryCol.entry3.get()))\n f.close()\n print(\"DataSave: File Saved\")", "def save_csv(net, wires, net_id, chip_id, chip):\n with open('output/output.csv', 'w') as file:\n # Write first 
line\n output = csv.writer(file)\n output.writerow([\"net\", \"wires\"])\n\n # Index and fill the body\n for step in range(len(wires)):\n output.writerow([net[step],wires[step]])\n\n # End of file\n output.writerow([f\"chip_{chip_id}_net_{net_id}\", chip.cost])", "def save_to_csv(data):\n print(\"Saving file...\")\n\n data = [\"year,rank,company,revenue ($ millions),profit ($ millions)\"] + data\n data = [row.replace(\", \", \"; \").replace(\"\\\"\", \"\") for row in data] # list comprehension\n\n with open(CSV_PATH, \"w\", newline=\"\", encoding=\"utf-8\") as csvfile:\n spamwriter = csv.writer(csvfile, delimiter=\",\")\n for row in data:\n spamwriter.writerow(row.split(\",\"))", "def save_to_csv(self):\r\n # Save the read values to a csv file\r\n with open(self.fname, \"a\") as f:\r\n wr = csv.writer(f, dialect='excel')\r\n wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,\r\n self.soc0, self.set_val, self.P_ac, self.P_bat])", "def saveCSV(name, ra, dec, ang):\n r = res(ra,dec,ang)\n return r.write('{}.csv'.format(name), overwrite = True)", "def save_csv(data, name):\n\n data_shp = np.asarray(np.shape(data))\n data_shp[0] = data_shp[0] + 1\n\n new_array = np.zeros(data_shp)\n new_array[1:, :] = data\n\n csv_array = new_array.astype(str)\n\n # Format the first row with legend\n leg = ['Energy (MeV)', 'Attentuation (cm^-1)']\n\n csv_array[0, :] = leg\n\n np.savetxt(os.path.join(directory, f'{name}.csv'), csv_array, delimiter=',', fmt='%s')", "def saveCSV(self):\n filename=tkFileDialog.asksaveasfilename(defaultextension='.csv',\n initialdir=os.getcwd(),\n filetypes=[(\"csv\",\"*.csv\"),(\"All files\",\"*.*\")])\n if not filename:\n return\n for m in self.matrices:\n matrix = self.matrices[m] \n if matrix != None: \n c=matrix.csvRepresentation()\n f=open(filename,'w')\n f.write(c)\n f.close()\n return", "def write_to_csv(self, verbose: bool = False) -> None: \n Path(self.csv_dir).mkdir(exist_ok=True)\n with open(f\"{self.csv_dir}/train.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as train_file:\n with open(f\"{self.csv_dir}/test.csv\", \"wt\", encoding=\"utf-8\", newline=\"\") as test_file:\n csv_header = (\"phone\", \"phone_class_index\", \"f1\", \"f2\", \"f3\", \"f4\", \"f5\")\n train_csvwriter = csv.writer(train_file)\n test_csvwriter = csv.writer(test_file)\n train_csvwriter.writerow(csv_header)\n test_csvwriter.writerow(csv_header)\n for vowels_and_formants, wav_path, category in self:\n if verbose:\n print(f\"File: {wav_path} (category: {category})\")\n writer = train_csvwriter if category == \"TRAIN\" else test_csvwriter\n for vowel_and_formants in vowels_and_formants:\n phone, formants = vowel_and_formants\n row = (phone, ipa_class_index[phone]) + tuple(formants)\n writer.writerow(row)\n if verbose:\n print(row)", "def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)", "def create_csv_file(self):\r\n # Create a new csv-file\r\n with open(self.fname, 'w') as f:\r\n writer = csv.writer(f, dialect='excel')\r\n writer.writerow(['set_time',\r\n 'read_time_P_ac',\r\n 'read_time_P_bat',\r\n 'soc',\r\n 'set_value',\r\n 'P_ac',\r\n 'P_bat'])", "def export_csv(self, path):\r\n\r\n with open(path, 'w') as f:\r\n f.write('# h,hr,m')\r\n\r\n if self.rho is not None:\r\n f.write(',rho')\r\n if self.temperature is not None:\r\n f.write(',temperature')\r\n\r\n f.write('\\n')\r\n for i in 
range(self.shape[0]):\r\n for j in range(self.shape[1]):\r\n f.write(f'{self.h[i, j]},{self.hr[i, j]},{self.m[i, j]}')\r\n if self.rho is not None:\r\n f.write(f',{self.rho[i, j]}')\r\n if self.temperature is not None:\r\n f.write(f',{self.temperature[i, j]}')\r\n f.write('\\n')\r\n return", "def save_dataset_csv(self, path):\n cols = list(self.data_dict.keys())\n df = pd.DataFrame(self.data_dict, index=None, columns=cols)\n df.to_csv(path, index=True)", "def write(self):\n \n self.df.to_csv('/home/austin/Desktop/Falcon/realestate/Falcon/Datasets/mls.csv')", "def save(self):\n\t\t# save self.dfAnalysis\n\t\tcsvPath = self._getSavePath()\n\t\tprint('saving:', csvPath)\n\t\tself.dfAnalysis.to_csv(csvPath)", "def setup_csv(self) -> None:\n csvData = ['Followers', 'Time']\n\n # Create our CSV file header\n with open(self.graphfile, 'w') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(csvData)\n csvFile.close()", "def writeToCSV(self, filepath):\r\n\t\twith open(filepath, 'w') as outputFile:\r\n\t\t\toutputFile.write(str(self))", "def save_clean_data(self):\n for data in self.clean_data:\n file_name = \"../data/clean_data/\" + data.file.name + data.file.extension\n data.save_csv(file_name)", "def save_as_csv(path,data,NO_SENSORS):\n\n HEADER1 = [ ['Sensor 1'],\n ['X','Y','Z','Time/ms'] ]\n HEADER2 = [ ['Sensor 1',' ',' ',' ','Sensor 2'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n HEADER3 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n HEADER4 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3',' ',' ',' ','Sensor 4'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n HEADER5 = [ ['Sensor 1',' ',' ',' ','Sensor 2',' ',' ',' ','Sensor 3',' ',' ',' ','Sensor 4',' ',' ',' ','Sensor 5'],\n ['X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms','X','Y','Z','Time/ms'] ]\n\n HEADERS = [HEADER1,HEADER2,HEADER3,HEADER4,HEADER5]\n\n HEADER = HEADERS[NO_SENSORS - 1]\n\n # The data is saved as a CSV file using the given path\n with open(path, 'w') as csv_file:\n csv_write = csv.writer(csv_file, dialect='excel')\n csv_write.writerows(HEADER)\n csv_write.writerows(data)", "def write_csv(file_name, data):\n\n with open(file_name, \"w\") as fp:\n\n writer = RiscvInstructionTraceCsv(fp)\n writer.start_new_trace()\n\n for entry in data:\n writer.write_trace_entry(entry)", "def write_to_file(data, name):\n\n time_str = time.strftime('%Y%m%d', time.localtime())\n\n data.to_csv(Path(ROOT_DIR + '/data/' + time_str + '-' + name.replace(\"/\", \"\") + '.csv'), index=True)\n\n pass" ]
[ "0.70688546", "0.6881139", "0.6879607", "0.67490184", "0.6713802", "0.666279", "0.66253996", "0.6521193", "0.6448808", "0.63529104", "0.62716025", "0.6208062", "0.61897266", "0.6178907", "0.61324775", "0.6119791", "0.6088917", "0.6062749", "0.6046528", "0.60461986", "0.60035586", "0.6000747", "0.59992665", "0.5964932", "0.5964138", "0.59505486", "0.59499747", "0.59497404", "0.59415936", "0.5936699" ]
0.71242034
0
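As a side note on the saving step documented in the row above: a minimal, self-contained sketch of the same CSV-writing pattern is given below. The names save_peaks_csv, d, peak_ind, and the default keys tuple are illustrative assumptions, not part of the catalogued code; the sketch only shows the header-then-rows pattern that csv.writer is used for there.

import csv

def save_peaks_csv(d, peak_ind, f_path,
                   keys=('dateTime', 'dos1rate', 'Lm_OPQ', 'MLT_OPQ')):
    # d: dict of equal-length sequences keyed by column name (assumed layout).
    # peak_ind: iterable of integer indices of detected microbursts.
    with open(f_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(keys)  # header row listing the saved columns
        for i in peak_ind:
            writer.writerow([d[key][i] for key in keys])  # one row per detection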
This method calculates the autocorrelation of the counts array in the time range specified by tRange. The times array is used to identify which counts to autocorrelate. The ax argument specifies the subplot on which to plot the autocorrelation.
def _autoCorrCounts(self, times, counts, tRange, norm=True): validIdt = np.where((counts != -1E31) & (times > tRange[0]) & (times < tRange[1]))[0] x = counts[validIdt] - counts[validIdt].mean() # mode=same means that some edge effects will be observed. Should be ok. ac = np.correlate(x, x, mode='same') ac = ac[ac.size//2:] # Lags are normalized to seconds lags = np.arange(0, ac.size)/10 if norm: ac /= len(x)*np.var(x) # Identify peaks peakInd, _ = scipy.signal.find_peaks(ac, prominence=0.1) return ac, lags, max(counts[validIdt]), peakInd
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_autocorrelation(self):\n fig, ax = plt.subplots()\n U = stats.nanmean(self.uf, axis=1)\n # correlate two 1d arrays\n # np.correlate(U, U, mode='full')[len(U) - 1:]\n # but we want to autocorrelate a 2d array over a given\n # axis\n N = U.shape[1]\n pad_N = N * 2 - 1\n s = np.fft.fft(U, n=pad_N, axis=1)\n acf = np.real(np.fft.ifft(s * s.conjugate(), axis=1))[:, :N]\n # normalisation\n acf0 = np.expand_dims(acf[:, 0], 1)\n acf = acf / acf0\n\n fig, ax = plt.subplots(nrows=2)\n c0 = ax[0].contourf(U, self.levels)\n c1 = ax[1].contourf(acf, 100)\n\n fig.colorbar(c0, ax=ax[0], use_gridspec=True)\n fig.colorbar(c1, ax=ax[1], use_gridspec=True)\n\n ax[0].set_title(r'$\\overline{u_x}(z, t)$')\n ax[0].set_xlabel('time')\n ax[0].set_ylabel('z')\n\n ax[1].set_title('autocorrelation')\n ax[1].set_xlabel('lag')\n ax[1].set_ylabel('z')\n\n fig.tight_layout()\n\n return fig", "def autocorrelation(df,maxt,step,vari,acquisiton_time,division_time):\n maxt = int(maxt/acquisiton_time)\n step = int(step/acquisiton_time)\n df = connect_cells(df,vari)\n return np.vstack([correlation(df,Dt,vari) for Dt in\\\n np.arange(0,maxt,step)]),\\\n np.arange(0,maxt,step)*acquisiton_time/division_time", "def plotting_autocorr(dataframe):\n plot_acf(dataframe['STU'].iloc[1:], lags=40)\n plt.show()", "def plot_autocorrelation(series, params, lags, alpha=0.05, title=''):\n plt.rcParams.update(params)\n acf_plot = tsaplots.plot_acf(series, lags=lags, alpha=alpha)\n plt.title(title)\n plt.xlabel('Number of Lags')\n plt.show()", "def step_autocorrelation(trajectories, axis=0):\n\n try:\n if len(axis) == 1:\n axis = axis[0]\n except TypeError:\n pass\n\n ntraj = trajectories.shape[1] # number of particles with a trajectory\n\n # calculate acf of first trajectory in order to determine size of output array. 
timeseries.acf will truncate\n # the array slightly in order to make the FFT efficient\n ACF = acf(trajectories[1:, 0, axis] - trajectories[:-1, 0, axis])\n acfs = np.zeros([ntraj, ACF.size])\n acfs[0, :] = ACF\n\n keep = []\n for t in range(1, ntraj):\n steps = trajectories[1:, t, axis] - trajectories[:-1, t, axis]\n if not np.all(steps == 0):\n acfs[t, :] = acf(steps)\n keep.append(t)\n #acfs[t, :] = acf(trajectories[:ACF.size, t, axis])\n\n return acfs[keep, :]", "def step_autocorrelation(self):\n\n max_hops = max([len(x) for x in self.steps])\n\n self.acf = np.zeros([len(self.steps), max_hops])\n\n keep = [] # list to hold indices of trajectories with a non-zero amount of hops\n for i in range(len(self.steps)):\n hops = self.steps[i]\n if len(hops) > 1:\n self.acf[i, :len(self.steps[i])] = timeseries.acf(self.steps[i])\n keep.append(i)\n\n self.acf = self.acf[keep, :]\n\n self.acf = np.array([self.acf[np.nonzero(self.acf[:, i]), i].mean() for i in range(max_hops)])\n\n #self.acf = timeseries.step_autocorrelation(self.z_interpolated.T[..., np.newaxis])", "def plotAutocorrelation(lXs, lYs, out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\"):\n\n# print len(lXs)\n# print len(lYs)\n lRhs = []\n Ym = np.mean(lYs)\n N = len(lYs)\n C0 = 0.0\n for i in lYs:\n C0 += (i-Ym)*(i-Ym)\n C0 = C0/N\n for i in range(0,N):\n Ch = 0.0\n for j in range(0,(N-i-1)):\n Ch += (lYs[j]-Ym)*(lYs[j+i]-Ym)\n Ch = Ch/N\n lRhs.append(Ch/C0)\n print(len(lRhs))\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.plot(lXs,lRhs)\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)\n\n return lRhs", "def plot_autocorr(self, series):\n plot_acf(series, lags=range(0, 2500, 50), alpha=0.9)\n plt.title(self.ticker + ' Autocorrelation Plot')\n plt.savefig('plots/ARIMA/{0}Autocor.pdf'.format(self.ticker))\n plt.close()", "def plotAutoC(ax, dset, coll):\n from statsmodels.tsa.stattools import acf\n import numpy as np\n from .FigureCommon import texRename\n\n # Pivot to separate out all the walkers\n dd = dset.pivot(index='Step', columns='walker', values=coll)\n\n nlags = dd[0].size\n\n # Calculate the autocorrelation\n outt = dd.apply(lambda x: acf(x, nlags=nlags))\n\n # Cutoff view at 25 steps\n outt = outt.loc[outt.index < 25, :]\n\n # Plot the values\n outt.plot(ax=ax, legend=False, linewidth=0.5)\n\n # Rename columns for plotting\n ax.set_title(texRename(coll))\n\n # Indicate the confidence intervals for failure\n z95 = 1.959963984540054 / np.sqrt(nlags)\n z99 = 2.5758293035489004 / np.sqrt(nlags)\n ax.axhline(y=z99, linestyle='--', color='grey', linewidth=0.5)\n ax.axhline(y=z95, color='grey', linewidth=0.5)\n ax.axhline(y=0.0, color='black', linewidth=0.5)\n ax.axhline(y=-z95, color='grey', linewidth=0.5)\n ax.axhline(y=-z99, linestyle='--', color='grey', linewidth=0.5)\n\n ax.set_xlim(0, 20)", "def _crossCorrCounts(self, timesA, timesB, countsA, countsB, tRange, norm=True):\n validIdtA = np.where((countsA != -1E31) & \n (timesA > tRange[0]) & \n (timesA < tRange[1]))[0]\n x = countsA[validIdtA] - countsA[validIdtA].mean()\n\n validIdtB = np.where((countsB != -1E31) & \n (timesB > tRange[0]) & \n (timesB < tRange[1]))[0]\n y = countsB[validIdtB] - countsB[validIdtB].mean()\n\n cc = np.correlate(x, y, mode='same')\n # Lags are normalized to seconds \n lags = np.arange(-cc.size/2, cc.size/2)/10 \n \n if norm:\n cc /= 
np.sqrt(len(x)*np.var(x)*len(y)*np.var(y))\n \n # Identify peaks\n #peakInd, _ = scipy.signal.find_peaks(cc)\n return cc, lags", "def _plot_acf(autocorrelation_by_lag, lags, min_absolute_autocorrelation,\n max_lag_to_plot, title_string, output_file_name):\n\n indices_to_plot = numpy.where(lags <= max_lag_to_plot)\n autocorrelation_by_lag = autocorrelation_by_lag[indices_to_plot]\n lags = lags[indices_to_plot]\n\n _, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES))\n\n axes_object.plot(\n lags, autocorrelation_by_lag, linestyle='solid', color=MAIN_LINE_COLOUR,\n linewidth=DEFAULT_LINE_WIDTH)\n\n these_x_values = numpy.array([lags[0], lags[-1]])\n these_y_values = numpy.array(\n [min_absolute_autocorrelation, min_absolute_autocorrelation])\n axes_object.plot(\n these_x_values, these_y_values, linestyle='dashed',\n color=SIGNIFICANCE_LINE_COLOUR, linewidth=DEFAULT_LINE_WIDTH)\n\n these_y_values = these_y_values * -1\n axes_object.plot(\n these_x_values, these_y_values, linestyle='dashed',\n color=SIGNIFICANCE_LINE_COLOUR, linewidth=DEFAULT_LINE_WIDTH)\n\n these_y_values = numpy.full(2, 0.)\n axes_object.plot(\n these_x_values, these_y_values, linestyle=':', color=ZERO_LINE_COLOUR,\n linewidth=ZERO_LINE_WIDTH)\n\n pyplot.xlabel('Lag')\n pyplot.ylabel('Autocorrelation')\n pyplot.xlim([0, max_lag_to_plot])\n\n pyplot.title(title_string)\n print 'Saving figure to: \"{0:s}\"...'.format(output_file_name)\n pyplot.savefig(output_file_name, dpi=FIGURE_RESOLUTION_DPI)\n pyplot.close()", "def get_acf_tau(y, c=7.0):\n if np.nansum(y) == 0 or np.nanstd(y) < 1e-12:\n print(\"Autocorr time could not be computed. Check your input.\")\n return 0, np.zeros(len(y)), np.zeros(len(y))\n acf = y*0.\n for ii in range(y.shape[1]):\n acf[:,ii] = autocorr(y[:,ii] - np.nanmean(y[:,ii]))\n acf[:,ii] /= acf[0,ii] #np.nanmax(acf[ii,:])\n f = np.nansum(acf, axis=1) / y.shape[1]\n taus = 2.0 * np.cumsum(f) - 1.0\n window = auto_window(taus, c)\n return taus[window], f, acf", "def _compute_acf(values_in_series):\n\n autocorrelation_by_lag = numpy.correlate(\n values_in_series, values_in_series, mode='same')\n\n # Remove negative lags.\n lag_0_index = numpy.argmax(autocorrelation_by_lag)\n autocorrelation_by_lag = autocorrelation_by_lag[lag_0_index:]\n lags = numpy.linspace(\n 0, len(autocorrelation_by_lag) - 1, num=len(autocorrelation_by_lag),\n dtype=int)\n\n # Divide by num points used to compute each autocorrelation.\n num_points_by_lag = len(values_in_series) - lags\n autocorrelation_by_lag = autocorrelation_by_lag / num_points_by_lag\n\n # Normalize so that lag-0 autocorrelation is 1 (true by definition).\n autocorrelation_by_lag = autocorrelation_by_lag / autocorrelation_by_lag[0]\n\n return autocorrelation_by_lag, lags", "def _calculate_autocorrelations(self):\n\n self._autocorr_real_x = self.__calculate_autocorr(self._noise_field_real, self._n_x, self._n_y, 'x')\n self._autocorr_real_y = self.__calculate_autocorr(self._noise_field_real, self._n_y, self._n_x, 'y')\n self._autocorr_imag_x = self.__calculate_autocorr(self._noise_field_imag, self._n_x, self._n_y, 'x')\n self._autocorr_imag_y = self.__calculate_autocorr(self._noise_field_imag, self._n_y, self._n_x, 'y')", "def EstimatedAutocorr(fw, data, pnum, trialnum, marker1, marker2): \n cycle_start = HeelStrike(fw, data, pnum, trialnum, marker1, marker2)\n x = cycle_start[2] \n time = cycle_start[1]\n drop_NA = np.vstack((x, time))\n #print drop_NA.shape, x.shape, y.shape\n drop_NA = drop_NA.T\n x = drop_NA[:,0]\n #x = 
x[~np.isnan(x).any()]\n \n #n = len(x)\n #var = np.var(x)\n tao = np.correlate(x, x, mode='full')\n # assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))\n #result = r/(var*(np.arange(n, 0, -1)))\n plt.figure(4)\n plt.plot(tao)\n return tao", "def plotting_part_autocorr(dataframe):\n plot_pacf(dataframe['STU'].iloc[1:], lags=40)\n plt.show()", "def plot_autocorrelation(\n trj: TrajaDataFrame,\n coord: str = \"y\",\n unit: str = \"Days\",\n xmax: int = 1000,\n interactive: bool = True,\n):\n pd.plotting.autocorrelation_plot(trj[coord])\n plt.xlim((0, xmax))\n plt.xlabel(f\"Lags ({unit})\")\n plt.ylabel(\"Autocorrelation\")\n if interactive:\n plt.show()\n return plt.gcf()", "def plot_partial_autocorrelation(series, params, lags, alpha=0.05, title=''):\n plt.rcParams.update(params)\n acf_plot = tsaplots.plot_pacf(series, lags=lags, alpha=alpha)\n plt.xlabel('Number of Lags')\n plt.title(title)\n plt.show()", "def pacf_plots(self):\n fig, axes = plt.subplots(3, 2, figsize=(20, 9), sharex=False)\n #\n axes[0, 0].plot(self.ts_df['y'])\n axes[0, 0].set_title('Original Series')\n plot_pacf(self.ts_df['y'], ax=axes[0, 1])\n\n # 1st Differencing\n axes[1, 0].plot(self.ts_df['y'].diff())\n axes[1, 0].set_title('1st Order Differencing')\n # axes[0].set(ylim=(0, 5))\n plot_pacf(self.ts_df['y'].diff().dropna(), ax=axes[1, 1])\n\n # 2nd Differencing\n axes[2, 0].plot(self.ts_df['y'].diff().diff())\n axes[2, 0].set_title('2nd Order Differencing')\n plot_pacf(self.ts_df['y'].diff().diff().dropna(), ax=axes[2, 1])\n\n plt.gcf().autofmt_xdate()\n plt.grid(True)\n plt.show()", "def autocorrplot(trace, varnames=None, max_lag=100, symmetric_plot=False, combined=False,\n figsize=None, textsize=None, skip_first=0, ax=None):\n trace = trace_to_dataframe(trace[skip_first:], combined=combined)\n varnames = get_varnames(trace, varnames)\n\n if figsize is None:\n figsize = (12, len(varnames) * 2)\n\n textsize, linewidth, _ = _scale_text(figsize, textsize, 1)\n\n nchains = trace.columns.value_counts()[0]\n fig, ax = plt.subplots(len(varnames), nchains, squeeze=False, sharex=True, sharey=True,\n figsize=figsize)\n\n max_lag = min(len(trace) - 1, max_lag)\n\n for i, varname in enumerate(varnames):\n for j in range(nchains):\n if nchains == 1:\n data = trace[varname].values\n else:\n data = trace[varname].values[:, j]\n ax[i, j].acorr(data, detrend=plt.mlab.detrend_mean, maxlags=max_lag, lw=linewidth)\n\n if not symmetric_plot:\n ax[i, j].set_xlim(0, max_lag)\n\n if nchains > 1:\n ax[i, j].set_title(\"{0} (chain {1})\".format(varname, j), fontsize=textsize)\n else:\n ax[i, j].set_title(varname, fontsize=textsize)\n ax[i, j].tick_params(labelsize=textsize)\n\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')\n plt.grid(False)\n plt.xlabel(\"Lag\", fontsize=textsize)\n plt.ylabel(\"Correlation\", fontsize=textsize)\n return ax", "def ACF():\n sm.graphics.tsa.plot_acf(df.values.squeeze(), lags=40)\n plt.title('Crude Oil AdjClose Price Autocorrelation')\n plt.savefig('../plots/ACF_Nonstationary.jpg')", "def correlograms(zt):\n\n # Initialize a 2x2 figure\n fig, ax = plt.subplots(2, 2, figsize=(9, 6), sharex=True, sharey=True)\n\n stop = 200 # index of point at which to stop (Diebold et al. 
stopped at 200)\n\n ax[0, 0].plot(acf(zt - zt.mean())[:stop], linewidth=2) # (z - meanz)\n ax[0, 1].plot(acf((zt - zt.mean()) ** 2)[:stop], linewidth=2) # (z - meanz)^2\n ax[1, 0].plot(acf((zt - zt.mean()) ** 3)[:stop], linewidth=2) # (z - meanz)^2\n ax[1, 1].plot(acf((zt - zt.mean()) ** 4)[:stop], linewidth=2) # (z - meanz)^2\n\n titles = ['$(z - \\overline{z})$', '$(z - \\overline{z})^2$', '$(z - \\overline{z})^3$', '$(z - \\overline{z})^4$']\n\n for i in range(4):\n ax[i // 2, i % 2].set_title(titles[i], fontsize=14)\n ax[i // 2, i % 2].tick_params(labelsize=14)\n\n ax[1, 0].set_xlabel('Lag (time steps)', fontsize=14)\n ax[1, 1].set_xlabel('Lag (time steps)', fontsize=14)\n ax[0, 0].set_ylabel('Correlation', fontsize=14)\n ax[1, 0].set_ylabel('Correlation', fontsize=14)", "def ft_acf(\n cls,\n ts: np.ndarray,\n nlags: t.Optional[int] = None,\n adjusted: bool = True,\n ) -> np.ndarray:\n return cls._calc_acf(\n ts=ts, nlags=nlags, adjusted=adjusted, detrend=False\n )", "def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes:\n plot_backend = _get_plot_backend(\"matplotlib\")\n return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs)", "def auto_correlation(values, lags=100):\n lags, corr, line, x = pl.acorr( values, maxlags=lags, usevlines=False, marker=None)\n return lags, corr", "def autocorrelation_plot_limited(series, n_samples=None, ax=None, **kwds):\n n = len(series)\n data = np.asarray(series)\n if ax is None:\n ax = plt.gca(xlim=(1, n_samples), ylim=(-1.0, 1.0))\n mean = np.mean(data)\n c0 = np.sum((data - mean) ** 2) / float(n)\n\n def r(h):\n return ((data[:n - h] - mean) *\n (data[h:] - mean)).sum() / float(n) / c0\n x = (np.arange(n) + 1).astype(int)\n y = lmap(r, x)\n z95 = 1.959963984540054\n z99 = 2.5758293035489004\n # Only show the 99% confidence interval\n ax.axhline(y=z99 / np.sqrt(n), linestyle='--', color='grey')\n# ax.axhline(y=z95 / np.sqrt(n), color='grey')\n ax.axhline(y=0.0, color='black')\n# ax.axhline(y=-z95 / np.sqrt(n), color='grey')\n ax.axhline(y=-z99 / np.sqrt(n), linestyle='--', color='grey')\n ax.set_xlabel(\"Lag\")\n ax.set_ylabel(\"Autocorrelation\")\n if n_samples:\n ax.plot(x[:n_samples], y[:n_samples], **kwds)\n else:\n ax.plot(x, y, **kwds)\n if 'label' in kwds:\n ax.legend()\n ax.grid()\n return ax", "def autocorrelation(x, nlags = 0):\n return [x.corr(x.shift(lag)) for lag in range(nlags + 1)]", "def ft_autocorr_out_dist(\n cls,\n ts: np.ndarray,\n p: float = 0.8,\n max_nlags: t.Optional[int] = None,\n adjusted: bool = True,\n detrended_acfs: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n detrended_acfs = cls._calc_acf(\n ts=ts,\n nlags=max_nlags,\n adjusted=adjusted,\n detrended_acfs=detrended_acfs,\n )\n\n ts_abs = np.abs(ts)\n ts_inliners = ts[ts_abs <= np.quantile(ts_abs, p)]\n\n ts_inliners_acfs = cls._calc_acf(\n ts=ts_inliners, nlags=max_nlags, adjusted=adjusted\n )\n\n dist_acfs = np.abs(\n detrended_acfs[: ts_inliners_acfs.size] - ts_inliners_acfs\n )\n\n return dist_acfs", "def _calc_acf(\n cls,\n ts: np.ndarray,\n nlags: t.Optional[int] = None,\n adjusted: bool = True,\n detrend: bool = True,\n detrended_acfs: t.Optional[np.ndarray] = None,\n ts_detrended: t.Optional[np.ndarray] = None,\n ) -> np.ndarray:\n if detrended_acfs is not None and (\n nlags is None or detrended_acfs.size == nlags\n ):\n return detrended_acfs\n\n if detrend and ts_detrended is None:\n try:\n ts_detrended = _detrend.decompose(ts=ts, ts_period=0)[2]\n\n except ValueError:\n pass\n\n if ts_detrended is None:\n 
ts_detrended = ts\n\n if nlags is None:\n nlags = ts.size // 2\n\n acf = statsmodels.tsa.stattools.acf(\n ts_detrended, nlags=nlags, adjusted=adjusted, fft=True\n )\n return acf[1:]", "def plot_correlation(self, x,y, T, fs, prn1, prn2):\n x = self.create_constant_magnitude_signal(x)\n y = self.create_constant_magnitude_signal(y)\n auto_1 = self.circular_correlation(x, x)\n auto_2 = self.circular_correlation(y, y)\n cross = self.circular_correlation(x, y)\n fig = plt.figure(figsize=(12,5))\n ax1 = fig.add_subplot(1, 3, 1)\n ax2 = fig.add_subplot(1, 3, 2)\n ax3 = fig.add_subplot(1, 3, 3)\n t = arange(0, T, 1.0 / fs)\n ax1.plot(t, auto_1)\n ax1.set_title('Auto-correlation PRN {0}'.format(prn1))\n ax1.set_xlabel('Time (ms)')\n ax2.plot(t, auto_2)\n ax2.set_title('Auto-correlation PRN {0}'.format(prn2))\n ax2.set_xlabel('Time (ms)')\n ax3.plot(t, cross)\n ax3.set_title('Cross-correlation b/t PRN {0} and PRN {1}'.format(prn1, prn2))\n ax3.set_xlabel('Time (ms)')\n xlim = (0, T)\n ylim = ax1.get_ylim()\n for ax in [ax1, ax2, ax3]:\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n ax.set_xticklabels(['{0:1.1f}'.format(1e3 * t) for t in ax.get_xticks()])\n plt.show()" ]
[ "0.661664", "0.6439528", "0.60843444", "0.60580397", "0.59329104", "0.58847815", "0.583871", "0.576628", "0.5759394", "0.5759256", "0.56740904", "0.5664268", "0.56560445", "0.56214404", "0.5612409", "0.5610661", "0.56057805", "0.5589329", "0.5573851", "0.5513599", "0.55013096", "0.5498407", "0.54870236", "0.5457985", "0.5432573", "0.540316", "0.5380271", "0.5344858", "0.5296117", "0.5279554" ]
0.6516164
1
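For reference, the autocorrelation recipe used in the row above (mean removal, np.correlate with mode='same', normalization by N·var, and peak finding) can be written as a small standalone function. This is a sketch under stated assumptions: the function name autocorr, the cadence_s argument (0.1 s for 10 Hz data), and the fill_value default are illustrative names, not taken from the source code.

import numpy as np
import scipy.signal

def autocorr(counts, cadence_s=0.1, fill_value=-1e31):
    # Drop fill values, subtract the mean, and autocorrelate the residual.
    x = counts[counts != fill_value].astype(float)
    x = x - x.mean()
    ac = np.correlate(x, x, mode='same')[x.size // 2:]  # keep lags >= 0
    ac = ac / (x.size * x.var())                        # normalize so ac[0] == 1
    lags = np.arange(ac.size) * cadence_s               # lag axis in seconds
    peaks, _ = scipy.signal.find_peaks(ac, prominence=0.1)
    return ac, lags, peaks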
This method calculates the cross-correlation of the counts arrays in the time range specified by tRange. The times arrays are used to identify which counts to correlate. This method also uses the cross-correlation of the two signals to determine whether the microbursts observed by both AC6 units are coincident. The ax argument specifies the subplot on which to plot the cross-correlation.
def _crossCorrCounts(self, timesA, timesB, countsA, countsB, tRange, norm=True): validIdtA = np.where((countsA != -1E31) & (timesA > tRange[0]) & (timesA < tRange[1]))[0] x = countsA[validIdtA] - countsA[validIdtA].mean() validIdtB = np.where((countsB != -1E31) & (timesB > tRange[0]) & (timesB < tRange[1]))[0] y = countsB[validIdtB] - countsB[validIdtB].mean() cc = np.correlate(x, y, mode='same') # Lags are normalized to seconds lags = np.arange(-cc.size/2, cc.size/2)/10 if norm: cc /= np.sqrt(len(x)*np.var(x)*len(y)*np.var(y)) # Identify peaks #peakInd, _ = scipy.signal.find_peaks(cc) return cc, lags
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _autoCorrCounts(self, times, counts, tRange, norm=True):\n validIdt = np.where((counts != -1E31) & \n (times > tRange[0]) & \n (times < tRange[1]))[0]\n x = counts[validIdt] - counts[validIdt].mean()\n # mode=same means that some edge effects will be observed. Should be ok. \n ac = np.correlate(x, x, mode='same')\n ac = ac[ac.size//2:]\n # Lags are normalized to seconds \n lags = np.arange(0, ac.size)/10 \n \n if norm:\n ac /= len(x)*np.var(x)\n \n # Identify peaks\n peakInd, _ = scipy.signal.find_peaks(ac, prominence=0.1)\n return ac, lags, max(counts[validIdt]), peakInd", "def plot_autocorrelation(self):\n fig, ax = plt.subplots()\n U = stats.nanmean(self.uf, axis=1)\n # correlate two 1d arrays\n # np.correlate(U, U, mode='full')[len(U) - 1:]\n # but we want to autocorrelate a 2d array over a given\n # axis\n N = U.shape[1]\n pad_N = N * 2 - 1\n s = np.fft.fft(U, n=pad_N, axis=1)\n acf = np.real(np.fft.ifft(s * s.conjugate(), axis=1))[:, :N]\n # normalisation\n acf0 = np.expand_dims(acf[:, 0], 1)\n acf = acf / acf0\n\n fig, ax = plt.subplots(nrows=2)\n c0 = ax[0].contourf(U, self.levels)\n c1 = ax[1].contourf(acf, 100)\n\n fig.colorbar(c0, ax=ax[0], use_gridspec=True)\n fig.colorbar(c1, ax=ax[1], use_gridspec=True)\n\n ax[0].set_title(r'$\\overline{u_x}(z, t)$')\n ax[0].set_xlabel('time')\n ax[0].set_ylabel('z')\n\n ax[1].set_title('autocorrelation')\n ax[1].set_xlabel('lag')\n ax[1].set_ylabel('z')\n\n fig.tight_layout()\n\n return fig", "def autocorrelation(df,maxt,step,vari,acquisiton_time,division_time):\n maxt = int(maxt/acquisiton_time)\n step = int(step/acquisiton_time)\n df = connect_cells(df,vari)\n return np.vstack([correlation(df,Dt,vari) for Dt in\\\n np.arange(0,maxt,step)]),\\\n np.arange(0,maxt,step)*acquisiton_time/division_time", "def corrFlag(self, ACwidth=2, CCwidth=1):\n tempPeaks = np.array([], dtype=int)\n for p_i in self.peakInd:\n t_i = self.d['dateTime'][p_i]\n autoCorrRange = [t_i-timedelta(seconds=ACwidth/2), \n t_i+timedelta(seconds=ACwidth/2)] \n crossCorrRange = [t_i-timedelta(seconds=CCwidth/2), \n t_i+timedelta(seconds=CCwidth/2)]\n validIdA = np.where(\n (self.d['dateTime'] > autoCorrRange[0]) & \n (self.d['dateTime'] < autoCorrRange[1])\n )[0]\n # Autocorrelate dos1\n ac1, lags1, max1, peakInd1 = self._autoCorrCounts(\n self.d['dateTime'], \n self.d['dos1rate'], \n autoCorrRange)\n # Autocorrelate dos2 (because dos1 and dos2 respond \n # similarly to noise)\n ac2, lags2, max2, peakInd2 = self._autoCorrCounts(\n self.d['dateTime'], \n self.d['dos2rate'], \n autoCorrRange)\n # Cross correlate dos1 and dos2\n dos12corr, dos12lags = self._crossCorrCounts(\n self.d['dateTime'], \n self.d['dateTime'], \n self.d['dos1rate'], \n self.d['dos2rate'], \n crossCorrRange) \n # First check that dos1 and dos12 are correlated, then\n # then if max(dos2) > 0.5*max(dos1) then it is noise. 
\n if not ( (len(np.where(peakInd1 == 4)[0]) == 1 or \n len(np.where(peakInd1 == 2)[0]) == 1 or \n len(np.where(peakInd2 == 4)[0]) == 1 or \n len(np.where(peakInd2 == 2)[0]) == 1) and\n (max(self.d['dos2rate'][validIdA]) > 1000 or\n max(dos12corr) >= 0.9) ):\n tempPeaks = np.append(tempPeaks, p_i) \n self.peakInd = tempPeaks\n return", "def plot_correlation_ca_code(self, x, y, prn1, prn2):\n x = self.create_constant_magnitude_signal(x)\n y = self.create_constant_magnitude_signal(y)\n auto_1 = self.circular_correlation(x, x)\n auto_2 = self.circular_correlation(y, y)\n cross = self.circular_correlation(x, y)\n fig = plt.figure(figsize=(12,5))\n ax1 = fig.add_subplot(1, 3, 1)\n ax2 = fig.add_subplot(1, 3, 2)\n ax3 = fig.add_subplot(1, 3, 3)\n t = arange(0, len(x))\n ax1.plot(t, auto_1)\n ax1.set_title('Auto-correlation PRN {0}'.format(prn1))\n ax1.set_xlabel('Sample Number')\n ax2.plot(t, auto_2)\n ax2.set_title('Auto-correlation PRN {0}'.format(prn2))\n ax2.set_xlabel('Sample Number')\n ax3.plot(t, cross)\n ax3.set_title('Cross-Correlation b/t PRN {0} and PRN {1}'.format(prn1, prn2))\n ax3.set_xlabel('Sample Number')\n xlim = (-10, len(x))\n ylim = ax1.get_ylim()\n for ax in [ax1, ax2]:\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n ax3.set_xlim(xlim)\n plt.show()", "def _calculate_cc(self, array, corr_range, tau_max, lag_mode):\n\n # lag_mode dict\n mode = self.lag_modi[lag_mode]\n only_tri = int(self.only_tri)\n\n if lag_mode == 'all':\n corrmat = numpy.zeros((2*tau_max + 1, self.N, self.N),\n dtype='float32')\n elif lag_mode == 'sum':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n elif lag_mode == 'max':\n corrmat = numpy.zeros((2, self.N, self.N), dtype='float32')\n\n # loop over all node pairs, NOT symmetric due to time shifts!\n for i in range(self.N-only_tri):\n for j in range((i+1)*only_tri, self.N):\n\n if mode == 2:\n maxcross = 0.0\n argmax = 0\n\n # loop over taus INCLUDING the last tau value\n for t in range(2*tau_max+1):\n\n # here the actual cross correlation is calculated\n crossij = (array[tau_max, i, :] * array[t, j, :]).mean()\n\n # fill in values in matrix depending on lag_mode\n if mode == 0:\n corrmat[t, i, j] = crossij\n\n elif mode == 1:\n if t <= tau_max:\n corrmat[1, i, j] += numpy.abs(crossij)\n if t >= tau_max:\n corrmat[0, i, j] += numpy.abs(crossij)\n\n elif mode == 2:\n # calculate max and argmax by comparing to previous\n # value and storing max\n if numpy.abs(crossij) > maxcross:\n maxcross = numpy.abs(crossij)\n argmax = t\n\n if mode == 2:\n corrmat[0, i, j] = maxcross\n corrmat[1, i, j] = argmax - tau_max\n\n if self.only_tri:\n if lag_mode == 'all':\n corrmat = corrmat + corrmat.transpose(0, 2, 1)[::-1]\n elif lag_mode == 'sum':\n corrmat[0] += corrmat[1].transpose()\n corrmat[1] = corrmat[0].transpose()\n elif lag_mode == 'max':\n corrmat[0] += corrmat[0].transpose()\n corrmat[1] -= corrmat[1].transpose()\n\n return corrmat", "def plot_correlation(self, x,y, T, fs, prn1, prn2):\n x = self.create_constant_magnitude_signal(x)\n y = self.create_constant_magnitude_signal(y)\n auto_1 = self.circular_correlation(x, x)\n auto_2 = self.circular_correlation(y, y)\n cross = self.circular_correlation(x, y)\n fig = plt.figure(figsize=(12,5))\n ax1 = fig.add_subplot(1, 3, 1)\n ax2 = fig.add_subplot(1, 3, 2)\n ax3 = fig.add_subplot(1, 3, 3)\n t = arange(0, T, 1.0 / fs)\n ax1.plot(t, auto_1)\n ax1.set_title('Auto-correlation PRN {0}'.format(prn1))\n ax1.set_xlabel('Time (ms)')\n ax2.plot(t, auto_2)\n ax2.set_title('Auto-correlation PRN {0}'.format(prn2))\n 
ax2.set_xlabel('Time (ms)')\n ax3.plot(t, cross)\n ax3.set_title('Cross-correlation b/t PRN {0} and PRN {1}'.format(prn1, prn2))\n ax3.set_xlabel('Time (ms)')\n xlim = (0, T)\n ylim = ax1.get_ylim()\n for ax in [ax1, ax2, ax3]:\n ax.set_ylim(ylim)\n ax.set_xlim(xlim)\n ax.set_xticklabels(['{0:1.1f}'.format(1e3 * t) for t in ax.get_xticks()])\n plt.show()", "def cross_correlation(self, tau_max=0, lag_mode='all'):\n # Normalize anomaly time series to zero mean and unit variance for all\n # lags, array contains normalizations for all lags\n corr_range = self.total_time - 2*tau_max\n normalized_array = numpy.empty((2*tau_max + 1, self.N, corr_range),\n dtype=\"float32\")\n\n for t in range(2*tau_max + 1):\n # Remove mean value from time series at each vertex (grid point)\n normalized_array[t] = self.dataarray[:, t:t+corr_range] - \\\n self.dataarray[:, t:t+corr_range].\\\n mean(axis=1).reshape(self.N, 1)\n\n # Normalize the variance of anomalies to one\n normalized_array[t] /= normalized_array[t].\\\n std(axis=1).reshape(self.N, 1)\n\n # Correct for grid points with zero variance in their time series\n normalized_array[t][numpy.isnan(normalized_array[t])] = 0\n\n return self._calculate_cc(normalized_array, corr_range=corr_range,\n tau_max=tau_max, lag_mode=lag_mode)", "def cross_correlation(x,y,time):\n import numpy as np\n modeC = \"same\"\n x = (x - np.mean(x))/np.std(x)\n y = (y - np.mean(y))/np.std(y)\n\n timeInt = np.diff(time).mean().days\n numPoints = len(x)\n fig = plt.figure(figsize=(6,3.5)) \n d = np.correlate(y,x,modeC)\n\n plt.plot([0,0],[-0.5,1],color=\"grey\")\n plt.xlabel(\"Lag\")\n plt.ylabel(\"Correlation\")\n plt.plot(np.linspace(len(x)/2*timeInt,-len(x)/2*timeInt,len(x)),d/numPoints)\n plt.show()", "def plotting_autocorr(dataframe):\n plot_acf(dataframe['STU'].iloc[1:], lags=40)\n plt.show()", "def autocorrplot(trace, varnames=None, max_lag=100, symmetric_plot=False, combined=False,\n figsize=None, textsize=None, skip_first=0, ax=None):\n trace = trace_to_dataframe(trace[skip_first:], combined=combined)\n varnames = get_varnames(trace, varnames)\n\n if figsize is None:\n figsize = (12, len(varnames) * 2)\n\n textsize, linewidth, _ = _scale_text(figsize, textsize, 1)\n\n nchains = trace.columns.value_counts()[0]\n fig, ax = plt.subplots(len(varnames), nchains, squeeze=False, sharex=True, sharey=True,\n figsize=figsize)\n\n max_lag = min(len(trace) - 1, max_lag)\n\n for i, varname in enumerate(varnames):\n for j in range(nchains):\n if nchains == 1:\n data = trace[varname].values\n else:\n data = trace[varname].values[:, j]\n ax[i, j].acorr(data, detrend=plt.mlab.detrend_mean, maxlags=max_lag, lw=linewidth)\n\n if not symmetric_plot:\n ax[i, j].set_xlim(0, max_lag)\n\n if nchains > 1:\n ax[i, j].set_title(\"{0} (chain {1})\".format(varname, j), fontsize=textsize)\n else:\n ax[i, j].set_title(varname, fontsize=textsize)\n ax[i, j].tick_params(labelsize=textsize)\n\n fig.add_subplot(111, frameon=False)\n plt.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')\n plt.grid(False)\n plt.xlabel(\"Lag\", fontsize=textsize)\n plt.ylabel(\"Correlation\", fontsize=textsize)\n return ax", "def main():\n logfile = setup_log(os.path.join(os.environ['decor'], 'logs',\n 'tcat_tcorrelate'))\n logfile.info('Started 8.tcat_tcorrelate.py')\n\n subj_list = ['RSDE', 'VREA']\n\n # Below is for full time series.\n __, clip, __ = get_timings(logfile)\n segments = set(c.split('_')[0] for c in clip)\n tcorr_suf = '6mmblur_tcorr_out_spearman'\n for subject in subj_list:\n 
tcorr_main(logfile, subject, segments, tcorr_suf)\n\n # Below is for subset of time series.\n # When run either 'twothirds' or 'abouthalf'.\n \"\"\"\n run, clip, trs = get_timings(logfile)\n for funcseg in ['abouthalf', 'twothirds']:\n segments = set(c.split('_')[0] for c in clip)\n segments = subsettter(segments, funcseg)\n tcorr_suf = '6mmblur_tcorr_out_spearman_{}'.format(funcseg)\n for subject in subj_list:\n tcorr_main(logfile, subject, segments, tcorr_suf)\n \"\"\"", "def get_time_filtered_correlations(a_lt3,a_lt4,adwin_filt_bool,**kw):\r\n verbose = kw.pop('verbose',False)\r\n ### prepare RO results and sort them according to sweep point\r\n for a in [a_lt3,a_lt4]:\r\n a.pts = a.g.attrs['sweep_length']\r\n a.ssros = a.agrp['ssro_results'].value\r\n a.readouts = a.g.attrs['nr_of_ROsequences']\r\n # a.sorted_results = a_ssros.reshape((-1,a.pts,a.readouts))\r\n\r\n\r\n ### correlate the ROs with each other by making a boolean filter:\r\n ### variables here are described in terms of spin states!\r\n m00 = (a_lt3.ssros == 1)*(a_lt4.ssros == 1)\r\n m10 = (a_lt3.ssros == 1)*(a_lt4.ssros == 0)\r\n m01 = (a_lt3.ssros == 0)*(a_lt4.ssros == 1)\r\n m11 = (a_lt3.ssros == 0)*(a_lt4.ssros == 0)\r\n \r\n ### now define unique identifiers for each Ro correlation and recast the correlations into a single array.\r\n ### As identifieres I choose 1 = index 0 in the output list, i.e. 11; 2 = index 1 in the output list ... and so forth\r\n RO_correlators = np.array(len(a_lt3.ssros)*[1])*m11 \\\r\n + np.array(len(a_lt3.ssros)*[2])*m10 \\\r\n + np.array(len(a_lt3.ssros)*[3])*m01 \\\r\n + np.array(len(a_lt3.ssros)*[4])*m00 \r\n ### PH - added to make sure that has a full set of repetitions\r\n RO_correlators = RO_correlators[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n adwin_filt_bool = adwin_filt_bool[:(a.g.attrs['sweep_length']*(len(RO_correlators)/a.g.attrs['sweep_length']))]\r\n\r\n \r\n ### now sort the correlators and the adwin fltr according to the sweep pts\r\n sorted_RO_correlators = RO_correlators.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n sorted_adwin_fltr = adwin_filt_bool.reshape((-1,a_lt3.pts,a_lt3.readouts))\r\n\r\n ### from now on: no numpy magic anymore. from here it is brutforce 'for-looping'\r\n ### (all conceived arrays will have different lengths due to temporal filtering. this break most np methods)\r\n ### although vstack and hstack would probably work...\r\n \r\n return_list = range(a_lt3.pts) ## all of these pts will be substituted with the correlator occurence\r\n for i in range(a_lt3.pts): \r\n correlators_at_sweep_pt = [0,0,0,0]\r\n for j in [1,2,3,4]: ### loop over the correlator identifiers\r\n correlators_at_sweep_pt[j-1] = np.sum(np.logical_and(sorted_adwin_fltr[:,i,:],sorted_RO_correlators[:,i,:]==j)) ## exclude adwin filter and do a logical and with the correlator identifier. 
Then sum over the number of occurences\r\n\r\n\r\n return_list[i] = correlators_at_sweep_pt\r\n\r\n return return_list", "def study_correlation(data: np.array, labels: np.array)->None:\n n_rows = np.size(data, 0)\n n_cols = np.size(data, 1)\n\n fig, ax = plt.subplots(n_cols, n_cols)\n\n for i in range(n_cols):\n for j in range(n_cols):\n if i != j: ax[i][j].scatter(data[:,j], data[:,i], c = labels)\n else: ax[i][j].annotate(\"series \" + str(i), (0.5, 0.5), xycoords = 'axes fraction', ha = \"center\", va = \"center\")\n\n if i < n_cols-1: ax[i][j].xaxis.set_visible(False)\n if j > 0: ax[i][j].yaxis.set_visible(False)\n\n ax[-1][-1].set_xlim(ax[0][-1].get_xlim())\n ax[0][0].set_ylim(ax[0][1].get_ylim())\n\n plt.show()\n plt.close()\n\n #print(\"Correlation between features {} and {} is {}\".format(1, 2, 3))", "def coherency(s1, s2, lags, plot=False, window_fraction=None, noise_floor_db=None):\n\n # test for symmetry\n i = len(lags) // 2\n assert lags[i] == 0, \"Midpoint of lags must be zero for coherency!\"\n assert np.sum(-lags[:i] != lags[-i:][::-1]) == 0, \"lags must be symmetric for coherency!\"\n\n window = np.ones([len(lags)], dtype='float')\n if window_fraction is not None:\n assert window_fraction > 0 and window_fraction <= 1, \"window_fraction must be between 0 and 1\"\n # create a gaussian windowing function for the CF and ACFs\n window = np.exp(-lags**2 / (window_fraction*lags.max())**2)\n\n # do an FFT shift to the lags and the window, otherwise the FFT of the ACFs is not equal to the power\n # spectrum for some numerical reason\n window = fftshift(window)\n shift_lags = fftshift(lags)\n if len(lags) % 2 == 1:\n # shift zero from end of shift_lags to beginning\n shift_lags = np.roll(shift_lags, 1)\n window = np.roll(window, 1)\n\n cf = correlation_function(s1, s2, shift_lags)\n acf1 = correlation_function(s1, s1, shift_lags)\n acf2 = correlation_function(s2, s2, shift_lags)\n\n if np.sum(np.isnan(cf)) > 0:\n # print 'len(lags)=%d, len(s1)=%d, len(s2)=%d' % (len(lags), len(s1), len(s2))\n print('signals=',zip(s1, s2))\n print('shift_lags,cf=',zip(shift_lags, cf))\n raise Exception(\"Nans in cf\")\n\n assert np.sum(np.isnan(acf1)) == 0, \"Nans in acf1\"\n assert np.sum(np.isnan(acf2)) == 0, \"Nans in acf2\"\n\n if window_fraction is not None:\n cf *= window\n acf1 *= window\n acf2 *= window\n\n cf_fft = fft(cf)\n acf1_fft = fft(acf1)\n acf2_fft = fft(acf2)\n\n acf1_ps = np.abs(acf1_fft)\n acf2_ps = np.abs(acf2_fft)\n\n # determine which points are noise (with magnitudes too low to be useful) in the acfs\n zeros = np.zeros([len(cf_fft)], dtype='bool')\n if noise_floor_db is not None:\n db1 = 20*np.log10(acf1_ps / acf1_ps.max()) + noise_floor_db\n z1 = db1 <= 0\n\n db2 = 20*np.log10(acf2_ps / acf2_ps.max()) + noise_floor_db\n z2 = db2 <= 0\n zeros = z1 | z2\n\n assert np.abs(acf1_fft.imag).max() < 1e-8, \"acf1_fft.imag.max()=%f\" % np.abs(acf1_fft.imag).max()\n assert np.abs(acf2_fft.imag).max() < 1e-8, \"acf2_fft.imag.max()=%f\" % np.abs(acf2_fft.imag).max()\n\n cpre = cf_fft / np.sqrt(acf1_ps*acf2_ps)\n cpre[zeros] = 0\n c = ifft(cpre)\n assert np.abs(c.imag).max() < 1e-8, \"np.abs(c.imag).max()=%f\" % np.abs(c.imag).max()\n\n coh = fftshift(c.real)\n freq = fftshift(fftfreq(len(lags)))\n fi = freq >= 0\n\n if np.sum(np.abs(coh) > 1) > 0:\n print('Warning: coherency is > 1!')\n\n if plot:\n plt.figure()\n plt.subplot(2, 3, 1)\n plt.plot(s1, 'r-')\n plt.plot(s2, 'b-')\n plt.legend(['s1', 's2'])\n plt.xlabel('Time')\n plt.axis('tight')\n plt.title('Signals')\n\n plt.subplot(2, 3, 
2)\n plt.axvline(0, c='k')\n plt.axhline(0, c='k')\n l1 = plt.plot(lags, fftshift(acf1), 'r-')\n l2 = plt.plot(lags, fftshift(acf2), 'b-')\n l3 = plt.plot(lags, fftshift(cf), 'g-')\n plt.title('Correlation Functions')\n plt.xlabel('Lags')\n plt.legend(['', '', 'ACF1', 'ACF2', 'CF12'])\n plt.axis('tight')\n plt.ylim(-0.5, 1.0)\n\n plt.subplot(2, 3, 3)\n plt.axhline(0, c='k', alpha=0.75)\n plt.axvline(0, c='k', alpha=0.75)\n plt.plot(lags, coh, 'm-')\n plt.ylabel('Coherency')\n plt.xlabel('Lag')\n plt.axis('tight')\n plt.title('Coherency')\n\n plt.subplot(2, 3, 4)\n plt.plot(freq[fi], fftshift(acf1_ps)[fi], 'r')\n plt.plot(freq[fi], fftshift(acf2_ps)[fi], 'b')\n cf_ps = fftshift(np.abs(cf_fft))\n cf_pre_ps = fftshift(np.abs(cpre))\n plt.plot(freq[fi], cf_ps[fi], 'g--')\n plt.plot(freq[fi], cf_pre_ps[fi], 'm-')\n plt.legend(['ACF1', 'ACF2', 'CF12', 'CPRE'])\n plt.ylabel('Power (raw)')\n plt.xlabel('Frequency')\n plt.axis('tight')\n plt.title('Raw Power Spectra')\n\n if noise_floor_db:\n plt.subplot(2, 3, 5)\n plt.axhline(0, c='k')\n plt.plot(freq[fi], fftshift(db1)[fi], 'r')\n plt.plot(freq[fi], fftshift(db2)[fi], 'b')\n plt.legend(['ACF1', 'ACF2'])\n plt.ylabel('Power (dB)')\n plt.xlabel('Frequency')\n plt.axis('tight')\n plt.title('Log Power Spectra')\n\n plt.show()\n\n return coh", "def EstimatedAutocorr(fw, data, pnum, trialnum, marker1, marker2): \n cycle_start = HeelStrike(fw, data, pnum, trialnum, marker1, marker2)\n x = cycle_start[2] \n time = cycle_start[1]\n drop_NA = np.vstack((x, time))\n #print drop_NA.shape, x.shape, y.shape\n drop_NA = drop_NA.T\n x = drop_NA[:,0]\n #x = x[~np.isnan(x).any()]\n \n #n = len(x)\n #var = np.var(x)\n tao = np.correlate(x, x, mode='full')\n # assert np.allclose(r, np.array([(x[:n-k]*x[-(n-k):]).sum() for k in range(n)]))\n #result = r/(var*(np.arange(n, 0, -1)))\n plt.figure(4)\n plt.plot(tao)\n return tao", "def xcorr(t, x, y, zeropad=True):\n tau = t\n # sx = len(x)\n # sy = len(y)\n if zeropad is True:\n Xn = np.fft.rfft(x, n=len(x) * 2)\n Yn = np.conj(sp.fft(y, n=len(x) * 2))\n else:\n Xn = np.fft.rfft(x)\n Yn = np.conj(np.fft.rfft(y))\n\n xcor = np.real(fftpack.fftshift(sp.ifft(Xn * Yn)))\n dt = t[1] - t[0]\n\n tau = np.linspace(-len(xcor) / 2 * dt - dt / 2,\n len(xcor) / 2 * dt - dt / 2, len(xcor))\n return tau, xcor", "def _calculate_autocorrelations(self):\n\n self._autocorr_real_x = self.__calculate_autocorr(self._noise_field_real, self._n_x, self._n_y, 'x')\n self._autocorr_real_y = self.__calculate_autocorr(self._noise_field_real, self._n_y, self._n_x, 'y')\n self._autocorr_imag_x = self.__calculate_autocorr(self._noise_field_imag, self._n_x, self._n_y, 'x')\n self._autocorr_imag_y = self.__calculate_autocorr(self._noise_field_imag, self._n_y, self._n_x, 'y')", "def correlation_plots(SIC, LIC, temperature, landmask):\n\n # Generate timeseries with different lengths\n SIC_short = SIC.sel(time=slice('2002-01-01', '2019-12-31'))\n LIC_short = LIC.sel(time=slice('2002-01-01', '2019-12-31'))\n temperature_short = temperature.sel(\n time=slice('2002-01-01', '2019-12-31'))\n\n SIC_long = SIC.sel(time=slice('1979-01-01', '2019-12-31'))\n temperature_long = temperature.sel(\n time=slice('1979-01-01', '2019-12-31'))\n\n # Plot spatial correlation of variables\n corr_SIC_temp_long = xr.corr(SIC_long, temperature_long, dim='time')\n corr_SIC_temp_short = xr.corr(SIC_short, temperature_short, dim='time')\n corr_LIC_temp_short = xr.corr(LIC_short, temperature_short, dim='time')\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1, 
projection=ccrs.SouthPolarStereo())\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.contourf(corr_SIC_temp_short.x, corr_SIC_temp_short.y, corr_SIC_temp_short,\n crs=ccrs.SouthPolarStereo(), cmap='RdBu_r', norm=divnorm, levels=16)\n ax.coastlines()\n ax.set_title('Correlations between SIC and SKT')\n cbar = plt.colorbar(plot)\n cbar.set_label(r'Correlations')\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_shortterm_spatial')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.contourf(corr_SIC_temp_long.x, corr_SIC_temp_long.y, corr_SIC_temp_long,\n crs=ccrs.SouthPolarStereo(), cmap='RdBu_r', norm=divnorm, levels=16)\n ax.coastlines()\n ax.set_title('Correlations between SIC and SKT')\n cbar = plt.colorbar(plot)\n cbar.set_label(r'Correlations')\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_longterm_spatial')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.contourf(corr_LIC_temp_short.x, corr_LIC_temp_short.y, corr_LIC_temp_short,\n crs=ccrs.SouthPolarStereo(), cmap='RdBu_r', norm=divnorm, levels=16)\n ax.coastlines()\n ax.set_title('Correlations between LIC and SKT')\n cbar = plt.colorbar(plot)\n cbar.set_label(r'Correlations')\n misc.savefigures(folder='images/week8',\n filename='corr_lic_skt_shortterm_spatial')\n plt.show()\n\n # Plot temporal correlation of variables\n\n corr_SIC_temp_long = xr.corr(SIC_long, temperature_long, dim=('x', 'y'))\n corr_SIC_temp_short = xr.corr(SIC_short, temperature_short, dim=('x', 'y'))\n corr_LIC_temp_short = xr.corr(LIC_short, temperature_short, dim=('x', 'y'))\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.plot(corr_SIC_temp_long.time, corr_SIC_temp_long,)\n ax.set_title('Correlations between SIC and SKT')\n ax.set_ylim([-1, 1])\n ax.axhline(0, color='k', alpha=0.5)\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_longterm_temporal')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.plot(corr_SIC_temp_short.time, corr_SIC_temp_short,)\n ax.set_title('Correlations between SIC and SKT')\n ax.set_ylim([-1, 1])\n ax.axhline(0, color='k', alpha=0.5)\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_shortterm_temporal')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.plot(corr_LIC_temp_short.time, corr_LIC_temp_short,)\n ax.set_title('Correlations between LIC and SKT')\n ax.set_ylim([-1, 1])\n ax.axhline(0, color='k', alpha=0.5)\n misc.savefigures(folder='images/week8',\n filename='corr_lic_skt_shortterm_temporal')\n plt.show()\n\n # Generate timeseries with different lengths\n SIC_short = SIC.sel(time=slice('2002-01-01', '2019-12-31'))\n LIC_short = LIC.sel(time=slice('2002-01-01', '2019-12-31'))\n temperature_short = temperature.sel(\n time=slice('2002-01-01', '2019-12-31')).skt\n\n SIC_long = SIC.sel(time=slice('1979-01-01', '2019-12-31'))\n temperature_long = temperature.sel(\n time=slice('1979-01-01', '2019-12-31')).skt\n\n # Plot spatial correlation of variables\n SIC_long_anmomalous = SIC_long.pipe(w5.find_anomalies)\n 
SIC_short_anmomalous = SIC_short.pipe(w5.find_anomalies)\n LIC_short_anmomalous = LIC_short.pipe(w5.find_anomalies)\n temperature_short_anmomalous = temperature_short.pipe(w5.find_anomalies)\n temperature_long_anmomalous = temperature_long.pipe(w5.find_anomalies)\n\n corr_SIC_temp_long = xr.corr(\n SIC_long_anmomalous, temperature_long_anmomalous, dim='time')\n corr_SIC_temp_short = xr.corr(\n SIC_short_anmomalous, temperature_short_anmomalous, dim='time')\n corr_LIC_temp_short = xr.corr(\n LIC_short_anmomalous, temperature_short_anmomalous, dim='time')\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.contourf(corr_SIC_temp_short.x, corr_SIC_temp_short.y, corr_SIC_temp_short,\n crs=ccrs.SouthPolarStereo(), cmap='RdBu_r', norm=divnorm, levels=16)\n ax.coastlines()\n ax.set_title('Correlations between SIC and SKT')\n cbar = plt.colorbar(plot)\n cbar.set_label(r'Correlations')\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_shortterm_spatial_anmomalous')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.contourf(corr_SIC_temp_long.x, corr_SIC_temp_long.y, corr_SIC_temp_long,\n crs=ccrs.SouthPolarStereo(), cmap='RdBu_r', norm=divnorm, levels=16)\n ax.coastlines()\n ax.set_title('Correlations between SIC and SKT')\n cbar = plt.colorbar(plot)\n cbar.set_label(r'Correlations')\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_longterm_spatial_anmomalous')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1, projection=ccrs.SouthPolarStereo())\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.contourf(corr_LIC_temp_short.x, corr_LIC_temp_short.y, corr_LIC_temp_short,\n crs=ccrs.SouthPolarStereo(), cmap='RdBu_r', norm=divnorm, levels=16)\n ax.coastlines()\n ax.set_title('Correlations between LIC and SKT')\n cbar = plt.colorbar(plot)\n cbar.set_label(r'Correlations')\n misc.savefigures(folder='images/week8',\n filename='corr_lic_skt_shortterm_spatial_anmomalous')\n plt.show()\n\n # Plot temporal correlation of variables\n\n corr_SIC_temp_long = xr.corr(\n SIC_long_anmomalous, temperature_long_anmomalous, dim=('x', 'y'))\n corr_SIC_temp_short = xr.corr(\n SIC_short_anmomalous, temperature_short_anmomalous, dim=('x', 'y'))\n corr_LIC_temp_short = xr.corr(\n LIC_short_anmomalous, temperature_short_anmomalous, dim=('x', 'y'))\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.plot(corr_SIC_temp_long.time, corr_SIC_temp_long,)\n ax.set_title('Correlations between SIC and SKT')\n ax.set_ylim([-1, 1])\n ax.axhline(0, color='k', alpha=0.5)\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_longterm_temporal_anmomalous')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = ax.plot(corr_SIC_temp_short.time, corr_SIC_temp_short,)\n ax.set_title('Correlations between SIC and SKT')\n ax.set_ylim([-1, 1])\n ax.axhline(0, color='k', alpha=0.5)\n misc.savefigures(folder='images/week8',\n filename='corr_sic_skt_shortterm_temporal_anmomalous')\n plt.show()\n\n fig = plt.figure(figsize=(5, 5))\n ax = fig.add_subplot(1, 1, 1)\n divnorm = TwoSlopeNorm(vmin=-1, vcenter=0, vmax=1)\n plot = 
ax.plot(corr_LIC_temp_short.time, corr_LIC_temp_short,)\n ax.set_title('Correlations between LIC and SKT')\n ax.set_ylim([-1, 1])\n ax.axhline(0, color='k', alpha=0.5)\n misc.savefigures(folder='images/week8',\n filename='corr_lic_skt_shortterm_temporal_anmomalous')\n plt.show()", "def cross_correlation(values1, values2, lags=100):\n lags, corr, line, x = pl.xcorr( values1, values2, maxlags=lags, usevlines=False, marker=None)\n return lags, corr", "def causDspectra(uxmax, uymax, ax, ay, dso, dsl, dm, m, n, N):\n \n ymin = -m*uxmax + n\n ymax = m*uxmax + n\n if ymin < -uymax:\n xmin = (-uymax - n)/m\n ymin = m*xmin + n\n else:\n xmin = -uxmax\n if ymax > uymax:\n xmax = (uymax - n)/m\n ymax = m*xmax + n\n else:\n xmax = uxmax\n \n dlo = dso - dsl\n coeff = dsl*dlo*re*dm/(2*pi*dso)\n \n rx = np.linspace(xmin - 5., xmax + 5., 500)\n ry = np.linspace(ymin - 5., ymax + 5., 500)\n uvec = np.meshgrid(rx, ry)\n A, B, C, D, E = causticFreqHelp(uvec, ax, ay, m, n)\n upxvec = np.linspace(xmin, xmax, N)\n freqcaus = []\n for upx in upxvec:\n eq1 = A*upx**2 + B*upx + C\n eq2 = D*upx + E\n evcaus = np.array([eq1, eq2])\n roots = polishedRootsBulk(evcaus, causEqFreq, rx, ry, args = (upx, ax, ay, m, n))\n for root in roots:\n ux, uy = root\n arg = coeff*lensg(ux, uy)[0]/(ux - upx)\n # print(arg)\n if arg > 0:\n freq = c*np.sqrt(arg)/(ax*GHz)\n if freq > 0.01:\n freqcaus.append([upx, freq])\n # print(freqcaus)\n freqcaus = np.asarray(freqcaus).T\n # plt.scatter(freqcaus[0], freqcaus[1], marker = '.', color = 'black', s = 3.)\n # plt.xlim(xmin, xmax)\n # plt.ylim(0., max(freqcaus[1]) + 0.5)\n # plt.xlabel(r\"$u'_x$\", fontsize = 16)\n # plt.ylabel(r'$\\nu$ (GHz)', fontsize = 16)\n # plt.grid()\n # plt.show()\n return freqcaus", "def _compute_correlations(self, data):\n mappings = self.mappings_\n n_channels, n_times = data.shape\n\n # get the predictions\n y_pred = data.T.dot(mappings.T)\n y_pred = y_pred.reshape((n_times, len(self.picks),\n self.n_resample), order='F')\n # pool them using median\n # XXX: weird that original implementation sorts and takes middle value.\n # Isn't really the median if n_resample even\n y_pred = np.median(y_pred, axis=-1)\n # compute correlation\n num = np.sum(data.T * y_pred, axis=0)\n denom = (np.sqrt(np.sum(data.T ** 2, axis=0)) *\n np.sqrt(np.sum(y_pred ** 2, axis=0)))\n\n corr = num / denom\n return corr", "def plot_auto_corr(timeSeries1_pre,timeSeries2_pre,k,number1,number2):\n timeSeriestimeSeries = pd.DataFrame(range(k))\n for i in xrange(1,k+1):\n timeSeriestimeSeries.loc[i-1] =get_auto_corr(timeSeries1_pre,timeSeries2_pre,i)\n plt.bar(range(1,len(timeSeriestimeSeries)+1),timeSeriestimeSeries[0].values)\n plt.savefig(\"./mind_hb_inter_%d_%d.png\"%(number1,number2))\n plt.show()", "def check_correlations(self, x, y, prn1, prn2):\n x = self.create_constant_magnitude_signal(x)\n y = self.create_constant_magnitude_signal(y)\n auto_1 = self.circular_correlation(x, x)\n auto_2 = self.circular_correlation(y, y)\n print(\"Max Value in Auto-Correlation for PRN {0}: {1}\".format(prn1, max(auto_1)))\n print(\"Max Value in Auto-Correlation for PRN {0}: {1}\".format(prn2, max(auto_2)))\n auto_1_63, auto_1_65, auto_1_minus_1 = np.sum(auto_1 == 63)/1023, np.sum(auto_1 == -65)/1023, (np.sum(auto_1 == -1)/1023)\n auto_2_63, auto_2_65, auto_2_minus_1 = np.sum(auto_2 == 63)/1023, np.sum(auto_2 == -65)/1023, np.sum(auto_2 == -1)/1023\n print(\"63 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_1_63 * 100, prn1))\n print(\"-65 appears {0}% in Auto-Correlation for PRN 
{1}\".format(auto_1_65 * 100, prn1))\n print(\"-1 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_1_minus_1 * 100, prn1))\n print(\"63 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_2_63 * 100, prn2))\n print(\"-65 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_2_65 * 100, prn2))\n print(\"-1 appears {0}% in Auto-Correlation for PRN {1}\".format(auto_2_minus_1 * 100, prn2))", "def correlations_cont_cat(self):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n \n warnings.filterwarnings('ignore')\n \n # Print correlations and column names\n print('One-way ANOVA p-values - Predictors')\n for i,j,v in self.cont_cat_distance:\n print('{} and {} = {:.2}'.format(i,j,v))\n \n # Box plot of the highly correlated pairs\n for i,j,v in self.cont_cat_distance:\n fg,ax = plt.subplots(figsize=(12, 8))\n fg = self._dataset.boxplot(i, j, ax=ax, grid=False)\n plt.xticks(rotation=90)\n plt.show()", "def get_acf_tau(y, c=7.0):\n if np.nansum(y) == 0 or np.nanstd(y) < 1e-12:\n print(\"Autocorr time could not be computed. Check your input.\")\n return 0, np.zeros(len(y)), np.zeros(len(y))\n acf = y*0.\n for ii in range(y.shape[1]):\n acf[:,ii] = autocorr(y[:,ii] - np.nanmean(y[:,ii]))\n acf[:,ii] /= acf[0,ii] #np.nanmax(acf[ii,:])\n f = np.nansum(acf, axis=1) / y.shape[1]\n taus = 2.0 * np.cumsum(f) - 1.0\n window = auto_window(taus, c)\n return taus[window], f, acf", "def plotAutocorrelation(lXs, lYs, out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\"):\n\n# print len(lXs)\n# print len(lYs)\n lRhs = []\n Ym = np.mean(lYs)\n N = len(lYs)\n C0 = 0.0\n for i in lYs:\n C0 += (i-Ym)*(i-Ym)\n C0 = C0/N\n for i in range(0,N):\n Ch = 0.0\n for j in range(0,(N-i-1)):\n Ch += (lYs[j]-Ym)*(lYs[j+i]-Ym)\n Ch = Ch/N\n lRhs.append(Ch/C0)\n print(len(lRhs))\n\n fig = plt.Figure(figsize=(20,20))\n fig.suptitle(title, fontsize=32)\n ax = fig.add_subplot(111)\n ax.plot(lXs,lRhs)\n axis_font = {'size':'28'}\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)\n\n return lRhs", "def cch(self, nid0, nid1=None, trange=50, binw=None, shift=None, nshifts=10,\n rate=False, norm=False, c='k', title=True, figsize=(7.5, 6.5)):\n if nid1 == None:\n nid1 = nid0\n autocorr = nid0 == nid1\n n0 = self.alln[nid0]\n n1 = self.alln[nid1]\n calctrange = trange * 1000 # calculation trange, in us\n if shift:\n assert nshifts > 0\n shift *= 1000 # convert to us\n maxshift = nshifts * shift\n calctrange = trange + maxshift # expand calculated trange to encompass shifts\n calctrange = np.array([-calctrange, calctrange]) # convert to a +/- array, in us\n dts = util.xcorr(n0.spikes, n1.spikes, calctrange) # in us\n if autocorr:\n dts = dts[dts != 0] # remove 0s for autocorr\n if shift: # calculate dts for shift corrector\n shiftis = range(-nshifts, nshifts+1)\n shiftis.remove(0) # don't shift by 0, that's the original which we'll subtract from\n shifts = np.asarray(shiftis) * shift\n shiftdts = np.hstack([ dts+s for s in shifts ]) # in us\n print('shifts =', shifts / 1000)\n\n if not binw:\n nbins = intround(np.sqrt(len(dts))) # good heuristic\n nbins = max(20, nbins) # enforce min nbins\n nbins = min(200, nbins) # enforce max nbins\n else:\n nbins = intround(2 * trange / binw)\n\n dts = dts / 1000 # in ms, converts to float64 array\n t = np.linspace(start=-trange, stop=trange, num=nbins+1, endpoint=True) # ms\n binw = t[1] - t[0] # all should be equal 
width, ms\n n = np.histogram(dts, bins=t, density=False)[0]\n if shift: # subtract shift corrector\n shiftdts = shiftdts / 1000 # in ms, converts to float64 array\n shiftn = np.histogram(shiftdts, bins=t, density=False)[0] / (nshifts*2)\n f = pl.figure(figsize=figsize)\n a = f.add_subplot(111)\n a.bar(left=t[:-1], height=shiftn, width=binw) # omit last right edge in t\n a.set_xlim(t[0], t[-1])\n a.set_xlabel('spike interval (ms)')\n n -= shiftn\n if norm: # normalize and convert to float:\n n = n / n.max()\n elif rate: # normalize by binw and convert to float:\n n = n / binw\n f = pl.figure(figsize=figsize)\n a = f.add_subplot(111)\n a.bar(left=t[:-1], height=n, width=binw, color=c, ec=c) # omit last right edge in t\n a.set_xlim(t[0], t[-1])\n a.set_xlabel('spike interval (ms)')\n if norm:\n a.set_ylabel('coincidence rate (AU)')\n a.set_yticks([0, 1])\n elif rate:\n a.set_ylabel('coincidence rate (Hz)')\n else:\n a.set_ylabel('count')\n if title:\n a.set_title('spike times of n%d wrt n%d' % (self.n1.id, self.n0.id))\n wtitlestr = lastcmd()# + ', binw=%.1f ms' % binw\n gcfm().window.setWindowTitle(wtitlestr)\n f.tight_layout(pad=0.3) # crop figure to contents", "def plot_correlation(\n adata, \n gene_1, \n gene_2, \n bandwidth=5, \n contrib_thresh=10, \n kernel_matrix=None, \n row_key='row', \n col_key='col', \n condition=None,\n cmap='RdBu_r',\n colorbar=True,\n ticks=True,\n ax=None,\n figure=None,\n dsize=10,\n estimate='local',\n title=None,\n spot_borders=False,\n border_color='black',\n border_size=0.3,\n fig_path=None,\n fig_format='pdf',\n fig_dpi=150\n ):\n if ax is None:\n if colorbar:\n width = 7\n else:\n width = 5\n figure, ax = plt.subplots(\n 1,\n 1,\n figsize=(width,5)\n )\n\n if estimate == 'local':\n corrs, keep_inds = _plot_correlation_local(\n adata,\n gene_1,\n gene_2,\n bandwidth=bandwidth,\n contrib_thresh=contrib_thresh,\n kernel_matrix=kernel_matrix,\n row_key=row_key, \n col_key=col_key, \n condition=condition,\n cmap=cmap,\n colorbar=colorbar,\n ticks=ticks,\n ax=ax,\n figure=figure,\n dsize=dsize,\n title=title,\n spot_borders=spot_borders,\n border_color=border_color,\n border_size=border_size\n )\n extra_data = {}\n elif estimate == 'regional':\n corrs, keep_inds, ct_to_corr = _plot_correlation_regional(\n adata,\n gene_1,\n gene_2,\n condition,\n kernel_matrix=kernel_matrix,\n row_key=row_key,\n col_key=col_key, \n cmap=cmap,\n colorbar=colorbar,\n ticks=ticks,\n ax=ax,\n figure=figure,\n dsize=dsize,\n title=title,\n spot_borders=spot_borders,\n border_color=border_color,\n border_size=border_size\n )\n extra_data={'region_to_corr': ct_to_corr}\n\n if fig_path:\n plt.tight_layout()\n figure.savefig(\n fig_path,\n format=fig_format,\n dpi=fig_dpi\n )\n plt.show()\n\n return corrs, keep_inds, extra_data", "def autocorrelation(self):\n # For all features calculate kendall's tau with every other feature.\n df_bin = pd.read_csv(self.path_bin)\n features = sorted(list(df_bin.columns))\n df_correlation = pd.DataFrame({f: [np.nan] * len(features) for f in features}, index=features)\n for f1 in features:\n for f2 in features:\n x = list(df_bin[f1])\n y = list(df_bin[f2])\n corr, p = scipy.stats.kendalltau(x, y)\n df_correlation.loc[f1, f2] = \"{} (p={:.3f})\".format(corr, p)\n if f1 == f2:\n break\n df_correlation.to_csv(self.path_autocorrelation, index=True)" ]
[ "0.6614545", "0.6281476", "0.6077013", "0.6034651", "0.5968193", "0.5919165", "0.5915795", "0.57973635", "0.5676885", "0.5622812", "0.56201494", "0.55895126", "0.5481448", "0.5457837", "0.5448213", "0.54476875", "0.5436288", "0.543615", "0.53927565", "0.53915906", "0.5390924", "0.5388387", "0.53825164", "0.536725", "0.5356895", "0.535064", "0.53118324", "0.5291556", "0.5290147", "0.5284699" ]
0.6914292
0
Load the AC6 data.
def _loadData(self): self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date, dType='10Hz') return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_data(self) -> None:", "def load_data(self):", "def load_data(self):\n if self.debug:\n print(\"Loading data\")", "def test_load_from_v6(self) -> None:\n self.save_new_valid_exploration(\n 'Exp1', '[email protected]', end_state_name='End')\n collection = collection_domain.Collection.from_yaml(\n 'cid', self.YAML_CONTENT_V6)\n self.assertEqual(collection.to_yaml(), self._LATEST_YAML_CONTENT)", "def load_data(self):\n super(SubjectRAMEventsData, self).load_data()\n\n # also load electrode info\n self.elec_info = ecog_helpers.load_elec_info(self.subject, self.montage, self.bipolar)", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def load_data(self):\n raise NotImplementedError()", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexSession._loadData(self, data)", "def load(self):\r\n\r\n if self.usercharacters or self.npcs:\r\n wg.log.critical(f\"load() called on active FFGame {debug_id(guild=self.guild)}. \"\r\n \"Live data will be overwritten.\")\r\n \r\n savefile = os.path.join(wg.DATADIR, str(self.guild.id) + \".json\")\r\n if not os.path.isfile(savefile):\r\n wg.log.info(f'No save data for {debug_id(guild=self.guild)}')\r\n return\r\n \r\n with open(savefile) as inf:\r\n data = json.load(inf)\r\n\r\n for uid, chardata in data['userchars'].items():\r\n user = wg.bot.get_user(int(uid))\r\n if user is None:\r\n name = chardata.get('last_known_dname', '') \r\n cname = chardata.get(\"name\",\"INVALID\")\r\n wg.log.warning(\r\n f'User {debug_id(guild=self.guild, userid=uid, username=name)}'\r\n f'not found on discord, dropping character record \"{cname}\"')\r\n continue\r\n self.usercharacters[user.id] = Character(self, user, data=chardata)\r\n \r\n wg.log.info(f'Guild {debug_id(guild=self.guild)} loaded. 
'\r\n f'{len(self.usercharacters)} user chars and {len(self.npcs)} npcs.')", "def load(db):\n r = db.truncate_table('calibrations')\n print \"Truncated calibrations table\"\n\n # Allowed columns\n columns = ['class','asset_uid','start_date','serial','name','value','notes']\n\n # Read in calibration data\n file_mask = \"repos/asset-management/calibration/*\"\n directory_list = glob.glob(file_mask)\n for directory in directory_list:\n file_list = glob.glob(directory + '/*.csv')\n for ifile in file_list:\n with open(ifile, 'rb') as csvfile:\n print \"Loading file: \" + ifile\n reader = csv.DictReader(csvfile)\n for row in reader:\n row['class'] = directory.split('/')[-1]\n row['asset_uid'] = ifile.split('/')[-1].split('__')[0]\n row['start_date'] = ifile.split('/')[-1].split('__')[1].split('.')[0]\n data = remove_extraneous_columns(columns, row)\n save_cal(db,data)", "def load_data():\r\n global labelNames\r\n print(\"Loading Data...\")\r\n\r\n fnpath = \"rawdata\\\\cifar-10-batches-py\"\r\n fnprefix = 'data_batch_'\r\n fnlblnames = 'batches.meta'\r\n fntstbatch = 'test_batch'\r\n\r\n labelNames = unpickle(path.join(fnpath, fnlblnames))\r\n label_names = []\r\n for label in labelNames['label_names']:\r\n label_names.append(\"\".join(map(chr, label)))\r\n labelNames['label_names'] = label_names\r\n\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fntstbatch)))\r\n for n in range(1, 6):\r\n CIFAR_Data.append(unpickle(path.join(fnpath, fnprefix + str(n))))", "def load_data(self, data):\n self._load_raw_data = data", "def data_airline():\n return load_airline()", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def _loadData(self, data):\n Movie._loadData(self, data)\n PlexSession._loadData(self, data)", "def load_data(self):\n self.data = pd.read_csv(self.data_path, dtype=self.dtype)\n self.data.columns = self.data_cols\n self.data.topic = self.data.topic.str.lower()\n logging.debug(f'Data Load Complete: {self.data_path}')", "def load_training_data(s3: str = \"s3://epam-hack4med-dataset\") -> pd.DataFrame:\n # Load labels\n df_labels = pd.read_csv(f\"{s3}/CRACoV-ETYKIETY.csv\")\n df_labels[id_cols] = df_labels[id_cols].astype(int)\n df_labels = df_labels.set_index(id_cols)\n labels = df_labels[[basic_target]]\n idx = labels.index\n\n # Load hospital admission file (PRZYJECIE)\n df_admission = pd.read_csv(f\"{s3}/CRACoV-PRZYJECIE.csv\")\n binary_adm_vars = [x for x in basic_adm_vars if df_admission[x].isin([\"Tak\", \"Nie\"]).any()]\n other_adm_vars = [x for x in basic_adm_vars if x not in binary_adm_vars]\n adm = df_admission.copy()\n adm = adm[id_cols + binary_adm_vars + other_adm_vars]\n adm = adm.set_index(id_cols).reindex(idx)\n \n # Load biochem analyses\n biochem_raw = pd.read_csv(f\"{s3}/CRACoV-BIOCHEMIA.csv\", parse_dates=['DATA_WYK']).sort_values('DATA_WYK')\n biochem = (\n biochem_raw.loc[biochem_raw.KOD.isin(basic_bio_codes)]\n .pivot_table(index=['LP.', 'ID_LAB'], columns='KOD', values='WYNIK', aggfunc='first')\n .reindex(idx)\n )\n # Merge it all together\n Xy_raw = pd.concat([labels, adm, biochem], axis='columns')\n return Xy_raw", "def loadData(self,ins):\n raise AbstractError", "def _load_training_data(self):\n self._save_training_data()", "def load_cup_data(train=True):\n type = \"TR\" if train else \"TS\"\n csv_file = path_data / Path(f\"ML_CUP/ML-CUP20-{type}.csv\")\n return 
pd.read_csv(csv_file, skiprows=7, header=None, index_col=0)", "def _loadData(self, data):\n Episode._loadData(self, data)\n PlexSession._loadData(self, data)", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def _load_ludb(self, path):\n signal, info = wfdb.rdsamp(path)\n self.fs = 500\n self.lead_match = ['I', 'II', 'III', 'aVR', 'aVL', 'aVF', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']\n self.raw_data = np.transpose(np.array([signal]), (2, 0, 1))\n self.symbol = []\n self.coords = []\n for lead in ['i', 'ii', 'iii', 'avr', 'avl', 'avf', 'v1', 'v2', 'v3', 'v4', 'v5', 'v6']:\n ann_ii = wfdb.rdann(path, extension='atr_{}'.format(lead))\n symbol_1 = ann_ii.symbol\n coords_1 = ann_ii.sample\n if list(np.unique(np.array(symbol_1))) != ['(', ')', 'N', 'p', 't'] and list(np.unique(np.array(symbol_1))) != ['(', ')', 'N', 'p', 't', 'u']:\n print(\"Invalid symbols in ECG annotations.\")\n raise ValueError\n self.symbol.append(symbol_1)\n self.coords.append(coords_1)\n self.label_name = ['(', 'p', ')', '(', 'N', ')', '(', 't', ')']\n self._generate_beatlabel_from_label()", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = torch.Tensor(scc_mean_std[1])", "def load_values(self):\n # TODO: Add self.prefix and extension\n NetworkTables.loadEntries(self.file.get_filename(), prefix='/vision/' + self.name + '_')", "def load(self):\n\n logging.info(\"Loading the `BeagleSummaryCatalogue` file: \" + self.file_name)\n\n name = getPathForData(self.file_name)\n self.hdulist = fits.open(name)", "def _loadData(self, data):\n Clip._loadData(self, data)\n PlexHistory._loadData(self, data)", "def loadDiodeTemp(h6, filename):\n \n f_fine = h6.freqs\n f = h6.freqs_cal\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n \n #temps_x = np.fromfile(filename_x).reshape([13,16])\n #temps_y = np.fromfile(filename_y).reshape([13,16])\n\n if filename.endswith('.hdf') or filename.endswith('.h5') or filename.endswith('.hdf5'):\n temps, tsys = mbcal(filename)\n else:\n temps = np.fromfile(filename).reshape([26,16])\n tsys = np.zeros_like(temps)\n\n temps_x = temps[0:13]\n temps_y = temps[13:26]\n tsys_x = tsys[0:13]\n tsys_y = tsys[13:26]\n\n temps_fine_x = np.zeros([13, num_chans])\n temps_fine_y = np.zeros([13, num_chans])\n tsys_fine_x = np.zeros([13, num_chans])\n tsys_fine_y = np.zeros([13, num_chans])\n \n for i in range(0,13):\n temps_fine_x[i] = fitLine(f, temps_x[i], num_chans)\n temps_fine_y[i] = fitLine(f, temps_y[i], num_chans)\n tsys_fine_x[i] = fitLine(f, tsys_x[i], num_chans)\n tsys_fine_y[i] = fitLine(f, tsys_y[i], num_chans)\n \n return temps_x, temps_y, tsys_x, tsys_y", "def loadData(self,ins):\n #--Read subrecords\n bytesRead = 0\n while bytesRead < self.size:\n (name,size) = ins.unpackSubHeader('GLOB')\n srData = ins.read(size,'GLOB.'+name)\n bytesRead += 8+size\n if name == 'NAME': self.id = cstrip(srData)\n elif name == 'FNAM': self.type = srData\n elif name == 'FLTV': self.value = struct.unpack('f',srData)\n #--Deleted?\n elif name == 'DELE': self.isDeleted = True\n #--Bad 
record?\n else: raise Tes3UnknownSubRecord(self.inName,name,self.name)" ]
[ "0.60632014", "0.59163", "0.5692363", "0.5506359", "0.54695237", "0.53923327", "0.5391937", "0.5374051", "0.53720856", "0.53419524", "0.5327158", "0.52767026", "0.52273965", "0.5223794", "0.5223542", "0.5215916", "0.52092546", "0.5207547", "0.5191525", "0.5180599", "0.51781684", "0.516562", "0.51398635", "0.51350933", "0.51215893", "0.51080894", "0.5085327", "0.50844806", "0.5078372", "0.507811" ]
0.654677
0
This function will look for periods of consecutive indicies of detections and find the peak for each period. The peak index array for the data is self.peakInd.
def _getPeaks(self, ch, validDataIdt): startInd, endInd = locate_consecutive_numbers.locateConsecutiveNumbers( self.burstIdt) # Find consecutive numbers to get a max of first self.peakInd = np.nan*np.ones(len(startInd), dtype=int) # Loop over every microburst detection region (consecutive microburst indicies) for i, (st, et) in enumerate(zip(startInd, endInd)): if st == et: # If the same index et += 1 # Index nightmare, but works. There may be a better way offset = validDataIdt[self.burstIdt[st]] self.peakInd[i] = np.argmax( self.d[ch][validDataIdt[self.burstIdt[st:et]]]) + offset self.peakInd = self.peakInd.astype(int) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_peak(data):\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None", "def peak_finder(thresh=0):\n last = 0 # Track last input value\n ascent_dist = 0 # Distance from last trough.\n ascent_start = None # Last trough height\n\n def detect_peak(data):\n \"\"\" Returns initialized function to detect peaks on live streaming data.\n\n Args:\n data (numeric value): Input data point.\n\n Returns:\n If peak is detected return peak value, else return None\n \"\"\"\n nonlocal last, ascent_dist, ascent_start\n if data > last:\n if ascent_start is None:\n ascent_start = last\n ascent_dist += 1\n else:\n if ascent_dist:\n peak = last\n ascent_dist = 0\n if (peak - ascent_start) > thresh:\n last = data\n ascent_start = None\n return peak\n ascent_start = None\n last = data\n return None\n\n return detect_peak", "def panPeakDetect(detection, fs):\n\n min_distance = int(0.25 * fs)\n\n signal_peaks = [0]\n noise_peaks = []\n\n SPKI = 0.0\n NPKI = 0.0\n\n threshold_I1 = 0.0\n threshold_I2 = 0.0\n\n RR_missed = 0\n index = 0\n indexes = []\n\n missed_peaks = []\n peaks = []\n\n for i in range(len(detection)):\n\n if 0 < i < len(detection) - 1:\n if detection[i - 1] < detection[i] and detection[i + 1] < detection[i]:\n peak = i\n peaks.append(i)\n\n if detection[peak] > threshold_I1 and (peak - signal_peaks[-1]) > 0.25 * fs:\n\n signal_peaks.append(peak)\n indexes.append(index)\n SPKI = 0.125 * detection[signal_peaks[-1]] + 0.875 * SPKI\n if RR_missed != 0:\n if signal_peaks[-1] - signal_peaks[-2] > RR_missed:\n missed_section_peaks = peaks[indexes[-2] + 1:indexes[-1]]\n missed_section_peaks2 = []\n for missed_peak in missed_section_peaks:\n if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[\n -1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:\n missed_section_peaks2.append(missed_peak)\n\n if len(missed_section_peaks2) > 0:\n missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]\n missed_peaks.append(missed_peak)\n signal_peaks.append(signal_peaks[-1])\n signal_peaks[-2] = missed_peak\n\n else:\n noise_peaks.append(peak)\n NPKI = 0.125 * detection[noise_peaks[-1]] + 0.875 * NPKI\n\n threshold_I1 = NPKI + 0.25 * (SPKI - NPKI)\n threshold_I2 = 0.5 * threshold_I1\n\n if len(signal_peaks) > 8:\n RR = np.diff(signal_peaks[-9:])\n RR_ave = int(np.mean(RR))\n RR_missed = int(1.66 * RR_ave)\n\n index = index + 1\n # First possible peak detection\n first_possible_peak = np.argmax(detection[0:int(0.25 * fs)])\n if detection[first_possible_peak] > SPKI:\n signal_peaks[0] = first_possible_peak\n else:\n signal_peaks.pop(0)\n signal_peaks = np.array(signal_peaks)\n return signal_peaks", "def _peakdet(ts, threshold_ratio=.1):\n THRESH = threshold_ratio * (max(ts)-min(ts))\n maxima = []\n minima = []\n extrema = []\n looking_for_maximum = True\n last = 0\n for i in range(1, len(ts)):\n if looking_for_maximum:\n if ts[i] > ts[last]:\n last = i\n elif ts[i] + THRESH < ts[last]:\n maxima.append(last)\n extrema.append(last)\n looking_for_maximum = False\n else: #looking for minimum\n if ts[i] < ts[last]:\n last = i\n elif ts[i] - THRESH > ts[last]:\n minima.append(last)\n extrema.append(last)\n looking_for_maximum = True\n \n return extrema", "def peak_indices(self, **kwargs):\n 
kwarg_defaults = {\n 'width': 5, # ensure small spikes are ignored\n }\n kwarg_defaults.update(kwargs)\n return signal.find_peaks(self.ys, **kwarg_defaults)", "def measure_peak(sig, use_inflection=True, return_allinfo=False):\n sig = np.array(sig)\n cr = locate_peak(sig)\n cr_crosszero = np.zeros_like(cr)\n cr_inflection = np.zeros_like(cr)\n\n # cross zero points\n cr_cr1 = -int_sign(sig[1:] * sig[:-1])\n cr_cr2 = -int_sign(sig[:-1] * sig[1:])\n cr_cr1[cr_cr1<0] = 0\n cr_cr2[cr_cr2<0] = 0\n cr_crosszero[1:] = cr_cr1\n cr_crosszero[:-1] += cr_cr2\n cr_crosszero = int_sign(cr_crosszero * sig) * 4\n\n # inflection points\n d2 = second_derivate(sig)\n d2p = locate_peak(d2)\n d2p[np.where( np.abs(d2p) != 1 )] = 0\n d2p[np.where( ((d2p==1) & (sig<0)) | ((d2p==-1) & (sig>0)) )] = 0\n cr_inflection[np.where(d2p==-1)] = 8\n cr_inflection[np.where(d2p==1)] = -8\n \n if use_inflection:\n cr_combine = cr + cr_inflection + cr_crosszero \n else:\n cr_combine = cr + cr_crosszero\n\n oned = False\n if len(np.shape(sig)) == 1:\n oned = True\n sig = sig[:, np.newaxis]\n \n peaks_list = []\n for i in range(np.shape(sig)[1]):\n pvs = np.where(np.abs(cr[:,i]) == 1)[0]\n lims = np.where(np.abs(cr_combine[:,i]) >= 2)[0]\n if len(pvs) == 0 :\n peaks_list.append([])\n continue\n if np.shape(lims)[0] == 0:\n lower_pos = pvs\n upper_pos = pvs\n else:\n lower_arr = (pvs > lims[:, np.newaxis])\n upper_arr = (pvs < lims[:, np.newaxis])\n lower_arr_r = np.flipud(lower_arr)\n upper_pos_i = np.argmax(upper_arr, axis=0)\n upper_pos = lims[(upper_pos_i, )]\n w_upper_none = np.where(upper_arr[-1,:] == False)\n upper_pos[w_upper_none] = pvs[w_upper_none]\n lower_pos_r_i = np.argmax(lower_arr_r, axis=0)\n lower_pos_i = len(lims) - 1 - lower_pos_r_i\n lower_pos = lims[(lower_pos_i, )]\n w_lower_none = np.where(lower_arr[0, :] == False)\n lower_pos[w_lower_none] = 0\n\n peaks = []\n for center, lower, upper in zip(pvs, lower_pos, upper_pos):\n depth = sig[center, i]\n sig_range = sig[lower:upper+1, i]\n sig_range[np.where(int_sign(sig_range) != int_sign(depth))] = 0.0\n volume = np.sum(sig_range)\n peaks.append(Peak(center=center, lower=lower, upper=upper, depth=depth, volume=volume))\n peaks_list.append(peaks)\n if oned:\n peaks_list = peaks_list[0]\n \n if return_allinfo:\n return peaks_list, cr, cr_crosszero, cr_inflection \n else:\n return peaks_list", "def peakdetect_zero_crossing(y_axis, x_axis = None, window = 11):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n \n zero_indices = zero_crossings(y_axis, window = window)\n period_lengths = np.diff(zero_indices)\n \n bins_y = [y_axis[index:index + diff] for index, diff in \n zip(zero_indices, period_lengths)]\n bins_x = [x_axis[index:index + diff] for index, diff in \n zip(zero_indices, period_lengths)]\n \n even_bins_y = bins_y[::2]\n odd_bins_y = bins_y[1::2]\n even_bins_x = bins_x[::2]\n odd_bins_x = bins_x[1::2]\n hi_peaks_x = []\n lo_peaks_x = []\n \n #check if even bin contains maxima\n if abs(even_bins_y[0].max()) > abs(even_bins_y[0].min()):\n hi_peaks = [bin.max() for bin in even_bins_y]\n lo_peaks = [bin.min() for bin in odd_bins_y]\n # get x values for peak\n for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, hi_peaks):\n hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, lo_peaks):\n lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n else:\n hi_peaks = [bin.max() for bin in odd_bins_y]\n lo_peaks = [bin.min() for bin in even_bins_y]\n # get x values for peak\n 
for bin_x, bin_y, peak in zip(odd_bins_x, odd_bins_y, hi_peaks):\n hi_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n for bin_x, bin_y, peak in zip(even_bins_x, even_bins_y, lo_peaks):\n lo_peaks_x.append(bin_x[np.where(bin_y==peak)[0][0]])\n \n max_peaks = [[x, y] for x,y in zip(hi_peaks_x, hi_peaks)]\n min_peaks = [[x, y] for x,y in zip(lo_peaks_x, lo_peaks)]\n \n return [max_peaks, min_peaks]", "def find_peaks(self, t_measure):\n self._check_time(t_measure)\n #widths = np.arange(2,7) # range of widths to check by find_peaks_cwt\n #peak_nodes = find_peaks_cwt(self.get_velocities(t_measure), widths, min_snr=2.0,noise_perc=30.0)\n peak_beads = peakutils.peak.indexes(self.get_velocities(t_measure), thres=0.75, min_dist=7)\n return peak_beads", "def splitDetectorPeakInfo(self):\r\n\t\tsplit_raw_min = np.amin(self.splitData)\r\n\t\tsplit_min = split_raw_min - self.splitBaseline\r\n\t\t\t\t\r\n\t\tsplit_raw_max = np.amax(self.splitData)\r\n\t\tsplit_max = split_raw_max - self.splitBaseline\r\n\t\r\n\t\tself.splitMax = split_max\r\n\t\tself.splitMin = split_min", "def pull_peak_times(data):\n bin_centers = np.arange(0.,1.501,0.002)\n data = np.asarray(data)\n maxs = np.argmax(data, axis=1)\n return bin_centers[maxs]", "def detect_peaks(x_data, y_data, imx, sigmamv=.25, fig=400, period=1e-3, model='one_ele'):\n thr = .4\n thr2 = .6\n\n # chop off part of the data, because T1 is relatively long\n mvedge = .1 * (np.max(x_data) - np.min(x_data))\n if model == 'two_ele':\n mvthr = (np.max(x_data) - np.min(x_data)) * .25e-3 / period # T1 \\approx .1 ms [Ref]\n horz_vals = x_data[(x_data > (np.min(x_data) + np.maximum(mvthr, mvedge)))\n & (x_data < (np.max(x_data) - mvedge))]\n z_data = imx[:, (x_data > (np.min(x_data) + np.maximum(mvthr, mvedge))) & (x_data < (np.max(x_data) - mvedge))]\n elif model == 'one_ele':\n horz_vals = x_data[(x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]\n z_data = imx[:, (x_data > (np.min(x_data) + mvedge)) & (x_data < (np.max(x_data) - mvedge))]\n else:\n raise Exception('no such model')\n\n scalefac = (np.max(horz_vals) - np.min(horz_vals)) / (z_data.shape[1] - 1) # mV/pixel\n\n # smooth input image\n kern = scipy.signal.gaussian(71, std=sigmamv / scalefac)\n kern = kern / kern.sum()\n imx2 = scipy.ndimage.convolve(z_data, kern.reshape((1, -1)), mode='nearest')\n\n # get maximum value for each row\n mm1 = np.argmax(imx2, axis=1)\n val = imx2[np.arange(0, imx2.shape[0]), mm1]\n\n idx1 = np.where(np.abs(val) > thr)[0] # only select indices above scaled threshold\n\n xx1 = np.vstack((horz_vals[mm1[idx1]], y_data[idx1])) # position of selected points\n\n # get minimum value for each row\n mm2 = np.argmin(imx2, axis=1)\n val = imx2[np.arange(0, imx2.shape[0]), mm2]\n # remove points below threshold\n idx2 = np.where(np.abs(val) > thr)[0]\n\n xx2 = np.vstack((horz_vals[mm2[idx2]], y_data[idx2]))\n\n # join the two sets\n detected_peaks = np.hstack((xx1, xx2))\n\n # determine weights for the points\n qq = np.intersect1d(idx1, idx2)\n q1 = np.searchsorted(idx1, qq)\n q2 = np.searchsorted(idx2, qq)\n w1 = .5 * np.ones(len(idx1))\n w1[q1] = 1\n w2 = .5 * np.ones(len(idx2))\n w2[q2] = 1\n\n wfac = .1\n w1[np.abs(val[idx1]) < thr2] = wfac\n w2[np.abs(val[idx2]) < thr2] = wfac\n weights = np.hstack((w1, w2))\n\n if fig is not None:\n plt.figure(fig)\n plt.clf()\n plt.pcolormesh(x_data, y_data, imx, shading='auto')\n plt.plot(horz_vals[mm1[idx1]], y_data[idx1], '.b', markersize=14, label='idx1')\n plt.plot(horz_vals[mm2[idx2]], y_data[idx2], '.r', 
markersize=14, label='idx2')\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n\n return detected_peaks, {'weights': weights, 'detected_peaks': detected_peaks}", "def get_following_peak(ind_spike, sig, sign):\n sig1 = sig[:-2]\n sig2 = sig[1:-1]\n sig3 = sig[2:]\n if sign == '+':\n all_peaks, = np.where(numexpr.evaluate( '(sig1<=sig2) & ( sig2>sig3)'))\n elif sign == '-':\n all_peaks, = np.where(numexpr.evaluate( '(sig1>=sig2) & ( sig2<sig3)'))\n \n ind_peaks = -np.ones(ind_spike.size, dtype = 'i')\n for i, ind in enumerate(ind_spike):\n possible = all_peaks[all_peaks>ind]\n if possible.size>0:\n ind_peaks[i] = possible[0]\n \n return ind_peaks", "def test_peak_detection(self):\n from sms.models import utilFunctions # pylint: disable=C0415\n\n for i, (mx, _) in enumerate(self.sm.dft_frames(self.x)):\n ploc = sample_dsp.peak_detect(mx, self.sm.t)\n ploc_sms = utilFunctions.peakDetection(mx, self.sm.t)\n for j, (p, p_s) in enumerate(itertools.zip_longest(ploc, ploc_sms)):\n with self.subTest(frame=i, peak_n=j):\n self.assertEqual(p, p_s)", "def findPeakAndValley(np):\n peakValleyArray = []\n for i in range (1, len(np) - 1):\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] > 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] < 1):\n peakValleyArray.append(i)\n if (np[i][STOCK_VALUE_INDEX] / np[i - 1][STOCK_VALUE_INDEX] < 1 and np[i + 1][STOCK_VALUE_INDEX] / np[i][STOCK_VALUE_INDEX] > 1):\n peakValleyArray.append(i)\n return peakValleyArray", "def find_peak(mhw, mhw_relSeas, ev, tt_start):\n tt_peak = np.argmax(mhw_relSeas)\n mhw[\"time_peak\"].append(mhw[\"time_start\"][ev] + tt_peak)\n mhw[\"date_peak\"].append(date.fromordinal(mhw[\"time_start\"][ev] + tt_peak))\n mhw[\"index_peak\"].append(tt_start + tt_peak)\n\n return mhw, tt_peak", "def peakdet2d(image):\n # define an 8-connected neighborhood\n neighborhood = generate_binary_structure(2,2)\n\n #apply the local maximum filter; all pixel of maximal value \n #in their neighborhood are set to 1\n local_max = maximum_filter(image, footprint=neighborhood)==image\n #local_max is a mask that contains the peaks we are \n #looking for, but also the background.\n #In order to isolate the peaks we must remove the background from the mask.\n\n #we create the mask of the background\n background = (image==0)\n\n #a little technicality: we must erode the background in order to \n #successfully subtract it form local_max, otherwise a line will \n #appear along the background border (artifact of the local maximum filter)\n eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)\n\n #we obtain the final mask, containing only peaks, \n #by removing the background from the local_max mask\n detected_peaks = local_max - eroded_background\n\n return(detected_peaks)", "def calculate_peak_prominence(data, index):\n current_peak = data[index]\n\n # ignore values at either end of the dataset or values that are not local maxima\n if (\n index == 0\n or index == len(data) - 1\n or data[index - 1] > current_peak\n or data[index + 1] > current_peak\n or (data[index - 1] == current_peak and data[index + 1] == current_peak)\n ):\n return 0\n\n # by definition, the prominence of the highest value in a dataset is equal to the value itself\n if current_peak == max(data):\n return np.log(current_peak)\n\n # find index of nearest maxima which is higher than the current peak\n higher_peaks_inds = [i for i, x in enumerate(data) if x > current_peak]\n\n right_peaks = [x for x in higher_peaks_inds if x > index]\n 
if right_peaks:\n closest_right_ind = min(right_peaks)\n else:\n closest_right_ind = np.inf\n\n left_peaks = [x for x in higher_peaks_inds if x < index]\n if left_peaks:\n closest_left_ind = max(left_peaks)\n else:\n closest_left_ind = -np.inf\n\n right_distance = closest_right_ind - index\n left_distance = index - closest_left_ind\n\n if (right_distance) > (left_distance):\n closest = closest_left_ind\n else:\n closest = closest_right_ind\n\n # find the value at the lowest point between the nearest higher peak (the key col)\n lo = min(closest, index)\n hi = max(closest, index)\n between_slice = data[lo:hi]\n key_col = min(between_slice)\n\n prominence = np.log(data[index] - key_col + 1)\n\n return prominence", "def peakdetect_fft(y_axis, x_axis, pad_len = 5):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n zero_indices = zero_crossings(y_axis, window = 11)\n #select a n amount of periods\n last_indice = - 1 - (1 - len(zero_indices) & 1)\n # Calculate the fft between the first and last zero crossing\n # this method could be ignored if the begining and the end of the signal\n # are discardable as any errors induced from not using whole periods\n # should mainly manifest in the beginning and the end of the signal, but\n # not in the rest of the signal\n fft_data = fft(y_axis[zero_indices[0]:zero_indices[last_indice]])\n padd = lambda x, c: x[:len(x) // 2] + [0] * c + x[len(x) // 2:]\n n = lambda x: int(log(x)/log(2)) + 1\n # padds to 2**n amount of samples\n fft_padded = padd(list(fft_data), 2 ** \n n(len(fft_data) * pad_len) - len(fft_data))\n \n # There is amplitude decrease directly proportional to the sample increase\n sf = len(fft_padded) / float(len(fft_data))\n # There might be a leakage giving the result an imaginary component\n # Return only the real component\n y_axis_ifft = ifft(fft_padded).real * sf #(pad_len + 1)\n x_axis_ifft = np.linspace(\n x_axis[zero_indices[0]], x_axis[zero_indices[last_indice]],\n len(y_axis_ifft))\n # get the peaks to the interpolated waveform\n max_peaks, min_peaks = peakdetect(y_axis_ifft, x_axis_ifft, 500,\n delta = abs(np.diff(y_axis).max() * 2))\n #max_peaks, min_peaks = peakdetect_zero_crossing(y_axis_ifft, x_axis_ifft)\n \n # store one 20th of a period as waveform data\n data_len = int(np.diff(zero_indices).mean()) / 10\n data_len += 1 - data_len & 1\n \n \n fitted_wave = []\n for peaks in [max_peaks, min_peaks]:\n peak_fit_tmp = []\n index = 0\n for peak in peaks:\n index = np.where(x_axis_ifft[index:]==peak[0])[0][0] + index\n x_fit_lim = x_axis_ifft[index - data_len // 2:\n index + data_len // 2 + 1]\n y_fit_lim = y_axis_ifft[index - data_len // 2:\n index + data_len // 2 + 1]\n \n peak_fit_tmp.append([x_fit_lim, y_fit_lim])\n fitted_wave.append(peak_fit_tmp)\n \n #pylab.plot(range(len(fft_data)), fft_data)\n #pylab.show()\n \n #pylab.plot(x_axis, y_axis)\n # pylab.hold(True)\n # pylab.plot(x_axis_ifft, y_axis_ifft)\n #for max_p in max_peaks:\n # pylab.plot(max_p[0], max_p[1], 'xr')\n #pylab.show()\n return [max_peaks, min_peaks]", "def get_following_peak_multi_channel(ind_spike, sigs, sign, method = 'biggest_amplitude'):\n \n multi_peaks =[ ]\n amplitudes = [ ]\n for c, sig in enumerate(sigs):\n multi_peaks.append(get_following_peak(ind_spike, sig, sign))\n multi_peaks = np.array(multi_peaks)\n \n ind_peaks = -np.ones(ind_spike.size, dtype = 'i')\n for i, ind in enumerate(ind_spike):\n if method == 'closer':\n ind_peaks = multi_peak[:,i].min()\n elif method == 'biggest_amplitude':\n if np.all(multi_peaks[:,i] == 
-1):\n ind_peaks[i] = -1\n continue\n \n peak_values = [ ]\n for c, sig in enumerate(sigs):\n if multi_peaks[c,i] != -1:\n peak_values.append(sig[multi_peaks[c,i]])\n else:\n peak_values.append(0)\n \n if sign == '+':\n biggest = np.argmax(peak_values)\n elif sign == '-':\n biggest = np.argmin(peak_values)\n ind_peaks[i] = multi_peaks[biggest,i]\n \n \n return ind_peaks+1", "def peaks(n, binCenters, method=\"JI\", window=100, peakAmpThresh=0.00005, valleyThresh=0.00003):\n data = zip(binCenters, n)\n binCenters = np.array(binCenters)\n firstCenter = (min(binCenters)+1.5*window)/window*window\n lastCenter = (max(binCenters)-window)/window*window\n if firstCenter < -1200: firstCenter = -1200\n if lastCenter > 3600: lastCenter = 3600\n\n\n if method == \"slope\" or method == \"hybrid\":\n peaks = {}\n peakInfo = peaksBySlope(n, binCenters, lookahead=20, delta=valleyThresh, averageHist=True)\n\n #find correspondences between peaks and valleys, and set valleys are left and right Indices\n #see the other method(s) for clarity!\n\n peakData = peakInfo[\"peaks\"]\n valleyData = peakInfo[\"valleys\"]\n\n #print len(peakData[0]), len(peakData[1])\n for i in xrange(len(peakData[0])):\n nearestIndex = findNearestIndex(valleyData[0], peakData[0][i])\n if valleyData[0][nearestIndex] < peakData[0][i]:\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][nearestIndex+1:]) == 0):\n rightIndex = findNearestIndex(binCenters, peakData[0][i]+window/2.0)\n else:\n offset = nearestIndex+1\n nearestIndex = offset+findNearestIndex(valleyData[0][offset:], peakData[0][i])\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n else:\n rightIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n if (len(valleyData[0][:nearestIndex]) == 0):\n leftIndex = findNearestIndex(binCenters, peakData[0][i]-window/2.0)\n else:\n nearestIndex = findNearestIndex(valleyData[0][:nearestIndex], peakData[0][i])\n leftIndex = findNearestIndex(binCenters, valleyData[0][nearestIndex])\n\n pos = findNearestIndex(binCenters, peakData[0][i])\n #print binCenters[pos], peakData[1][i], binCenters[leftIndex], binCenters[rightIndex]\n peaks[pos] = [peakData[1][i], leftIndex, rightIndex]\n\n if method == \"hybrid\": slopePeaks = peaks\n \n if method == \"JI\" or method == \"ET\" or method == \"hybrid\":\n peaks = {}\n #Obtain max value per interval\n if method == \"JI\" or method == \"hybrid\":\n firstCenter = nearestJI(firstCenter)\n lastCenter = nearestJI(lastCenter)\n\n interval = firstCenter\n prevInterval = firstCenter-window\n #NOTE: All *intervals are in cents. *indices are of binCenters/n\n while interval < lastCenter:\n if method == \"ET\":\n leftIndex = findNearestIndex(binCenters, interval-window/2)\n rightIndex = findNearestIndex(binCenters, interval+window/2)\n interval += window\n elif method == \"JI\" or method == \"hybrid\":\n leftIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n prevInterval = interval\n interval = nextJI(interval)\n rightIndex = findNearestIndex(binCenters, (interval+prevInterval)/2.0)\n peakPos = np.argmax(n[leftIndex:rightIndex])\n peakAmp = n[leftIndex+peakPos]\n peaks[leftIndex+peakPos] = [peakAmp, leftIndex, rightIndex]\n \n #print binCenters[leftIndex], binCenters[rightIndex], binCenters[leftIndex+peakPos], peakAmp\n #NOTE: All the indices (left/rightIndex, peakPos) are to be changed to represent respective cent \n #value corresponding to the bin. 
Right now, they are indices of respective binCenters in the array.\n \n if method == \"hybrid\":\n #Mix peaks from slope method and JI method.\n p1 = slopePeaks.keys()\n p2 = peaks.keys()\n allPeaks = {} #overwriting peaks dict\n for p in p1:\n nearIndex = findNearestIndex(p2, p)\n if abs(p-p2[nearIndex]) < window/2.0: p2.pop(nearIndex)\n \n for p in p1: allPeaks[p] = slopePeaks[p]\n for p in p2: allPeaks[p] = peaks[p]\n peaks = allPeaks\n\n #Filter the peaks and retain eligible peaks, also get their valley points.\n\n # ----> peakAmpThresh <---- : remove the peaks which are below that\n\n for pos in peaks.keys():\n #pos is an index in binCenters/n. DOES NOT refer to a cent value.\n if peaks[pos][0] < peakAmpThresh:\n #print \"peakAmp: \", binCenters[pos]\n peaks.pop(pos)\n\n #Check if either left or right valley is deeper than ----> valleyThresh <----.\n valleys = {}\n for pos in peaks.keys():\n leftLobe = n[peaks[pos][1]:pos]\n rightLobe = n[pos:peaks[pos][2]]\n #Sanity check: Is it a genuine peak? Size of distributions on either side of the peak should be comparable.\n if len(leftLobe) == 0 or len(rightLobe) == 0:\n continue\n if 1.0*len(leftLobe)/len(rightLobe) < 0.15 or 1.0*len(leftLobe)/len(rightLobe) > 6.67:\n #print \"size: \", binCenters[pos]\n #peaks.pop(pos)\n continue\n\n leftValleyPos = np.argmin(leftLobe)\n rightValleyPos = np.argmin(rightLobe)\n if (abs(leftLobe[leftValleyPos]-n[pos]) < valleyThresh and abs(rightLobe[rightValleyPos]-n[pos]) < valleyThresh):\n #print \"valley: \", binCenters[pos]\n peaks.pop(pos)\n else:\n valleys[peaks[pos][1]+leftValleyPos] = leftLobe[leftValleyPos]\n valleys[pos+rightValleyPos] = rightLobe[rightValleyPos]\n \n if len(peaks) > 0:\n temp1 = np.array(peaks.values())\n temp1 = temp1[:, 0]\n\n return {'peaks':[binCenters[peaks.keys()], temp1], 'valleys':[binCenters[valleys.keys()], valleys.values()]}\n else:\n return {'peaks':[[], []], 'valleys':[[], []]}", "def peak_index(A):\n peak = 0\n for idx in range(1, len(A)-1):\n if A[idx] > A[idx-1]:\n peak = idx\n return peak", "def get_steps_between_peaks(self):\n max_x, max_y = self.get_local_maxes()\n full_steps = np.ediff1d(max_x)\n # _full_mean, _full_std = np.mean(full_steps), np.std(full_steps)\n _full_count = len(full_steps)\n\n unique_steps_between_peaks, unique_steps_counts = np.unique(full_steps, return_counts=True)\n\n _filter = np.logical_and(full_steps < unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 1.7,\n full_steps > unique_steps_between_peaks[np.argmax(unique_steps_counts)] * 0.3)\n # 1.7 chosen as filter, as there seems to be another peak ~2* (probably due to single missed peaks)\n # 1.7 avoids the start of the gaussian at 2*\n\n if not _filter.all():\n steps = full_steps[_filter]\n # print(unique_steps_between_peaks[np.argmax(unique_steps_counts)])\n _filtered_count = len(steps)\n _counts = (_full_count, _filtered_count, _full_count - _filtered_count)\n # print('Original Count: %s, Filtered Count: %s, Excluded Count: %s' % _counts)\n # print('Filtered:', full_steps[np.invert(_filter)])\n unique_steps_between_peaks, unique_steps_counts = np.unique(steps, return_counts=True)\n else:\n steps = full_steps\n\n return steps, unique_steps_between_peaks, unique_steps_counts", "def narrowIncandPeakInfo(self):\r\n\t\tself.narrowIncandBaseline = (np.mean(self.narrowBandIncandData[0:10]))\r\n\t\t\t\t\r\n\t\traw_narrowIncand_max = np.amax(self.narrowBandIncandData)\r\n\t\tnarrowIncand_max = raw_narrowIncand_max - self.narrowIncandBaseline\t\t\r\n\t\tnarrowIncand_max_index = 
np.argmax(self.narrowBandIncandData)\r\n\t\t\r\n\t\tself.narrowIncandMax =narrowIncand_max\r\n\t\tself.narrowIncandMaxPos = narrowIncand_max_index", "def peakdetect_zero_crossing(y_axis, x_axis=None, window=49):\n\n if x_axis is None:\n x_axis = range(len(y_axis))\n\n length = len(y_axis)\n# if length != len(x_axis):\n# raise ValueError, 'Input vectors y_axis and x_axis must have same length'\n\n # needs to be a numpy array\n y_axis = np.asarray(y_axis)\n\n zero_indices = zero_crossings(y_axis, window=window)\n period_lengths = np.diff(zero_indices)\n\n bins = [y_axis[indice:indice + diff] for indice, diff in\n zip(zero_indices, period_lengths)]\n\n even_bins = bins[::2]\n odd_bins = bins[1::2]\n # check if even bin contains maxima\n if even_bins[0].max() > abs(even_bins[0].min()):\n hi_peaks = [bin.max() for bin in even_bins]\n lo_peaks = [bin.min() for bin in odd_bins]\n else:\n hi_peaks = [bin.max() for bin in odd_bins]\n lo_peaks = [bin.min() for bin in even_bins]\n\n\n hi_peaks_x = [x_axis[np.where(y_axis == peak)[0]] for peak in hi_peaks]\n lo_peaks_x = [x_axis[np.where(y_axis == peak)[0]] for peak in lo_peaks]\n\n maxtab = [(x, y) for x, y in zip(hi_peaks, hi_peaks_x)]\n mintab = [(x, y) for x, y in zip(lo_peaks, lo_peaks_x)]\n\n return maxtab, mintab", "def peak(self):\n pass", "def get_peak_ind(discrete_array):\n\n indexes = [j for j in range(discrete_array.size) if discrete_array[j-1]==0 and\\\n discrete_array[j]==1]\n\n return indexes", "def peakdetect_parabole(y_axis, x_axis, points = 9):\n # check input data\n x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)\n # make the points argument odd\n points += 1 - points % 2\n #points += 1 - int(points) & 1 slower when int conversion needed\n \n # get raw peaks\n max_raw, min_raw = peakdetect_zero_crossing(y_axis)\n \n # define output variable\n max_peaks = []\n min_peaks = []\n \n max_ = _peakdetect_parabole_fitter(max_raw, x_axis, y_axis, points)\n min_ = _peakdetect_parabole_fitter(min_raw, x_axis, y_axis, points)\n \n max_peaks = map(lambda x: [x[0], x[1]], max_)\n max_fitted = map(lambda x: x[-1], max_)\n min_peaks = map(lambda x: [x[0], x[1]], min_)\n min_fitted = map(lambda x: x[-1], min_)\n \n \n #pylab.plot(x_axis, y_axis)\n #pylab.hold(True)\n #for max_p, max_f in zip(max_peaks, max_fitted):\n # pylab.plot(max_p[0], max_p[1], 'x')\n # pylab.plot(max_f[0], max_f[1], 'o', markersize = 2)\n #for min_p, min_f in zip(min_peaks, min_fitted):\n # pylab.plot(min_p[0], min_p[1], 'x')\n # pylab.plot(min_f[0], min_f[1], 'o', markersize = 2)\n #pylab.show()\n \n return [max_peaks, min_peaks]", "def peakFinder(self, fit_peaks_image):\n # Calculate background variance.\n #\n # Note the assumption here that we are working in units of photo-electrons\n # so Poisson statistics applies, variance = mean.\n #\n bg_var = self.background + fit_peaks_image\n \n # Add camera variance if set.\n if self.camera_variance is not None:\n bg_var += self.camera_variance\n\n # Calculate weighted variance if the image is being smoothed.\n if self.fg_vfilter is not None:\n bg_var = self.fg_vfilter.convolve(bg_var)\n\n if self.check_mode:\n with tifffile.TiffWriter(\"variances.tif\") as tf:\n tf.save(bg_var.astype(numpy.float32))\n \n # Remove problematic values.\n #\n mask = (bg_var <= 0.1)\n if (numpy.sum(mask) > 0):\n if self.check_mode:\n print(\"Warning! 
small and/or negative values detected in background variance!\")\n bg_var[mask] = 0.1\n \n # Convert to standard deviation.\n bg_std = numpy.sqrt(bg_var)\n\n # Calculate foreground.\n foreground = self.image - self.background - fit_peaks_image\n\n # Calculate smoothed image if we have a foreground filter.\n if self.fg_mfilter is not None:\n foreground = self.fg_mfilter.convolve(foreground)\n\n if self.check_mode:\n with tifffile.TiffWriter(\"foreground.tif\") as tf:\n tf.save(foreground.astype(numpy.float32))\n \n # Calculate foreground in units of signal to noise.\n foreground = foreground/bg_std\n \n if self.check_mode:\n with tifffile.TiffWriter(\"fg_bg_ratio.tif\") as tf:\n tf.save(foreground.astype(numpy.float32))\n \n # Mask the image so that peaks are only found in the AOI.\n masked_image = foreground * self.peak_mask\n\n # Identify local maxima in the masked image.\n [x, y, z] = self.mfinder.findMaxima([masked_image])\n return {\"x\" : x, \"y\" : y, \"z\" : z, \"sigma\" : numpy.ones(x.size)*self.sigma}", "def simple_peak_find(s, init_slope=500, start_slope=500, end_slope=200,\n min_peak_height=50, max_peak_width=1.5):\n point_gap = 10\n\n def slid_win(itr, size=2):\n \"\"\"Returns a sliding window of size 'size' along itr.\"\"\"\n itr, buf = iter(itr), []\n for _ in range(size):\n try:\n buf += [next(itr)]\n except StopIteration:\n return\n for new_item in itr:\n yield buf\n buf = buf[1:] + [new_item]\n yield buf\n\n # TODO: check these smoothing defaults\n y, t = s.values, s.index.astype(float)\n smooth_y = movingaverage(y, 9)\n dxdt = np.gradient(smooth_y) / np.gradient(t)\n # dxdt = -savitzkygolay(ts, 5, 3, deriv=1).y / np.gradient(t)\n\n init_slopes = np.arange(len(dxdt))[dxdt > init_slope]\n if len(init_slopes) == 0:\n return []\n # get the first points of any \"runs\" as a peak start\n # runs can have a gap of up to 10 points in them\n peak_sts = [init_slopes[0]]\n peak_sts += [j for i, j in slid_win(init_slopes, 2) if j - i > 10]\n peak_sts.sort()\n\n en_slopes = np.arange(len(dxdt))[dxdt < -end_slope]\n if len(en_slopes) == 0:\n return []\n # filter out any lone points farther than 10 away from their neighbors\n en_slopes = [en_slopes[0]]\n en_slopes += [i[1] for i in slid_win(en_slopes, 3)\n if i[1] - i[0] < point_gap or i[2] - i[1] < point_gap]\n en_slopes += [en_slopes[-1]]\n # get the last points of any \"runs\" as a peak end\n peak_ens = [j for i, j in slid_win(en_slopes[::-1], 2)\n if i - j > point_gap] + [en_slopes[-1]]\n peak_ens.sort()\n # avals = np.arange(len(t))[np.abs(t - 0.675) < 0.25]\n # print([i for i in en_slopes if i in avals])\n # print([(t[i], i) for i in peak_ens if i in avals])\n\n peak_list = []\n pk2 = 0\n for pk in peak_sts:\n # don't allow overlapping peaks\n if pk < pk2:\n continue\n\n # track backwards to find the true start\n while dxdt[pk] > start_slope and pk > 0:\n pk -= 1\n\n # now find where the peak ends\n dist_to_end = np.array(peak_ens) - pk\n pos_end = pk + dist_to_end[dist_to_end > 0]\n for pk2 in pos_end:\n if (y[pk2] - y[pk]) / (t[pk2] - t[pk]) > start_slope:\n # if the baseline beneath the peak is too large, let's\n # keep going to the next dip\n peak_list.append({'t0': t[pk], 't1': t[pk2]})\n pk = pk2\n elif t[pk2] - t[pk] > max_peak_width:\n # make sure that peak is short enough\n pk2 = pk + np.abs(t[pk:] - t[pk] - max_peak_width).argmin()\n break\n else:\n break\n else:\n # if no end point is found, the end point\n # is the end of the timeseries\n pk2 = len(t) - 1\n\n if pk == pk2:\n continue\n pk_hgt = max(y[pk:pk2]) - 
min(y[pk:pk2])\n if pk_hgt < min_peak_height:\n continue\n peak_list.append({'t0': t[pk], 't1': t[pk2]})\n return peak_list", "def first_peak_detect(beam, start_point):\n logging.debug('running first_peak_detect function')\n for i in range(start_point, len(beam)):\n logging.debug('current value of i is %d', i)\n if beam[i-1] < beam[i] > beam[i+1]:\n logging.debug('value determined to be the center of the values %d, %d, %d', beam[i-1], beam[i], beam[i+1])\n return i\n\n logging.error(\"no peak was found. will try working with the length of the beam\")\n return len(beam)" ]
[ "0.6640405", "0.6564305", "0.65567225", "0.654545", "0.64378893", "0.642369", "0.63958347", "0.62712115", "0.6269682", "0.6266941", "0.6174153", "0.6173894", "0.61729497", "0.61286587", "0.61276084", "0.6126366", "0.60827506", "0.60455763", "0.60307384", "0.6011327", "0.59966505", "0.5993669", "0.5992361", "0.5990447", "0.5974655", "0.5973763", "0.5963055", "0.59509885", "0.5940162", "0.59356713" ]
0.6901499
0
TMIN, TAVG, and TMAX for a list of dates.
def calc_temps(start_date, end_date): print("two dates\n") return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\ filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_time_means(val_list, time_list, t, start_month=1, err='std'):\n if len(time_list) != len(val_list):\n raise ValueError('val_list and time_list must have same length')\n # Arrays for outputs\n dates = []\n means = []\n errs = []\n year = np.min(time_list).year\n month = start_month\n while year <= np.max(time_list).year:\n # Sort start date\n start_date = datetime(year, month, 1)\n # sort end date\n end_year = year\n end_month = month + t\n if end_month > 12:\n end_year = year + 1\n end_month = end_month - 12\n end_date = datetime(end_year, end_month, 1)\n # get the relevant points \n points = []\n for n, i in enumerate(time_list):\n if i >= start_date:\n if i < end_date:\n points.append(val_list[n])\n dates.append(start_date + timedelta(weeks=26))\n if points != []:\n means.append(np.nanmean(points))\n s = np.std(points)\n if err == 'sem':\n s = s / np.sqrt(len(points))\n errs.append(s)\n else:\n means.append(np.NaN)\n errs.append(np.NaN) \n month = month + t\n if month > 12:\n year = year + 1\n month = month - 12\n dates = np.unique(dates)\n return dates, means, errs", "def calc_temps(start_date, end_date):\n\n Temperatures = (\n db.session.query(\n func.min(Measurement.tobs),\n func.avg(Measurement.tobs),\n func.max(Measurement.tobs),\n )\n .filter(Measurement.date >= start_date)\n .filter(Measurement.date <= end_date)\n .all()\n )\n\n return Temperatures", "def calc_temps(start_date, end_date):\n \n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)). filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()", "def calc_temps(start_date, end_date='2017-08-23'):\n \n return ses.query(func.min(clsMes.tobs), func.avg(clsMes.tobs), func.max(clsMes.tobs)).\\\n filter(clsMes.date >= start_date).filter(clsMes.date <= end_date).all()", "def find_time_running_means(val_list, time_list, t, err='std'):\n dates = []\n means = []\n errs = []\n # need to find means starting with each month of the year\n for i in range(1, 13):\n dates2, means2, errs2 = find_time_means(val_list, time_list, t, err=err,\n month=i)\n dates = np.append(dates2)\n means = np.append(means2)\n errs = np.append(errs2)\n return dates, means, errs", "def calc_temps_2(start_date):\r\n print(\"one date\\n\")\r\n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).all()", "def temp_daterange(start_date,end_date):\r\n # Query\r\n mam_temp_dr_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for date range with specific start_date and end_date\r\n mam_temp_start_end = list(np.ravel(mam_temp_dr_results))\r\n return jsonify(mam_temp_start_end)", "def <start>/<end>(<start>/<end>)\ndef calc_temps(start_date, end_date):", "def calc_temps(start_date, end_date = '0'):\n if end_date == '0':\n end_date = session.query(measurement.date).order_by(measurement.id.desc()).first()[0]\n temps = (session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs)).\\\n filter(measurement.date >= start_date).filter(measurement.date <= end_date).all())\n temps_dict = [{row[0]:[{'TMIN':row[1]},{'TAVG':row[2]},{'TMAX':row[3]}]} for row in temps]\n if temps_dict is None:\n return 'Not found', 404\n return 
jsonify(temps_dict)", "def compute_avg_func(values, get_date_func, get_value_func, time_from, time_to):\n values_in = [get_value_func(v) for v in values if time_from <= get_date_func(v) <= time_to]\n if len(values_in) > 0:\n a = np.array(values_in)\n avg = a.mean()\n else:\n avg = np.NaN\n return avg", "def getMinMaxMeanFromList(val, in_list):\n min_value = -999\n max_value = -999\n mean_value = -999\n between_mean_max = 0\n between_min_mean = 0\n greater_max = 0\n lesser_min = 0\n if in_list != []:\n min_value = min(in_list)\n max_value = max(in_list)\n mean_value = np.mean(in_list)\n val = float(val)\n if val >= mean_value and val<= max_value:\n between_mean_max = 1\n elif val >= min_value and val <= mean_value:\n between_min_mean = 1\n elif val > max_value:\n greater_max = 1\n elif val < min_value:\n lesser_min = 1\n return [min_value, max_value, mean_value, between_mean_max, between_min_mean, greater_max, lesser_min]", "def calc_temps(start_date, end_date):\r\n startend_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\r\n\r\n #convert list of tuples into normal list\r\n startend_calculations = list(np.ravel(startend_results))\r\n\r\n return jsonify(startend_calculations)", "def get_temps(st_dt = \"\", end_dt = \"\"):\n session = Session(engine)\n \n if(st_dt == \"\"):\n end_dt, st_dt = get_year_past()\n \n if(end_dt == \"\" or end_dt is None):\n res = session.query(coalesce(func.min(M.tobs),0), coalesce(func.avg(M.tobs),0), coalesce(func.max(M.tobs),0)).\\\n filter(M.date >= st_dt).one()\n else:\n res = session.query(coalesce(func.min(M.tobs),0), coalesce(func.avg(M.tobs),0), coalesce(func.max(M.tobs),0)).\\\n filter(M.date.between(st_dt, end_dt)).one()\n \n session.close()\n \n return res", "def t_days_lst2range(t_array: list) -> list:\r\n if type(t_array[0]) == np.datetime64:\r\n t0 = t_array[0].astype(datetime.datetime)\r\n t1 = t_array[-1].astype(datetime.datetime)\r\n else:\r\n t0 = t_array[0]\r\n t1 = t_array[-1]\r\n sd = t0.strftime(\"%Y-%m-%d\")\r\n ed = t1.strftime(\"%Y-%m-%d\")\r\n return [sd, ed]", "def calc_temps(start_date, end_date):\n engine = create_engine(\"sqlite:///Resources/hawaii.sqlite\") \n session = Session(engine) ##I had to reconnect to the database to avoid errors\n return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)). 
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()", "def get_tmin_tmax(self, models: List[Model] = None) -> DataFrame:\n if models is None:\n models = self.models\n\n tmintmax = DataFrame(columns=[\"tmin\", \"tmax\"], dtype=\"datetime64[ns]\")\n for ml in models:\n tmintmax.loc[ml.name, [\"tmin\", \"tmax\"]] = [\n ml.get_tmin(),\n ml.get_tmax(),\n ]\n\n return tmintmax", "def calculate_statistics(df, baseline_date, comparative_dates):\n median_baseline = round(df[df[\"date\"] == baseline_date][\"rate\"].median(), 2)\n differences = []\n values = []\n for date in comparative_dates:\n value = round(df[df[\"date\"] == date][\"rate\"].median(), 2)\n difference = round(((value - median_baseline) / median_baseline) * 100, 2)\n differences.append(difference)\n values.append(round(value, 2))\n\n return median_baseline, values, differences", "def min_max_date(self, min, max, date):\n\t\tif not min or min > date:\n\t\t\tmin = date\n\n\t\tif not max or max < date:\n\t\t\tmax = date\n\n\t\treturn min, max", "def calculate_avg_min_max(temps):\n\n temp_average = sum(temps) / len(temps)\n return temp_average, min(temps), max(temps)", "def dates(start, end):\n \n sel4 = [\n func.min(Measurement.tobs),\n func.max(Measurement.tobs),\n func.avg(Measurement.tobs),]\n\n if end is None: \n start_date = dt.datetime.strptime(start , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date >= start_date).all() \n else\n end_date = dt.datetime.strptime(end , '%Y-%m-%d')\n temp_analysis = session.query(*sel4).filter(Measurement.date.between (start_date, end_date)).all() \n\n# Create a dictionary from the row data and append to a list of all_dates\n all_dates = []\n for Measurement.tobs in temp_analysis:\n date_dict = {}\n date_dict['TMIN'] = func.min(Measurement.tobs)\n date_dict['TMAX'] = func.max(Measurement.tobs)\n date_dict['TAVG'] = func.avg(Measurement.tobs)\n all_dates.append(date_dict)\n\n return jsonify(date_dict)", "def temp_range_stats(start, end):\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n dates_ = session.query(Measurement.date)\n dates = [x[0] for x in dates_]\n if start not in dates or end not in dates:\n session.close()\n return jsonify({\"error\": f\"Date {start} or {end} not found.\"}), 404\n \n else:\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start).filter(Measurement.date <= end).all()\n \n temp_stats = [\n {\"tmin\": results[0][0]},\n {\"tavg\": results[0][1]},\n {\"tavg\": results[0][2]}\n ]\n\n session.close()\n \n return jsonify(temp_stats)", "def tstamps_for_daterange(self, start_date, end_date):\n\n img_offsets = np.array([timedelta(hours=h) for h in self.h_steps])\n\n timestamps = []\n diff = end_date - start_date\n for i in range(diff.days + 1):\n daily_dates = start_date + timedelta(days=i) + img_offsets\n timestamps.extend(daily_dates.tolist())\n\n return timestamps", "def tstamps_for_daterange(self, start_date, end_date):\n img_offsets = np.array([timedelta(hours=h) for h in self.h_steps])\n\n timestamps = []\n diff = end_date - start_date\n for i in range(diff.days + 1):\n daily_dates = start_date + timedelta(days=i) + img_offsets\n timestamps.extend(daily_dates.tolist())\n\n return timestamps", "def MaxMinLevels(dates, levels):\r\n datestart_neg = 0\r\n datestart_pos = 0\r\n date_interval_neg = 0\r\n date_interval_pos = 0\r\n bin_start_neg = 0\r\n bin_start_pos = 0\r\n max_dates = []\r\n 
min_dates = []\r\n y_mins = []\r\n y_maxes = []\r\n for bin_index in range(len(dates)-1):\r\n elev_start = levels[bin_index]\r\n elev_end = levels[bin_index+1]\r\n trans_cond = (elev_start-np.nanmean(levels))*(elev_end-np.nanmean(levels)) # subtract the means for a good crossover point\r\n if (trans_cond<=0)&(elev_start<elev_end):\r\n datestart_pos = dates.iloc[bin_index]\r\n bin_start_pos = bin_index\r\n dateend_neg = dates.iloc[bin_index+1]\r\n if (datestart_neg!=0):\r\n date_interval_neg = (dateend_neg - datestart_neg).seconds # date interval in seconds\r\n if (date_interval_neg > 6000): # Make sure small fluctuations aren't being counted\r\n temp_interval = levels.iloc[bin_start_neg:bin_index]\r\n min_index = temp_interval.loc[temp_interval==np.nanmin(temp_interval)].index.values[0]\r\n if (len(min_dates) == 0):\r\n y_mins.append(np.nanmin(temp_interval))\r\n min_dates.append(dates.iloc[min_index])\r\n if (dates.iloc[min_index] != min_dates[-1]): # makes sure duplicates aren't being printed\r\n y_mins.append(np.nanmin(temp_interval)) # duplicates are somehow the result of nans\r\n min_dates.append(dates.iloc[min_index])\r\n if (trans_cond<=0)&(elev_start>elev_end):\r\n datestart_neg = dates.iloc[bin_index]\r\n bin_start_neg = bin_index\r\n dateend_pos = dates.iloc[bin_index+1]\r\n if (datestart_pos!=0):\r\n date_interval_pos = (dateend_pos - datestart_pos).seconds # date interval in seconds\r\n if (date_interval_pos > 6000): # Make sure small fluctuations aren't being counted\r\n temp_interval = levels.iloc[bin_start_pos:bin_index] \r\n max_index = temp_interval.loc[temp_interval==np.nanmax(temp_interval)].index.values[0] \r\n if (len(max_dates) == 0):\r\n y_maxes.append(np.nanmax(temp_interval))\r\n max_dates.append(dates.iloc[max_index])\r\n if (dates.iloc[max_index] != max_dates[-1]): \r\n y_maxes.append(np.nanmax(temp_interval)) # makes sure duplicates aren't being printed\r\n max_dates.append(dates.iloc[max_index]) # duplicates are somehow the result of nans\r\n min_dates = np.array(min_dates)\r\n max_dates = np.array(max_dates)\r\n y_mins = np.array(y_mins)\r\n y_maxes = np.array(y_maxes)\r\n return min_dates, y_mins, max_dates, y_maxes", "def tstamps_for_daterange(self, start_date, end_date):\n img_offsets = np.array([timedelta(hours=0)])\n\n timestamps = []\n diff = end_date - start_date\n for i in range(diff.days + 1):\n daily_dates = start_date + timedelta(days=i) + img_offsets\n timestamps.extend(daily_dates.tolist())\n\n return timestamps", "def temp_date(enter_date):\r\n # Query\r\n mam_temp_date_results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\r\n filter(Measurement.date == enter_date).all()\r\n \r\n # Convert results into a list of min, ave, max temps for a specific date\r\n mam_temp_date = list(np.ravel(mam_temp_date_results))\r\n return jsonify(mam_temp_date)", "def temp_range(start_date, end_date):\n \"\"\"for dates between the start and end date inclusive.\"\"\"\n results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\\\n filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()\n\n # Convert list of tuples into normal list\n startend = list(np.ravel(results))\n\n return jsonify(startend)", "def get_statistics(data):\n v_min = None\n v_max = None\n v_avg = None\n v = None\n v_sum = .0\n count = 0\n for d in data:\n if d is None:\n continue\n try:\n v = float(d)\n except ValueError:\n print(pc.CRED, d, pc.CEND, end=',')\n 
continue\n if count == 0:\n v_min = v\n v_max = v\n else:\n if v < v_min:\n v_min = v\n if v > v_max:\n v_max = v\n v_sum += v\n count += 1\n if count > 0:\n v_avg = round(v_sum/count, 2)\n return v_min, v_max, v_avg", "def datelst_get_month_aligned_bounds(dates_):\n dfirst = dates_[0]\n dlast = dates_[-1]\n\n bound_lo = dt.datetime(dfirst.year, dfirst.month, 1)\n bound_hi = (dt.datetime(dates_[-1].year, dates_[-1].month, 1)+dt.timedelta(days=32))\n bound_hi.replace(day=1)\n bound_hi = bound_hi.replace(day=1) - dt.timedelta(seconds=1)\n\n return (bound_lo, bound_hi)", "def forecast_means(data):\n\t# collect dates\n\tdate_keys = [x.date() for x in list(data)]\n\t# filter out full days\n\tdays = set([x for x in date_keys if date_keys.count(x) == 8])\n\t# group temperature by dates from the filtered list\n\ttemps_grouped = map(lambda x: [v for (k, v) in data.items() if x == k.date()], list(sorted(days)))\n\t# return a dictionary with dates and mean temperature\n\treturn dict([(x, round(statistics.mean(y), 2)) for x, y in zip(list(sorted(days)), list(temps_grouped))])" ]
[ "0.6345828", "0.59785753", "0.595069", "0.5902184", "0.58804536", "0.5833137", "0.5816172", "0.57829845", "0.57284266", "0.56646013", "0.5582322", "0.5576219", "0.5570467", "0.5535141", "0.5529744", "0.5529462", "0.54203075", "0.54126215", "0.5412404", "0.5373379", "0.5356169", "0.5272502", "0.5254912", "0.5252233", "0.5250159", "0.5239807", "0.52300143", "0.522408", "0.52096397", "0.52081233" ]
0.6042569
1
Mark a song with score = 1000 Delete the file from Playlist
def markfile(self, song_id): cur = self.conn.cursor() query = """UPDATE caro_song SET score = -1000 WHERE id=%s""" cur.execute(query, (song_id, )) self.memcache.delete(":1:song_%d" % song_id) query = """DELETE FROM caro_playlistentry WHERE song_id=%s""" cur.execute(query, (song_id, ))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')", "def delete_song(self):\r\n song_id = tuple(input(\"Give the melody id to be deleted:\\t\"))\r\n sql = \"SELECT file_title, form FROM songs WHERE id = %s\" # Check existence of song with given ID\r\n self.cursor.execute(sql, song_id)\r\n result = self.cursor.fetchall()\r\n if len(result) > 0:\r\n path = self.p_storage + \"/\" + result[0][0] + \".\" + result[0][\r\n 1] # Find path of song by appending the name and format to the storage directory path\r\n os.remove(path) # Remove song from directory\r\n sql = \"DELETE FROM songs WHERE id = %s\" # Delete song from database\r\n self.cursor.execute(sql, song_id)\r\n self.cnx.commit()\r\n print(self.cursor.rowcount, \"record(s) deleted\")\r\n else:\r\n print(\"Give a valid id...\")", "def delete_music():\n track_id = request.vars.track_id\n if track_id is None:\n raise HTTP(500)\n db(db.track_data.track_id == track_id).delete()\n return \"ok\"", "def track_del(self,posicion):\n self.tracks.pop(posicion)", "def delete_pl_btn_push(self):\n try:\n pl_name = self.pl_line_edit.text().replace(\" \", \"_\")\n path = os.path.abspath(\"Playlists/\"+ pl_name+\".m3u\")\n to_keep = {}\n for row in range(self.model.rowCount()):\n if not self.model.item(row).checkState():\n title = str(self.model.data(self.model.index(row, 1)))\n artist = str(self.model.data(self.model.index(row, 2)))\n to_keep[title] = artist\n os.system(\"rm %s\" % (path))\n\n pl_file = open(path, \"w\")\n for mp3 in glob.glob(\"Fixed/*/*/*\"):\n data = mutagen.File(mp3, easy=True)\n if (data[\"title\"][0] in to_keep.keys() and\n to_keep[data[\"title\"][0]] == data[\"artist\"][0]):\n pl_file.write(mp3+\"\\n\")\n QMessageBox.about(self, \"Playlist Updated\",\n 'Playlist \"%s\" has been updated, please view again to see changes.'% (self.pl_line_edit.text()))\n except:\n QMessageBox.about(self, \"Playlist Not Updated\",\n 'Playlist \"%s\" could not be updated, please view again to see changes.'% (self.pl_line_edit.text()))", "def delete(self, pos):\n if self.is_playing() and self.current_position() == pos:\n self.x.playback_stop().wait()\n self.x.playlist_remove_entry(pos).wait()", "def remove_song(self):\n self.stop()\n self.listbox.delete(\"anchor\")\n pygame.mixer.music.stop()", "def remove_file(path, save):\n if not save:\n os.remove(path)\n print \"[crawler] removing audio file...\"", "def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)", "def playlist_remove(name):\n if name.isdigit() or g.userpl.get(name):\n\n if name.isdigit():\n name = int(name) - 1\n name = sorted(g.userpl)[name]\n\n del g.userpl[name]\n g.message = \"Deleted playlist %s%s%s\" % (c.y, name, c.w)\n g.content = playlists_display()\n save_to_file()\n\n else:\n g.message = F('pl not found advise ls') % name\n g.content = playlists_display()", "def short():\n countneg = 0\n countpos = 0\n testset_id = 4\n\n testfiles = db.session.query(evaluation.Testfile).filter(evaluation.Testfile.testset_id==testset_id)\n print \"Number testfiles: %s\" % testfiles.count()\n for i, tf in enumerate(testfiles):\n if i % 100 == 0:\n print i\n with audioread.audio_open(tf.file.path.encode(\"utf-8\")) as f:\n duration = f.duration\n if duration < 60.0:\n if tf.file.negative:\n countneg+=1\n else:\n countpos+=1\n print \"Removing short duration file: %s (%s)\" % 
(tf.file.path.encode(\"utf-8\"), duration)\n cur = db.session.query(evaluation.Result).filter(evaluation.Result.testfile_id==tf.id)\n print \"%d results to remove\" % cur.count()\n cur.delete()\n db.session.query(evaluation.Testfile).filter(evaluation.Testfile.id==tf.id).delete()\n db.session.commit()\n testfiles = db.session.query(evaluation.Testfile).filter(evaluation.Testfile.testset_id==testset_id)\n print \"New number testfiles: %s\" % testfiles.count()\n print \"deleted negative: %s\" % countneg\n print \"deleted positive: %s\" % countpos", "def delete_song(_id):\r\n Song.query.filter_by(id=_id).delete()\r\n # filter song by id and delete\r\n db.session.commit() # commiting the new change to our database\r", "def Delete(self):\n\n self.db.ExecuteSql('delete from tracks where id=%d;'\n % self.persistant['id'])\n self.db.ExecuteSql('commit;')", "def del_highscores(self):\n\t\ttry:\n\t\t\twith open(self.filename) as f_obj:\n\t\t\t\tcontents = f_obj.read()\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 733 7043 for assistance.')\n\t\telse:\n\t\t\tjson_contents = json.loads(contents)\n\t\t\tfor item in json_contents:\n\t\t\t\titem['player_name'] = 'EMPTY'\n\t\t\t\titem['player_score'] = 0\n\t\t\tself.save_highscores(json_contents)", "def delete(self, filename):\n pass", "def remove_track():\n if playlist_box.size() > 0: # Check if the playlist is not empty\n\n if playlist_box.curselection(): # Check if a track is selected\n # Get index of selected track\n track_indx = int(playlist_box.curselection()[0])\n\n if track_indx >= 0 and track_indx <= playlist_box.size()-1:\n\n track = playlist_box.get(track_indx)\n playlist_box.delete(track_indx)\n playlist.pop(track_indx)\n\n if playlist_box.size() == 0: # Check if playlist is empty\n init_player()\n # BUG Using rewind and pause instead of stop,\n # Reason: after stopping a track and playing the same track (or other track),\n # an \"End of track\" event is generated, BUGGG???\n pygame.mixer.music.rewind()\n pygame.mixer.music.pause()\n # pygame.mixer.music.stop()\n # Playlist is not empty\n elif track_title.get() == track: # The deleted track is the track being played\n # If track is not the last, play the next track (same index of removed track)\n if track_indx <= playlist_box.size()-1:\n playlist_box.selection_set(track_indx)\n play_pause(track_idx=track_indx)\n # If deleted track is the last, play the first track in playlist\n else:\n play_pause()\n else: # The deleted track is not the track being played\n # Select the Playing (Active) track\n if track_indx < active_track_idx: # The deleted track is before playing track\n playlist_box.selection_set(active_track_idx-1)\n else: # The deleted track is after playing track\n playlist_box.selection_set(active_track_idx)\n\n else: # User didn't select a track to delete\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=\"Please select a TRACK to DELETE!\")\n else: # User trying to delete from empty playlist\n tkinter.messagebox.showwarning(\n title=\"Warning!\", message=\"Playlist is empty. 
Please insert at least One TRACK to DELETE!\")", "def DeletePlaylist(self):\n os.remove(self.path)", "def removebannedtracks(bannedtracks, similartracks, logger): # {{{1\n nbannedtracks = 0\n index = 0\n while index < len(similartracks):\n weight, track = similartracks[index]\n if track.get('artist', '') != '' and track.get('title', '') != '' and \\\n createkey(track['artist'], track['title']) in bannedtracks:\n del similartracks[index]\n nbannedtracks += 1\n else:\n index += 1\n if nbannedtracks > 0:\n logger.debug('Ignored %i banned track(s) from Last.fm', nbannedtracks)", "def remove(request, music_id: int) -> HttpResponseRedirect:\n music_item = get_object_or_404(Music, id=music_id)\n request.user.profile.playlist.remove(music_item)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))", "def remove_songs(self):\n self.stop()\n self.listbox.delete(0, \"end\")\n pygame.mixer.music.stop()", "def move(self):\n for artist in self.audio_dict:\n for album in self.audio_dict[artist]:\n for songlist in self.audio_dict[artist][album]:\n if len(self.audio_dict[artist][album][songlist]) > 1:\n \n # track the song that wont be deleted\n song_to_keep = {}\n # track bitrate through songlist\n highest_bitrate = 0\n # find the highest bitrate\n for song in self.audio_dict[artist][album][songlist]:\n if song['bitrate'] > highest_bitrate:\n highest_bitrate = song['bitrate']\n song_to_keep = song\n # flag files for deletion \n for song in self.audio_dict[artist][album][songlist]:\n if song != song_to_keep:\n self._do_move(artist, album, song)\n \n return self", "def fix_score(self,req):\n if self.kind in (\"album\",\"artist\"):\n self.update_score()\n req.message=\"score reset from child scores\"\n elif self.kind==\"track\":\n self.score=0\n for i in self.Play.list(page=self.uid):\n self.score+=i.times\n self.flush()\n req.message=\"score reset from plays table\"\n else:\n req.error= \"not a track, album, or artist\"\n return self.view(req)", "async def _remove(self, ctx: commands.Context, index: int):\n\n if len(ctx.voice_state.songs) == 0:\n return await ctx.send('Cannot remove song because the queue is empty.')\n\n ctx.voice_state.songs.remove(index - 1)\n await ctx.message.add_reaction('✅')", "def test_delete_song():\n\n target_song = {\n \"artist\": \"Heng\",\n \"song\": \"I can do all things\",\n \"genre\": \"Hip-Hop\",\n \"lyrics\": \"Like Steph said I can do all things...\",\n \"year\": get_timestamp_year(),\n \"timestamp\": get_timestamp()\n }\n\n make_response, code = delete_song(target_song)\n\n assert make_response == \"The song title I can do all things is deleted for artist: Heng.\"", "def delete_playlist(self, playlist_name):\n print(\"deletes_playlist needs implementation\")", "def delete_file(filename: str):\n\t\tif filename == \"ALL\":\n\t\t\tfor file in os.listdir(\"data/music/\"):\n\t\t\t\tdeleted = False\n\t\t\t\twhile not deleted:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(f\"data/music/{file}\")\n\t\t\t\t\t\tdeleted = True\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"Not removed, waiting 1 second...\")\n\t\t\t\t\t\tasyncio.sleep(1)\n\t\telse:\n\t\t\tprint(\"File--: \", filename)", "def delete(self, request, pk=None):\n song = get_object_or_404(Song, pk=pk)\n\n self.check_object_permissions(request, song.creator)\n\n song.delete()\n return Response({}, status.HTTP_204_NO_CONTENT)", "def delete():", "def updatesong(song, fpath):\n song.filename = fpath\n song.save()\n return \"[U] %s\\n\" % song.title", "def disassociate_song(self, song):\n self.songs.remove(song)" ]
[ "0.65513116", "0.6344058", "0.63115144", "0.61488205", "0.60956544", "0.60894954", "0.6069589", "0.6061151", "0.6005755", "0.5924812", "0.5891067", "0.58458006", "0.5767601", "0.5687184", "0.56593955", "0.5651815", "0.5644319", "0.55622435", "0.55598545", "0.5545439", "0.5534006", "0.5530127", "0.5519496", "0.55103534", "0.5507189", "0.55043644", "0.5467658", "0.543689", "0.54321945", "0.54277545" ]
0.80521697
0
randomly returns 'm' or 'f'
def gender(): return random.choice((GENDER_FEMALE, GENDER_MALE))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def name():\r\n return _random.choice([male_first(), female_first()])", "def choose_guard(self):\n\n\t\tg = randint(1, 3)\n\n\t\tif g == 1:\n\t\t\treturn 'h'\n\t\tif g == 2:\n\t\t\treturn 't'\n\t\tif g == 3:\n\t\t\treturn 'l'", "def get_random_male_name ():\n return db_random_pop_default(DB_FIRST_MALE, \"John\")", "def gender():\r\n\r\n return _random.choice(['Male', 'Female'])", "def decide_action(p: float) -> str:\n if p == 0:\n return D\n if p == 1:\n return C\n return C if random.random() < p else D", "def get_random(self,num):\n return ''.join(sample('abcdefghijklmnopqrstuvwxyz1234567890!',8))", "def get_random_female_name ():\n return db_random_pop_default(DB_FIRST_FEMALE, \"Jane\")", "def random_float():\n return (random() - 0.5) * 2", "def randomHelmet():\n return random.choice(HELMETS)", "def random_test(self):\r\n return 1", "def random_test(self):\r\n return 1", "def random_letter(letters):\n return random.choice(letters)", "def get_random_phrase():\n return random.choices(PHRASES, WEIGHTS, k=1)[0]", "def random():\n return constant(1)", "def random() -> float:\n ...", "def random_character(latin_chance=0.6):\n if random.random() < latin_chance:\n return random.choice(LATIN) + random.choice(LATIN)\n else:\n return random.choice(NON_LATIN)", "def __call__(self):\n return random.choice(self.fakers)", "def single_temp() -> str:\n return '36.' + str(random.randint(1, 5))", "def random():\r\n return R.NextDouble()", "def _random_function(self, random_state):\n return random_state.rand", "def lf():\n return random.sample(font_list, 25)", "def get_random_2(number):\n return ''.join(random.sample(field, number))", "def random(self):\r\n return random.randint(1, 4)", "def getRandomRarity():\n r = random.randint(1,100)\n if r <= Rarities.IMPOSIBIL:\n return \"IMPOSIBIL\"\n elif r <= Rarities.LEGENDAR:\n return \"LEGENDAR\"\n elif r <= Rarities.EPIC:\n return \"EPIC\"\n else:\n return \"COMUN\"", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def random_word():\n num = random.choice(range(9))\n if num == 0:\n return \"NOTORIOUS\"\n elif num == 1:\n return \"GLAMOROUS\"\n elif num == 2:\n return \"CAUTIOUS\"\n elif num == 3:\n return \"DEMOCRACY\"\n elif num == 4:\n return \"BOYCOTT\"\n elif num == 5:\n return \"ENTHUSIASTIC\"\n elif num == 6:\n return \"HOSPITALITY\"\n elif num == 7:\n return \"BUNDLE\"\n elif num == 8:\n return \"REFUND\"", "def generate_RME():\n RME = [\"ogre\", \"goblin\", \"gnoll\", \"orc\", \"personal injury lawyer\"]\n monster = random.choice(RME)\n return monster", "def __getRandChar(self):\n return self.letterbag[random.randint(0,25)]", "def generate_fantasy_title():\n d20 = random.randint(1, 20)\n if d20 <= 4:\n #genetive noun\n return fantasy_genetive[random.randint(0, len(fantasy_genetive) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 > 4 and d20 < 13: \n #The adj noun\n return \"The \" + fantasy_adj[random.randint(0, len(fantasy_adj) - 1)] + \" \" + fantasy_noun[random.randint(0, len(fantasy_noun) - 1)]\n elif d20 >= 13:\n #something of something\n return fantasy_noun[random.randint(0, len(fantasy_noun) - 1)] + \" of \" + fantasy_what_is_this[random.randint(0, len(fantasy_what_is_this) - 1)]" ]
[ "0.6963704", "0.69568205", "0.6864142", "0.6769605", "0.6755098", "0.6719579", "0.6704305", "0.66721994", "0.66479915", "0.6628899", "0.6628899", "0.6626261", "0.66160446", "0.6609609", "0.6583059", "0.65823984", "0.6559137", "0.6552863", "0.6538592", "0.65204793", "0.6515001", "0.65079015", "0.65057725", "0.6490588", "0.6479788", "0.6479788", "0.64555675", "0.64475954", "0.6438535", "0.64263076" ]
0.71739215
0
create a VTIMEZONE object from a caldav data block
def __init__(self, data: str): # parsing notes: # required fields: tzid # optional fields: last-mod, tzurl, x-prop # # required blocks: standard or daylight (at least once) # required fields in block standard/daylight: dtstart, tzoffsetto, tzoffsetfrom # optional fields in block standard/daylight: comment, rrule, rdate, tzname, x-prop logging.debug('creating VTIMEZONE from %s bytes of data' % len(data)) data = VOBJECT.clean_vobject_block(data) try: self.tzid = re.search(r'^TZID:(.*?)$', data, re.MULTILINE).group(1) except AttributeError: raise MalformedVObjectException("Required property TZID not found") for m in re.finditer(r'^BEGIN:(STANDARD|DAYLIGHT)\n(.*?)END:(STANDARD|DAYLIGHT)$', data, re.MULTILINE+re.DOTALL): try: dtstart = re.search(r'^DTSTART:(.*?)$', m.group(2), re.MULTILINE).group(1) tzoffsetfrom = re.search(r'TZOFFSETFROM:(.*?)$', m.group(2), re.MULTILINE).group(1) tzoffsetto = re.search(r'TZOFFSETTO:(.*?)$', m.group(2), re.MULTILINE).group(1) rrule = re.search(r'RRULE:(.*?)$', m.group(2), re.MULTILINE) values = { 'TYPE': m.group(1), 'DTSTART': dtstart, 'TZOFFSETFROM': tzoffsetfrom, 'TZOFFSETTO': tzoffsetto, } if rrule is not None: values['RRULE'] = rrule.group(1) self._times.append(values) except AttributeError: raise MalformedVObjectException('Required Properties not found for %s block' % m.group(1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **kwargs):\n\t\tself.__c_version = c_singlezone(**kwargs)", "def decode_zone(obj, name):\n try:\n zone_obj = Zone([Card(c) for c in obj], name)\n except TypeError as e:\n raise GTREncodingError('Error decoding Zone: ' + e.message)\n\n return zone_obj", "def __init__ (self, spec=None):\n\n if spec is not None:\n if isinstance(spec, six.string_types):\n if 'Z' == spec:\n self.__utcOffset_min = 0\n else:\n match = self.__Lexical_re.match(spec)\n if match is None:\n raise ValueError('Bad time zone: %s' % (spec,))\n self.__utcOffset_min = int(match.group(2)) * 60 + int(match.group(3))\n if '-' == match.group(1):\n self.__utcOffset_min = - self.__utcOffset_min\n elif isinstance(spec, int):\n self.__utcOffset_min = spec\n elif isinstance(spec, datetime.timedelta):\n self.__utcOffset_min = spec.seconds // 60\n else:\n raise TypeError('%s: unexpected type %s' % (type(self), type(spec)))\n self.__utcOffset_td = datetime.timedelta(minutes=self.__utcOffset_min)\n if self.__utcOffset_td < -self.__MaxOffset_td or self.__utcOffset_td > self.__MaxOffset_td:\n raise ValueError('XSD timezone offset %s larger than %s' % (self.__utcOffset_td, self.__MaxOffset_td))\n if 0 == self.__utcOffset_min:\n self.__tzName = 'Z'\n elif 0 > self.__utcOffset_min:\n self.__tzName = '-%02d:%02d' % divmod(-self.__utcOffset_min, 60)\n else:\n self.__tzName = '+%02d:%02d' % divmod(self.__utcOffset_min, 60)", "def fromZone(cls,rd,origin=None):\n # Unknown rata - assume hexdump in zone format\n # (DiG prepends \"\\\\# <len>\" to the hexdump so get last item)\n return cls(binascii.unhexlify(rd[-1].encode('ascii')))", "def _read_vtc(vtc_file):\r\n with open(vtc_file, 'rb') as f:\r\n filebytes = f.read()\r\n\r\n hdr = {}\r\n hdr['file_guid'] = hexlify(filebytes[:16])\r\n # not sure about the 4 Bytes inbetween\r\n\r\n i = 20\r\n mpg_file = []\r\n start_time = []\r\n end_time = []\r\n while i < len(filebytes):\r\n mpg_file.append(_make_str(unpack('c' * 261, filebytes[i:i + 261])))\r\n i += 261\r\n Location = filebytes[i:i + 16]\r\n correct = b'\\xff\\xfe\\xf8^\\xfc\\xdc\\xe5D\\x8f\\xae\\x19\\xf5\\xd6\"\\xb6\\xd4'\r\n assert Location == correct\r\n i += 16\r\n start_time.append(_filetime_to_dt(unpack('<q',\r\n filebytes[i:(i + 8)])[0]))\r\n i += 8\r\n end_time.append(_filetime_to_dt(unpack('<q',\r\n filebytes[i:(i + 8)])[0]))\r\n i += 8\r\n\r\n return mpg_file, start_time, end_time", "def loads(self, data):\n self._id = data.get('id', -1)\n self._created = data.get('created', 0) # datetime.strptime(data.get('created', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()\n self._stage = data.get('stage', 0) # self.stage_from_str(data.get('stage', ''))\n self._dir = data.get('direction', 0) # self.direction_from_str(data.get('direction', ''))\n self._timeframe = data.get('timeframe') # timeframe_from_str(data.get('timeframe', 't'))\n self._expiry = data.get('expiry', 0) # datetime.strptime(data.get('expiry', '1970-01-01T00:00:00'), '%Y-%m-%dT%H:%M:%S').timestamp()", "def __init__(self, name):\n self.t_sect = {\n 'name': name,\n 'filepaths': [],\n 'datetime_start': None,\n 'datetime_finish': None\n }", "def _convertTZ(self):\n tz = timezone.get_current_timezone()\n dtstart = self['DTSTART']\n dtend = self['DTEND']\n if dtstart.zone() == \"UTC\":\n dtstart.dt = dtstart.dt.astimezone(tz)\n if dtend.zone() == \"UTC\":\n dtend.dt = dtend.dt.astimezone(tz)", "def verify_t(data):\n if 't_utc' not in data['properties']:\n return None\n data['properties']['DateTime'] = util.datestring(data['properties']['t_utc'], 
tz=config['local_tz']) \n return data", "def create(self):\n\n record = {\n 'type': self.type,\n 'ttl': self.ttl,\n 'priority': self.priority,\n 'rdata': self.rdata,\n }\n\n if self.call(method='addZoneRecord', args=[self.domainname, self.subdomain, record]):\n return self", "def customize_video(course_data, block_data):\n try:\n block_data['youtube_id'] = course_data['metadata']['youtube_id_1_0']\n except KeyError:\n block_data['youtube_id'] = None\n try:\n block_data['start_time'] = course_data['metadata']['start_time']\n except KeyError:\n block_data['start_time'] = None\n try:\n block_data['end_time'] = course_data['metadata']['end_time']\n except KeyError:\n block_data['end_time'] = None", "def getTimeZoneDict():\n if not len(TimeZoneDict):\n for tz_descr in map(str.split, TimeZoneStr.split('\\n')):\n tz_offset = int(float(tz_descr[0]) * 3600)\n for tz_code in tz_descr[1:]:\n TimeZoneDict[tz_code] = tz_offset\n return TimeZoneDict", "def localize(self, dt):\n\n #\n # TODO: implement various RRULE styles (at least common ones..)\n # possibly move rrule parsing into own classes because it's used by VEVENT as well\n # TODO: move get x-th day of month, first sunday, etc in separate functions\n\n logging.debug('localizing %s for timezone %s', (dt, self.tzid))\n\n cur_timezone = None\n cur_timestamp = None\n\n for t in self._times:\n dtstart = t['DTSTART']\n\n if 'RRULE' in t.keys():\n target_date = None\n vals = {}\n for k in t['RRULE'].split(';'):\n (key, value) = k.split('=')\n vals[key] = value\n\n if 'FREQ' in vals.keys():\n if vals['FREQ'] == 'YEARLY':\n month = int(vals['BYMONTH'])\n day = vals['BYDAY']\n\n if not day.isnumeric():\n wd = day[-2:]\n if day[:1] == \"-\":\n cnt = int(day[1:2])\n year = datetime.today().year\n month = (month + 1) % 12\n if month == 1:\n year += 1\n\n start_date = datetime(year, int(month), 1)\n\n day_num = start_date.weekday()\n day_num_target = VTIMEZONE._weekdays.index(wd)\n days_ago = (7 + day_num - day_num_target) % 7\n if days_ago == 0:\n days_ago = 7\n target_date = start_date - timedelta(days=days_ago + ((cnt-1)*7))\n\n else:\n cnt = int(day[:1])\n\n start_date = datetime(datetime.today().year, int(month), 1)\n\n day_num = start_date.weekday()\n day_num_target = VTIMEZONE._weekdays.index(wd)\n days_ago = (7 + day_num_target - day_num) % 7\n if days_ago == 0:\n days_ago = 7\n target_date = start_date + timedelta(days=days_ago + ((cnt-1)*7))\n\n if target_date is not None:\n if cur_timestamp is None:\n cur_timestamp = target_date\n cur_timezone = t\n else:\n if target_date.date() < dt.date():\n if cur_timestamp.date() > dt.date() or target_date.date() > cur_timestamp.date():\n cur_timestamp = target_date\n cur_timezone = t\n else:\n logging.error('RRULE not implemented yet, no localization possible (%s)' % t['RRULE'])\n\n logging.debug('decided on timezone offset: %s' % cur_timezone['TZOFFSETTO'])\n\n m = re.search(r'([+-])?(\\d\\d)(\\d\\d)', cur_timezone['TZOFFSETTO'])\n\n if m.group(1) == \"-\":\n dt -= timedelta(hours=int(m.group(2)), minutes=int(m.group(3)))\n else:\n dt += timedelta(hours=int(m.group(2)), minutes=int(m.group(3)))\n\n logging.debug('localized to %s' % dt)\n return dt", "def __init__(self):\n # read in timezone database\n self.__zones = []\n try:\n for x in csv.reader(open(self.__tzdata, 'r'), delimiter='\\t'):\n # skip the rows that start #\n if not x[0].startswith(\"#\"):\n if len(x) > 2:\n self.__zones.append(x[2])\n except FileNotFoundError:\n print(\"@TimeZone: {} not found\".format(self.__tzdata))\n except:\n 
print(\"@TimeZone Unexpected error:\", sys.exc_info()[0])\n print(\"Read list of valid time zones: {}\".format(len(self.__zones)))", "def _process_zone_line(line: str) -> ZoneEraRaw:\n tokens: List[str] = line.split()\n\n # STDOFF\n offset_string: str = tokens[0]\n\n # 'RULES' field can be:\n rules_string: str = tokens[1]\n\n # check 'until' year\n if len(tokens) >= 4:\n until_year: int = int(tokens[3])\n else:\n until_year = MAX_UNTIL_YEAR\n\n # check for additional components of 'UNTIL' field\n if len(tokens) >= 5:\n until_year_only: bool = False\n until_month: int = MONTH_TO_MONTH_INDEX[tokens[4]]\n else:\n until_year_only = True\n until_month = 1\n\n if len(tokens) >= 6:\n until_day: str = tokens[5]\n else:\n until_day = '1'\n\n if len(tokens) >= 7:\n (until_time, until_time_suffix) = parse_at_time_string(tokens[6])\n else:\n until_time = '00:00'\n until_time_suffix = 'w'\n\n # FORMAT\n format: str = tokens[2]\n\n # Return map corresponding to a ZoneEra instance\n return {\n 'offset_string': offset_string,\n 'rules': rules_string,\n 'format': format,\n 'until_year': until_year,\n 'until_year_only': until_year_only,\n 'until_month': until_month,\n 'until_day_string': until_day,\n 'until_time': until_time,\n 'until_time_suffix': until_time_suffix,\n 'raw_line': line,\n }", "def test_calendar_query_timezone(self):\n TimezoneCache.create()\n self.addCleanup(TimezoneCache.clear)\n\n tzid1 = \"Etc/GMT+1\"\n tz1 = Component(None, pycalendar=readVTZ(tzid1))\n\n calendar_properties = (\n davxml.GETETag(),\n caldavxml.CalendarData(),\n )\n\n query_timerange = caldavxml.TimeRange(\n start=\"%04d1001T000000Z\" % (DateTime.getToday().getYear(),),\n end=\"%04d1101T000000Z\" % (DateTime.getToday().getYear(),),\n )\n\n query = caldavxml.CalendarQuery(\n davxml.PropertyContainer(*calendar_properties),\n caldavxml.Filter(\n caldavxml.ComponentFilter(\n caldavxml.ComponentFilter(\n query_timerange,\n name=\"VEVENT\",\n ),\n name=\"VCALENDAR\",\n ),\n ),\n caldavxml.TimeZone.fromCalendar(tz1),\n )\n\n def got_xml(doc):\n if not isinstance(doc.root_element, davxml.MultiStatus):\n self.fail(\"REPORT response XML root element is not multistatus: %r\" % (doc.root_element,))\n\n return self.calendar_query(query, got_xml)", "def __init__(self, name=None):\n if name:\n self.name = name\n else:\n self.name = 'UTC'\n\n #Check timezone is valid by trying to instantiate it. 
May raise error.\n pytz.timezone(self.name)", "def get_transport_zone(options):\n vsm_obj = get_vsm_object(options, '2.0')\n transport_zone = VDNScope(vsm_obj)\n response = transport_zone.query()\n transport_zones_object = VDNScopesSchema()\n transport_zones_object.set_data(response, 'xml')\n id = transport_zones_object.vdnScope[0].objectId\n transport_zone.id = id\n return transport_zone", "def __init__(self,site,startDate,endDate,path='verif_data/'):\n self.site = site.upper()\n self.startDateTime = datetime.strptime(startDate,'%Y%m%d')\n self.endDateTime = datetime.strptime(endDate,'%Y%m%d')\n years = range(self.startDateTime.year,self.endDateTime.year + 1)\n data = []\n for year in years:\n self.filename=path + self.site + '_asos_' + str(year) + '.txt'\n datafile = open(self.filename)\n for line in datafile:\n if line[0] != '#':\n if 'station' in line:\n self.header = [x.strip() for x in line[:-1].split(',')]\n else:\n dataline = line[:-2].split(',')\n for i,val in enumerate(dataline[:-1]):\n if val=='M':\n dataline[i] = -999\n dataline[1] = dataline[1].replace(' ','_')\n dataline[1] = dataline[1].replace('-','')\n currDateTime = datetime.strptime(dataline[1][:14],'%Y%m%d_%H:%M')\n if currDateTime >= self.startDateTime and currDateTime <= self.endDateTime:\n data.append(tuple(dataline))\n datafile.close()\n self.datatype = []\n for item in self.header:\n if item == 'station':\n self.datatype.append((item,'S3'))\n elif 'valid' in item:\n self.datatype.append(('time','S14'))\n elif 'skyc' in item:\n self.datatype.append((item,'S3'))\n elif item=='metar':\n self.datatype.append((item,'S99'))\n else:\n self.datatype.append((item,float))\n self.data = np.array(data,dtype=self.datatype)", "def _load_object(self, cid):\n object_data = unixfs_pb2.Data()\n object_data.ParseFromString(self.client.object.data(\n cid,\n **self.client_request_kwargs,\n ))\n\n self.cid_type_cache[cid] = object_data.Type\n self.path_size_cache[cid] = object_data.filesize\n self.block_cache[cid] = object_data.Data\n self.subblock_sizes_cache[cid] = object_data.blocksizes\n\n return object_data", "def testProtobufDecodeDateTimeMessageWithTimeZone(self):\n nested = NestedDateTimeMessage()\n nested.value = message_types.DateTimeMessage(milliseconds=12345678,\n time_zone_offset=60)\n value = protobuf.decode_message(HasDateTimeMessage,\n protobuf.encode_message(nested)).value\n self.assertEqual(datetime.datetime(1970, 1, 1, 3, 25, 45, 678000,\n tzinfo=util.TimeZoneOffset(60)),\n value)", "def fromZone(cls, zone, origin=\"\", ttl=0):\n return list(ZoneParser(zone, origin=origin, ttl=ttl))", "def __init__(self, data):\n self._spatial = data['spatial']['bbox']\n self._temporal = data['temporal']['interval']", "def object_decoder(obj):\n\t\tif 'logfile' in obj:\n\t\t\treturn logfile(obj['logfile']['name'], obj['logfile']['lines'], obj['logfile']['type'], obj['logfile']['content'], obj['logfile']['sources'])\n\t\tif 'logfile_entry' in obj:\n\t\t\tif len(obj['logfile_entry']['timestamp']['datetime']) >= 20 :\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S.%f\")\n\t\t\telif obj['logfile_entry']['timestamp']['datetime'][-6:-5] != '+':\n\t\t\t\tdate = datetime.datetime.strptime(obj['logfile_entry']['timestamp']['datetime'],\"%Y-%m-%dT%H:%M:%S\")\n\t\t\telse:\n\t\t\t\tunformatted_date = obj['logfile_entry']['timestamp']['datetime']\n\t\t\t\tunformatted_date = unformatted_date[:-3]+unformatted_date[-2:]\n\t\t\t\t# once again, related to missing features in Python 
3.6\n\t\t\t\tdate = datetime.datetime.strptime(unformatted_date,\"%Y-%m-%dT%H:%M:%S.%f%z\")\n\t\t\treturn logfile_entry(obj['logfile_entry']['id'], file, obj['logfile_entry']['message'], obj['logfile_entry']['structured_data'], date,obj['logfile_entry']['hostname'],obj['logfile_entry']['source'])\n\t\treturn obj", "def setNodeTimeZone(self,node,timezone):\n post_data = {'timezone': str(timezone)}\n data = self.connect('put',\"nodes/%s/time\" % (node), post_data)\n return data", "def decode_sect1(sec1, edition=3):\r\n key_offs = {\r\n 3:((\"length\", 0, 2), (\"master\", 3, 3), (\"center\", 5, 5), (\"subcenter\", 4 , 4),\r\n (\"update\", 6, 6), (\"cat\", 8, 8), (\"cat_int\", 9, 9), (\"cat_loc\", 9, 9),\r\n (\"mver\", 10, 10), (\"lver\", 11, 11), (\"datetime\", 12 , 16), (\"sect2\", 7, 7),\r\n ),\r\n 4:((\"length\", 0, 2), (\"master\", 3, 3), (\"center\", 4, 5), (\"subcenter\", 6 , 7),\r\n (\"update\", 8, 8), (\"cat\", 10, 10), (\"cat_int\", 11, 11), (\"cat_loc\", 12, 12),\r\n (\"mver\", 13, 13), (\"lver\", 14, 14), (\"datetime\", 15 , 21), (\"sect2\", 9, 9),\r\n ),\r\n }\r\n meta_dict = {}\r\n for t in key_offs[edition]:\r\n if t[0]=='datetime':\r\n meta_dict[t[0]] = bf.dtg(sec1[t[1]*8:(t[2]+1)*8], edition)\r\n else:\r\n meta_dict[t[0]] = bf.bits_to_n(sec1[t[1]*8:(t[2]+1)*8])\r\n meta_dict['sect2'] = meta_dict['sect2'] & 128\r\n\r\n return meta_dict", "def _decode_sdt(self, sdt: bytes) -> SDT.SDT:\n sdtdk = SDT.SDT()\n try:\n pointer_field = sdt[0]\n pos = 1 + pointer_field\n sdtdk.table_id = sdt[pos]\n b12, sdtdk.transport_stream_id = struct.unpack('>HH', sdt[pos+1:pos+5])\n section_length = b12 & 4095\n pos_crc = pos + 3 + section_length - 4 # - CRC\n b = sdt[pos + 5]\n sdtdk.ver_num = (b & 62) >> 1\n sdtdk.cur_next_ind = b & 1\n sdtdk.sec_num, sdtdk.last_sec_num, sdtdk.original_network_id = struct.unpack('>BBH', sdt[pos+6:pos+10])\n pos += 10 + 1 # 1 - reserved\n while pos < pos_crc:\n service_id, b34, b56 = struct.unpack('>HBH', sdt[pos:pos+5])\n EIT_schedule_flag = (b34 & 2) >> 1\n EIT_present_following_flag = b34 & 1\n running_status = (b56 & 57344) >> 13\n free_CA_mode = (b56 & 4096) >> 12\n descriptors_loop_length = (b56 & 4095)\n pos += 5\n descriptors = []\n if descriptors_loop_length > 0:\n descriptors = DescriptorParser.decode_descriptors(sdt[pos:pos+descriptors_loop_length])\n pos += descriptors_loop_length\n sdtdk.services.append({'service_id': service_id, 'EIT_schedule_flag': EIT_schedule_flag,\n 'EIT_present_following_flag': EIT_present_following_flag,\n 'running_status': running_status, 'free_CA_mode': free_CA_mode,\n 'descriptors': descriptors})\n try:\n sdtdk.crc32 = (struct.unpack('>L', sdt[pos_crc:pos_crc+4]))[0]\n crc_check = self.crc32mpeg2(sdt[1+pointer_field:pos_crc])\n if sdtdk.crc32 != crc_check:\n sdtdk.crc32_ok = False\n except Exception as err:\n sdtdk.crc32_ok = False\n logging.warning('SDT CRC check error:' + str(err))\n return sdtdk\n except Exception as err:\n logging.warning('SDT parsing error:' + str(err))\n return None", "def test_download_date_tz_3(temp_file):\n from osxmetadata import OSXMetaData\n from osxmetadata.datetime_utils import datetime_naive_to_local\n import datetime\n\n meta = OSXMetaData(temp_file, tz_aware=False)\n dt = datetime.datetime.now()\n dt_tz = datetime_naive_to_local(dt)\n meta.downloadeddate = dt_tz\n assert meta.downloadeddate == [dt]\n assert meta.get_attribute(\"downloadeddate\") == [dt]", "def toZone(self, z):\n t, tz = self._t, _TZINFO._zmap[z.lower()]\n micros = self.micros()\n tznaive = False # you're performing a timzone 
change, can't be naive\n\n try:\n # Try to use time module for speed.\n yr, mo, dy, hr, mn, sc = safegmtime(t + _tzoffset(tz, t))[:6]\n sc = self._second\n return self.__class__(yr, mo, dy, hr, mn, sc, tz, t,\n self._d, self.time, micros, tznaive)\n except Exception:\n # gmtime can't perform the calculation in the given range.\n # Calculate the difference between the two time zones.\n tzdiff = _tzoffset(tz, t) - _tzoffset(self._tz, t)\n if tzdiff == 0:\n return self\n sc = self._second\n ms = sc - math.floor(sc)\n x = _calcDependentSecond2(self._year, self._month, self._day,\n self._hour, self._minute, sc)\n x_new = x + tzdiff\n yr, mo, dy, hr, mn, sc = _calcYMDHMS(x_new, ms)\n return self.__class__(yr, mo, dy, hr, mn, sc, tz, t,\n self._d, self.time, micros, tznaive)", "def test_download_date_tz_1B(temp_file):\n from osxmetadata import OSXMetaData\n from osxmetadata.datetime_utils import datetime_naive_to_local\n import datetime\n\n meta = OSXMetaData(temp_file, tz_aware=True)\n dt = datetime.datetime.now()\n meta.downloadeddate = dt\n dt_tz = datetime_naive_to_local(dt)\n assert meta.downloadeddate == [dt_tz]\n assert meta.get_attribute(\"downloadeddate\") == [dt_tz]" ]
[ "0.5184043", "0.51366085", "0.51014906", "0.507741", "0.5037655", "0.49450675", "0.4804317", "0.47907048", "0.47804165", "0.47664854", "0.47210106", "0.47165537", "0.46668682", "0.46470034", "0.46281227", "0.46242642", "0.46223897", "0.4621646", "0.45555016", "0.4546735", "0.45274732", "0.4525757", "0.45134825", "0.45133206", "0.4509635", "0.4493022", "0.44921228", "0.4487905", "0.44810313", "0.4472631" ]
0.7330531
0
Two packages with same filename should be equal
def test_equality(self): p1 = make_package(filename="foo") p2 = make_package(filename="foo") self.assertEqual(hash(p1), hash(p2)) self.assertEqual(p1, p2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_equal(self):\n p1 = make_package(filename=\"foobar\")\n p2 = make_package(filename=\"foo\")\n self.assertNotEqual(hash(p1), hash(p2))\n self.assertNotEqual(p1, p2)", "def test_multiple_packages_same_version(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = []\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n cache.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n cache.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 2)\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 2)", "def test_IsPackage_files():\n with tempfile.NamedTemporaryFile() as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".txt\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".tar.bz2\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".dpack.tar.bz2\") as f:\n assert dpack._IsPackage(pathlib.Path(f.name))", "def assertFilesEqual(self, name1, name2, msg=None):\n self.assertEqual(name1.getContent(), name2.getContent(), msg)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_2_recv_compare(self):\n\n f = fmri.PkgFmri(self.published[4], None)\n\n # First, pkgrecv the pkg to a directory. 
The files are\n # kept compressed so they can be compared directly to the\n # repository's internal copy.\n self.pkgrecv(self.durl1, \"--raw -k -d {0} {1}\".format(self.tempdir,\n f))\n\n # Next, compare the manifests.\n orepo = self.get_repo(self.dpath1)\n old = orepo.manifest(f)\n new = os.path.join(self.tempdir, f.get_dir_path(), \"manifest\")\n\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))\n\n # Next, load the manifest.\n m = manifest.Manifest()\n raw = open(new, \"rb\").read()\n m.set_content(raw)\n\n # Next, compare the package actions that have data.\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n old = orepo.file(a.hash)\n new = os.path.join(self.tempdir,\n f.get_dir_path(), a.hash)\n self.assertNotEqual(old, new)\n self.assertEqual(misc.get_data_digest(old,\n hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new,\n hash_func=DEFAULT_HASH_FUNC))\n\n # Second, pkgrecv to the pkg to a file repository.\n npath = tempfile.mkdtemp(dir=self.test_root)\n self.pkgsend(\"file://{0}\".format(npath),\n \"create-repository --set-property publisher.prefix=test1\")\n self.pkgrecv(self.durl1, \"-d file://{0} {1}\".format(npath, f))\n\n # Next, compare the manifests (this will also only succeed if\n # the fmris are exactly the same including timestamp).\n nrepo = self.get_repo(npath)\n old = orepo.manifest(f)\n new = nrepo.manifest(f)\n\n self.debug(old)\n self.debug(new)\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))\n\n # Next, load the manifest.\n m = manifest.Manifest()\n raw = open(new, \"rb\").read()\n m.set_content(raw)\n\n # Next, compare the package actions that have data.\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n old = orepo.file(a.hash)\n new = nrepo.file(a.hash)\n self.assertNotEqual(old, new)\n self.assertEqual(misc.get_data_digest(old,\n hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new,\n hash_func=DEFAULT_HASH_FUNC))\n\n # Third, pkgrecv to the pkg to a http repository from the\n # file repository from the last test.\n self.pkgrecv(\"file://{0}\".format(npath), \"-d {0} {1}\".format(\n self.durl2, f))\n orepo = nrepo\n\n # Next, compare the manifests (this will also only succeed if\n # the fmris are exactly the same including timestamp).\n nrepo = self.get_repo(self.dpath2)\n old = orepo.manifest(f)\n new = nrepo.manifest(f)\n\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))\n\n # Next, load the manifest.\n m = manifest.Manifest()\n raw = open(new, \"rb\").read()\n m.set_content(raw)\n\n # Next, compare the package actions that have data.\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n old = orepo.file(a.hash)\n new = nrepo.file(a.hash)\n self.assertNotEqual(old, new)\n self.assertEqual(\n misc.get_data_digest(old,\n hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new,\n hash_func=DEFAULT_HASH_FUNC))\n\n # Fourth, create an image and verify that the sent package is\n # seen by the client.\n self.wait_repo(self.dpath2)\n self.image_create(self.durl2, prefix=\"test1\")\n self.pkg(\"info -r [email protected]\")\n\n # Fifth, pkgrecv the pkg to a file repository and 
compare the\n # manifest of a package published with the scheme (pkg:/) given.\n f = fmri.PkgFmri(self.published[6], None)\n npath = tempfile.mkdtemp(dir=self.test_root)\n self.pkgsend(\"file://{0}\".format(npath),\n \"create-repository --set-property publisher.prefix=test1\")\n self.pkgrecv(self.durl1, \"-d file://{0} {1}\".format(npath, f))\n\n # Next, compare the manifests (this will also only succeed if\n # the fmris are exactly the same including timestamp).\n orepo = self.get_repo(self.dpath1)\n nrepo = self.get_repo(npath)\n old = orepo.manifest(f)\n new = nrepo.manifest(f)\n\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))", "def test_distinct(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def _verify_archive_equality(self, file1, file2):\r\n temp_dir_1 = mkdtemp()\r\n temp_dir_2 = mkdtemp()\r\n try:\r\n extract_source(file1, temp_dir_1)\r\n extract_source(file2, temp_dir_2)\r\n return directories_equal(temp_dir_1, temp_dir_2)\r\n\r\n finally:\r\n shutil.rmtree(temp_dir_1)\r\n shutil.rmtree(temp_dir_2)", "def check_pkg_consistency():\n pass", "def test_equality(self):\n tools.eq_(self.old_manifest, load_manifest(StringIO(old_manifest)))", "def test_component_resolution_same_file():\n\n assert snippet_eval(ComponentSnippet(modulea.ComponentResolutionSameFile())) == \"hi\\n\"", "def _compare_files(self, filename1, filename2):\n if filename1.find('.') != -1 and filename2.find('.') != -1:\n basename1, ext1 = filename1.rsplit('.', 1)\n basename2, ext2 = filename2.rsplit('.', 1)\n\n if basename1 == basename2:\n if (ext1 in self.HEADER_EXTENSIONS and\n ext2 in self.IMPL_EXTENSIONS):\n return -1\n elif (ext1 in self.IMPL_EXTENSIONS and\n ext2 in self.HEADER_EXTENSIONS):\n return 1\n\n return cmp(filename1, filename2)", "def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def _fuzzy_module_name_eq(self, module, package_name):\n return ((module.__name__ == package_name) or \n (module.__name__.replace('_pb2', '') == package_name) or \n (module.DESCRIPTOR.name == package_name) or \n (module.DESCRIPTOR.name.replace('.proto', '') == package_name) or\n (module.DESCRIPTOR.package == package_name))", "def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result", "def testTwo(self):\n ret = resolveNames(\"qconfname_CH_1.QCONF\", (\"_CH_2\", \"_CH_1\", \"_CH_1_snakemask\"))\n self.assertTupleEqual(ret, (\"qconfname_CH_2.tif\", \"qconfname_CH_1.tif\", \"qconfname_CH_1_snakemask.tif\"))", "def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n 
make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.distinct()\n\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def test_all_versions(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def _compare_files(self, first_file, second_file):\n\n self.log.info('-' * 80)\n self.log.info('Compare files')\n\n code, out = cmd_exec(['cmp', str(first_file), str(second_file)], shell=False, log=self.log)\n if code:\n self.log.warning('md5 checksum IS NOT SAME with ffmpeg sw decode')\n self.log.warning(out)\n return False\n\n self.log.info('md5 checksum IS SAME with ffmpeg sw decode')\n return True", "def testDuplicateFiles(self):\n\n INPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 foo/../file1_1.cc\nFILE 2 bar/../file1_1.cc\nFILE 3 baz/../file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 2\n1008 4 46 3\n100c 4 44 1\n\"\"\"\n EXPECTED_OUTPUT = \\\n\"\"\"MODULE windows x86 111111111111111111111111111111111 module1.pdb\nINFO CODE_ID FFFFFFFF module1.exe\nFILE 1 file1_1.cc\nFUNC 1000 c 0 Function1_1\n1000 8 45 1\n1008 4 46 1\n100c 4 44 1\n\"\"\"\n self.assertParsed(INPUT, [], EXPECTED_OUTPUT)", "def testAbsolutePackageImport(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(self.temp_fake_aa, '__init__')\r\n aaeggs = os.path.join(self.temp_fake_aa, 'eggs.py')\r\n self.assertEqual(expected, modulefinder.get_module_filename('aa', aaeggs))", "def bless_output(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if path.exists(expected_output_file):\n os.unlink(expected_output_file)\n os.rename(actual_output_file, expected_output_file)", "def get_changed_packages(blob_name1, blob_name2, package_list):\n changed_files = check_output(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n changed_files = changed_files.split('\\n')\n\n result = set()\n for filename in changed_files:\n file_root = rootname(filename)\n if file_root in package_list:\n result.add(file_root)\n\n return sorted(result)", "def test_same_models_are_equal(dbdiskrepo):\n fit1 = fit_model()\n fit2 = fit_model()\n assert fit1.artifact.id == fit2.artifact.id\n assert fit1.artifact.value_id == fit2.artifact.value_id\n assert hash(fit1) == hash(fit2)", "def testRelativePackageImport(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(self.temp_fake_aa, '__init__')\r\n aaeggs = os.path.join(self.temp_fake_aa, 'eggs.py')\r\n self.assertEqual(expected, modulefinder.get_module_filename('aa', aaeggs))", "def samefile(self, other):\n other = os.fspath(other)\n if not isabs(other):\n other = abspath(other)\n if self == other:\n return True\n if not hasattr(os.path, \"samefile\"):\n return False\n return error.checked_call(os.path.samefile, self.strpath, other)", "def test_packages():\n # Currently assume s`pyproject.toml` is at the same level as `graphblas_algorithms` folder.\n # This probably isn't always True, and we can probably do a better job of finding it.\n path = pathlib.Path(ga.__file__).parent\n pkgs = [f\"graphblas_algorithms.{x}\" for x in 
setuptools.find_packages(path)]\n pkgs.append(\"graphblas_algorithms\")\n pkgs.sort()\n pyproject = path.parent / \"pyproject.toml\"\n if not pyproject.exists():\n pytest.skip(\"Did not find pyproject.toml\")\n with pyproject.open(\"rb\") as f:\n pkgs2 = sorted(tomli.load(f)[\"tool\"][\"setuptools\"][\"packages\"])\n assert (\n pkgs == pkgs2\n ), \"If there are extra items on the left, add them to pyproject.toml:tool.setuptools.packages\"", "def test_file_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"bar\")\n time.sleep(0.1)\n self.write_file(dir1, \"foo\", \"baz\")\n self.sync_all()\n # File with later mtime wins\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")" ]
[ "0.71986055", "0.63136137", "0.6229243", "0.61793065", "0.6143484", "0.6143484", "0.6143484", "0.6113326", "0.61121", "0.6060622", "0.6058038", "0.60573334", "0.6034512", "0.6006042", "0.59972495", "0.5978069", "0.5958835", "0.5921375", "0.5919401", "0.58932966", "0.5881119", "0.5870286", "0.5854532", "0.58421403", "0.58260024", "0.5794786", "0.5784806", "0.5782449", "0.57610995", "0.57584876" ]
0.7288239
0
Two packages with different filenames should not be equal
def test_not_equal(self): p1 = make_package(filename="foobar") p2 = make_package(filename="foo") self.assertNotEqual(hash(p1), hash(p2)) self.assertNotEqual(p1, p2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_equality(self):\n p1 = make_package(filename=\"foo\")\n p2 = make_package(filename=\"foo\")\n self.assertEqual(hash(p1), hash(p2))\n self.assertEqual(p1, p2)", "def test_IsPackage_files():\n with tempfile.NamedTemporaryFile() as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".txt\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".tar.bz2\") as f:\n assert not dpack._IsPackage(pathlib.Path(f.name))\n with tempfile.NamedTemporaryFile(suffix=\".dpack.tar.bz2\") as f:\n assert dpack._IsPackage(pathlib.Path(f.name))", "def _verify_archive_equality(self, file1, file2):\r\n temp_dir_1 = mkdtemp()\r\n temp_dir_2 = mkdtemp()\r\n try:\r\n extract_source(file1, temp_dir_1)\r\n extract_source(file2, temp_dir_2)\r\n return directories_equal(temp_dir_1, temp_dir_2)\r\n\r\n finally:\r\n shutil.rmtree(temp_dir_1)\r\n shutil.rmtree(temp_dir_2)", "def test_equality(self):\n tools.eq_(self.old_manifest, load_manifest(StringIO(old_manifest)))", "def assertFilesEqual(self, name1, name2, msg=None):\n self.assertEqual(name1.getContent(), name2.getContent(), msg)", "def test_unequal(self):\n\n qs = FBO(path=TEST_FILES_ROOT, glob='*.md').order_by('name')\n # There are four of these.\n for a, b in combinations(qs.all(), 2):\n self.assertNotEqual(a, b)", "def _compare_files(self, filename1, filename2):\n if filename1.find('.') != -1 and filename2.find('.') != -1:\n basename1, ext1 = filename1.rsplit('.', 1)\n basename2, ext2 = filename2.rsplit('.', 1)\n\n if basename1 == basename2:\n if (ext1 in self.HEADER_EXTENSIONS and\n ext2 in self.IMPL_EXTENSIONS):\n return -1\n elif (ext1 in self.IMPL_EXTENSIONS and\n ext2 in self.HEADER_EXTENSIONS):\n return 1\n\n return cmp(filename1, filename2)", "def test_2_recv_compare(self):\n\n f = fmri.PkgFmri(self.published[4], None)\n\n # First, pkgrecv the pkg to a directory. 
The files are\n # kept compressed so they can be compared directly to the\n # repository's internal copy.\n self.pkgrecv(self.durl1, \"--raw -k -d {0} {1}\".format(self.tempdir,\n f))\n\n # Next, compare the manifests.\n orepo = self.get_repo(self.dpath1)\n old = orepo.manifest(f)\n new = os.path.join(self.tempdir, f.get_dir_path(), \"manifest\")\n\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))\n\n # Next, load the manifest.\n m = manifest.Manifest()\n raw = open(new, \"rb\").read()\n m.set_content(raw)\n\n # Next, compare the package actions that have data.\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n old = orepo.file(a.hash)\n new = os.path.join(self.tempdir,\n f.get_dir_path(), a.hash)\n self.assertNotEqual(old, new)\n self.assertEqual(misc.get_data_digest(old,\n hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new,\n hash_func=DEFAULT_HASH_FUNC))\n\n # Second, pkgrecv to the pkg to a file repository.\n npath = tempfile.mkdtemp(dir=self.test_root)\n self.pkgsend(\"file://{0}\".format(npath),\n \"create-repository --set-property publisher.prefix=test1\")\n self.pkgrecv(self.durl1, \"-d file://{0} {1}\".format(npath, f))\n\n # Next, compare the manifests (this will also only succeed if\n # the fmris are exactly the same including timestamp).\n nrepo = self.get_repo(npath)\n old = orepo.manifest(f)\n new = nrepo.manifest(f)\n\n self.debug(old)\n self.debug(new)\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))\n\n # Next, load the manifest.\n m = manifest.Manifest()\n raw = open(new, \"rb\").read()\n m.set_content(raw)\n\n # Next, compare the package actions that have data.\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n old = orepo.file(a.hash)\n new = nrepo.file(a.hash)\n self.assertNotEqual(old, new)\n self.assertEqual(misc.get_data_digest(old,\n hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new,\n hash_func=DEFAULT_HASH_FUNC))\n\n # Third, pkgrecv to the pkg to a http repository from the\n # file repository from the last test.\n self.pkgrecv(\"file://{0}\".format(npath), \"-d {0} {1}\".format(\n self.durl2, f))\n orepo = nrepo\n\n # Next, compare the manifests (this will also only succeed if\n # the fmris are exactly the same including timestamp).\n nrepo = self.get_repo(self.dpath2)\n old = orepo.manifest(f)\n new = nrepo.manifest(f)\n\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))\n\n # Next, load the manifest.\n m = manifest.Manifest()\n raw = open(new, \"rb\").read()\n m.set_content(raw)\n\n # Next, compare the package actions that have data.\n for atype in (\"file\", \"license\"):\n for a in m.gen_actions_by_type(atype):\n if not hasattr(a, \"hash\"):\n continue\n\n old = orepo.file(a.hash)\n new = nrepo.file(a.hash)\n self.assertNotEqual(old, new)\n self.assertEqual(\n misc.get_data_digest(old,\n hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new,\n hash_func=DEFAULT_HASH_FUNC))\n\n # Fourth, create an image and verify that the sent package is\n # seen by the client.\n self.wait_repo(self.dpath2)\n self.image_create(self.durl2, prefix=\"test1\")\n self.pkg(\"info -r [email protected]\")\n\n # Fifth, pkgrecv the pkg to a file repository and 
compare the\n # manifest of a package published with the scheme (pkg:/) given.\n f = fmri.PkgFmri(self.published[6], None)\n npath = tempfile.mkdtemp(dir=self.test_root)\n self.pkgsend(\"file://{0}\".format(npath),\n \"create-repository --set-property publisher.prefix=test1\")\n self.pkgrecv(self.durl1, \"-d file://{0} {1}\".format(npath, f))\n\n # Next, compare the manifests (this will also only succeed if\n # the fmris are exactly the same including timestamp).\n orepo = self.get_repo(self.dpath1)\n nrepo = self.get_repo(npath)\n old = orepo.manifest(f)\n new = nrepo.manifest(f)\n\n self.assertEqual(\n misc.get_data_digest(old, hash_func=DEFAULT_HASH_FUNC),\n misc.get_data_digest(new, hash_func=DEFAULT_HASH_FUNC))", "def check_pkg_consistency():\n pass", "def test_MergeManifests_missing_files():\n d1 = dpack_pb2.DataPackage()\n f1 = d1.file.add()\n f1.relative_path = \"a\"\n f1.comment = \"abc\"\n d2 = dpack_pb2.DataPackage()\n f2 = d2.file.add()\n f2.relative_path = \"b\"\n f2.comment = \"def\"\n dpack.MergeManifests(d1, d2)\n assert d1.file[0].comment == \"abc\"\n assert d2.file[0].comment == \"def\"", "def _compare_files(self, first_file, second_file):\n\n self.log.info('-' * 80)\n self.log.info('Compare files')\n\n code, out = cmd_exec(['cmp', str(first_file), str(second_file)], shell=False, log=self.log)\n if code:\n self.log.warning('md5 checksum IS NOT SAME with ffmpeg sw decode')\n self.log.warning(out)\n return False\n\n self.log.info('md5 checksum IS SAME with ffmpeg sw decode')\n return True", "def test_multiple_packages_same_version(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = []\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n cache.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n cache.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 2)\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 2)", "def bless_output(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n expected_output_file = path.splitext(self.source_name)[0] + \".expected\"\n if path.exists(expected_output_file):\n os.unlink(expected_output_file)\n os.rename(actual_output_file, expected_output_file)", "def test_file_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"bar\")\n time.sleep(0.1)\n self.write_file(dir1, \"foo\", \"baz\")\n self.sync_all()\n # File with later mtime wins\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")", "def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result", "def get_changed_packages(blob_name1, blob_name2, package_list):\n changed_files = check_output(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n changed_files = changed_files.split('\\n')\n\n result = set()\n for filename in changed_files:\n file_root = rootname(filename)\n if file_root in package_list:\n result.add(file_root)\n\n return sorted(result)", "def test_multiple_packages_same_version(self):\n with 
patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def test_multiple_packages_same_version(self):\n with patch.object(self.request.access, \"allow_overwrite\", []):\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n self.db.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n self.db.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 2)", "def compare_files(file1, file2):\n return filecmp.cmp(file1, file2)", "def test_distinct(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # Now let's do some db sanity checks.\r\n self._delicious_xml_data_test()", "def testTwo(self):\n ret = resolveNames(\"qconfname_CH_1.QCONF\", (\"_CH_2\", \"_CH_1\", \"_CH_1_snakemask\"))\n self.assertTupleEqual(ret, (\"qconfname_CH_2.tif\", \"qconfname_CH_1.tif\", \"qconfname_CH_1_snakemask.tif\"))", "def check_duplicate(fp1, fp2):\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False", "def diff_bundle_contents():\n dir_package = os.listdir(ARCHIVE_TARGET)\n dir_setup = os.listdir(MODEL_TARGET)\n if dir_package != dir_setup:\n return True\n for bundle in dir_package:\n os.chdir(ARCHIVE_TARGET)\n subprocess.run([\"git\", \"clone\", bundle])\n os.chdir(\"..\")\n os.chdir(MODEL_TARGET)\n subprocess.run([\"git\", \"clone\", bundle])\n os.chdir(\"..\")\n dcmp = filecmp.dircmp(\n join(ARCHIVE_TARGET, bundle[: bundle.find(\".bundle\")]),\n join(MODEL_TARGET, bundle[: bundle.find(\".bundle\")]),\n )\n diff = Diff(dcmp)\n if diff.run():\n return True\n return False", "def testAbsolutePackageImport(self):\r\n self.buildTempDirs()\r\n expected = os.path.join(self.temp_fake_aa, '__init__')\r\n aaeggs = os.path.join(self.temp_fake_aa, 'eggs.py')\r\n self.assertEqual(expected, modulefinder.get_module_filename('aa', aaeggs))", "def compare_manifest_files(manifest_a: str, manifest_b: str,\n ignored_attrs: Set[str]) -> ManifestChanges:\n e1 = ET.parse(manifest_a).getroot()\n e2 = ET.parse(manifest_b).getroot()\n return compare_manifest_elements(\n manifest_e1=e1, manifest_e2=e2, ignored_attrs=ignored_attrs)", "def samefile(self, other):\n other = os.fspath(other)\n if not 
isabs(other):\n other = abspath(other)\n if self == other:\n return True\n if not hasattr(os.path, \"samefile\"):\n return False\n return error.checked_call(os.path.samefile, self.strpath, other)", "def test_dupe_imports(self):\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n good_file = self._get_del_file()\r\n imp = Importer(good_file, username=u\"admin\")\r\n imp.process()\r\n\r\n # now let's do some db sanity checks\r\n self._delicious_data_test()", "def assert_pofile_same(self, pofile1, pofile2):\r\n po1 = polib.pofile(pofile1)\r\n po2 = polib.pofile(pofile2)\r\n self.assertEqual(po1, po2)" ]
[ "0.721788", "0.64885724", "0.6443223", "0.6327845", "0.6313415", "0.6254438", "0.62533826", "0.6172888", "0.6132945", "0.6088958", "0.59762913", "0.59170675", "0.59131634", "0.5901148", "0.5895722", "0.58880514", "0.58856195", "0.58856195", "0.58856195", "0.5827569", "0.5802238", "0.5801967", "0.5790191", "0.5786357", "0.5782563", "0.57795", "0.5766676", "0.5761199", "0.5759782", "0.5751234" ]
0.76380116
0
Uploading a package generates SHA256 and MD5 hashes
def test_upload_hash_generation(self): cache = DummyCache() pkg = cache.upload("a-1.tar.gz", BytesIO(b"test1234"), "a") self.assertEqual( pkg.data["hash_sha256"], "937e8d5fbb48bd4949536cd65b8d35c426b80d2f830c5c308e2cdec422ae2244", ) self.assertEqual(pkg.data["hash_md5"], "16d7a4fca7442dda3ad93c9a726597e4")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upload_prepend_hash(self):\n self.storage.prepend_hash = True\n package = make_package()\n data = StringIO()\n self.storage.upload(package, data)\n key = list(self.bucket.list())[0]\n\n pattern = r'^[0-9a-f]{4}/%s/%s$' % (re.escape(package.name),\n re.escape(package.filename))\n match = re.match(pattern, key.key)\n self.assertIsNotNone(match)", "def checksum(**kwargs):\n\n # remove secretkey from kwargs, lookup if missing\n secretkey = kwargs.pop('secretkey', resolve_secretkey())\n\n # sort the args, and concatenate them\n param_string = ''.join([''.join([str(x), str(y)])\n for x, y in sorted(kwargs.items())])\n\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def upload_package(self, __contents):\n raise NotImplementedError", "def install():\n execute(generate)\n execute(upload)", "def generate_checksum(artifact_folder):\n print(\"Generating checksum files...\")\n files_grabed = get_folder_files(artifact_folder, [\"*.jar\", \"*.pom\"])\n for file in files_grabed:\n file_name = os.path.basename(file)\n\n md5 = file.replace(file_name, file_name + \".md5\")\n print(\"--md5 file \" + md5)\n with open(md5, \"w\") as md5_file:\n md5_file.write(hashlib.md5(open(file, 'rb').read()).hexdigest())\n\n sha1 = file.replace(file_name, file_name + \".sha1\")\n print(\"--sha1 file \" + sha1)\n with open(sha1, \"w\") as sha1_file:\n sha1_file.write(hashlib.sha1(open(file, 'rb').read()).hexdigest())", "def generate_sum(file_path):\n #file = open(file_path, 'rb')\n #header = file.read()\n header = open(file_path, 'rb').read()\n suma_md5 = md5(header).hexdigest()\n return suma_md5", "def do_pack():\n local(\"sudo mkdir -p versions\")\n date_time = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n name_file = \"versions/web_static{}.tgz\".format(date_time)\n local(\"sudo tar -cvzf {} web_static\".format(name_file))\n return name_file", "def gen_sig():\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()", "def checksum(self, **kwargs):\n try:\n # if a secretkey is in **kwargs, use it, and remove it\n secretkey = kwargs['secretkey']\n del kwargs['secretkey']\n except KeyError:\n # if the kwargs lookup fails, get secretkey elsewhere\n secretkey = self.secretkey or resolve_secretkey()\n args = kwargs.items()\n args.sort()\n\n param_string = ''\n for key, value in args:\n param_string += str(key)\n param_string += str(value)\n return b64encode(str(new_hmac(secretkey, param_string, sha1).digest()))", "def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)", "def MD5(self) -> _n_0_t_3[_n_0_t_9]:", "def test_upload(self):\n package = make_package()\n datastr = 'foobar'\n data = StringIO(datastr)\n self.storage.upload(package, data)\n key = list(self.bucket.list())[0]\n self.assertEqual(key.get_contents_as_string(), datastr)\n self.assertEqual(key.get_metadata('name'), package.name)\n self.assertEqual(key.get_metadata('version'), package.version)", "def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"", "def file_checksum(filename):\n hash_md5 = hashlib.md5()\n with tf.gfile.Open(filename, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n f.close()\n return hash_md5.hexdigest()", "def _get_signature(value):\n mySha = hashlib.sha256()\n mySha.update(value)\n # print 
mySha.hexdigest()\n return mySha.hexdigest()", "def calculate_hash(self, include_md: bool = True) -> str:\n # sourcery skip: reintroduce-else, swap-if-else-branches, use-named-expression\n # BUF_SIZE is totally arbitrary,\n BUF_SIZE = 65536 * 16 # lets read stuff in 16 x 64kb chunks!\n\n file_hash = hashlib.sha1()\n # Stubs Only\n files = list((self.package_path).rglob(\"**/*.pyi\"))\n if include_md:\n files += (\n [self.package_path / \"LICENSE.md\"]\n + [self.package_path / \"README.md\"]\n # do not include [self.toml_file]\n )\n for file in sorted(files):\n # TODO: Extract function to allow for retry on file not found\n try:\n with open(file, \"rb\") as f:\n while True:\n data = f.read(BUF_SIZE)\n if not data:\n break\n file_hash.update(data)\n except FileNotFoundError:\n log.warning(f\"File not found {file}\")\n # ignore file not found errors to allow the hash to be created WHILE GIT / VIRUS SCANNERS HOLD LINGERING FILES\n return file_hash.hexdigest()", "def python_repo_hash_md5(root_dir: str, *, verbose: bool = False):\n m = hashlib.md5()\n for e in _collect_entries(root_dir, '.'):\n if verbose:\n log_info('Processing e', e)\n m.update(\n f\"path={e['path']}\\tisdir={e['isdir']}\\tsize={e['size']}\\tmode={e['mode']:03o}\\tmtime={e['mtime']}\\n\"\n .encode('UTF-8'))\n\n return m.hexdigest()", "def git_hash():\n if not exists('qmk_firmware'):\n checkout_qmk()\n\n return open('qmk_firmware/version.txt').read().strip()", "def test_upload(self):\n pkg = make_package(factory=SQLPackage)\n content = BytesIO(b\"test1234\")\n self.db.upload(pkg.filename, content, pkg.name, pkg.version)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)\n saved_pkg = self.sql.query(SQLPackage).first()\n self.assertEqual(saved_pkg, pkg)\n # If calculate hashes is on, it'll read the data\n # and rewrap with BytesIO\n self.storage.upload.assert_called_with(pkg, ANY)", "def _calculate_hash(files: Iterable[str], root: str) -> str:\n file_hash = hashlib.md5()\n for file_name in sorted(files):\n file_path = os.path.join(root, file_name)\n file_hash.update((file_name + \"\\0\").encode())\n with open(file_path, \"rb\") as file_:\n # pylint: disable=cell-var-from-loop\n for chunk in iter(lambda: file_.read(4096), \"\"):\n if not chunk:\n break\n file_hash.update(chunk)\n file_hash.update(\"\\0\".encode())\n\n return file_hash.hexdigest()", "def fingerprint():\n files = (glob.glob(base_dir + '**/*.html') +\n glob.glob(base_dir + '*.html') +\n glob.glob(base_dir + 'core.js'))\n\n md5s = OrderedDict()\n\n for fil in sorted(files):\n name = fil[len(base_dir):]\n with open(fil) as fp:\n md5 = hashlib.md5(fp.read().encode('utf-8')).hexdigest()\n md5s[name] = md5\n\n template = \"\"\"\\\"\\\"\\\"DO NOT MODIFY. 
Auto-generated by script/fingerprint_frontend.\\\"\\\"\\\"\n\nFINGERPRINTS = {}\n\"\"\"\n\n result = template.format(json.dumps(md5s, indent=4))\n\n with open(fingerprint_file, 'w') as fp:\n fp.write(result)", "def generate_hash(*args):\n key = bytes(' '.join(args), 'utf_8')\n hashh = hashlib.md5()\n hashh.update(key)\n return hashh.hexdigest()", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def checksumFile(filename):\n return md5File(filename)", "def fsum(fpath):\n import hashlib\n import codecs\n with codecs.open(fpath, \"r\", \"utf-8\") as filep:\n buff = filep.read()\n cksum = hashlib.md5(buff.encode(\"utf-8\"))\n return cksum.hexdigest()", "def calchash(filename):\n sha = hashlib.sha1()\n with open(filename, 'rb') as f:\n sha.update(f.read())\n return sha", "def sha256Sum(self, data):\n data = str(data)\n m = hashlib.sha256()\n if os.path.isfile(data):\n try:\n f = file(data, 'rb')\n except:\n return 'ERROR: unable to open %s' % data\n while True:\n d = f.read(8096)\n if not d:\n break\n m.update(d)\n f.close()\n # Otherwise it could be either 1) a directory 2) miscellaneous data (like json)\n else:\n m.update(data)\n return m.hexdigest()", "def sha256sum(filename):\n content = 
open(filename, 'rb').read()\n sha256_obj = hashlib.sha256(content)\n return sha256_obj.hexdigest()" ]
[ "0.62484837", "0.5853526", "0.5780175", "0.57252145", "0.5716217", "0.5705437", "0.5700427", "0.56927264", "0.568528", "0.56655866", "0.56243956", "0.5618303", "0.5617467", "0.560571", "0.5599217", "0.5593012", "0.5587831", "0.55812234", "0.55801195", "0.55577385", "0.5551657", "0.5485627", "0.5456722", "0.5456505", "0.54536664", "0.5452468", "0.54335594", "0.5430929", "0.54232097", "0.54018575" ]
0.7278852
0
If allow_delete=[], packages cannot be deleted
def test_no_delete(self): request = DummyRequest() request.access = DummyAccess(request) cache = DummyCache(request) request.access.allow_delete = [] pkg = make_package() with self.assertRaises(ValueError): cache.delete(pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def can_delete(self):\r\n return True", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def remove_packages(self, packages):", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def clean_extra_package_managment_files():\n use_pipenv = '{{cookiecutter.use_pipenv}}'\n to_delete = []\n\n if use_pipenv == 'yes':\n to_delete = to_delete + ['requirements.txt', 'requirements']\n else:\n to_delete.append('Pipfile')\n\n try:\n for file_or_dir in to_delete:\n if os.path.isfile(file_or_dir):\n os.remove(file_or_dir)\n else:\n shutil.rmtree(file_or_dir)\n sys.exit(0)\n except OSError as e:\n sys.stdout.write(\n 'While attempting to remove file(s) an error occurred'\n )\n sys.stdout.write('Error: {}'.format(e))", "def test_delete(self):\n package = make_package()\n self.storage.upload(package, StringIO())\n self.storage.delete(package)\n keys = list(self.bucket.list())\n self.assertEqual(len(keys), 0)", "def test_delete_package(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n self.db.clear(pkgs[0])\n saved_pkgs = self.db.distinct()\n self.assertEqual(saved_pkgs, [\"mypkg2\"])\n summaries = self.db.summary()\n self.assertEqual(len(summaries), 1)", "def remove(self, packages):\n raise NotImplementedError()", "def test_delete(self):\n package = make_package()\n path = self.storage.get_path(package)\n os.makedirs(os.path.dirname(path))\n with open(path, 'w') as ofile:\n ofile.write('foobar')\n self.storage.delete(package)\n self.assertFalse(os.path.exists(path))", "def remove(self, packages):\n if packages:\n cmd = ['dnf', 'remove'] + list(packages)\n subprocess.Popen(cmd).wait()", "def test_delete(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.delete(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)\n self.storage.delete.assert_called_with(pkg)", "def __check_removed_permissions(self) -> None:\n for permission in Permission.objects.all():\n if not self.__is_permission_allowed_to_delete(permission):\n continue\n\n if self.__is_permission_in_groups(permission.codename):\n raise PermissionInUse(f'Permission {permission.codename} is used in groups. 
Delete it first.')\n\n permission.delete()\n\n self.stdout.write(f'Removed {permission.codename} permission')", "def can_fast_delete(self, *args, **kwargs):\n return False", "def test_package_can_not_upgraded_cause_required(self):\n with self.with_config_update():\n with patch(\n \"aea.cli.upgrade.ItemRemoveHelper.check_remove\",\n return_value=(\n set([PackageId(\"connection\", PublicId(\"test\", \"test\", \"0.0.1\"))]),\n set(),\n dict(),\n ),\n ), pytest.raises(\n ClickException,\n match=r\"Can not upgrade .* because it is required by '.*'\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def do_delete(self, arg):\n \treturn False", "def delete():", "def can_delete_families(self):\n # Implemented from template for\n # osid.resource.BinAdminSession.can_delete_bins\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n if self._catalog_session is not None:\n return self._catalog_session.can_delete_catalogs()\n return True", "def delete_data_package(self):\n try:\n bucket_name = app.config['S3_BUCKET_NAME']\n s3_client = app.config['S3']\n\n keys = []\n list_objects = s3_client.list_objects(Bucket=bucket_name,\n Prefix=self.build_s3_base_prefix())\n if list_objects is not None and 'Contents' in list_objects:\n for ob in s3_client \\\n .list_objects(Bucket=bucket_name,\n Prefix=self.build_s3_base_prefix())['Contents']:\n keys.append(dict(Key=ob['Key']))\n\n s3_client.delete_objects(Bucket=bucket_name, Delete=dict(Objects=keys))\n return True\n except Exception as e:\n app.logger.error(e)\n return False", "def delete_pip3_multiple_version_site_packages():\n yield\n shutil.rmtree(SitePackages().path)", "def remove_packages(self, ref, package_ids=None):\n self.check_credentials()\n if not package_ids:\n url = self.conans_router.remove_all_packages(ref)\n self.requester.delete(url, auth=self.auth, headers=self.custom_headers,\n verify=self.verify_ssl)\n return\n for pid in package_ids:\n pref = PackageReference(ref, pid)\n url = self.conans_router.remove_package(pref)\n self.requester.delete(url, auth=self.auth, headers=self.custom_headers,\n verify=self.verify_ssl)", "def __do_package_delete(item):\n\n file_path = DTF_PACKAGES_DIR + item.install_name\n\n if utils.delete_tree(file_path) != 0:\n log.e(TAG, \"Error removing tree! 
Continuing.\")\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM packages '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n conn.commit()\n\n return 0", "def test_clear(self):\n self.request.access.allow_delete = [\"everyone\"]\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n self.db.delete(pkg)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 0)\n count = self.engine.scan(PackageSummary).count()\n self.assertEqual(count, 0)", "def remove():\n run('pew rm {0}'.format(package_name()))", "def test_clear(self):\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n transaction.commit()\n self.request.access.can_delete_package = lambda: True\n self.sql.add(pkg)\n self.db.delete(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 0)", "def delete(self):\n self.package = None", "def test_delete_run(self):\n pass", "def UnlinkPackages():\n for path in packagesToUnlink:\n if dirMod.isFolder(path):\n dirMod.deleteFolder(path)\n elif dirMod.isFile(path):\n dirMod.deleteFile(path)\n else:\n print(errMod.formatError(\"Sequestrum\", \"uwu This was not supposed to happen... uwu\"))", "def delete(self) -> bool:\n return False", "def delete_files_for_package(self, package):\n files = self.find_files_for_package(package, absolute_path=True)\n if not files:\n return\n path = os.path.dirname(files[0])\n for file in files:\n if os.path.exists(file):\n log.debug(\"Removing file '%s'\" % (file))\n os.unlink(file)\n if os.path.isdir(path) and os.listdir(path) == []:\n log.debug(\"Remove empty package repository '%s'\" % (path))\n os.rmdir(path)", "def _remove_extra_packages(frozen_pkgs, ret, **kwargs):\n pkgs = __salt__[\"pkg.list_pkgs\"](**kwargs)\n extra_pkgs = set(pkgs) - set(frozen_pkgs)\n for pkg in extra_pkgs:\n try:\n __salt__[\"pkg.remove\"](name=pkg, **kwargs)\n ret[\"pkgs\"][\"remove\"].append(pkg)\n log.info(\"Removed extra package %s\", pkg)\n except Exception as e: # pylint: disable=broad-except\n msg = \"Error removing %s package: %s\"\n log.error(msg, pkg, e)\n ret[\"comment\"].append(msg % (pkg, e))" ]
[ "0.6490357", "0.64601755", "0.6455567", "0.64492196", "0.6326829", "0.6319609", "0.6257597", "0.6189358", "0.61655724", "0.60241777", "0.5932366", "0.5870834", "0.5808837", "0.5798392", "0.57893324", "0.57326317", "0.5730256", "0.5702716", "0.56959826", "0.56811345", "0.5677903", "0.56750727", "0.56660795", "0.56583583", "0.56441206", "0.5636058", "0.5630814", "0.56257343", "0.55637157", "0.5537929" ]
0.66625583
0
Don't reload the cache if it's not necessary
def test_no_reload_if_needed(self): cache = DummyCache() cache.reload_from_storage = MagicMock() cache.distinct = MagicMock() cache.distinct.return_value = ["hi"] cache.reload_if_needed() self.assertFalse(cache.reload_from_storage.called)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_cached(self):\n return False", "def maybe_refresh(self, name=None):\n now = time.time()\n if self.last_load is None or (now - self.last_load) > self.tfs.cache_validity:\n self.load(name)", "def mark_if_cached(self, args):\n pass", "def reload_cache(self):\n self.data = self.read_data_cache()", "def clean_cache(self):\n return", "def invalidate_cache(self):\n #self.objects.objects = []\n return True", "def _clear_cache(self):\n self.cache = {}", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def test_cache_without_data_change(self):\n self.assertTrue(self.host_updater.refresh_cache())\n\n self.assertFalse(self.host_updater.refresh_cache())", "def use_cached_files(self, cache_key):\r\n pass", "def write_to_cache(self):\n return False", "def _refresh_cache(self, data_dict):\r\n pass", "def test_reload_if_needed(self):\n cache = DummyCache()\n cache.reload_from_storage = MagicMock()\n cache.reload_if_needed()\n self.assertTrue(cache.reload_from_storage.called)", "def set_emptying_cache():\r\n from pylons import g\r\n from r2.lib.cache import SelfEmptyingCache\r\n g.cache.caches = [SelfEmptyingCache(),] + list(g.cache.caches[1:])", "def cache_clear(self):\n\t\tself.__cache = {}", "def reset_cache():\n global _CACHE\n _CACHE.clear()", "def clear_cache(self):\n pass", "def clear_cache():\n # TODO\n pass", "def decache(self):", "def reset_cache(self):\n self._cache_complete = False\n self._cache = {}\n self._catcache = {}", "def disable_caching(self):\n\n def after_request(r: flask.Response):\n if 'Cache-Control' not in r.headers:\n r.headers['Cache-Control'] = 'no-store'\n return r\n\n self.after_request(after_request)", "def toggle_caching(on=None):\n global DISABLE_CACHING\n if on is None:\n DISABLE_CACHING = not DISABLE_CACHING\n else:\n DISABLE_CACHING = bool(on)", "def _clear_cache(self):\n\n self._cache = dict()", "def refresh_cache_file(form, model, is_created):\n common.save_serialized_file()\n app.global_content = common.load_cached()", "def _retrieveCachedData(self):", "def test_client_max_age_0(self, sess):\r\n print('first request')\r\n r = sess.get(self.url)\r\n assert self.cache.get(self.url) == r.raw\r\n\r\n print('second request')\r\n r = sess.get(self.url, headers={'Cache-Control': 'max-age=0'})\r\n\r\n # don't remove from the cache\r\n assert self.cache.get(self.url)\r\n assert not r.from_cache", "def reset_cache():\n setup_cache({})\n yield # test\n setup_cache({})", "def _purge():\r\n _cache.clear()", "def _check_cache(self):\n return os.path.exists(self._cache_key)", "def enable_caching_acts_data() -> bool:\n return True" ]
[ "0.71614826", "0.7092918", "0.70813805", "0.69887173", "0.69872105", "0.6943646", "0.6940279", "0.693159", "0.69176036", "0.69003165", "0.6895797", "0.68905365", "0.6876613", "0.6831341", "0.6824815", "0.6815882", "0.6813079", "0.68049896", "0.6743433", "0.66721517", "0.66212654", "0.6615578", "0.66134375", "0.66049117", "0.6585611", "0.6561431", "0.65539366", "0.6538249", "0.653093", "0.65262437" ]
0.7300289
0
save() can store packages with unicode in the names
def test_save_unicode(self): pkg = make_package("mypackage™", factory=SQLPackage) self.db.save(pkg) count = self.sql.query(SQLPackage).count() self.assertEqual(count, 1) saved_pkg = self.sql.query(SQLPackage).first() self.assertEqual(saved_pkg, pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save_pkgs(self, *pkgs):\n for pkg in pkgs:\n self.engine.save(pkg)\n summary = PackageSummary(pkg)\n self.engine.save(summary, overwrite=True)", "def save_as_data_packages(path):\n\n logger.info(f'Saving as data packages at {path}')\n def f(row):\n package = Package()\n logger.debug(f'Resource {row}')\n # TODO check this, I'm learning datapackages.\n resource = Resource({'data': row})\n resource.infer() # adds \"name\": \"inline\"\n if not resource.valid:\n raise Exception('Invalid resource')\n\n encoded_identifier = helpers.encode_identifier(identifier=row['identifier'])\n\n package.add_resource(descriptor=resource.descriptor)\n filename = f'data-json-{encoded_identifier}.json'\n package_path = os.path.join(path, filename)\n\n # no not rewrite if exists\n if not os.path.isfile(package_path):\n package.save(target=package_path)\n\n return f", "def save_books(collection_of_books_as_str: str, file_name: str) -> None:\r\n with open(file_name, \"w\", encoding='windows-1251') as save_file:\r\n save_file.write(collection_of_books_as_str)\r\n\r\n print(\"All the changes were successfully saved.\\n\"\r\n \"Thanks for choosing us! Hope to see you again.\")", "def _store_package_metadata(self):", "def save_konzepte(self, _):\n konzeptnames = []\n for x in self.checkbox.GetCheckedItems():\n konzeptnames.append(self.choices[x])\n konzeptnames.append(\"_\")\n for k, v in self.konzepte.items():\n\n konzeptnames.append(k)\n konzeptnames.append(\"_\")\n str_konzeptnames = ''.join(konzeptnames)\n str_konzeptnames = str_konzeptnames[:len(str_konzeptnames) - 1] # um den letzen Unterstrich zu löschen\n str_konzeptnames = str_konzeptnames.replace(\"/\", \"-\") # um keinen subfolder zu erzeugen (Error)\n\n new_filename = model.RECHNUNGSTYP + \"_\" + str_konzeptnames\n\n filedlg = wx.FileDialog(self.p, \"Save Konzept\", defaultFile=new_filename, style=wx.FD_SAVE)\n filedlg.ShowModal()\n filepath = filedlg.GetPath()\n\n if filepath:\n if model.RECHNUNGSTYP == \"INST\":\n self.get_inst_konzepte()\n else:\n self.new_get_konzept()\n\n with open(filepath, \"w\") as outfile:\n json.dump(self.konzepte, outfile, default=lambda o: o.__dict__)", "def save(self, file_name):\n invalid_characters = ['#','%','&','{','}','\\\\','<','>','*','?','/','^','$','!','\\'','\\\"',':','@','+',\"`\",'|','=','~']\n if len(file_name) == 0:\n message = \"The import name cannot be empty\"\n SaveError(self, message)\n elif any(invalid_char in file_name for invalid_char in invalid_characters):\n used_invalid_chars = [invalid_char for invalid_char in invalid_characters if invalid_char in file_name]\n display_text = \",\".join(used_invalid_chars)\n message = \"The import name cannot contain the character(s) \\n \" + display_text\n SaveError(self, message)\n else:\n if(self.file_exists(file_name)):\n SaveOverwrite(self, file_name)\n else:\n self.create_json_file(file_name)\n self.destroy()", "def save():", "def test_save(self):\n pkg = make_package(factory=SQLPackage)\n self.db.save(pkg)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)\n saved_pkg = self.sql.query(SQLPackage).first()\n self.assertEqual(saved_pkg, pkg)", "def add_package_name(self, package, name, seen):\n with self._conn.begin():\n self._conn.execute(\n \"VALUES (add_package_name(%s, %s, %s))\",\n (package, name, seen.astimezone(UTC).replace(tzinfo=None)))", "def save_as(self, filename):\n assert type(filename) == str, 'ERROR: filename should be type str'\n if '.pkl' in filename:\n with open(filename, 'wb') as f:\n dill.dump(self, f)\n else:\n 
with open(filename + '.pkl', 'wb') as f:\n dill.dump(self, f)", "def save_project(uid, song_notes, author_name, creation_date, project_name):", "def package_name(string):\n return 'USymbol' + convert_name(string, False)", "def save(self, handler, name):", "def save(self):\n if PYTHON3:\n fileobj = open(self.filename, 'w', encoding=self.ENCODING, errors=\"replace\")\n else:\n fileobj = open(self.filename, 'w')\n self.save_to_fileobj(fileobj)\n fileobj.close()", "def _save(self, name, content):\n cloud_obj = self.container.create_object(name)\n mimetype, _ = mimetypes.guess_type(name)\n cloud_obj.content_type = mimetype\n cloud_obj.send(content)\n return name", "def saveD(p, D, text):", "def store(self, filename):", "def default_save(self,suffix=EMPTYCHAR,extra=EMPTYCHAR):\r\n\r\n pass", "def save(self, export_path: str):", "def save(self, file):\n pkgng_pkg = pptx.packaging.Package().marshal(self)\n pkgng_pkg.save(file)", "def save(self, fname):\n pass", "def saveas(self, name):\n self.filename = name\n self.save()", "def save(self, path: str):\n\n\t\tinfo_dict = {\n\t\t\t\"n_gram_size\": self.n_gram_size,\n\t\t\t\"caseless\": self.caseless,\n\t\t\t\"ignore_punctuation\": self.ignore_punctuation,\n\t\t\t\"add_pos_tags\": self.add_pos_tags,\n\t\t\t\"uses_lemma\": self.uses_lemma,\n\t\t\t\"uses_sentences\": self.uses_sentences\n\t\t}\n\n\t\twith open(path, \"wt\", encoding=\"utf8\") as f:\n\t\t\tjson.dump(info_dict, f)", "def test_unicode_errors_db(self):\n order = OrderTest.create_order_1()\n book = models.Book.get_book('1781101361')\n book.order = order\n book.save()", "def __unicode__(self):\n # TODO: Curently this just stores/returns the file path.\n return unicode(self.path).encode('utf-8')", "def upload_package(self, __contents):\n raise NotImplementedError", "def save(self, filename):\n pass", "def save(item,name):\n\n file = open(name,'wb')\n dump(item,file)\n file.close()", "def saveVar(var,name):\n with open(name+'.pickle','wb') as fl:\n pickle.dump(var,fl)", "def save(self, filename):\n pass" ]
[ "0.5931251", "0.5794059", "0.57553834", "0.5731187", "0.57028395", "0.5587278", "0.5544812", "0.54981047", "0.5447239", "0.5393025", "0.5386598", "0.5382567", "0.531384", "0.5301667", "0.52455693", "0.52381504", "0.5207928", "0.517478", "0.5152811", "0.5141345", "0.5124013", "0.51071185", "0.509909", "0.5090802", "0.5069014", "0.50549185", "0.5047922", "0.5040995", "0.50315785", "0.5026397" ]
0.7716409
0
reload_from_storage() inserts packages into the database
def test_reload(self): keys = [ make_package(factory=SQLPackage), make_package( "mypkg2", "1.3.4", "my/other/path", factory=SQLPackage, hash_md5="md5", hash_sha256="sha256", ), ] self.storage.list.return_value = keys self.db.reload_from_storage() all_pkgs = self.sql.query(SQLPackage).all() self.assertCountEqual(all_pkgs, keys)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reload(self):\n keys = [\n make_package(factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n for pkg in keys:\n self.assert_in_redis(pkg)", "def test_reload(self):\n keys = [\n make_package(factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n all_pkgs = self.engine.scan(DynamoPackage).all()\n self.assertCountEqual(all_pkgs, keys)", "def syncSave(self):\n for pyfile in self.files.values():\n pyfile.sync()\n\n for pypack in self.packages.values():\n pypack.sync()\n\n self.db.syncSave()", "def populateDB(db, packages):\n conn = sqlite3.connect(db)\n cur = conn.cursor()\n print \"opened db successfully\"\n for vul_id, package in packages.items():\n for info in package:\n cur.execute('''INSERT INTO packages(name, version, release, vulnerabilityId, OS_name, OS_version)\n\t\t\t VALUES(?,?,?,?,?,?);''', (info['name'], info['version'], info['release'], vul_id, info['os_name'], info['os_version']))\n \n conn.commit()\n conn.close()", "def test_reload_if_needed(self):\n self.db.storage = MagicMock()\n self.db.storage.list.return_value = [make_package(factory=SQLPackage)]\n self.db.reload_if_needed()\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)", "def sync_db():\n pass", "def insert_db():\n populate_tables()", "def reload(self):\n if os.path.exists(FileStorage.__file_path):\n with open(FileStorage.__file_path, \"r\", encoding=\"utf-8\") as f:\n loaded = json.load(f)\n for _id, v in loaded.items():\n cls = loaded[_id].pop(\"__class__\", None)\n try:\n loaded[_id][\"created_at\"] = datetime.strptime(\n loaded[_id][\"created_at\"], dt_format)\n loaded[_id][\"updated_at\"] = datetime.strptime(\n loaded[_id][\"updated_at\"], dt_format)\n except:\n pass\n FileStorage.__objects[_id] = FileStorage.class_models[cls](**v)", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def reload(self):", "def reload(self):", "async def reload_database(self, schema='conf/schema.sql'):\n with open(schema) as schema:\n await self.dao.build((schema.read()))", "def refresh_well_registry_pg(connect):\n cursor = connect.cursor()\n cursor.execute(DELETE_MV)\n cursor.execute(INSERT_MV)\n connect.commit()", "def _save_pkgs(self, *pkgs):\n for pkg in pkgs:\n self.engine.save(pkg)\n summary = PackageSummary(pkg)\n self.engine.save(summary, overwrite=True)", "def rebuild_db():\n delete_db()\n create_db()\n insert_db()", "def import_and_update(distribution: str):\n fetch_packages(distribution)\n update_snapshot(distribution)", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n 
self.insert_products_stores()", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def reconstruct(self):\n if os.path.exists(self.dbname):\n with open(self.dbname, mode='rb') as db:\n self.cache = pickle.load(db)", "def test_save_reload(self):\n base = BaseModel()\n idd = base.id\n base.name = \"betty\"\n base.save()\n storage.reload()\n key = \"BaseModel.{}\".format(idd)\n objs = storage.all()[key]\n self.assertTrue(hasattr(objs, \"name\"))\n self.assertTrue(objs.name == \"betty\")\n self.assertTrue(os.path.exists('file.json'))", "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def packages_file(self, uri):\n if basename(uri)==\"Packages\" or basename(uri)==\"Release\":\n log.msg(\"REGISTERING PACKAGE:\"+uri,'apt_pkg')\n mtime = os.stat(self.factory.cache_dir+'/'+uri)\n self.packages[uri] = mtime\n self.unload()", "def save_db(self) -> None:", "def reload(self):\n try:\n with open(FileStorage.__file_path) as f:\n objs = json.load(f)\n for obj in objs.values():\n name = obj['__class__']\n del obj['__class__']\n self.new(eval(name)(**obj))\n except FileNotFoundError:\n return", "def reload(self):\n if file_exist(self.__file_path):\n with open(self.__file_path, \"r\", encoding=\"UTF-8\") as file:\n data = read_data(file)\n for key, value in data.items():\n instance = BaseModel(**value)\n FileStorage.__objects[key] = instance", "def regen(self):\n self.create(overwrite=True)\n self.load()", "def refreshdb(var, wrapper, message):\n db.expire_stasis()\n db.init_vars()\n expire_tempbans()\n wrapper.reply(\"Done.\")" ]
[ "0.6807296", "0.66666466", "0.6007408", "0.59651184", "0.5937385", "0.5856151", "0.55851835", "0.5584457", "0.55680364", "0.55680364", "0.55680364", "0.55486923", "0.55486923", "0.5518602", "0.5455169", "0.5449565", "0.54419625", "0.5432723", "0.54300004", "0.5427746", "0.5406448", "0.54061025", "0.5405926", "0.5403297", "0.53767616", "0.537072", "0.5362611", "0.53540796", "0.53134423", "0.5294738" ]
0.7006676
0
fetch() retrieves a package from the database
def test_fetch(self): pkg = make_package(factory=SQLPackage) self.sql.add(pkg) saved_pkg = self.db.fetch(pkg.filename) self.assertEqual(saved_pkg, pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fetch(self):\n pkg = make_package()\n self.db.save(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_fetch(self):\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_fetch_missing(self):\n saved_pkg = self.db.fetch(\"missing_pkg-1.2.tar.gz\")\n self.assertIsNone(saved_pkg)", "def test_fetch_missing(self):\n saved_pkg = self.db.fetch(\"missing_pkg-1.2.tar.gz\")\n self.assertIsNone(saved_pkg)", "def test_fetch_missing(self):\n saved_pkg = self.db.fetch(\"missing_pkg-1.2.tar.gz\")\n self.assertIsNone(saved_pkg)", "def fetch_package(self, package_name):\n\t\t\t\n\t\t\tpackage_root_url = urlparse.urljoin(self.packages_root_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpackage_name + \"/\")\n\t\t\t\n\t\t\tpackage_info_url = urlparse.urljoin(package_root_url, \"info\")\n\t\t\tpackage_archive_url = urlparse.urljoin(package_root_url, \"archive\")\n\t\t\t\n\t\t\tlogger.debug(\"Get: {0}\".format(package_info_url))\n\t\t\ttry:\n\t\t\t\tinfo = json.loads(urllib2.urlopen(package_info_url).read())\n\t\t\t\treturn ups.package.Package(self, package_root_url, info)\n\t\t\texcept urllib2.HTTPError as e:\n\t\t\t\traise RepositoryError(e)\n\t\t\texcept ValueError as e:\n\t\t\t\traise RepositoryError(\"Unable to parse info file: {0}\".format(e))", "def get(self, pref: PkgReference):\n where_clause = self._where_clause(pref)\n query = f'SELECT * FROM {self.table_name} ' \\\n f'WHERE {where_clause};'\n\n with self.db_connection() as conn:\n r = conn.execute(query)\n row = r.fetchone()\n\n if not row:\n raise ConanReferenceDoesNotExistInDB(f\"No entry for package '{repr(pref)}'\")\n return self._as_dict(self.row_type(*row))", "def fetch(self) -> None:\n pass", "async def fetch(self, query, *args):\n return await self.conn.fetch(query, *args)", "def fetch():\n req_data= request.get_json()\n \n ## ddb uses text files, using this as to eat my own dogfoor and improve\n ## no service sql client. 
No daemon, low cpu.\n\n\n e=load_db()\n try:\n res=e.query(req_data['query'])\n \n serialized = jsonpickle.encode( res,\n unpicklable=False,\n make_refs=False)\n return serialized\n except Exception as ex:\n return \"{0} -> '{1}'\".format(ex,req_data['query'])", "def fetch():\n return True", "def fetch(self):\n pass", "def fetch(self):\n pass", "def do_fetch(self):\n pass", "def package(id = 0):\n\tresults = queries.package(id)\n\tif not results:\n\t\treturn render_template('package_not_found.html')\n\treturn render_template('package.html', package=results)", "def get_pkg(pkg_name):\n pkg = Database().db.get_pkg_details(pkg_name, \"\", False)\n pkg = PackageDetail.surClass(pkg)\n print('dir: ', dir(pkg))\n \n #print('name:', pkg.nane)\n #print('props.name:', pkg.props.nane)\n return render_template(\"pkg.html\", \n title=\" - \"+pkg_name,\n repos=Database().db.get_repos_names(),\n pkg=pkg)", "def get_package(package, create=False):\n index = PackageIndex.objects.first()\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(index=index, name=package)[0]\n else:\n try:\n package = Package.objects.get(index=index, name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def get_package(package, create=False):\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(name=package)[0]\n else:\n try:\n package = Package.objects.get(name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def fetch_package(source, method=None, headers=None, auth=None):\n #if method not in ('requests', 'curl_cli'):\n # raise Exception('Fetch package method \"{}\" not found'.format(method))\n if not method:\n method = detect_fetch_method()\n print('Using fetch method \"{}\"'.format(method))\n print('Source {}'.format(source))\n fetch_method = '_fetch_package_{}'.format(method)\n package = eval(fetch_method)(source, headers, auth)\n return package", "def fetch(self, location=None, conn_timeout=None):\r\n target = super(SourcePackage, self).fetch(conn_timeout=conn_timeout)\r\n return self._unpack(target, location)", "def fetch(self):\n raise NotImplementedError()", "def bd_selectPackageList_byID(self, _c, _pckgID):\n\n result = {}\n\n _c.execute(\"SELECT id, num, desc, status, source_env, dest_env, app, last_rev FROM package WHERE id=? ORDER BY num DESC\", [_pckgID]) \n data = _c.fetchone()\n\n if data:\n result['id'] = data[0] \n result['desc'] = data[2]\n result['status'] = data[3]\n result['source_env'] = data[4]\n result['dest_env'] = data[5]\n result['app'] = data[6]\n result['last_rev'] = data[7]\n result['num'] = data[1] #Place this attribute the last because of print issue. 
I know this is a Dict, take it easy.\n #\n\n return result", "def fetch_data_from_db(query):\n cursor.execute(query)\n result = cursor.fetchall()\n return result", "def retrieve_from_db(self):\n pass", "def test_load(self):\n kwargs = {\"url\": \"my.url\", \"expire\": 7237}\n pkg = make_package(**kwargs)\n # Due to some rounding weirdness in old Py3 versions, we need to remove\n # the microseconds to avoid a flappy test.\n # See: https://bugs.python.org/issue23517\n pkg.last_modified = pkg.last_modified.replace(microsecond=0)\n self.db.save(pkg)\n\n loaded = self.db.fetch(pkg.filename)\n self.assertEqual(loaded.name, pkg.name)\n self.assertEqual(loaded.version, pkg.version)\n self.assertEqual(loaded.filename, pkg.filename)\n self.assertEqual(loaded.last_modified, pkg.last_modified)\n self.assertEqual(loaded.summary, pkg.summary)\n self.assertEqual(loaded.data, kwargs)", "def get(self, fetch_number: int):\n return self.results[fetch_number]", "def order_book_fetch(self, symbol):\n pass", "def select(self):\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM foodbank\")\n return cursor.fetchall()", "async def get_object(conn: Database, query):\n return await conn.fetch_one(query=query)", "def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)" ]
[ "0.77681977", "0.76642436", "0.6594907", "0.6594907", "0.6594907", "0.65284604", "0.63306046", "0.61829334", "0.61411977", "0.6081578", "0.6049043", "0.60410875", "0.60410875", "0.59358525", "0.5873866", "0.58250237", "0.58231133", "0.58095026", "0.57333004", "0.5728252", "0.5725556", "0.5636155", "0.56339794", "0.5534508", "0.552698", "0.5519777", "0.5505839", "0.55017805", "0.5498044", "0.54948336" ]
0.7715148
1
Assert that a package exists in redis
def assert_in_redis(self, pkg): self.assertTrue(self.redis.sismember(self.db.redis_set, pkg.name)) data = self.redis.hgetall(self.db.redis_key(pkg.filename)) dt = pkg.last_modified lm = calendar.timegm(dt.utctimetuple()) + dt.microsecond / 1000000.0 lm_str = ("%.6f" % lm).rstrip("0").rstrip(".") pkg_data = { "name": pkg.name, "version": pkg.version, "filename": pkg.filename, "last_modified": lm_str, "summary": pkg.summary, } pkg_data.update(pkg.data) self.assertEqual(data, pkg_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_subversion_package_installed(host):\n assert host.package(PACKAGE).is_installed", "def test_72_packages(host, pkg):\n assert host.package(pkg).is_installed", "def test_azurecli_package_installed(host):\n assert host.package(PACKAGE).is_installed", "def test_is_package_installed(self, mock_run):\n\n build_cmake_project.is_package_installed('tmux')\n mock_run.assert_called_once_with(\n args='dpkg-query -l tmux', check=True, shell=True)", "def test_reload(self):\n keys = [\n make_package(factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n for pkg in keys:\n self.assert_in_redis(pkg)", "def test_is_installed():\n assert _is_installed('coverage') is True # regular dependency\n assert _is_installed('pytest') is True # dev dependency\n assert _is_installed('missing') is False # missing dependency", "def test_install(self):\n self.assertIn('kser', [x.key for x in pkg_resources.working_set])", "def test_default_packages(host, pkg):\n assert host.package(pkg).is_installed", "def test_packages(host, distribution, name):\n\n if host.system_info.distribution != distribution:\n pytest.skip('Distribution not managed for this package')\n\n assert host.package(name).is_installed", "def test_subversion_binary_exists(host):\n assert host.file(PACKAGE_BINARY).exists", "def test_01_package_exists_returns_pkg(self, Mock):\r\n html_request = FakeRequest(json.dumps(self.pkg_json_found), 200,\r\n {'content-type': 'application/json'})\r\n Mock.return_value = html_request\r\n with self.flask_app.test_request_context('/'):\r\n # Resource that exists\r\n out, e = self.ckan.package_exists(name='urbanpark')\r\n assert out is not False, \"It should return a pkg\"\r\n err_msg = \"The pkg id should be the same\"\r\n assert out['id'] == self.pkg_json_found['result']['id'], err_msg", "def test_clear(self):\n pkg = make_package()\n key = self.db.redis_key(pkg.filename)\n self.redis[key] = \"foobar\"\n self.db.clear(pkg)\n val = self.redis.get(key)\n self.assertIsNone(val)\n count = self.redis.scard(self.db.redis_set)\n self.assertEqual(count, 0)", "def test_npm_installed_pkgs(npm):\n ret = npm.installed(\n name=\"unused\",\n pkgs=[\"[email protected]\", \"[email protected]\"],\n registry=\"https://registry.npmjs.org/\",\n )\n assert ret.result is True", "def test_install_universe_package(package):\n\n install_package_and_wait(package)\n assert package_installed(package), 'Package failed to install'\n\n deployment_wait(max_attempts=300)\n assert service_healthy(package)", "def test_packages_present(self):\n packages = [\"ca-certificates\", \"sudo\", \"wget\", \"unzip\"]\n for pkg in packages:\n with self.subTest(package=pkg):\n self.assertTrue(self.host.package(pkg).is_installed)", "def test_package_can_not_be_found_in_registry(self):\n with self.with_config_update():\n with patch(\n \"aea.cli.registry.utils.get_package_meta\",\n side_effects=Exception(\"expected!\"),\n ), patch(\n \"aea.cli.registry.utils.find_item_locally\",\n side_effects=Exception(\"expected!\"),\n ), pytest.raises(\n ClickException,\n match=r\"Package .* details can not be fetched from the registry!\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def check_remote_rpm_install(self, rpm_package_name, host):\n results = run_remote_command(\"rpm -q %s 
--dbpath %s\" % (rpm_package_name, RPM_DATABASE), host)\n self.assertEqual(results, rpm_package_name)", "def test_Package(self):\n self.assertEqual(8, Package.objects.count())\n\n for pkg in ('wireshark-common', 'wireshark-gtk', 'wireshark-cli', 'wireshark-qt'):\n for ver in ('2.6.0-1', '2.6.1-1'):\n self.assertTrue(Package.objects.filter(name=pkg, version=ver))\n\n for pkg in Package.objects.filter(name='wireshark-cli'):\n self.assertEqual('pacman', pkg.type)\n self.assertEqual('archlinux', pkg.namespace)", "def test_install_packages():\n\n\tassert packaging.install_packages(pkgs) == None", "def check_rpm_install(self, rpm_package_name):\n results = run_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE))\n self.assertEqual(results, rpm_package_name)", "def test_packages(self):\n for pkg in self.expected_packages:\n status, output = commands.getstatusoutput('pkg_info -qx %s' % pkg)\n assert status == 0", "def test_reinstall_packages():\n\tassert packaging.install_packages(pkgs) == None", "def test_installed_packages():\n features = (\n \"Package Identity : Capa1\\r\\n State : Installed\\r\\n\"\n \"Package Identity : Capa2\\r\\n State : Installed\\r\\n\"\n )\n\n mock = MagicMock(return_value=features)\n with patch.dict(dism.__salt__, {\"cmd.run\": mock}):\n out = dism.installed_packages()\n mock.assert_called_once_with(\n [dism.bin_dism, \"/English\", \"/Online\", \"/Get-Packages\"]\n )\n assert out == [\"Capa1\", \"Capa2\"]", "def test_apache2_is_installed(host):\n apache2 = host.package(\"apache2\")\n assert apache2.is_installed", "async def test_get_redis(redis_rpc_transport):\n pool = await redis_rpc_transport.get_redis_pool()\n with await pool as redis:\n assert await redis.info()", "def test_fetch(self):\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_package(self, package):\n with self._conn.begin():\n return bool(self._conn.scalar(\n \"VALUES (test_package(%s))\", (package,)\n ))", "def test_version_exists():\n assert ztm.__version__", "def verify_package(dut, packane_name):\n command = \"dpkg -s {} | grep Status\".format(packane_name)\n output = st.config(dut, command, skip_error_check=True)\n if \"package '{}' is not installed\".format(packane_name) in output:\n st.log(\"Package '{}' is not installed in DUT\".format(packane_name))\n return False\n return True", "def test_check_dependencies_with_found(self):\n self.spy_on(check_install, op=kgb.SpyOpMatchAny([\n {\n 'args': (['cm', 'version'],),\n 'op': kgb.SpyOpReturn(True),\n },\n ]))\n\n client = self.build_client(setup=False)\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])" ]
[ "0.6934081", "0.6924417", "0.66299754", "0.6493026", "0.6456694", "0.6321571", "0.62864304", "0.62710935", "0.62278736", "0.61834127", "0.61584306", "0.61579615", "0.61522186", "0.6134971", "0.6124497", "0.6056061", "0.6053051", "0.6052222", "0.60043275", "0.5990259", "0.5973918", "0.5971213", "0.5929829", "0.591025", "0.5888942", "0.5888577", "0.5873292", "0.5866319", "0.5861794", "0.58520544" ]
0.7310376
0
reload_from_storage() inserts packages into the database
def test_reload(self): keys = [ make_package(factory=SQLPackage), make_package("mypkg2", "1.3.4", "my/other/path", factory=SQLPackage), ] self.storage.list.return_value = keys self.db.reload_from_storage() for pkg in keys: self.assert_in_redis(pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reload(self):\n keys = [\n make_package(factory=SQLPackage),\n make_package(\n \"mypkg2\",\n \"1.3.4\",\n \"my/other/path\",\n factory=SQLPackage,\n hash_md5=\"md5\",\n hash_sha256=\"sha256\",\n ),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n all_pkgs = self.sql.query(SQLPackage).all()\n self.assertCountEqual(all_pkgs, keys)", "def test_reload(self):\n keys = [\n make_package(factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n all_pkgs = self.engine.scan(DynamoPackage).all()\n self.assertCountEqual(all_pkgs, keys)", "def syncSave(self):\n for pyfile in self.files.values():\n pyfile.sync()\n\n for pypack in self.packages.values():\n pypack.sync()\n\n self.db.syncSave()", "def populateDB(db, packages):\n conn = sqlite3.connect(db)\n cur = conn.cursor()\n print \"opened db successfully\"\n for vul_id, package in packages.items():\n for info in package:\n cur.execute('''INSERT INTO packages(name, version, release, vulnerabilityId, OS_name, OS_version)\n\t\t\t VALUES(?,?,?,?,?,?);''', (info['name'], info['version'], info['release'], vul_id, info['os_name'], info['os_version']))\n \n conn.commit()\n conn.close()", "def test_reload_if_needed(self):\n self.db.storage = MagicMock()\n self.db.storage.list.return_value = [make_package(factory=SQLPackage)]\n self.db.reload_if_needed()\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)", "def sync_db():\n pass", "def insert_db():\n populate_tables()", "def reload(self):\n if os.path.exists(FileStorage.__file_path):\n with open(FileStorage.__file_path, \"r\", encoding=\"utf-8\") as f:\n loaded = json.load(f)\n for _id, v in loaded.items():\n cls = loaded[_id].pop(\"__class__\", None)\n try:\n loaded[_id][\"created_at\"] = datetime.strptime(\n loaded[_id][\"created_at\"], dt_format)\n loaded[_id][\"updated_at\"] = datetime.strptime(\n loaded[_id][\"updated_at\"], dt_format)\n except:\n pass\n FileStorage.__objects[_id] = FileStorage.class_models[cls](**v)", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\n DB.session.commit()\n return 'Data refreshed!'", "def reload(self):", "def reload(self):", "async def reload_database(self, schema='conf/schema.sql'):\n with open(schema) as schema:\n await self.dao.build((schema.read()))", "def refresh_well_registry_pg(connect):\n cursor = connect.cursor()\n cursor.execute(DELETE_MV)\n cursor.execute(INSERT_MV)\n connect.commit()", "def _save_pkgs(self, *pkgs):\n for pkg in pkgs:\n self.engine.save(pkg)\n summary = PackageSummary(pkg)\n self.engine.save(summary, overwrite=True)", "def rebuild_db():\n delete_db()\n create_db()\n insert_db()", "def import_and_update(distribution: str):\n fetch_packages(distribution)\n update_snapshot(distribution)", "def populate_database(self):\n self.insert_products()\n self.insert_categories()\n self.insert_products_categories()\n self.insert_stores()\n self.insert_products_stores()", "def refresh():\n DB.drop_all()\n DB.create_all()\n # TODO Get data from OpenAQ, make 
Record objects with it, and add to db\n aq_data.add_aq_to_db()\n DB.session.commit()\n return 'Data refreshed!'", "def update_database():\n\n # We obtain the data from the official database\n df = getData.extractData()\n\n # We save the dataframe for later use in the API\n auxiliary.saveToCsv(df, 'app/resources')", "def reconstruct(self):\n if os.path.exists(self.dbname):\n with open(self.dbname, mode='rb') as db:\n self.cache = pickle.load(db)", "def test_save_reload(self):\n base = BaseModel()\n idd = base.id\n base.name = \"betty\"\n base.save()\n storage.reload()\n key = \"BaseModel.{}\".format(idd)\n objs = storage.all()[key]\n self.assertTrue(hasattr(objs, \"name\"))\n self.assertTrue(objs.name == \"betty\")\n self.assertTrue(os.path.exists('file.json'))", "def update_db(self):\n songs = self.db.get_all_songs()\n for song in songs:\n if choose_song(song) == ERROR:\n self.db.delete_song(song)\n files = []\n for song in glob.glob(\"songs\\*.wav\"):\n to_append = song.split('\\\\')[ONE][:-4]\n files.append(to_append)\n for song in files:\n if song not in songs:\n self.db.add_new_song(song)", "def packages_file(self, uri):\n if basename(uri)==\"Packages\" or basename(uri)==\"Release\":\n log.msg(\"REGISTERING PACKAGE:\"+uri,'apt_pkg')\n mtime = os.stat(self.factory.cache_dir+'/'+uri)\n self.packages[uri] = mtime\n self.unload()", "def save_db(self) -> None:", "def reload(self):\n try:\n with open(FileStorage.__file_path) as f:\n objs = json.load(f)\n for obj in objs.values():\n name = obj['__class__']\n del obj['__class__']\n self.new(eval(name)(**obj))\n except FileNotFoundError:\n return", "def reload(self):\n if file_exist(self.__file_path):\n with open(self.__file_path, \"r\", encoding=\"UTF-8\") as file:\n data = read_data(file)\n for key, value in data.items():\n instance = BaseModel(**value)\n FileStorage.__objects[key] = instance", "def regen(self):\n self.create(overwrite=True)\n self.load()", "def refreshdb(var, wrapper, message):\n db.expire_stasis()\n db.init_vars()\n expire_tempbans()\n wrapper.reply(\"Done.\")" ]
[ "0.7005225", "0.66653717", "0.600818", "0.59671783", "0.59358585", "0.5855708", "0.5586571", "0.5582626", "0.556691", "0.556691", "0.556691", "0.55463755", "0.55463755", "0.55172724", "0.5454357", "0.5451549", "0.54415125", "0.54324967", "0.5429829", "0.5429193", "0.54062206", "0.5405157", "0.5403142", "0.5402811", "0.53776175", "0.5370432", "0.5360729", "0.5351557", "0.53133094", "0.529389" ]
0.6805712
1
fetch() retrieves a package from the database
def test_fetch(self): pkg = make_package() self.db.save(pkg) saved_pkg = self.db.fetch(pkg.filename) self.assertEqual(saved_pkg, pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_fetch(self):\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_fetch(self):\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_fetch_missing(self):\n saved_pkg = self.db.fetch(\"missing_pkg-1.2.tar.gz\")\n self.assertIsNone(saved_pkg)", "def test_fetch_missing(self):\n saved_pkg = self.db.fetch(\"missing_pkg-1.2.tar.gz\")\n self.assertIsNone(saved_pkg)", "def test_fetch_missing(self):\n saved_pkg = self.db.fetch(\"missing_pkg-1.2.tar.gz\")\n self.assertIsNone(saved_pkg)", "def fetch_package(self, package_name):\n\t\t\t\n\t\t\tpackage_root_url = urlparse.urljoin(self.packages_root_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpackage_name + \"/\")\n\t\t\t\n\t\t\tpackage_info_url = urlparse.urljoin(package_root_url, \"info\")\n\t\t\tpackage_archive_url = urlparse.urljoin(package_root_url, \"archive\")\n\t\t\t\n\t\t\tlogger.debug(\"Get: {0}\".format(package_info_url))\n\t\t\ttry:\n\t\t\t\tinfo = json.loads(urllib2.urlopen(package_info_url).read())\n\t\t\t\treturn ups.package.Package(self, package_root_url, info)\n\t\t\texcept urllib2.HTTPError as e:\n\t\t\t\traise RepositoryError(e)\n\t\t\texcept ValueError as e:\n\t\t\t\traise RepositoryError(\"Unable to parse info file: {0}\".format(e))", "def get(self, pref: PkgReference):\n where_clause = self._where_clause(pref)\n query = f'SELECT * FROM {self.table_name} ' \\\n f'WHERE {where_clause};'\n\n with self.db_connection() as conn:\n r = conn.execute(query)\n row = r.fetchone()\n\n if not row:\n raise ConanReferenceDoesNotExistInDB(f\"No entry for package '{repr(pref)}'\")\n return self._as_dict(self.row_type(*row))", "def fetch(self) -> None:\n pass", "async def fetch(self, query, *args):\n return await self.conn.fetch(query, *args)", "def fetch():\n req_data= request.get_json()\n \n ## ddb uses text files, using this as to eat my own dogfoor and improve\n ## no service sql client. 
No daemon, low cpu.\n\n\n e=load_db()\n try:\n res=e.query(req_data['query'])\n \n serialized = jsonpickle.encode( res,\n unpicklable=False,\n make_refs=False)\n return serialized\n except Exception as ex:\n return \"{0} -> '{1}'\".format(ex,req_data['query'])", "def fetch():\n return True", "def fetch(self):\n pass", "def fetch(self):\n pass", "def do_fetch(self):\n pass", "def package(id = 0):\n\tresults = queries.package(id)\n\tif not results:\n\t\treturn render_template('package_not_found.html')\n\treturn render_template('package.html', package=results)", "def get_pkg(pkg_name):\n pkg = Database().db.get_pkg_details(pkg_name, \"\", False)\n pkg = PackageDetail.surClass(pkg)\n print('dir: ', dir(pkg))\n \n #print('name:', pkg.nane)\n #print('props.name:', pkg.props.nane)\n return render_template(\"pkg.html\", \n title=\" - \"+pkg_name,\n repos=Database().db.get_repos_names(),\n pkg=pkg)", "def get_package(package, create=False):\n index = PackageIndex.objects.first()\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(index=index, name=package)[0]\n else:\n try:\n package = Package.objects.get(index=index, name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def get_package(package, create=False):\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(name=package)[0]\n else:\n try:\n package = Package.objects.get(name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def fetch_package(source, method=None, headers=None, auth=None):\n #if method not in ('requests', 'curl_cli'):\n # raise Exception('Fetch package method \"{}\" not found'.format(method))\n if not method:\n method = detect_fetch_method()\n print('Using fetch method \"{}\"'.format(method))\n print('Source {}'.format(source))\n fetch_method = '_fetch_package_{}'.format(method)\n package = eval(fetch_method)(source, headers, auth)\n return package", "def fetch(self, location=None, conn_timeout=None):\r\n target = super(SourcePackage, self).fetch(conn_timeout=conn_timeout)\r\n return self._unpack(target, location)", "def fetch(self):\n raise NotImplementedError()", "def bd_selectPackageList_byID(self, _c, _pckgID):\n\n result = {}\n\n _c.execute(\"SELECT id, num, desc, status, source_env, dest_env, app, last_rev FROM package WHERE id=? ORDER BY num DESC\", [_pckgID]) \n data = _c.fetchone()\n\n if data:\n result['id'] = data[0] \n result['desc'] = data[2]\n result['status'] = data[3]\n result['source_env'] = data[4]\n result['dest_env'] = data[5]\n result['app'] = data[6]\n result['last_rev'] = data[7]\n result['num'] = data[1] #Place this attribute the last because of print issue. 
I know this is a Dict, take it easy.\n #\n\n return result", "def fetch_data_from_db(query):\n cursor.execute(query)\n result = cursor.fetchall()\n return result", "def retrieve_from_db(self):\n pass", "def test_load(self):\n kwargs = {\"url\": \"my.url\", \"expire\": 7237}\n pkg = make_package(**kwargs)\n # Due to some rounding weirdness in old Py3 versions, we need to remove\n # the microseconds to avoid a flappy test.\n # See: https://bugs.python.org/issue23517\n pkg.last_modified = pkg.last_modified.replace(microsecond=0)\n self.db.save(pkg)\n\n loaded = self.db.fetch(pkg.filename)\n self.assertEqual(loaded.name, pkg.name)\n self.assertEqual(loaded.version, pkg.version)\n self.assertEqual(loaded.filename, pkg.filename)\n self.assertEqual(loaded.last_modified, pkg.last_modified)\n self.assertEqual(loaded.summary, pkg.summary)\n self.assertEqual(loaded.data, kwargs)", "def get(self, fetch_number: int):\n return self.results[fetch_number]", "def order_book_fetch(self, symbol):\n pass", "def select(self):\n connection = sqlite3.connect(DB_FILE)\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM foodbank\")\n return cursor.fetchall()", "async def get_object(conn: Database, query):\n return await conn.fetch_one(query=query)", "def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)" ]
[ "0.7715148", "0.76642436", "0.6594907", "0.6594907", "0.6594907", "0.65284604", "0.63306046", "0.61829334", "0.61411977", "0.6081578", "0.6049043", "0.60410875", "0.60410875", "0.59358525", "0.5873866", "0.58250237", "0.58231133", "0.58095026", "0.57333004", "0.5728252", "0.5725556", "0.5636155", "0.56339794", "0.5534508", "0.552698", "0.5519777", "0.5505839", "0.55017805", "0.5498044", "0.54948336" ]
0.77681977
0
fetch() returns None if no package exists
def test_fetch_missing(self): saved_pkg = self.db.fetch("missing_pkg-1.2.tar.gz") self.assertIsNone(saved_pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_package(self, package_name):\n\t\t\t\n\t\t\tpackage_root_url = urlparse.urljoin(self.packages_root_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpackage_name + \"/\")\n\t\t\t\n\t\t\tpackage_info_url = urlparse.urljoin(package_root_url, \"info\")\n\t\t\tpackage_archive_url = urlparse.urljoin(package_root_url, \"archive\")\n\t\t\t\n\t\t\tlogger.debug(\"Get: {0}\".format(package_info_url))\n\t\t\ttry:\n\t\t\t\tinfo = json.loads(urllib2.urlopen(package_info_url).read())\n\t\t\t\treturn ups.package.Package(self, package_root_url, info)\n\t\t\texcept urllib2.HTTPError as e:\n\t\t\t\traise RepositoryError(e)\n\t\t\texcept ValueError as e:\n\t\t\t\traise RepositoryError(\"Unable to parse info file: {0}\".format(e))", "def test_fetch(self):\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_fetch(self):\n pkg = make_package()\n self.db.save(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def translate(self, package):\r\n if not isinstance(package, self._package_type):\r\n return None\r\n if not package.compatible(identity=self._identity, platform=self._platform):\r\n return None\r\n try:\r\n bdist = package.fetch(location=self._install_cache, conn_timeout=self._conn_timeout)\r\n except package.UnreadableLink as e:\r\n TRACER.log('Failed to fetch %s: %s' % (package, e))\r\n return None\r\n return DistributionHelper.distribution_from_path(bdist)", "def fetch_package(source, method=None, headers=None, auth=None):\n #if method not in ('requests', 'curl_cli'):\n # raise Exception('Fetch package method \"{}\" not found'.format(method))\n if not method:\n method = detect_fetch_method()\n print('Using fetch method \"{}\"'.format(method))\n print('Source {}'.format(source))\n fetch_method = '_fetch_package_{}'.format(method)\n package = eval(fetch_method)(source, headers, auth)\n return package", "def fetch_pkgbuild(self):\n\n package_dir = os.path.join(Package.cache_dir, self.pkgbase)\n\n # check if repo has ever been fetched\n if os.path.isdir(package_dir):\n if run([\"git\", \"fetch\"], cwd=package_dir).returncode != 0:\n logging.error(\"git fetch failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git fetch failed in directory {}\".format(package_dir))\n\n head = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n u = run(\n [\"git\", \"rev-parse\", \"@{u}\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n\n # if new sources available\n if head != u:\n reset_return = run(\n [\"git\", \"reset\", \"--hard\", \"HEAD\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if reset_return.returncode != 0:\n print(reset_return.stderr)\n logging.error(\"git reset failed in directory {}\".format(package_dir))\n raise InvalidInput(\"git reset failed in directory {}\".format(package_dir))\n\n pull_return = run(\n [\"git\", \"pull\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if pull_return.returncode != 0:\n print(pull_return.stderr)\n logging.error(\"git pull failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git pull failed in directory {}\".format(package_dir))\n\n # repo has never been fetched\n else:\n # create package dir\n try:\n os.makedirs(package_dir, mode=0o700, exist_ok=True)\n except OSError:\n logging.error(\"Creating package dir {} 
failed\".format(package_dir))\n raise InvalidInput(\"Creating package dir {} failed\".format(package_dir))\n\n # clone repo\n if run(\n [\"git\", \"clone\", \"{}/{}.git\".format(AurVars.aur_domain, self.pkgbase)],\n cwd=Package.cache_dir\n ).returncode != 0:\n logging.error(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))\n raise ConnectionProblem(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))", "def test_fetch(self):\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def fetch():\n return True", "def fetch(opts):\n\n assert opts\n cache_dir = opts.cache_dir\n name = opts.name\n revision = opts.revision\n\n if not GIT.exists():\n err('unable to fetch package; git is not installed')\n return None\n\n git_dir = '--git-dir=' + cache_dir\n\n # check if we have the target revision cached; if so, package is ready\n if os.path.isdir(cache_dir) and not opts.ignore_cache:\n erv = revision_exists(git_dir, revision)\n if erv in REVISION_EXISTS:\n # ensure configuration is properly synchronized\n if not _sync_git_configuration(opts):\n return None\n\n # if no explicit ignore-cache request and if the revision is a\n # branch, force ignore-cache on and allow fetching to proceed\n if opts.ignore_cache is None and erv == GitExistsType.EXISTS_BRANCH:\n opts.ignore_cache = True\n # return cache dir if not verifying or verification succeeds\n elif not opts._git_verify_revision or _verify_revision(\n git_dir, revision, quiet=True):\n return cache_dir\n\n note('fetching {}...', name)\n sys.stdout.flush()\n\n # validate any cache directory (if one exists)\n has_cache, bad_validation = _validate_cache(cache_dir)\n if bad_validation:\n return None\n\n # if we have no cache for this repository, build one\n if not has_cache:\n if not ensure_dir_exists(cache_dir):\n return None\n\n if not _create_bare_git_repo(cache_dir):\n return None\n\n # ensure configuration is properly synchronized\n if not _sync_git_configuration(opts):\n return None\n\n # fetch sources for this repository\n if not _fetch_srcs(opts, cache_dir, revision, refspecs=opts._git_refspecs):\n return None\n\n # verify revision (if configured to check it)\n if opts._git_verify_revision:\n if not _verify_revision(git_dir, revision):\n err('''\\\nfailed to validate git revision\n\nPackage has been configured to require the verification of the GPG signature\nfor the target revision. The verification has failed. 
Ensure that the revision\nis signed and that the package's public key has been registered in the system.\n\n Package: {}\n Revision: {}''', name, revision)\n return None\n\n # fetch submodules (if configured to do so)\n if opts._git_submodules:\n if not _fetch_submodules(opts, cache_dir, revision):\n return None\n\n return cache_dir", "def test_load_one_pip(self, build_image_for_jupyterlab):\n\n key = \"pip&gtmunit1\"\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n\n loader = PackageLatestVersionLoader([key], lb, username)\n promise1 = loader.load(key)\n assert isinstance(promise1, Promise)\n\n pkg = promise1.get()\n assert pkg == '0.12.4'", "def get_package_info(package_name):\n r = requests.get(f'https://api.npms.io/v2/search?q={package_name}&size=1')\n response_json = r.json()\n\n if 'results' in response_json:\n result = response_json['results'][0]\n return result['package']", "def find_module(self, abs_name, path=None):\n package_name = abs_name.split(\".\")[0]\n\n last_name = abs_name.split(\".\")[-1]\n if last_name in sys.modules:\n return None\n\n try:\n # means it can already be imported, no work to be done here\n imp.find_module(abs_name)\n\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # see the comment in the docstring\n return None\n except ImportError as e:\n pass\n\n if package_name == \"talus\" and self._module_in_git(abs_name):\n self.download_module(abs_name)\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # see the comment in the docstring\n return None\n\n if package_name in self.cache[\"packages\"] and package_name not in sys.modules:\n self.install_package_from_talus(package_name)\n return None\n\n # we NEED to have the 2nd check here or else it will keep downloading\n # the same package over and over\n if package_name in self.cache[\"pypi\"] and package_name not in sys.modules:\n self.install_cached_package(package_name)\n return None\n\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # see the comment in the docstring\n return None", "def cabal_get(name, unpack_to, version=None):\n pkg = name\n if version is not None:\n pkg += \"==\"\n pkg += version\n\n return run([\"cabal\", \"get\", pkg, \"-d\", unpack_to])", "def fetch(self, location=None, conn_timeout=None):\r\n target = super(SourcePackage, self).fetch(conn_timeout=conn_timeout)\r\n return self._unpack(target, location)", "def package_exists (package_name, package_version, lang):\n\n url = make_package_url (package_name, package_version, lang)\n victim_file = download_file (url)\n\n if victim_file is None:\n return False\n else:\n return True", "def fetch_maybe(cls, url, path, save=False):\n if os.path.isfile(path):\n # print(\"Found %s\" % os.path.basename(path))\n with open(path, \"rb\") as file:\n return file.read(), True\n if save:\n return cls.fetch_and_save(url, path), False\n return cls.fetch_with_retry(url), False", "def __fetch_remote_source(self):\n # type: () -> Union(Git, None)\n if self.source == 'git':\n return self.git_source_class(**self.configuration).fetch()\n return 
None", "def test_install_without_connection_from_cache():\n\n\tclean_apt(full=False)\n\n\twith no_connection():\n\t\tassert packaging.install_packages(pkgs) == None", "async def get_latest_version(self, pkg: str) -> Optional[str]:\n return None", "def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)", "def get_pkg(pkg_name):\n pkg = Database().db.get_pkg_details(pkg_name, \"\", False)\n pkg = PackageDetail.surClass(pkg)\n print('dir: ', dir(pkg))\n \n #print('name:', pkg.nane)\n #print('props.name:', pkg.props.nane)\n return render_template(\"pkg.html\", \n title=\" - \"+pkg_name,\n repos=Database().db.get_repos_names(),\n pkg=pkg)", "def getPackageFromResource(resource):\n import sd\n url = resource.getUrl()\n pkg_manager = sd.getContext().getSDApplication().getPackageMgr()\n for p in pkg_manager.getPackages():\n for r in p.getChildrenResources(False):\n if r.getUrl() == url:\n return p\n return None", "def test_npm_latest_version_request(_foo):\n version = NPMMonitor.fetch_latest_package_version('foobar')\n assert version == '1.3.5'\n assert NPMMonitor.fetch_latest_package_version('foobar') is None\n assert NPMMonitor.fetch_latest_package_version('foobar') is None", "def get_package(self, __package_id):\n raise NotImplementedError", "def test_load(self):\n kwargs = {\"url\": \"my.url\", \"expire\": 7237}\n pkg = make_package(**kwargs)\n # Due to some rounding weirdness in old Py3 versions, we need to remove\n # the microseconds to avoid a flappy test.\n # See: https://bugs.python.org/issue23517\n pkg.last_modified = pkg.last_modified.replace(microsecond=0)\n self.db.save(pkg)\n\n loaded = self.db.fetch(pkg.filename)\n self.assertEqual(loaded.name, pkg.name)\n self.assertEqual(loaded.version, pkg.version)\n self.assertEqual(loaded.filename, pkg.filename)\n self.assertEqual(loaded.last_modified, pkg.last_modified)\n self.assertEqual(loaded.summary, pkg.summary)\n self.assertEqual(loaded.data, kwargs)", "def getusersitepackages():\n\tpass", "def get_info(self, pkgname):\n for pkg in self.rpc.info(pkgname):\n return pkg", "def _fetch_package_requests(source, headers, auth):\n import requests\n dest = build_temp_package_filepath()\n response = requests.get(source, stream=True, headers=headers, auth=auth)\n response.raise_for_status()\n with open(dest, 'wb') as handle:\n for block in response.iter_content(1024):\n handle.write(block)\n package = tarfile.open(dest)\n return package", "def get_package(package, create=False):\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(name=package)[0]\n else:\n try:\n package = Package.objects.get(name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 
'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 
'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = 
Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True" ]
[ "0.6841072", "0.67000926", "0.66926116", "0.6597814", "0.6547767", "0.6434574", "0.64287364", "0.62104625", "0.6064177", "0.5982968", "0.59753776", "0.5958001", "0.5895387", "0.5870588", "0.5860548", "0.58389133", "0.5822474", "0.5799762", "0.57829195", "0.57777876", "0.57693094", "0.57109016", "0.5648336", "0.56448364", "0.5641809", "0.56316715", "0.5619414", "0.55990696", "0.55963135", "0.55917925" ]
0.73527503
1
Can upload multiple packages that have the same version
def test_multiple_packages_same_version(self): with patch.object(self.request.access, "allow_overwrite", []): name, version = "a", "1" path1 = "old_package_path-1.tar.gz" self.db.upload(path1, BytesIO(b"test1234"), name, version) path2 = "new_path-1.whl" self.db.upload(path2, BytesIO(b"test1234"), name, version) all_versions = self.db.all(name) self.assertEqual(len(all_versions), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_multiple_packages_same_version(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = []\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n cache.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n cache.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 2)\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 2)", "def upload_packages(self, packages):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n\n filepaths = [os.path.join(os.path.dirname(path), pfile['filename'])\n for path in packages\n for pfile in deb_changes(path)['files']]\n filepaths.extend(packages)\n\n # get upload token\n resp = self._client.postjson(path=\"/users/%(username)s/\"\n \"repos/%(reponame)s/\"\n \"branches/%(name)s/get_upload_token\" %\n context)\n token = resp['utoken']\n for pfile in filepaths:\n self._client.upload(path=\"/upload/%s/send/%s\" %\n (token, os.path.basename(pfile)),\n filepath=pfile)\n self._client.post(path=\"/upload/%s/dput\" % token)", "def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)", "def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def upload_package(self, __contents):\n raise NotImplementedError", "def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def test_all_versions(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def uploadPackages(self, directory):\n files_to_upload_dict = {}\n files_to_upload_list = [ f for f in listdir(directory) if isfile(join(directory,f)) ]\n self.logger.debug(\"uploadPackages(\" + \"{})\".format(directory))\n #print \"Files to upload:\"\n for index in range(len(files_to_upload_list)):\n self.logger.info(files_to_upload_list[index])\n self.uploadFileToIM (directory, files_to_upload_list[index], files_to_upload_list[index])\n #file_tuple = {'files':{str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}} \n #file_tuple = {str(files_to_upload_list[index]), {open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}}\n #file_tuple = {'files': (str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 
'rb'), 'application/x-rpm')}\n #file_tuple = (str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'))\n #file_tuple = {str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}\n #files_data_to_upload_list.append(file_tuple)\n #print \"Files to upload Dictionary:\"", "def test_load_many_pip(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"pip&gtmunit1\", \"pip&gtmunit2\", \"pip&gtmunit3\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"0.12.4\"\n assert version_list[1] == \"12.2\"\n assert version_list[2] == \"5.0\"", "def packages():", "def _provision_package(self):", "def test_runs_with_multiple_packages(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(MULTIPLE_PACKAGES_REPO)\n\n assert result.status == Status.SUCCESS", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "def get_changed_packages(blob_name1, blob_name2, package_list):\n changed_files = check_output(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n changed_files = changed_files.split('\\n')\n\n result = set()\n for filename in changed_files:\n file_root = rootname(filename)\n if file_root in package_list:\n result.add(file_root)\n\n return sorted(result)", "def test_upload_overwrite(self):\n self.request.access.allow_overwrite = [\"everyone\"]\n name, filename = \"a\", \"a-1.tar.gz\"\n self.db.upload(filename, BytesIO(b\"old\"), name)\n self.db.upload(filename, BytesIO(b\"new\"), name)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 1)", "def test_all_python_versions_deploy():\n pass", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def test_load_many_mixed(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"conda3&cdutil\", \"pip&gtmunit1\", \"conda3&nltk\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"8.1\"\n assert version_list[1] == \"0.12.4\"\n assert version_list[2] == \"3.2.5\"", "def upload_jars(configs):\n print(\"Upload jars to signing server...\")\n jar_list = []\n for module_name in configs[\"moduleNames\"]:\n module_folder = get_module_folder(configs, module_name)\n module_jars = get_folder_files(module_folder, [\"*.jar\"])\n\n for module_jar in module_jars:\n print(\"--Uploading \" + module_jar)\n jar_list.append(os.path.basename(module_jar))\n shutil.copy2(module_jar, configs[\"toSignFolder\"])\n\n return jar_list", "def test_multiple_manifest_multiple_dep(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1 + DEP_2, True)\n collector.parse_and_collect(MANIFEST_START + DEP_1 + DEP_2, True)\n packages = 
dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri, pydantic': 2\n }", "def test_3x_only_python_versions_deploy():\n pass", "def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")", "def test_upload_overwrite(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = [\"everyone\"]\n name, filename, content = \"a\", \"a-1.tar.gz\", BytesIO(b\"new\")\n cache.upload(filename, BytesIO(b\"old\"), name)\n cache.upload(filename, content, name)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 1)\n data = cache.storage.open(all_versions[0]).read()\n self.assertEqual(data, b\"new\")\n\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 1)", "def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: 
no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the 
details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True", "def upload():\n sh('python setup.py register sdist upload')", "def update(self, iterable):\n for package in iterable:\n self.add_package(package)", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def upload_pkg(identifier, pkgname, metadata, directory, years):\n files = []\n for f in os.scandir(directory):\n if not f.is_symlink():\n continue\n path = os.readlink(f)\n match = re.match(SYMLINK_YEAR_REGEXP, path)\n if not match:\n continue\n year = match[1]\n if year not in years:\n continue\n files.append(f.path)\n if not files:\n return\n # Get last package, to extract a description\n last_pkg = sorted(filter(lambda x: not x.endswith('.sig'), files))[-1]\n pkginfo = extract_pkginfo(last_pkg)\n pkgdesc = pkginfo['pkgdesc'] if 'pkgdesc' in pkginfo else ''\n metadata['description'] = DESCRIPTION.format(pkgname=pkgname, pkgdesc=pkgdesc, url=pkginfo['url'], license=pkginfo['license'])\n metadata['rights'] = 'License: ' + pkginfo['license']\n #print(pkgname, len(files))\n #print(metadata)\n try:\n res = ia.upload(identifier, files=files, metadata=metadata)\n if not all([x.status_code == 200 for x in res]):\n ok = len([x for x in res if x.status_code == 200])\n nok = len([x for x in res if 
x.status_code != 200])\n codes = set([x.status_code for x in res])\n print(\"{}: only {}/{} files uploaded, status codes: {}\".format(identifier, ok, ok+nok, codes), file=sys.stderr)\n print(directory)\n except Exception as e:\n print(\"{}: exception raised\".format(identifier), file=sys.stderr)\n print(e, file=sys.stderr)\n print(directory)", "def test_01_upload_langpacks(self):\n cmd = (\n 'pulp-admin rpm repo uploads langpacks --repo-id {0} '\n '--name {1} --install {1}-%s'\n ).format(self.repo_id, utils.uuid4()).split()\n self.client.run(cmd)\n num_langpacks = _count_langpacks(self.cfg, self.repo_id)\n self.assertEqual(num_langpacks, 1, cmd)", "def not_reupload_test(self):\n servers = {}\n test_server = TestServer([(\"*/*@*/*\", \"*\")], [(\"*/*@*/*\", \"*\")],\n users={\"lasote\": \"mypass\"})\n servers[\"default\"] = test_server\n client = TestClient(servers=servers, users={\"default\": [(\"lasote\", \"mypass\")]})\n\n files = cpp_hello_conan_files(\"Hello0\", \"1.2.1\", build=False)\n client.save(files)\n client.run(\"export frodo/stable\")\n client.run(\"install Hello0/1.2.1@frodo/stable --build=missing\")\n client.run(\"upload Hello0/1.2.1@frodo/stable -r default --all\")\n self.assertIn(\"Uploading conan_package.tgz\", client.user_io.out)\n client.run(\"remove Hello0/1.2.1@frodo/stable -f\")\n client.run(\"search\")\n self.assertNotIn(\"Hello0/1.2.1@frodo/stable\", client.user_io.out)\n client.run(\"install Hello0/1.2.1@frodo/stable\")\n self.assertIn(\"Downloading conan_package.tgz\", client.user_io.out)\n client.run(\"upload Hello0/1.2.1@frodo/stable -r default --all\")\n self.assertIn(\"Uploaded conan recipe\", client.user_io.out)\n self.assertNotIn(\"Uploading conan_package.tgz\", client.user_io.out)\n self.assertIn(\"Package is up to date\", client.user_io.out)" ]
[ "0.71966016", "0.6126126", "0.6103652", "0.60980034", "0.60938525", "0.6091341", "0.6082932", "0.5858274", "0.5816589", "0.57428926", "0.57245314", "0.56891763", "0.5637761", "0.5536747", "0.55115604", "0.5509517", "0.549662", "0.5485993", "0.54698163", "0.5434405", "0.54317516", "0.54068357", "0.54019064", "0.5401485", "0.53933114", "0.5379447", "0.53765494", "0.53762305", "0.53675383", "0.53671914" ]
0.764577
1
reload_from_storage() doesn't break on packages with None summary
def test_reload_none_summary(self):
        pkg = make_package(
            "mypkg3", "1.2", "some/other/path", summary=None, factory=SQLPackage
        )
        keys = [pkg]
        self.storage.list.return_value = keys
        self.db.reload_from_storage()
        # The shim will convert None summary to ""
        pkg.summary = ""
        self.assert_in_redis(pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reload(self):\n keys = [\n make_package(factory=SQLPackage),\n make_package(\n \"mypkg2\",\n \"1.3.4\",\n \"my/other/path\",\n factory=SQLPackage,\n hash_md5=\"md5\",\n hash_sha256=\"sha256\",\n ),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n all_pkgs = self.sql.query(SQLPackage).all()\n self.assertCountEqual(all_pkgs, keys)", "def test_reload(self):\n keys = [\n make_package(factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n all_pkgs = self.engine.scan(DynamoPackage).all()\n self.assertCountEqual(all_pkgs, keys)", "def test_reload_method(self):\n\n self.assertTrue(models.storage.reload() is None)", "def force_load(self):\n pass", "def test_reload(self):\n keys = [\n make_package(factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.storage.list.return_value = keys\n self.db.reload_from_storage()\n for pkg in keys:\n self.assert_in_redis(pkg)", "def handle_reload_toolbox(self):", "def test_reload_if_needed(self):\n self.db.storage = MagicMock()\n self.db.storage.list.return_value = [make_package(factory=SQLPackage)]\n self.db.reload_if_needed()\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)", "def reload(self):", "def reload(self):", "def reload_definitions():\n package_list = [\n # Reload minimum needs\n 'safe.definitions.minimum_needs',\n # Reload everything that depends on minimum_needs\n 'safe.definitions.fields',\n 'safe.definitions',\n\n # Reload min needs postprocessors\n 'safe.processors.minimum_needs_post_processors',\n # Reload everything that depends on postprocessors\n 'safe.processors',\n 'safe.impact_function.postprocessors',\n 'safe.impact_function',\n\n # Reload everything that depends on reporting\n 'safe.report.extractors.aggregate_postprocessors',\n 'safe.report.extractors.minimum_needs',\n 'safe.report'\n ]\n for p in package_list:\n reload(importlib.import_module(p))\n\n from safe.definitions import minimum_needs\n from safe import processors\n LOGGER.debug('Minimum Needs list:')\n for m in minimum_needs.minimum_needs_fields:\n LOGGER.debug(m)\n\n LOGGER.debug('Minimum Needs Processors list:')\n for m in processors.minimum_needs_post_processors:\n LOGGER.debug(m)", "def test_no_reload_if_needed(self):\n cache = DummyCache()\n cache.reload_from_storage = MagicMock()\n cache.distinct = MagicMock()\n cache.distinct.return_value = [\"hi\"]\n cache.reload_if_needed()\n self.assertFalse(cache.reload_from_storage.called)", "def reload(self):\n self.restore()", "def test_reload_if_needed(self):\n cache = DummyCache()\n cache.reload_from_storage = MagicMock()\n cache.reload_if_needed()\n self.assertTrue(cache.reload_from_storage.called)", "def reload(self) -> None: # pragma: no cover\n raise NotImplementedError()", "def reload(self):\n\n pass", "def unloaded():\n pass", "def module_cleanup():\n from bokeh.core.has_props import _default_resolver\n to_reset = list(panel_extension._imports.values())\n\n _default_resolver._known_models = {\n name: model for name, model in _default_resolver._known_models.items()\n if not any(model.__module__.startswith(tr) for tr in to_reset)\n }", "def reload(self):\n # type: () -> None\n parsed_requirements = self.parse()\n self.requirements = parsed_requirements[0]\n self.index_urls = parsed_requirements[1]\n self.nested_cfiles = parsed_requirements[2]\n self.nested_rfiles = 
parsed_requirements[3]", "def reload(bot, event, *args):\n bot.config.load()\n bot.memory.load()", "async def reload(ctx, name):\n await unload_extension(name, channel=ctx.channel)\n await load_extension(name, channel=ctx.channel)", "def reload_placeholder(update):\n pass", "def load(path, reset=False):\n pass", "def force_reload(service):\n _service(service, 'force-reload')", "def _store_package_metadata(self):", "def _load(self):\n raise NotImplementedError()", "def clear_loaded_modules(self):\n self._loaded_modules = []", "def load_full():\n _fetch_full()\n return _load(cache_full, _parse_full)", "async def reload():\n global DF\n DF = load_data()\n return True", "def load_new_data():\n require('settings', provided_by=[production, staging])\n \n maintenance_up()\n load_data()\n maintenance_down()", "def load(self):\n return None" ]
[ "0.6402811", "0.62282103", "0.61236465", "0.60962933", "0.5989792", "0.5803124", "0.57837903", "0.57631075", "0.57631075", "0.5665015", "0.5651823", "0.5642191", "0.5568245", "0.551319", "0.54255974", "0.5408669", "0.5347922", "0.5310908", "0.53005844", "0.529438", "0.5281935", "0.52687186", "0.5238618", "0.52355176", "0.5227406", "0.52158135", "0.51942945", "0.51931286", "0.5191911", "0.51856744" ]
0.7729607
0
fetch() returns None if no package exists
def test_fetch_missing(self):
        saved_pkg = self.db.fetch("missing_pkg-1.2.tar.gz")
        self.assertIsNone(saved_pkg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fetch_package(self, package_name):\n\t\t\t\n\t\t\tpackage_root_url = urlparse.urljoin(self.packages_root_url,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tpackage_name + \"/\")\n\t\t\t\n\t\t\tpackage_info_url = urlparse.urljoin(package_root_url, \"info\")\n\t\t\tpackage_archive_url = urlparse.urljoin(package_root_url, \"archive\")\n\t\t\t\n\t\t\tlogger.debug(\"Get: {0}\".format(package_info_url))\n\t\t\ttry:\n\t\t\t\tinfo = json.loads(urllib2.urlopen(package_info_url).read())\n\t\t\t\treturn ups.package.Package(self, package_root_url, info)\n\t\t\texcept urllib2.HTTPError as e:\n\t\t\t\traise RepositoryError(e)\n\t\t\texcept ValueError as e:\n\t\t\t\traise RepositoryError(\"Unable to parse info file: {0}\".format(e))", "def test_fetch(self):\n pkg = make_package(factory=DynamoPackage)\n self._save_pkgs(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def test_fetch(self):\n pkg = make_package()\n self.db.save(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def translate(self, package):\r\n if not isinstance(package, self._package_type):\r\n return None\r\n if not package.compatible(identity=self._identity, platform=self._platform):\r\n return None\r\n try:\r\n bdist = package.fetch(location=self._install_cache, conn_timeout=self._conn_timeout)\r\n except package.UnreadableLink as e:\r\n TRACER.log('Failed to fetch %s: %s' % (package, e))\r\n return None\r\n return DistributionHelper.distribution_from_path(bdist)", "def fetch_package(source, method=None, headers=None, auth=None):\n #if method not in ('requests', 'curl_cli'):\n # raise Exception('Fetch package method \"{}\" not found'.format(method))\n if not method:\n method = detect_fetch_method()\n print('Using fetch method \"{}\"'.format(method))\n print('Source {}'.format(source))\n fetch_method = '_fetch_package_{}'.format(method)\n package = eval(fetch_method)(source, headers, auth)\n return package", "def fetch_pkgbuild(self):\n\n package_dir = os.path.join(Package.cache_dir, self.pkgbase)\n\n # check if repo has ever been fetched\n if os.path.isdir(package_dir):\n if run([\"git\", \"fetch\"], cwd=package_dir).returncode != 0:\n logging.error(\"git fetch failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git fetch failed in directory {}\".format(package_dir))\n\n head = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n u = run(\n [\"git\", \"rev-parse\", \"@{u}\"], stdout=PIPE, universal_newlines=True, cwd=package_dir\n ).stdout.strip()\n\n # if new sources available\n if head != u:\n reset_return = run(\n [\"git\", \"reset\", \"--hard\", \"HEAD\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if reset_return.returncode != 0:\n print(reset_return.stderr)\n logging.error(\"git reset failed in directory {}\".format(package_dir))\n raise InvalidInput(\"git reset failed in directory {}\".format(package_dir))\n\n pull_return = run(\n [\"git\", \"pull\"],\n stdout=DEVNULL, stderr=PIPE, cwd=package_dir, universal_newlines=True\n )\n if pull_return.returncode != 0:\n print(pull_return.stderr)\n logging.error(\"git pull failed in directory {}\".format(package_dir))\n raise ConnectionProblem(\"git pull failed in directory {}\".format(package_dir))\n\n # repo has never been fetched\n else:\n # create package dir\n try:\n os.makedirs(package_dir, mode=0o700, exist_ok=True)\n except OSError:\n logging.error(\"Creating package dir {} 
failed\".format(package_dir))\n raise InvalidInput(\"Creating package dir {} failed\".format(package_dir))\n\n # clone repo\n if run(\n [\"git\", \"clone\", \"{}/{}.git\".format(AurVars.aur_domain, self.pkgbase)],\n cwd=Package.cache_dir\n ).returncode != 0:\n logging.error(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))\n raise ConnectionProblem(\"Cloning repo of {} failed in directory {}\".format(self.name, package_dir))", "def test_fetch(self):\n pkg = make_package(factory=SQLPackage)\n self.sql.add(pkg)\n saved_pkg = self.db.fetch(pkg.filename)\n self.assertEqual(saved_pkg, pkg)", "def fetch():\n return True", "def fetch(opts):\n\n assert opts\n cache_dir = opts.cache_dir\n name = opts.name\n revision = opts.revision\n\n if not GIT.exists():\n err('unable to fetch package; git is not installed')\n return None\n\n git_dir = '--git-dir=' + cache_dir\n\n # check if we have the target revision cached; if so, package is ready\n if os.path.isdir(cache_dir) and not opts.ignore_cache:\n erv = revision_exists(git_dir, revision)\n if erv in REVISION_EXISTS:\n # ensure configuration is properly synchronized\n if not _sync_git_configuration(opts):\n return None\n\n # if no explicit ignore-cache request and if the revision is a\n # branch, force ignore-cache on and allow fetching to proceed\n if opts.ignore_cache is None and erv == GitExistsType.EXISTS_BRANCH:\n opts.ignore_cache = True\n # return cache dir if not verifying or verification succeeds\n elif not opts._git_verify_revision or _verify_revision(\n git_dir, revision, quiet=True):\n return cache_dir\n\n note('fetching {}...', name)\n sys.stdout.flush()\n\n # validate any cache directory (if one exists)\n has_cache, bad_validation = _validate_cache(cache_dir)\n if bad_validation:\n return None\n\n # if we have no cache for this repository, build one\n if not has_cache:\n if not ensure_dir_exists(cache_dir):\n return None\n\n if not _create_bare_git_repo(cache_dir):\n return None\n\n # ensure configuration is properly synchronized\n if not _sync_git_configuration(opts):\n return None\n\n # fetch sources for this repository\n if not _fetch_srcs(opts, cache_dir, revision, refspecs=opts._git_refspecs):\n return None\n\n # verify revision (if configured to check it)\n if opts._git_verify_revision:\n if not _verify_revision(git_dir, revision):\n err('''\\\nfailed to validate git revision\n\nPackage has been configured to require the verification of the GPG signature\nfor the target revision. The verification has failed. 
Ensure that the revision\nis signed and that the package's public key has been registered in the system.\n\n Package: {}\n Revision: {}''', name, revision)\n return None\n\n # fetch submodules (if configured to do so)\n if opts._git_submodules:\n if not _fetch_submodules(opts, cache_dir, revision):\n return None\n\n return cache_dir", "def test_load_one_pip(self, build_image_for_jupyterlab):\n\n key = \"pip&gtmunit1\"\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n\n loader = PackageLatestVersionLoader([key], lb, username)\n promise1 = loader.load(key)\n assert isinstance(promise1, Promise)\n\n pkg = promise1.get()\n assert pkg == '0.12.4'", "def get_package_info(package_name):\n r = requests.get(f'https://api.npms.io/v2/search?q={package_name}&size=1')\n response_json = r.json()\n\n if 'results' in response_json:\n result = response_json['results'][0]\n return result['package']", "def find_module(self, abs_name, path=None):\n package_name = abs_name.split(\".\")[0]\n\n last_name = abs_name.split(\".\")[-1]\n if last_name in sys.modules:\n return None\n\n try:\n # means it can already be imported, no work to be done here\n imp.find_module(abs_name)\n\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # see the comment in the docstring\n return None\n except ImportError as e:\n pass\n\n if package_name == \"talus\" and self._module_in_git(abs_name):\n self.download_module(abs_name)\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # see the comment in the docstring\n return None\n\n if package_name in self.cache[\"packages\"] and package_name not in sys.modules:\n self.install_package_from_talus(package_name)\n return None\n\n # we NEED to have the 2nd check here or else it will keep downloading\n # the same package over and over\n if package_name in self.cache[\"pypi\"] and package_name not in sys.modules:\n self.install_cached_package(package_name)\n return None\n\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # THIS IS IMPORTANT, YES WE WANT TO RETURN NONE!!!\n # see the comment in the docstring\n return None", "def cabal_get(name, unpack_to, version=None):\n pkg = name\n if version is not None:\n pkg += \"==\"\n pkg += version\n\n return run([\"cabal\", \"get\", pkg, \"-d\", unpack_to])", "def fetch(self, location=None, conn_timeout=None):\r\n target = super(SourcePackage, self).fetch(conn_timeout=conn_timeout)\r\n return self._unpack(target, location)", "def package_exists (package_name, package_version, lang):\n\n url = make_package_url (package_name, package_version, lang)\n victim_file = download_file (url)\n\n if victim_file is None:\n return False\n else:\n return True", "def fetch_maybe(cls, url, path, save=False):\n if os.path.isfile(path):\n # print(\"Found %s\" % os.path.basename(path))\n with open(path, \"rb\") as file:\n return file.read(), True\n if save:\n return cls.fetch_and_save(url, path), False\n return cls.fetch_with_retry(url), False", "def __fetch_remote_source(self):\n # type: () -> Union(Git, None)\n if self.source == 'git':\n return self.git_source_class(**self.configuration).fetch()\n return 
None", "def test_install_without_connection_from_cache():\n\n\tclean_apt(full=False)\n\n\twith no_connection():\n\t\tassert packaging.install_packages(pkgs) == None", "async def get_latest_version(self, pkg: str) -> Optional[str]:\n return None", "def _get_metadata(self, pkg_name):\n pkg_name = urllib.parse.quote(pkg_name, safe='@')\n if self.metadatas.get(pkg_name):\n return self.metadatas.get(pkg_name)\n else:\n url = urllib.parse.urljoin(self.REGISTRY, pkg_name)\n try:\n pkg_metadata = requests.get(url).json()\n self.metadatas[pkg_name] = pkg_metadata\n return pkg_metadata\n except urllib.error.HTTPError as e:\n print('Could not download {} from: {} with error: {}'. format(pkg_name, url, e.msg))\n exit(-1)", "def get_pkg(pkg_name):\n pkg = Database().db.get_pkg_details(pkg_name, \"\", False)\n pkg = PackageDetail.surClass(pkg)\n print('dir: ', dir(pkg))\n \n #print('name:', pkg.nane)\n #print('props.name:', pkg.props.nane)\n return render_template(\"pkg.html\", \n title=\" - \"+pkg_name,\n repos=Database().db.get_repos_names(),\n pkg=pkg)", "def getPackageFromResource(resource):\n import sd\n url = resource.getUrl()\n pkg_manager = sd.getContext().getSDApplication().getPackageMgr()\n for p in pkg_manager.getPackages():\n for r in p.getChildrenResources(False):\n if r.getUrl() == url:\n return p\n return None", "def test_npm_latest_version_request(_foo):\n version = NPMMonitor.fetch_latest_package_version('foobar')\n assert version == '1.3.5'\n assert NPMMonitor.fetch_latest_package_version('foobar') is None\n assert NPMMonitor.fetch_latest_package_version('foobar') is None", "def get_package(self, __package_id):\n raise NotImplementedError", "def test_load(self):\n kwargs = {\"url\": \"my.url\", \"expire\": 7237}\n pkg = make_package(**kwargs)\n # Due to some rounding weirdness in old Py3 versions, we need to remove\n # the microseconds to avoid a flappy test.\n # See: https://bugs.python.org/issue23517\n pkg.last_modified = pkg.last_modified.replace(microsecond=0)\n self.db.save(pkg)\n\n loaded = self.db.fetch(pkg.filename)\n self.assertEqual(loaded.name, pkg.name)\n self.assertEqual(loaded.version, pkg.version)\n self.assertEqual(loaded.filename, pkg.filename)\n self.assertEqual(loaded.last_modified, pkg.last_modified)\n self.assertEqual(loaded.summary, pkg.summary)\n self.assertEqual(loaded.data, kwargs)", "def getusersitepackages():\n\tpass", "def get_info(self, pkgname):\n for pkg in self.rpc.info(pkgname):\n return pkg", "def _fetch_package_requests(source, headers, auth):\n import requests\n dest = build_temp_package_filepath()\n response = requests.get(source, stream=True, headers=headers, auth=auth)\n response.raise_for_status()\n with open(dest, 'wb') as handle:\n for block in response.iter_content(1024):\n handle.write(block)\n package = tarfile.open(dest)\n return package", "def get_package(package, create=False):\n if isinstance(package, basestring):\n if create:\n package = Package.objects.get_or_create(name=package)[0]\n else:\n try:\n package = Package.objects.get(name=package)\n except Package.DoesNotExist:\n package = None\n return package", "def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 
'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 
'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = 
Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True" ]
[ "0.6841072", "0.67000926", "0.66926116", "0.6597814", "0.6547767", "0.6434574", "0.64287364", "0.62104625", "0.6064177", "0.5982968", "0.59753776", "0.5958001", "0.5895387", "0.5870588", "0.5860548", "0.58389133", "0.5822474", "0.5799762", "0.57829195", "0.57777876", "0.57693094", "0.57109016", "0.5648336", "0.56448364", "0.5641809", "0.56316715", "0.5619414", "0.55990696", "0.55963135", "0.55917925" ]
0.73527503
0
all() returns all versions of a package
def test_all_versions(self):
        pkgs = [
            make_package(factory=DynamoPackage),
            make_package(version="1.3", filename="mypath3", factory=DynamoPackage),
            make_package("mypkg2", "1.3.4", "my/other/path", factory=DynamoPackage),
        ]
        self._save_pkgs(*pkgs)
        saved_pkgs = self.db.all("mypkg")
        self.assertCountEqual(saved_pkgs, pkgs[:2])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_versions(cls) -> list[str]:\n\n s = run([cls.command, \"install\", \"-l\"])\n versions = s.split()\n\n return list(filter(cls._is_python_version, versions))", "def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info", "def get_all_package_versions(self):\n with self._conn.begin():\n return {\n (rec.package, rec.version)\n for rec in self._conn.execute(self._versions.select())\n }", "def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def get_all_available_versions_of_package(self, package_name):\n # Sample output format\n # Available Packages\n # kernel.x86_64 3.10.0-862.el7 base\n # kernel.x86_64 3.10.0-862.2.3.el7 updates\n # kernel.x86_64 3.10.0-862.3.2.el7 updates\n cmd = self.single_package_check_versions.replace('<PACKAGE-NAME>', package_name)\n output = self.invoke_package_manager(cmd)\n packages, package_versions = self.extract_packages_and_versions_including_duplicates(output)\n return package_versions", "def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions", "def include_all_versions(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"include_all_versions\")", "def get_mirror_versions(factory, package):\n all_vers = []\n for backend in factory.backends:\n vers = backend.get_packages_db().get_mirror_versions(package)\n for ver in vers:\n path = backend.get_packages_db().get_mirror_path(package, ver)\n all_vers.append((ver, \"%s/%s\"%(backend.base,path)))\n return all_vers", "def get_all_packages(self):\n return self._package_cache.values()", "def all(self):\r\n if self._versions 
is None or \\\r\n len(self._versions) == 0:\r\n url = \"%s/versions\" % self._url\r\n params = {'f':'json'}\r\n res = self._con.get(url, params)\r\n self._versions = []\r\n if 'versions' in res:\r\n for v in res['versions']:\r\n guid = v['versionGuid'][1:-1]\r\n vurl = \"%s/versions/%s\" % (self._url, guid)\r\n self._versions.append(Version(url=vurl,\r\n flc=self._flc,\r\n gis=self._gis))\r\n return self._versions\r\n return self._versions", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def get_used_release_specs(package, installed_version=None):", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def packages():", "def version(*names, **kwargs):\n ret = {}\n versions_as_list = salt.utils.data.is_true(kwargs.pop(\"versions_as_list\", False))\n pkg_glob = False\n if names:\n pkgs = __salt__[\"pkg.list_pkgs\"](versions_as_list=True, **kwargs)\n for name in names:\n if \"*\" in name:\n pkg_glob = True\n for match in fnmatch.filter(pkgs, name):\n ret[match] = pkgs.get(match, [])\n else:\n ret[name] = pkgs.get(name, [])\n if not versions_as_list:\n __salt__[\"pkg_resource.stringify\"](ret)\n # Return a string if no globbing is used, and there is one item in the\n # return dict\n if len(ret) == 1 and not pkg_glob:\n try:\n return next(iter(ret.values()))\n except StopIteration:\n return \"\"\n return ret", "def available_versions(self):\n return list(sorted(self.onxs))", "def _get_autogluon_versions():\n versions = dict()\n for pkg in list(pkgutil.iter_modules(autogluon.__path__, autogluon.__name__ + \".\")):\n # The following packages will be recognized as a submodule by pkgutil -exclude them.\n if pkg.name in [\"autogluon.version\", \"autogluon.setup\", \"autogluon._internal_\"]:\n continue\n try:\n versions[pkg.name] = version(pkg.name)\n versions.update(_get_dependency_versions(pkg.name))\n except ImportError:\n versions[pkg.name] = None\n return versions", "def get_all_versions(func, source=None):\n\n func = validate_func(func, source)\n return get_all_funcs(func.__dict__)", "def get_versions(self):\n raise NotImplementedError", "def select_versions(self):\n return []", "def check_versions(ctx, show=False):\n sys.path.insert(0, os.path.join(ROOT_DIR, '_tools'))\n import versions\n versions.main()", "def getversions(package_name: str) -> list:\n\t# execute command\n\tproc = subprocess.Popen(['pip', 'install', package_name+'==CRASHME'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tproc.wait()\n\n\t# processed returned data\n\tlines = proc.stderr.read().decode('utf8')\n\tsearchterm = \"(from versions:\"\n\tstart = lines.find(searchterm) + len(searchterm)\n\tend = lines.find(\")\", start)\n\tlines = lines[start:end].split(',')\n\tlines = list(map(lambda x: x.strip(), lines))\n\n\treturn lines", "def check_pkg_consistency():\n pass", 
"def listpacks(all: bool=False) -> [str, str]:\n\t# execute command\n\tcommand = ['pip', 'freeze']\n\tif all:\n\t\tcommand.append('--all')\n\tproc = subprocess.Popen(command, stdout=subprocess.PIPE)\n\tproc.wait()\n\n\t# process returned data\n\tlines = proc.stdout.read().decode('utf8')\n\tlines = list(\n\t\tfilter(\n\t\t\tlambda inf: inf[0] and inf[0].split(' ')[0].lower() != '-e',\n\t\t\tmap(\n\t\t\t\tlambda inf: list(map(\n\t\t\t\t\tlambda x: x.lower().strip(),\n\t\t\t\t\tinf.split('==')\n\t\t\t\t\t)),\n\t\t\t\tlines.split('\\n')\n\t\t\t)\n\t\t)\n\t)\n\n\treturn lines", "def get_all_packages(cls):\n packages = Package.query.all()\n return packages", "def get_python_verlist(): \n l = []\n fv = []\n \n for pyexe in find_all_pythons():\n v = get_pyver_from_exe(pyexe)\n if v != None and v not in fv: # watch for duplicates\n l.append( (pyexe, v) )\n fv.append(v)\n\n return l" ]
[ "0.717854", "0.696309", "0.6916211", "0.68950415", "0.68423796", "0.6829205", "0.6755086", "0.6545546", "0.6498525", "0.6406439", "0.63651323", "0.6318197", "0.6294569", "0.62841994", "0.62509996", "0.6247027", "0.6237215", "0.6169527", "0.6168053", "0.6163518", "0.6115892", "0.60918754", "0.60903203", "0.6076727", "0.6073281", "0.6015901", "0.6015555", "0.6012134", "0.5972232", "0.59359765" ]
0.70585597
1
distinct() returns all unique package names
def test_distinct(self):
        pkgs = [
            make_package(factory=DynamoPackage),
            make_package(version="1.3", filename="mypath3", factory=DynamoPackage),
            make_package("mypkg2", "1.3.4", "my/other/path", factory=DynamoPackage),
        ]
        self._save_pkgs(*pkgs)
        saved_pkgs = self.db.distinct()
        self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_packages():\n\n packages = find_packages()\n packages = ['{}.{}'.format('uniq', package) for package in packages]\n packages.append('uniq')\n return packages", "def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.distinct()\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def test_distinct(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.distinct()\n\n self.assertCountEqual(saved_pkgs, set([p.name for p in pkgs]))", "def distinct(x):\n return list(set(x))", "def unique_deps(deps):\n deps.sort()\n return list(k for k, _ in itertools.groupby(deps))", "def _extract_r_remove_package_names(log):\n start = \"remove.packages(c(\"\n i_start = log.find(start) + len(start)\n i_end = log.find(\")\", i_start)\n package_names = [name.strip('\"') for name in log[i_start:i_end].split(\",\")]\n return package_names", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def get_all_package_ids(self):\n return self._package_cache.keys()", "def getusersitepackages():\n\tpass", "def get_packages(self):\n packages = []\n for obj in self.objects_used:\n packages.extend(obj.get_packages())\n # Remove duplicates (not necessary but it's cleaner)\n packages = list(set(packages))\n return packages", "def get_packages(package):\n return [str(path.parent) for path in Path(package).glob(\"**/__init__.py\")]", "def get_package_names(packages_json) -> List[str]:\n return [package[\"package\"][\"name\"] for package in packages_json[\"results\"]]", "def _extract_conda_remove_package_names(log):\n names = log.split(\"--name\")[-1].split()\n return names[1:]", "def list_unique_names(self):\n return [os.path.splitext(x)[0] for x in os.listdir(self._event_dir)]", "def packages(self):\n return []", "def _fuzzy_products(self, package: ImagePackage) -> typing.List[str]:\n\n products = {package.name}\n # TODO: add the generic product generation code (including nomatch exclusions here)\n return list(products)", "def list_packages(self):\n for tag, pkg in PACKAGES.iteritems():\n print \"{tag} - {label}\".format(tag=tag, label=pkg['label'])", "def pyranose_names(self):\n output = set()\n for item in self.pyranoses():\n if item in self.pyranose_fac:\n output.add(self.pyranose_fac[item][\"name\"])\n return list(output)", "def _filter_pkgs(self, pkgs):\n pkgs = [pkg.strip() for pkg in pkgs]\n return [\n pkg for pkg in pkgs\n if pkg.startswith(self.base_pkg) and not pkg.startswith(os.path.join(self.base_pkg, \"vendor/\"))\n ]", "def list_minerals():\n return _list_tindyb_unique_values(\"name\", dbpath=__dbpath__)", "def list_packages():\n\n shelf_dir = settings.shelf_dir\n\n package_list = os.listdir(shelf_dir)\n\n package_list.sort()\n\n return package_list", "def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, 
self.get_app_ids()))", "def unique_detector_names() :\n return experiment_info.unique_detector_names()", "def get_all_packages(cls):\n packages = Package.query.all()\n return packages", "def IterUniqueSymbols(self):\n return SymbolGroup._IterUnique(self)", "def list_packages ( self, name_only=False ):\n if name_only:\n for name, subdir in self._subdirs.items():\n if not subdir.empty():\n yield name\n else:\n for name, subdir in self._subdirs.items():\n if not subdir.empty():\n yield self.name + os.sep + name", "def get_non_vendor_package_path(aea_project_path: Path) -> Set[Path]:\n result: Set[Path] = set()\n for item_type_plural in ComponentType.plurals():\n nonvendor_package_dir_of_type = aea_project_path / item_type_plural\n result = result.union(\n {p for p in nonvendor_package_dir_of_type.iterdir() if p.is_dir()}\n if nonvendor_package_dir_of_type.exists()\n else {}\n )\n return result", "def removed_pkgs():\n name_versions = defaultdict(set)\n fedoras = py2_pkgs()\n last_fedoras = defaultdict(set)\n new = {pkg.name for pkg in repoquery(all=True)}\n for version in fedoras:\n for name_evr in set(fedoras[version]):\n name, _, evr = name_evr.partition(' ')\n if name not in new:\n name_versions[name].add(evr)\n last_fedoras[version].add(name)\n max_versions = {name: max(versions, key=SortableEVR)\n for name, versions in name_versions.items()}\n return last_fedoras, max_versions", "def list_package_ids(self):\n raise NotImplementedError", "def __catalogue__(interface):\n names = []\n seen = set()\n for component in interface.__implementations__():\n for name in component.__names__:\n if name not in seen:\n names.append(name)\n seen.add(name)\n names.sort(key=(lambda n: str(n)))\n return names" ]
[ "0.73910946", "0.72039706", "0.7160833", "0.636343", "0.6134391", "0.60275495", "0.60188", "0.5997735", "0.59974086", "0.5962981", "0.5940934", "0.5940207", "0.5934591", "0.59199566", "0.58889276", "0.58124715", "0.58024675", "0.5769506", "0.5720157", "0.5686013", "0.56856835", "0.5653271", "0.5645811", "0.5632108", "0.56284946", "0.56187624", "0.56151944", "0.5611537", "0.56105375", "0.55809677" ]
0.72898066
1
Can upload multiple packages that have the same version
def test_multiple_packages_same_version(self): with patch.object(self.request.access, "allow_overwrite", []): name, version = "a", "1" path1 = "old_package_path-1.tar.gz" self.db.upload(path1, BytesIO(b"test1234"), name, version) path2 = "new_path-1.whl" self.db.upload(path2, BytesIO(b"test1234"), name, version) all_versions = self.db.all(name) self.assertEqual(len(all_versions), 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_multiple_packages_same_version(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = []\n name, version = \"a\", \"1\"\n path1 = \"old_package_path-1.tar.gz\"\n cache.upload(path1, BytesIO(b\"test1234\"), name, version)\n path2 = \"new_path-1.whl\"\n cache.upload(path2, BytesIO(b\"test1234\"), name, version)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 2)\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 2)", "def upload_packages(self, packages):\n\n context = {\n \"username\": self.username,\n \"reponame\": self.reponame,\n \"name\": self.name\n }\n\n filepaths = [os.path.join(os.path.dirname(path), pfile['filename'])\n for path in packages\n for pfile in deb_changes(path)['files']]\n filepaths.extend(packages)\n\n # get upload token\n resp = self._client.postjson(path=\"/users/%(username)s/\"\n \"repos/%(reponame)s/\"\n \"branches/%(name)s/get_upload_token\" %\n context)\n token = resp['utoken']\n for pfile in filepaths:\n self._client.upload(path=\"/upload/%s/send/%s\" %\n (token, os.path.basename(pfile)),\n filepath=pfile)\n self._client.post(path=\"/upload/%s/dput\" % token)", "def add_uppers():\n for filename, requirements in _sync():\n LOG.info(\"Obtaining latest versions of packages for %s.\", filename)\n for req in requirements:\n if isinstance(req, Requirement):\n if isinstance(req.version, dict) and not req.version[\"max\"]:\n req.sync_max_version_with_pypy()\n _write_requirements(filename, requirements)", "def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n self.sql.add_all(pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def upload_package(self, __contents):\n raise NotImplementedError", "def test_all_versions(self):\n pkgs = [\n make_package(factory=SQLPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=SQLPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=SQLPackage),\n ]\n for pkg in pkgs:\n self.db.save(pkg)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def test_all_versions(self):\n pkgs = [\n make_package(factory=DynamoPackage),\n make_package(version=\"1.3\", filename=\"mypath3\", factory=DynamoPackage),\n make_package(\"mypkg2\", \"1.3.4\", \"my/other/path\", factory=DynamoPackage),\n ]\n self._save_pkgs(*pkgs)\n saved_pkgs = self.db.all(\"mypkg\")\n self.assertCountEqual(saved_pkgs, pkgs[:2])", "def uploadPackages(self, directory):\n files_to_upload_dict = {}\n files_to_upload_list = [ f for f in listdir(directory) if isfile(join(directory,f)) ]\n self.logger.debug(\"uploadPackages(\" + \"{})\".format(directory))\n #print \"Files to upload:\"\n for index in range(len(files_to_upload_list)):\n self.logger.info(files_to_upload_list[index])\n self.uploadFileToIM (directory, files_to_upload_list[index], files_to_upload_list[index])\n #file_tuple = {'files':{str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}} \n #file_tuple = {str(files_to_upload_list[index]), {open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}}\n #file_tuple = {'files': (str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 
'rb'), 'application/x-rpm')}\n #file_tuple = (str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'))\n #file_tuple = {str(files_to_upload_list[index]), open(directory + files_to_upload_list[index], 'rb'), 'application/x-rpm'}\n #files_data_to_upload_list.append(file_tuple)\n #print \"Files to upload Dictionary:\"", "def test_load_many_pip(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"pip&gtmunit1\", \"pip&gtmunit2\", \"pip&gtmunit3\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"0.12.4\"\n assert version_list[1] == \"12.2\"\n assert version_list[2] == \"5.0\"", "def packages():", "def _provision_package(self):", "def test_runs_with_multiple_packages(self, default_hooks):\n result = default_hooks.act_on_cloned_repo(MULTIPLE_PACKAGES_REPO)\n\n assert result.status == Status.SUCCESS", "def get_package_versions(name: str) -> List[str]:\n with request.urlopen(PYPI_SIMPLE_API_URL + name) as response:\n html = response.read()\n\n return re.findall(f'>{name}-(.+).tar', html.decode())", "def get_changed_packages(blob_name1, blob_name2, package_list):\n changed_files = check_output(\n 'git', 'diff', '--name-only', blob_name1, blob_name2)\n changed_files = changed_files.split('\\n')\n\n result = set()\n for filename in changed_files:\n file_root = rootname(filename)\n if file_root in package_list:\n result.add(file_root)\n\n return sorted(result)", "def test_all_python_versions_deploy():\n pass", "def test_upload_overwrite(self):\n self.request.access.allow_overwrite = [\"everyone\"]\n name, filename = \"a\", \"a-1.tar.gz\"\n self.db.upload(filename, BytesIO(b\"old\"), name)\n self.db.upload(filename, BytesIO(b\"new\"), name)\n\n all_versions = self.db.all(name)\n self.assertEqual(len(all_versions), 1)", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def test_load_many_mixed(self, build_image_for_jupyterlab):\n lb, username = build_image_for_jupyterlab[0], build_image_for_jupyterlab[5]\n keys = [\"conda3&cdutil\", \"pip&gtmunit1\", \"conda3&nltk\"]\n loader = PackageLatestVersionLoader(keys, lb, username)\n promise1 = loader.load_many(keys)\n assert isinstance(promise1, Promise)\n\n version_list = promise1.get()\n assert len(version_list) == 3\n assert version_list[0] == \"8.1\"\n assert version_list[1] == \"0.12.4\"\n assert version_list[2] == \"3.2.5\"", "def upload_jars(configs):\n print(\"Upload jars to signing server...\")\n jar_list = []\n for module_name in configs[\"moduleNames\"]:\n module_folder = get_module_folder(configs, module_name)\n module_jars = get_folder_files(module_folder, [\"*.jar\"])\n\n for module_jar in module_jars:\n print(\"--Uploading \" + module_jar)\n jar_list.append(os.path.basename(module_jar))\n shutil.copy2(module_jar, configs[\"toSignFolder\"])\n\n return jar_list", "def test_multiple_manifest_multiple_dep(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1 + DEP_2, True)\n collector.parse_and_collect(MANIFEST_START + DEP_1 + DEP_2, True)\n packages = 
dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri, pydantic': 2\n }", "def test_3x_only_python_versions_deploy():\n pass", "def _get_versions(self, package):\n raise NotImplementedError(self, \"_get_versions\")", "def populate_package(package_count: int) -> None:\n logging.info(f\"Fetching {package_count} packages\")\n response = CurlController.send_get_request(url=CONFIG.EXTERNAL_API.ALL_PACKAGES)\n get_version = False\n count = 0\n temp_dir = filestore.generate_temp_dir()\n # Local Testing\n # response_arr = ['Package: A3', 'Version: 1.0.0', 'Depends: R (>= 2.15.0), xtable, pbapply', 'Suggests: randomForest, e1071', 'License: GPL (>= 2)', 'MD5sum: 027ebdd8affce8f0effaecfcd5f5ade2', 'NeedsCompilation: no', '', 'Package: aaSEA', 'Version: 1.1.0', 'Depends: R(>= 3.4.0)', 'Imports: DT(>= 0.4), networkD3(>= 0.4), shiny(>= 1.0.5),', ' shinydashboard(>= 0.7.0), magrittr(>= 1.5), Bios2cor(>= 2.0),', ' seqinr(>= 3.4-5), plotly(>= 4.7.1), Hmisc(>= 4.1-1)', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 0f9aaefc1f1cf18b6167f85dab3180d8', 'NeedsCompilation: no', '', 'Package: AATtools', 'Version: 0.0.1', 'Depends: R (>= 3.6.0)', 'Imports: magrittr, dplyr, doParallel, foreach', 'License: GPL-3', 'MD5sum: 3bd92dbd94573afb17ebc5eab23473cb', 'NeedsCompilation: no', '', 'Package: ABACUS', 'Version: 1.0.0', 'Depends: R (>= 3.1.0)', 'Imports: ggplot2 (>= 3.1.0), shiny (>= 1.3.1),', 'Suggests: rmarkdown (>= 1.13), knitr (>= 1.22)', 'License: GPL-3', 'MD5sum: 50c54c4da09307cb95a70aaaa54b9fbd', 'NeedsCompilation: no', '', 'Package: abbyyR', 'Version: 0.5.5', 'Depends: R (>= 3.2.0)', 'Imports: httr, XML, curl, readr, plyr, progress', 'Suggests: testthat, rmarkdown, knitr (>= 1.11), lintr', 'License: MIT + file LICENSE', 'MD5sum: e048a3bca6ea32126e6c367415c0bfaf', 'NeedsCompilation: no', '', 'Package: abc', 'Version: 2.1', 'Depends: R (>= 2.10), abc.data, nnet, quantreg, MASS, locfit', 'License: GPL (>= 3)', 'MD5sum: c9fffe4334c178917f762735aba59653', 'NeedsCompilation: no', '', 'Package: abc.data', 'Version: 1.0', 'Depends: R (>= 2.10)', 'License: GPL (>= 3)', 'MD5sum: 799079dbbdd0cfc9d9c61c3e35241806', 'NeedsCompilation: no', '', 'Package: ABC.RAP', 'Version: 0.9.0', 'Depends: R (>= 3.1.0)', 'Imports: graphics, stats, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: 38c65a7251d28ef2462ee430ded95700', 'NeedsCompilation: no', '', 'Package: abcADM', 'Version: 1.0', 'Imports: Rcpp (>= 1.0.1)', 'LinkingTo: Rcpp, BH', 'License: GPL-3', 'MD5sum: 8134f67912b506194e3dab4ccd6e75f7', 'NeedsCompilation: yes', '', 'Package: ABCanalysis', 'Version: 1.2.1', 'Depends: R (>= 2.10)', 'Imports: plotrix', 'License: GPL-3', 'MD5sum: 678e03837e25a922bf71bafe1f8de617', 'NeedsCompilation: no', '', 'Package: abcdeFBA', 'Version: 0.4', 'Depends: Rglpk,rgl,corrplot,lattice,R (>= 2.10)', 'Suggests: LIM,sybil', 'License: GPL-2', 'MD5sum: c84d45a85d8ab6bbe517365e8845db83', 'NeedsCompilation: no', '', 'Package: ABCoptim', 'Version: 0.15.0', 'Imports: Rcpp, graphics, stats, utils', 'LinkingTo: Rcpp', 'Suggests: testthat, covr', 'License: MIT + file LICENSE', 'MD5sum: a62ed03650273c09899655065437078f', 'NeedsCompilation: yes', '', 'Package: ABCp2', 'Version: 1.2', 'Depends: MASS', 'License: GPL-2', 'MD5sum: e920282d5a369df71e15241be40cb60e', 'NeedsCompilation: no', '', 'Package: abcrf', 'Version: 1.8.1', 'Depends: R(>= 3.1)', 'Imports: readr, MASS, matrixStats, ranger, doParallel, parallel,', ' foreach, stringr, Rcpp (>= 0.11.2)', 'LinkingTo: Rcpp, RcppArmadillo', 'License: GPL (>= 2)', 
'MD5sum: 4d5a304f46d117226791523cef4e2427', 'NeedsCompilation: yes', '', 'Package: abcrlda', 'Version: 1.0.3', 'Imports: stats', 'License: GPL-3', 'MD5sum: 651e6e18e08916b443aaf011b5a63525', 'NeedsCompilation: no', '', 'Package: abctools', 'Version: 1.1.3', 'Depends: R (>= 2.10), abc, abind, parallel, plyr, Hmisc', 'Suggests: ggplot2, abc.data', 'License: GPL (>= 2)', 'MD5sum: c5937b65837ef7e6bfbe141cea257f40', 'NeedsCompilation: yes', '', 'Package: abd', 'Version: 0.2-8', 'Depends: R (>= 3.0), nlme, lattice, grid, mosaic', 'Suggests: boot, car, ggplot2, plyr, HH, ICC, vcd, Hmisc', 'License: GPL-2', 'MD5sum: 1913d76a0fbc44222709381f63f385b9', 'NeedsCompilation: no', '', 'Package: abdiv', 'Version: 0.2.0', 'Imports: ape', 'Suggests: testthat (>= 2.1.0), vegan', 'License: MIT + file LICENSE', 'MD5sum: 80931c0ca85ba5386000bf617552c5ce', 'NeedsCompilation: no', '', 'Package: abe', 'Version: 3.0.1', 'License: GPL (>= 2)', 'MD5sum: 9c151db5397422c8927dee41dabfbfab', 'NeedsCompilation: no', '', 'Package: abess', 'Version: 0.3.0', 'Depends: R (>= 3.1.0)', 'Imports: Rcpp, MASS, methods, Matrix', 'LinkingTo: Rcpp, RcppEigen', 'Suggests: testthat, knitr, rmarkdown', 'License: GPL (>= 3) | file LICENSE', 'MD5sum: e0ea7d068147c49c011c7135ab290bd3', 'NeedsCompilation: yes', '', 'Package: abf2', 'Version: 0.7-1', 'License: Artistic-2.0', 'MD5sum: 6792a51c6fb3e239165d69aa8a71d3cd', 'NeedsCompilation: no', '', 'Package: abglasso', 'Version: 0.1.1', 'Imports: MASS, pracma, stats, statmod', 'Suggests: testthat', 'License: GPL-3', 'MD5sum: 18bd0759cd005c5ac6fb515799b3f3d8', 'NeedsCompilation: no', '', 'Package: ABHgenotypeR', 'Version: 1.0.1', 'Imports: ggplot2, reshape2, utils', 'Suggests: knitr, rmarkdown', 'License: GPL-3', 'MD5sum: ca4397ba7390c0e0a3728c0cda864494', 'NeedsCompilation: no', '', 'Package: abind', 'Version: 1.4-5', 'Depends: R (>= 1.5.0)', 'Imports: methods, utils', 'License: LGPL (>= 2)', 'MD5sum: 136f981e1c4f618b64a87faaa7797c97', 'NeedsCompilation: no', '', 'Package: abjutils', 'Version: 0.3.1', 'Depends: R (>= 4.0)', 'Imports: dplyr, magrittr, purrr, rlang, rstudioapi, stringi, stringr,', ' tidyr', 'Suggests: testthat', 'License: MIT + file LICENSE', 'MD5sum: a596c07aaa7f82e5d123b2f7354e5b55', 'NeedsCompilation: no', '', 'Package: abmR', 'Version: 1.0.2', 'Depends: R (>= 3.5)', 'Imports: sp, rgdal, table1, googledrive, swfscMisc, geosphere,', ' kableExtra, gtsummary, ggplot2, gstat, purrr, rnaturalearth,', ' rnaturalearthdata, sf, tmap, raster, utils, stats, methods,', ' rgeos', 'Suggests: jpeg, knitr', 'License: GPL (>= 3)', 'MD5sum: cf96d']\n response_arr = response.decode(\"utf-8\").split(\"\\n\")\n with temp_dir:\n for item in response_arr:\n if count >= package_count:\n break\n if get_version:\n # Fetching the version, once we have the package name\n package_version = Command.get_package_version(item=item)\n if package_version:\n # Generating the required URL for the package to fetch the details\n package_url = Template(\n CONFIG.EXTERNAL_API.PACKAGE_DETAIL\n ).substitute(\n package_name=package_name,\n separator=\"_\",\n package_version=package_version,\n )\n logging.info(f\"Downloading {package_url}\")\n # Downloading the details of the package and extracting the DESCRIPTION file\n extract_file_path = filestore.join_paths(\n prefix=package_name,\n suffix=CONFIG.EXTERNAL_API.DETAIL_FILE_NAME,\n )\n target_dir = filestore.download_file(\n url=package_url,\n temp_dir=temp_dir,\n extract_file_path=extract_file_path,\n )\n # Reading contents of DESCRIPTION file\n package_details = 
filestore.join_paths(\n prefix=temp_dir.name,\n suffix=extract_file_path,\n )\n with open(package_details) as details_file:\n for line in details_file:\n if line.startswith(PackageInfoPrefix.PUBLICATION_DATE):\n publication_time_str = (\n Command.get_publication_timestamp(line)\n )\n publication_timestamp = (\n datetime_util.string_to_datetime(\n publication_time_str\n )\n )\n elif line.startswith(PackageInfoPrefix.TITLE):\n title = Command.get_package_title(line)\n elif line.startswith(PackageInfoPrefix.DESCRIPTION):\n description = Command.get_package_description(line)\n elif line.startswith(PackageInfoPrefix.AUTHOR):\n (\n author_name,\n author_email,\n ) = Command.get_package_author(line)\n elif line.startswith(PackageInfoPrefix.MAINTAINER):\n (\n maintainer_name,\n maintainer_email,\n ) = Command.get_package_maintainer(line)\n\n package_info_dict = {\n \"name\": package_name,\n \"version\": package_version,\n \"publication_timestamp\": publication_timestamp,\n \"title\": title,\n \"description\": description,\n \"author_name\": author_name,\n \"author_email\": author_email,\n \"maintainer_name\": maintainer_name,\n \"maintainer_email\": maintainer_email,\n }\n logging.info(package_info_dict)\n obj = PackageManager.create_object(\n create_data=package_info_dict\n )\n if obj == CONFIG.DB.FAILURE:\n raise Exception(f\"Could not insert package in DB\")\n count += 1\n get_version = False\n # Fetching the package name\n package_name = Command.get_package_name(item=item)\n if package_name:\n get_version = True", "def test_upload_overwrite(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = [\"everyone\"]\n name, filename, content = \"a\", \"a-1.tar.gz\", BytesIO(b\"new\")\n cache.upload(filename, BytesIO(b\"old\"), name)\n cache.upload(filename, content, name)\n\n all_versions = cache.all(name)\n self.assertEqual(len(all_versions), 1)\n data = cache.storage.open(all_versions[0]).read()\n self.assertEqual(data, b\"new\")\n\n stored_pkgs = list(cache.storage.list(cache.new_package))\n self.assertEqual(len(stored_pkgs), 1)", "def upload():\n sh('python setup.py register sdist upload')", "def update(self, iterable):\n for package in iterable:\n self.add_package(package)", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def upload_pkg(identifier, pkgname, metadata, directory, years):\n files = []\n for f in os.scandir(directory):\n if not f.is_symlink():\n continue\n path = os.readlink(f)\n match = re.match(SYMLINK_YEAR_REGEXP, path)\n if not match:\n continue\n year = match[1]\n if year not in years:\n continue\n files.append(f.path)\n if not files:\n return\n # Get last package, to extract a description\n last_pkg = sorted(filter(lambda x: not x.endswith('.sig'), files))[-1]\n pkginfo = extract_pkginfo(last_pkg)\n pkgdesc = pkginfo['pkgdesc'] if 'pkgdesc' in pkginfo else ''\n metadata['description'] = DESCRIPTION.format(pkgname=pkgname, pkgdesc=pkgdesc, url=pkginfo['url'], license=pkginfo['license'])\n metadata['rights'] = 'License: ' + pkginfo['license']\n #print(pkgname, len(files))\n #print(metadata)\n try:\n res = ia.upload(identifier, files=files, metadata=metadata)\n if not all([x.status_code == 200 for x in res]):\n ok = len([x for x in res if x.status_code == 200])\n nok = len([x for x in res if 
x.status_code != 200])\n codes = set([x.status_code for x in res])\n print(\"{}: only {}/{} files uploaded, status codes: {}\".format(identifier, ok, ok+nok, codes), file=sys.stderr)\n print(directory)\n except Exception as e:\n print(\"{}: exception raised\".format(identifier), file=sys.stderr)\n print(e, file=sys.stderr)\n print(directory)", "def test_01_upload_langpacks(self):\n cmd = (\n 'pulp-admin rpm repo uploads langpacks --repo-id {0} '\n '--name {1} --install {1}-%s'\n ).format(self.repo_id, utils.uuid4()).split()\n self.client.run(cmd)\n num_langpacks = _count_langpacks(self.cfg, self.repo_id)\n self.assertEqual(num_langpacks, 1, cmd)", "def not_reupload_test(self):\n servers = {}\n test_server = TestServer([(\"*/*@*/*\", \"*\")], [(\"*/*@*/*\", \"*\")],\n users={\"lasote\": \"mypass\"})\n servers[\"default\"] = test_server\n client = TestClient(servers=servers, users={\"default\": [(\"lasote\", \"mypass\")]})\n\n files = cpp_hello_conan_files(\"Hello0\", \"1.2.1\", build=False)\n client.save(files)\n client.run(\"export frodo/stable\")\n client.run(\"install Hello0/1.2.1@frodo/stable --build=missing\")\n client.run(\"upload Hello0/1.2.1@frodo/stable -r default --all\")\n self.assertIn(\"Uploading conan_package.tgz\", client.user_io.out)\n client.run(\"remove Hello0/1.2.1@frodo/stable -f\")\n client.run(\"search\")\n self.assertNotIn(\"Hello0/1.2.1@frodo/stable\", client.user_io.out)\n client.run(\"install Hello0/1.2.1@frodo/stable\")\n self.assertIn(\"Downloading conan_package.tgz\", client.user_io.out)\n client.run(\"upload Hello0/1.2.1@frodo/stable -r default --all\")\n self.assertIn(\"Uploaded conan recipe\", client.user_io.out)\n self.assertNotIn(\"Uploading conan_package.tgz\", client.user_io.out)\n self.assertIn(\"Package is up to date\", client.user_io.out)" ]
[ "0.7196442", "0.61255217", "0.61043423", "0.6099664", "0.60938144", "0.6092737", "0.60841924", "0.5857378", "0.5818061", "0.57452154", "0.57256395", "0.5691203", "0.56372887", "0.5537358", "0.5511155", "0.55096114", "0.5498683", "0.54872787", "0.546868", "0.54370767", "0.5432951", "0.5407688", "0.5401209", "0.54001623", "0.5390622", "0.5381321", "0.5376578", "0.5374356", "0.5367282", "0.5367197" ]
0.7645613
0
Calling clear_all will keep same table throughput
def test_clear_all_keep_throughput(self):
        throughput = {}
        for model in (DynamoPackage, PackageSummary):
            tablename = model.meta_.ddb_tablename(self.engine.namespace)
            desc = self.dynamo.describe_table(tablename)
            self.dynamo.update_table(desc.name, Throughput(7, 7))
            for index in desc.global_indexes:
                self.dynamo.update_table(
                    desc.name, global_indexes={index.name: Throughput(7, 7)}
                )
        self.db.clear_all()
        for model in (DynamoPackage, PackageSummary):
            tablename = model.meta_.ddb_tablename(self.engine.namespace)
            desc = self.dynamo.describe_table(tablename)
            self.assertEqual(desc.throughput.read, 7)
            self.assertEqual(desc.throughput.write, 7)
            for index in desc.global_indexes:
                self.assertEqual(index.throughput.read, 7)
                self.assertEqual(index.throughput.write, 7)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deleteAll():\n _table.deleteAll()\n _initialiseGlobals()\n\n return", "def flush(self):\n self.table = []", "def clear_tables(self):\n for table in self.modified_tables:\n self.api.do_table_clear(table)\n self.modified_tables = []", "def remove_all():\n \"\"\" Removes all from the database \"\"\"\n redis_store.flushall()", "def clear(self):\n\n self.size = 0\n\n self.table = [[]] * 100\n\n self.keys_set = set()\n\n self.keys_ref = [[]] * 100", "def clear_all(self):\n self.clear_redis()\n self.clear_cache()", "def clear_db():\n for name in TABLES:\n result = execute_query('truncate table {};'.format(name)), ())", "def clear_all():\n bpy.ops.object.select_all(action='SELECT')\n bpy.ops.object.delete()", "def clear_tables(self):\n for name, coin in self.coins.all():\n with self.conflict_resolver.transaction() as session:\n session.query(coin.wallet_model).delete()\n session.query(coin.transaction_model).delete()\n session.query(coin.network_transaction_model).delete()\n session.query(coin.account_model).delete()\n session.query(coin.address_model).delete()", "def removeall(table):\n doall(\"DELETE FROM {table}\".format(table=table))", "def clear(self):\n try:\n self._load(False)\n except KeyError:\n return\n\n for i in xrange(self.size):\n try:\n del self.db[i]\n except KeyError:\n pass\n del self.db['count']\n del self.db['head']\n del self.db['size']", "def reset(self):\n self._execute(\"DELETE FROM collection_table\")\n self._execute(\"DELETE FROM keyword_table\")", "def purge(self):\n sql = \"DELETE FROM {t_id}\".format(t_id=self.table_id)\n self.fusiontables.query().sql(sql=sql).execute()", "async def clear_all(self) -> None:", "def delete_all(self):\n with self.__lock:\n self.__data = dict()\n self.flush()", "def empty_tables(self):\n for table in TABLES_TO_EMPTY:\n self.empty_table(table)", "def clear_data_from_table():\n global data_base, table\n sqlite3_simple_clear_table(data_base, table)\n output_on_display.delete(1.0, END)\n output_on_display.insert(END, '')\n return", "def clear(cls)->None:\n database.cursor.execute(\"DELETE FROM {}\".format(cls.table_name))\n database.connection.commit()", "def flush(self):\n cursor = self.db.cursor()\n cursor.execute(\"DELETE FROM triples\")\n self.db.commit()", "def delete_counts(self):\n\n qry = \"TRUNCATE TABLE baseline.parameter_value_counts\"\n self.engine.execute(text(qry))", "def sqlite3_simple_clear_table(data_base, table):\n con = sqlite3.connect(data_base)\n cur = con.cursor()\n query = 'DELETE FROM ' + table\n cur.execute(query)\n con.commit()\n cur.close()\n con.close()", "def clear_all(self):\n raise NotImplementedError", "def empty_table(self, table):\n print(\"Emptying {0}...\".format(table))\n query = \"TRUNCATE TABLE {0} RESTART IDENTITY CASCADE;\".format(table)\n cursor = connection.cursor()\n cursor.execute(query)", "def clear_table(self):\n\n db = self.connection(database=\"imdb\")\n\n try:\n cur = db.cursor()\n sql = \"TRUNCATE TABLE film;\"\n cur.execute(sql)\n db.commit()\n except:\n return\n\n db.close()", "def clear_db():\n from flask_monitoringdashboard.database import get_tables, engine\n\n for table in get_tables():\n table.__table__.drop(engine)\n table.__table__.create(engine)", "def clearmodels(self):\n \n dbpath, config = self._start() \n ModelDescriptionTable(dbpath).empty()\n ModelPhenotypeTable(dbpath).empty()\n ModelScoreTable(dbpath).empty() \n self._end()", "def clear_table(self, table: Table):\n self._requires_table(table)\n table.clear()", "def cleanup(self):\n for table in filter(lambda x: 
self.cmd.exists(x, silent=(log.level < DEBUG)), self.tables):\n log.info(\"MLoad\", \"Dropping table '{}'...\".format(table))\n self.cmd.drop_table(table, silent=True)", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def clear_tables(cursor):\n cursor.execute(\"delete from Review_Votes\")\n cursor.execute(\"delete from Review\")" ]
[ "0.73763025", "0.72920036", "0.7195111", "0.7004636", "0.6977167", "0.69601184", "0.6958541", "0.6949839", "0.6910979", "0.6881586", "0.6842338", "0.682759", "0.6827083", "0.67934114", "0.6769874", "0.6764047", "0.6752019", "0.6714772", "0.6650677", "0.6617581", "0.6610839", "0.65978545", "0.659015", "0.6582286", "0.6572806", "0.6572418", "0.6569169", "0.65600914", "0.6551793", "0.65299904" ]
0.77589965
0
upload() saves package even when there is no summary
def test_upload_no_summary(self):
        pkg = make_package(factory=DynamoPackage)
        self.db.upload(
            pkg.filename, BytesIO(b"test1234"), pkg.name, pkg.version, summary=""
        )
        count = self.engine.scan(DynamoPackage).count()
        self.assertEqual(count, 1)
        saved_pkg = self.engine.scan(DynamoPackage).first()
        self.assertEqual(saved_pkg, pkg)
        self.storage.upload.assert_called_with(pkg, ANY)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload_package(self, __contents):\n raise NotImplementedError", "def test_upload(self):\n pkg = make_package(factory=SQLPackage)\n content = BytesIO(b\"test1234\")\n self.db.upload(pkg.filename, content, pkg.name, pkg.version)\n count = self.sql.query(SQLPackage).count()\n self.assertEqual(count, 1)\n saved_pkg = self.sql.query(SQLPackage).first()\n self.assertEqual(saved_pkg, pkg)\n # If calculate hashes is on, it'll read the data\n # and rewrap with BytesIO\n self.storage.upload.assert_called_with(pkg, ANY)", "def test_upload(self):\n package = make_package()\n datastr = 'foobar'\n data = StringIO(datastr)\n self.storage.upload(package, data)\n filename = self.storage.get_path(package)\n self.assertTrue(os.path.exists(filename))\n with open(filename, 'r') as ifile:\n self.assertEqual(ifile.read(), 'foobar')", "def test_upload(self):\n pkg = make_package(factory=DynamoPackage)\n self.db.upload(pkg.filename, BytesIO(b\"test1234\"), pkg.name, pkg.version)\n count = self.engine.scan(DynamoPackage).count()\n self.assertEqual(count, 1)\n saved_pkg = self.engine.scan(DynamoPackage).first()\n self.assertEqual(saved_pkg, pkg)\n self.storage.upload.assert_called_with(pkg, ANY)", "def upload():\n sh('python setup.py register sdist upload')", "def _store_package_metadata(self):", "def test_upload(self):\n package = make_package()\n datastr = 'foobar'\n data = StringIO(datastr)\n self.storage.upload(package, data)\n key = list(self.bucket.list())[0]\n self.assertEqual(key.get_contents_as_string(), datastr)\n self.assertEqual(key.get_metadata('name'), package.name)\n self.assertEqual(key.get_metadata('version'), package.version)", "def upload_pkg(identifier, pkgname, metadata, directory, years):\n files = []\n for f in os.scandir(directory):\n if not f.is_symlink():\n continue\n path = os.readlink(f)\n match = re.match(SYMLINK_YEAR_REGEXP, path)\n if not match:\n continue\n year = match[1]\n if year not in years:\n continue\n files.append(f.path)\n if not files:\n return\n # Get last package, to extract a description\n last_pkg = sorted(filter(lambda x: not x.endswith('.sig'), files))[-1]\n pkginfo = extract_pkginfo(last_pkg)\n pkgdesc = pkginfo['pkgdesc'] if 'pkgdesc' in pkginfo else ''\n metadata['description'] = DESCRIPTION.format(pkgname=pkgname, pkgdesc=pkgdesc, url=pkginfo['url'], license=pkginfo['license'])\n metadata['rights'] = 'License: ' + pkginfo['license']\n #print(pkgname, len(files))\n #print(metadata)\n try:\n res = ia.upload(identifier, files=files, metadata=metadata)\n if not all([x.status_code == 200 for x in res]):\n ok = len([x for x in res if x.status_code == 200])\n nok = len([x for x in res if x.status_code != 200])\n codes = set([x.status_code for x in res])\n print(\"{}: only {}/{} files uploaded, status codes: {}\".format(identifier, ok, ok+nok, codes), file=sys.stderr)\n print(directory)\n except Exception as e:\n print(\"{}: exception raised\".format(identifier), file=sys.stderr)\n print(e, file=sys.stderr)\n print(directory)", "def _upload_build_source_package(self, targz):\n # Upload to temporary storage, only if doesn't exist\n self.pipeline_package = \"source/cache/%s\" % os.path.basename(targz)\n blob = self.bucket.blob(self.pipeline_package)\n logger.debug(\"build-package=%s\" % self.pipeline_package)\n if not blob.exists():\n blob.upload_from_filename(targz, content_type=\"application/gzip\")", "def _provision_package(self):", "def upload_package(self, pointer: FileStorage, token: str) -> Upload:\n files = {'file': (pointer.filename, pointer, 
pointer.mimetype)}\n data, _, _ = self.json('post', '/', token, files=files,\n expected_code=[status.CREATED,\n status.OK],\n timeout=30, allow_2xx_redirects=False)\n return self._parse_upload_status(data)", "def upload_package(self, filename=None):\n logger.info(\"Uploading the package to S3\")\n s3f = S3FunctionUploader(self.function_config['Code']['S3Bucket'])\n self.s3_filename = path.join(\n self.function_config['Code']['S3KeyPath'],\n path.basename(filename or self.local_filename)\n )\n s3f.upload(filename or self.local_filename,\n self.s3_filename)", "def upload_build(self, name, directory):\n logging.info('Not uploading build because no Filestore.')", "def upload(ctx: click.Context, **kwargs):\n root_commands.cmd_upload(ctx.obj, **kwargs)", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}", "def _save_pkgs(self, *pkgs):\n for pkg in pkgs:\n self.engine.save(pkg)\n summary = PackageSummary(pkg)\n self.engine.save(summary, overwrite=True)", "def release_package_to_repository(self, version: str) -> None:\n logger.info(f\"Uploading the package [{version}]\")\n pass", "def post_package():\n package_file = BytesIO()\n with tarfile.open(mode='w', fileobj=package_file) as tar:\n # metadata\n meta_content = b'encoding: utf-8\\npost: post.md'\n file_info = tarfile.TarInfo('package.yml')\n file_info.size = len(meta_content)\n tar.addfile(file_info, BytesIO(meta_content))\n\n # post\n post_content = b'''---\ntitle: A title\ntopic: A topic\n---\n\n[summary]\nA summary\n\nA paragraph\n'''\n file_info = tarfile.TarInfo('post.md')\n file_info.size = len(post_content)\n tar.addfile(file_info, BytesIO(post_content))\n package_file.seek(0)\n\n return package_file", "def upload_preset(self, filename, title, description, version, author, REQUEST=None):\r\n\r\n # TODO presets.py - upload_preset - specify how to authenticate\r\n\r\n raise NotImplementedError", "def install():\n execute(generate)\n execute(upload)", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def upload():\n env.user = 
'webcontent'\n rsync_project(DOCDIR, 'doc/_build/html/', delete=True)", "def upload_package(conn, module, remotepath = None, chunk_size = 16000):\n if remotepath is None:\n site = conn.modules[\"distutils.sysconfig\"].get_python_lib()\n remotepath = conn.modules.os.path.join(site, module.__name__)\n localpath = os.path.dirname(inspect.getsourcefile(module))\n upload(conn, localpath, remotepath, chunk_size = chunk_size)", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def upload(self):\n if not self.prepare():\n Settings.err_print(\"unable to upload file - {}\".format(self.get_title()))\n return False\n self.backup()\n self.delete()\n return True", "def file_upload():\n\n click.secho('*** Uploading image...', fg='green')\n uploaded = _uploaded_file('cover.jpg')\n click.secho(json.dumps(uploaded, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating a Picture document for it...', fg='green')\n picture = _make_document('picture', title='cover image', sys_filename=uploaded['path'])\n click.secho(json.dumps(picture, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Attaching it to a Blueray as cover...', fg='green')\n slp = _make_document('movie', title='Silver 
Linings Playbook')\n blueray = _make_document('blueray', movie_id=slp['_id'], cover_id=picture['_id'])\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')", "def upload_corpus(self, name, directory, replace=False):\n logging.info('Not uploading corpus because no Filestore.')", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()" ]
[ "0.75582093", "0.652778", "0.6510554", "0.6490564", "0.63053066", "0.61908734", "0.61849767", "0.6156637", "0.6149053", "0.61228496", "0.6098898", "0.6089424", "0.604158", "0.60039645", "0.5974472", "0.59176534", "0.5875046", "0.5840688", "0.58405584", "0.58125734", "0.57909214", "0.5783452", "0.5767116", "0.5765599", "0.5748685", "0.5710685", "0.5701209", "0.5691785", "0.5667963", "0.5657051" ]
0.75371444
1
Returns all the files in the given path which match the extension
def get_files(path, extension):
    extension = listify(extension)
    return [p for p in path.ls() if p.suffix in extension and "(" not in p.stem]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_files(path, extension):\n matches = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, extension):\n matches.append(os.path.join(root, filename))\n return matches", "def findFiles(path, ext='txt'):\n if not os.path.exists(path):\n print 'no such directory: %s' %path\n return []\n files=[]\n for dirname, dirnames, filenames in os.walk(path):\n for f in filenames:\n name = os.path.join(dirname, f)\n if f.endswith(ext):\n files.append(name)\n return files", "def search_extension(path, ext):\n output = []\n for root, dirs, files in os.walk(path, topdown=True):\n for file in files:\n if file.endswith(ext):\n path = os.path.join(root, file)\n output.append(path)\n\n return output", "def get_files(path: str, extension: str = '.wav') -> List[Path]:\n\n return list(Path(path).expanduser().resolve().rglob(f'*{extension}'))", "def find_files_by_extensions(cls, search_path, allowed_ext):\n file_list = []\n for root, dirnames, filenames in os.walk(search_path):\n for filename in filenames:\n name, extension = os.path.splitext(filename)\n if extension in allowed_ext:\n file_list.append(os.path.join(root, filename))\n\n return file_list", "def dir_search(ext, file_path='./'):\n try:\n return [file_path + \"/\" + file for file in os.listdir(file_path) if file.endswith(ext)]\n except OSError as e:\n logger.error(e)\n return []\n except Exception as e:\n logger.error(e)\n return []", "def list_type_in_dir(path, extension):\n path, extension = check_args(path, extension)\n files = os.listdir(path)\n file_list = [os.path.join(path, f)\n for f in fnmatch.filter(files, '*' + extension)]\n\n return file_list", "def findfiles(path, ext=\".pyc\"):\n results = []\n regex = re.compile(re.escape(ext)+\"$\", re.I)\n\n tree = os.walk(path)\n for d in tree:\n # Each element of a walker represents a directory and its contents.\n # Diagnostic, if you wish.\n #print(d)\n if d[2]:\n # Are there files in this directory?\n for f in d[2]:\n if regex.findall(f):\n relpath = os.path.join(d[0], f)\n results.append(os.path.realpath(relpath))\n\n return results", "def find_files(extensions):\n\n return [fname for fname in os.listdir('.') if fname.endswith(extensions)]", "def get_path_files_with_ext(extension, directory):\n\n return [\n directory + file_name for file_name in os.listdir(directory)\n if file_name.endswith(extension)\n ]", "def list_a_file_type(path, extension):\n path, extension = check_args(path, extension)\n file_list = [os.path.join(dirpath, f)\n for dirpath, dirnames, files in os.walk(path)\n for f in fnmatch.filter(files, '*' + extension)]\n\n return file_list", "def get_files(path, extension=None, full_path=True):\n\n file_list = list()\n for root, _, files in walk(path):\n for filename in files:\n if extension:\n if filename.endswith(extension):\n if full_path:\n file_list.append(join(root, filename))\n else:\n file_list.append(filename)\n else:\n file_list.append(join(root, filename))\n\n return file_list", "def get_files_from_of_type(path: str, ext: str) -> List[str]:\n files = []\n for root, dirnames, filenames in os.walk(path):\n for filename in fnmatch.filter(filenames, \"*.\" + str(ext)):\n files.append(os.path.join(root, filename))\n if not files:\n logging.error(\"No language files found in folder: \" + str(os.sep.join([convert_vars.BASE_PATH, \"source\"])))\n logging.debug(f\" --- found {len(files)} files of type {ext}. 
Showing first few:\\n* \" + str(\"\\n* \".join(files[:3])))\n return files", "def find_files(suffix, path):\n \n\n files = []\n for file in os.listdir(path):\n filepath = os.path.join(path, file)\n if os.path.isfile(filepath):\n if file.endswith(suffix):\n files.append(filepath)\n else:\n rec_files = find_files(suffix, filepath)\n files.extend(rec_files)\n\n return files", "def get_extensionless_filenames(path):\n return [\n filename.replace('.pdf', '')\n for filename in os.listdir(path)\n if isfile(join(path, filename))\n ]", "def list_all_files_with_extension(path, extension, do_unzip=True):\n files = []\n for file in os.listdir(path):\n if file.endswith(extension):\n files.append(file)\n if do_unzip and file.endswith(\".zip\"):\n new_path = unzip(os.path.join(path, file))\n os.remove(os.path.join(path, file))\n files = files + list_all_files_with_extension(new_path, extension)\n # Dive into subfolders.\n if os.path.isdir(os.path.join(path, file)):\n files = files + list_all_files_with_extension(os.path.join(path, file), extension)\n return files", "def find_files(suffix, path):\n try:\n result = list()\n if os.path.isfile(path):\n if path.endswith(suffix):\n return [path]\n else:\n return result\n for file in os.listdir(path):\n if file.endswith(suffix):\n result.extend([os.path.join(path, file)])\n else:\n temp_path = os.path.join(path, file)\n if os.path.isdir(temp_path):\n result.extend(find_files(suffix, temp_path))\n return result\n except Exception as e:\n print(f\"couldn't find the files with extension/suffix - {suffix} in the path - {path} due to - {str(e)}\")\n return None", "def load_filenames_from_path(path: str, extension: str ='.bin') -> List[str]:\n sorted_filenames_list = []\n if(os.path.exists(path)):\n directory_list = load_directory_list_from_path(path)\n \n for directory in directory_list:\n filename_list = [filename for filename in os.listdir(os.path.join(path, directory))\n if (os.path.isfile(\n os.path.join(path, \n os.path.join(directory, filename)\n )) and extension in filename) ]\n \n filename_list = sort_list(filename_list)\n\n sorted_filenames_list += [os.path.join(path, os.path.join(directory, filename)) for filename in filename_list]\n else:\n raise FileNotFoundError\n \n return sorted_filenames_list", "def getFiles(searchDir = './', extension = 'source'):\n from glob import glob \n\n return glob(searchDir+'/*.'+extension)", "def get_all_files(directory, extension):\n return (f for f in os.listdir(directory) if f.endswith(extension) and os.path.isfile(os.path.join(directory, f)))", "def _get_files(p, fs, extensions=None):\n p = Path(p)\n res = [\n p / f\n for f in fs\n if not f.startswith(\".\")\n and ((not extensions) or f'.{f.split(\".\")[-1].lower()}' in extensions)\n ]\n return res", "def get_files_from_directory(path):\n return [f for f in listdir(path) if isfile(join(path, f))]", "def glob(path):\n path = os.path.abspath(path)\n if os.path.isdir(path):\n files = [d for d in [\n os.path.join(path, f) for f in os.listdir(path)\n ] if os.path.isfile(d)]\n else:\n files = glob.glob(path)\n print(\"Found {0} files\".format(len(files)))\n return files", "def getAllFilesWithExtension(directory,extension):\n filesWithExtension = []\n for root, dirs, files in os.walk(directory):\n for file in files:\n if file.endswith(extension):\n filesWithExtension.append(os.path.realpath(os.path.join(root, file)))\n return filesWithExtension", "def glob_ext_files(dirname, ext=\"fa\") -> list:\n fnames = glob(os.path.join(dirname, f\"*.{ext}*\"))\n return [f for f in fnames 
if f.endswith((ext, f\"{ext}.gz\"))]", "def glob_match_image_files(path):\n return sum([glob.glob(os.path.join(path, \"*%s\" % extension)) for extension in EXTENSIONS], [])", "def find_files(suffix, path):\n\tif not os.path.isdir(path):\n\t\t return 'Invalid Directory'\n\n\tpath_list = os.listdir(path)\n\toutput = []\n\tfor item in path_list:\n\t\titem_path = os.path.join(path, item)\n\t\tif os.path.isdir(item_path):\n\t\t\toutput += find_files(suffix,item_path)\n\t\tif os.path.isfile(item_path) and item_path.endswith(suffix):\n\t\t\toutput.append(item_path)\n\treturn output", "def get_file_names(path, extension='.json'):\n fn = os.listdir(path)\n l = []\n for f in fn:\n if f.endswith(extension, 4):\n l.append(f)\n l = sorted(l)\n return l", "def get_files_with_extension(self, extension=sys.argv[1]) -> list:\n if extension == \"\":\n raise EnvironmentError(\"No extension provided!\")\n\n result = []\n for idx, file in enumerate(self.file_list):\n if re.search(extension + \"$\", file):\n result.append(file)\n\n if len(result) == 0:\n raise Exception(\"No {} files found.\".format(extension))\n\n return result", "def find_files(path, extension = 'csv', min = 2):\n try:\n os.chdir(path) # go to the directory of the path\n except FileNotFoundError:\n print(\"ERROR - Couldn't find file \" + path)\n exit()\n files = [i for i in glob.glob('*.{}'.format(extension))] # place all the csv files in this array\n\n if len(files) < min:\n print(\"ERROR - Couldn't find at least \" + str(min) + \" \" + extension + \" file(s)\")\n exit()\n\n return files" ]
[ "0.8473555", "0.8358024", "0.82955307", "0.8249992", "0.8165438", "0.80491453", "0.79155713", "0.78752804", "0.78646743", "0.781289", "0.78089064", "0.7808255", "0.77932817", "0.7784879", "0.77192956", "0.7589907", "0.758737", "0.7579747", "0.75113535", "0.750457", "0.74776345", "0.74698454", "0.74640495", "0.7450137", "0.74483824", "0.74464893", "0.74419266", "0.7440596", "0.74298143", "0.74158144" ]
0.87833667
0
Returns a list of paths of all label files.
def get_label_files(path, ext=".txt"):
    return get_files(path, ext)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_file_paths_labels(data_root: str) -> list:\n\n speaker_dirs = os.listdir(data_root)\n all_files = []\n i = 0\n for d in speaker_dirs:\n files = glob.iglob(data_root + '/' + d + '/**/*.wav', recursive=True)\n files = [[f, i] for f in files]\n all_files += files\n i += 1\n all_files = sorted(all_files, key=lambda x:x[0], reverse=False)\n\n return all_files", "def get_all_image_paths(self):\n image_paths, image_labels = [], []\n for directory_name, subdirectory_list, file_list in os.walk(self.root_directory):\n for file_name in file_list:\n if file_name.endswith(('.jpg',)):\n image_paths.append(os.path.join(directory_name, file_name))\n # Translates labels to 0-26 as recommended in the exercise description\n image_labels.append(ord(directory_name[-1]) - 97)\n return image_paths, image_labels", "def list_labels(self):\n # Create empty list\n label_names = []\n \n # For every name in training directory\n for name in os.listdir(self.train_data):\n # If it does not start with . (which hidden files do)\n if not name.startswith('.'):\n label_names.append(name)\n \n return label_names", "def label_names_file():\n return tfds.core.tfds_path(_LABELS_FNAME)", "def get_label_list():\n f_name = os.path.join(FLAGS.labels_dir, FLAGS.labels_name)\n if os.path.exists(f_name):\n with open(f_name, 'rb') as f:\n try:\n label_list = [line.rstrip('\\n') for line in f]\n except:\n print(\"Could not read file:\" + f_name)\n sys.exit()\n return label_list", "def get_labels():\n return [name for name in os.listdir(os.getcwd() + '/assets/images') if name != 'test']", "def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)", "def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]", "def dataset_files_labels(folders, db_root):\n fl = []\n for f in folders:\n\n fo = open(db_root + '/sample labels ' + f + '.txt', 'r')\n dialect = csv.Sniffer().sniff(fo.read(1024), delimiters=\"\\t \")\n fo.seek(0)\n for x in csv.reader(fo, dialect):\n fl.append([db_root + '/' + f + '/' + x[0], x[1]])\n return fl", "def get_filepaths(extract_dir):\n\n index = []\n labels = []\n _extract_dir = os.path.join(extract_dir, 'UCF-101')\n for folder in os.listdir(_extract_dir):\n labels.append(folder)\n folderpath = os.path.join(_extract_dir, folder)\n\n if not os.path.isdir(folderpath):\n continue\n\n for filename in os.listdir(folderpath):\n if 'avi' not in filename:\n continue\n\n if filename[0] == '.':\n continue\n\n filepath = os.path.join(folderpath, filename)\n\n if os.path.exists(filepath):\n index.append(filepath)\n else:\n print(filepath)\n return index, labels", "def getFilePaths():\n \n image_dir = r'/hpc/wfok007/mpi_heart/Training Set'\n mask_paths = []\n image_paths = []\n for root, dirs, files in os.walk(image_dir, topdown=False):\n for name in files:\n if name == 'laendo.nrrd':\n mask_paths.append(os.path.join(root, name))\n elif name == 'lgemri.nrrd':\n image_paths.append(os.path.join(root, name))\n else:\n print ('%s is unknown' %name)\n return mask_paths, image_paths", "def get_image_path_label(all_paths):\r\n n_folders_int = random.sample(range(0, len(all_paths)), 
n_way)\r\n image_labels = [[(glob.glob(all_paths[n] + '\\*')[k], n) # (path, label)\r\n for n in n_folders_int\r\n for k in random.sample(range(0, len(glob.glob(all_paths[n] + '\\*'))), k_shot+1)\r\n ] for b in range(batch_size)] \r\n return image_labels", "def get_list_of_data_and_labels():\n list_of_imgs = []\n list_of_img_labels = []\n for root, dirs, files in os.walk(config[\"PathToData\"], topdown=False):\n for f in files:\n ext = os.path.splitext(f)[-1].lower()\n\n if ext in config[\"ValidImageFileExtensions\"]:\n list_of_imgs.append(os.path.join(root, f))\n if ext in config[\"ValidLabelFileExtensions\"]:\n list_of_img_labels.append(os.path.join(root, f))\n\n list_of_imgs_with_labels = []\n list_of_corresponing_labels = []\n for img_full_file_name in list_of_imgs:\n img_file_name = os.path.splitext(img_full_file_name)[0].lower()\n corresponding_label = [label_full_file_name for label_full_file_name in list_of_img_labels if os.path.splitext(label_full_file_name)[0].lower() == img_file_name]\n if len(corresponding_label) != 0:\n list_of_imgs_with_labels.append(img_full_file_name)\n list_of_corresponing_labels.append(corresponding_label[0])\n\n assert len(list_of_imgs_with_labels) == len(list_of_corresponing_labels)\n\n return list_of_imgs_with_labels, list_of_corresponing_labels", "def get_log_paths(root_dir: str) -> List[str]:\n paths = []\n if not tf.io.gfile.isdir(root_dir):\n raise ValueError(f'{root_dir} is not a directory.')\n for path, _, files in tf.io.gfile.walk(root_dir):\n if 'metadata.riegeli' in files:\n paths.append(path)\n return paths", "def _FindLabels(self):\n texs = \" \".join(glob.glob(\"*.tex\"))\n cat_process = subprocess.Popen(shlex.split(\"cat %s\" % texs),\n stdout=subprocess.PIPE)\n grep_process = subprocess.Popen(shlex.split(r\"grep \\\\\\\\label\"),\n stdin=cat_process.stdout,\n stdout=subprocess.PIPE)\n cat_process.stdout.close()\n\n lines = grep_process.communicate()[0]\n\n ret = []\n for label in lines.split(\"\\n\"):\n ret.append(responses.BuildCompletionData(\n re.sub(r\".*\\label{(.*)}.*\", r\"\\1\", label)\n )\n )\n\n return ret", "def _GetLabels(self, directory, scan_subdirs, label, predicate):\n\n labels = []\n\n # Go through all of the files (and subdirectories) in that\n # directory.\n for entry in dircache.listdir(directory):\n entry_label = self._GetLabelFromBasename(entry)\n # If the label is not valid then pretend it\n # does not exist. 
It would not be valid to create an entity\n # with such an id.\n if not self.IsValidLabel(entry_label):\n continue\n # Compute the full path to 'entry'.\n entry_path = os.path.join(directory, entry)\n # If it satisfies the 'predicate', add it to the list.\n if predicate(entry_path):\n labels.append(self.JoinLabels(label, entry_label))\n # If it is a subdirectory, recurse.\n if (scan_subdirs and os.path.isdir(entry_path)\n and self._IsSuiteFile(entry_path)):\n labels.extend(self._GetLabels(entry_path,\n scan_subdirs,\n self.JoinLabels(label, \n entry_label),\n predicate))\n\n return labels", "def all_notebook_filenames():\n return [str(filename) for filename in Path(\".\").rglob(\"*.ipynb\")]", "def _findfile(self,path,label):\n files=[];filenames=os.listdir(path)\n for name in filenames:\n if os.path.splitext(name)[0]==str(label):\n files.append(name)\n return files", "def _get_file_names_and_labels(self):\n # Get waveform file names\n file_names = list(self.lookup_dict.keys())\n\n # Get labels\n labels = [self.lookup_dict[key] for key in self.lookup_dict.keys()]\n\n # file_paths and labels should have same length\n assert len(file_names) == len(labels)\n\n return file_names, labels", "def p_and_l_from(files):\n if isinstance(files, str):\n files = [files]\n paths = []\n labels = []\n for file in files:\n print(f'read {file}')\n with open(file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n line = line.split(' ')\n paths.append(line[0])\n labels.append(int(line[1]))\n return [paths, labels]", "def getFiles(folder, pattern, labelfile):\n # read labelfile\n with open(labelfile, 'r') as f:\n all_lines = f.readlines()\n \n # get filenames from labelfile\n all_files = []\n labels = []\n check = True\n for line in all_lines:\n # using shlex we also allow spaces in filenames when escaped w. \"\"\n splits = shlex.split(line)\n file_name = splits[0]\n class_id = splits[1]\n\n # strip all known endings, note: os.path.splitext() doesnt work for\n # '.' 
in the filenames, so let's do it this way...\n for p in ['.pkl.gz', '.txt', '.png', '.jpg', '.tif', '.ocvmb','.csv']:\n if file_name.endswith(p):\n file_name = file_name.replace(p,'')\n\n # get now new file name\n true_file_name = os.path.join(folder, file_name + pattern)\n all_files.append(true_file_name)\n labels.append(class_id)\n\n return all_files, labels", "def files(self):\n all_files = set()\n for label in self.filesets:\n all_files.update(self.filesets[label])\n return all_files", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def files(self):\n self._printer('\\tFiles Walk')\n for directory in self.directory:\n for path in os.listdir(directory):\n full_path = os.path.join(directory, path)\n if os.path.isfile(full_path):\n if not path.startswith('.'):\n self.filepaths.append(full_path)\n return self._get_filepaths()", "def collect_train_paths(self):\n\n image_paths = []\n annotation_paths = []\n n_images = 7200\n for i in tqdm(range(1, n_images + 1)):\n added = False\n for extension in ['jpg', 'png']:\n image_path = os.path.join(self.folder,\n f'ch8_training_images_{(i - 1) // 1000 + 1}',\n f'img_{i}.{extension}')\n if os.path.exists(image_path):\n image_paths.append(image_path)\n added = True\n break\n if added:\n annotation_paths.append(\n os.path.join(self.folder, 'ch8_training_localization_transcription_gt_v2',\n f'gt_img_{i}.txt')\n )\n else:\n logging.warning(f'Could not find: {image_path[:-3]}*')\n return image_paths, annotation_paths", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def get_labels(self) -> List[str]:\n return self.labels", "def load_labels(self, pathLabel):\n self.pathLabel = pathLabel\n self.labelList = os.listdir(pathLabel)" ]
[ "0.7681277", "0.74249905", "0.742166", "0.73119056", "0.72976214", "0.7211796", "0.7121309", "0.69645387", "0.68985945", "0.689014", "0.68764573", "0.67646277", "0.6759799", "0.671353", "0.6682471", "0.6668028", "0.665166", "0.66508526", "0.65965366", "0.65963733", "0.65886337", "0.65488446", "0.65354353", "0.65258014", "0.65258014", "0.65160257", "0.6514378", "0.6509495", "0.6456704", "0.6454527" ]
0.7458727
1
Gets a label from a given image path
def get_label(img_path):
    img_name = img_path.stem
    label_name = img_name + ".txt"
    label_path = img_path.parent / label_name
    with open(label_path) as f:
        label = json.load(f)
    return label
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_label(client, label):\n image_name = get_image_name()\n image = client.images.get(image_name)\n try:\n return image.labels[label]\n except KeyError:\n raise Exception(f\"Image should have a label '{label}'\")", "def get_label(image, model):\n x = Variable(image, volatile=True)\n label = model(x).data.max(1)[1].numpy()[0]\n # We have string labels for ImageNet\n if isinstance(model, torchvision.models.inception.Inception3):\n label_string = labels.get(label)\n return label_string\n return label", "def get_path_image(path_data, label, filename):\n\n return path_data.joinpath(f'label_{label}', filename)", "def get_path_and_label(root, file):\n # path of image (path is image)\n path = os.path.join(root, file)\n # Grab name of folder / Grab image folder name and replace spaces to - and convert all into lower case\n label = os.path.basename(root).replace(\" \", \"-\").lower()\n return path, label", "def read_image_with_label(dir, file):\n assert type(file) == str, \"File name is not string.\"\n f = os.path.join(dir, file)\n info = file.split(\"_\")\n try:\n label = [int(info[x]) for x in range(1, 3)]\n except:\n print(\"The format of file name is not correct.\")\n else:\n return Image.open(f), label", "def _get_img_label(self, path):\n food_items = self.annotations[path]\n tomato_items = [\n item for item in food_items\n if item['id'] in self.tomato_label_ids\n ]\n return 1 if len(tomato_items) > 0 else 0", "def GetImageLabelFromImage(image, parent=None):\n pixmap = GetPixelMapFromImage(image)\n return GetImageLabelFromPixelMap(pixmap, parent=parent)", "def _load_img_label(self):\n name = self._launch_file_b()\n self._img_label.configure(text=name)", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def get_label(filename:str) -> str:\n label = filename.split(\"/\")[-2]\n return label", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels", "def detect_labels(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n # [START vision_python_migration_label_detection]\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n ss=labels[0].description \n ss.split('/')[0]\n os.system(\"./ILOVEAPPLE/sort {} {}\".format(ss, path))\n # [END vision_python_migration_label_detection]", "def LoadImageLabel(filename, scaledXSize=None, scaledYSize=None, parent=None):\n image = LoadImage(filename, scaledXSize=scaledXSize, scaledYSize=scaledYSize)\n return GetImageLabelFromImage(image, parent=parent)", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = 
title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. {1}\".format(label_text, e)\n return label", "def create_label(self, loaded_img, loaded_label):\n _, label = cv2.threshold(loaded_label, 120, 255, cv2.THRESH_BINARY)\n kernel = np.ones((5, 5), np.uint8)\n label = cv2.dilate(label, kernel, iterations=1)\n _, contours, _ = cv2.findContours(label, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n if contours:\n areas = [cv2.contourArea(cnt) for cnt in contours]\n x, y, w, h = cv2.boundingRect(contours[np.argmax(areas)])\n label = label[y:y + h, x:x + w]\n return loaded_img.astype(np.float32) / 255, cv2.resize(label, (self.label_w, self.label_h)).astype(np.float32) / 255\n else:\n return loaded_img.astype(np.float32) / 255, np.zeros([self.label_h, self.label_w], dtype=np.float32)", "def get_image_path_label(all_paths):\r\n n_folders_int = random.sample(range(0, len(all_paths)), n_way)\r\n image_labels = [[(glob.glob(all_paths[n] + '\\*')[k], n) # (path, label)\r\n for n in n_folders_int\r\n for k in random.sample(range(0, len(glob.glob(all_paths[n] + '\\*'))), k_shot+1)\r\n ] for b in range(batch_size)] \r\n return image_labels", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def get_label(self, name):\n label_list = self.wls_board.get_labels()\n for label in label_list:\n if name in label.name: \n return label", "def load_label(self, idx):\n im = open('{}/GTTXT/{}.txt'.format(root_dir, idx))\n\t#print(type(im.readlines()[0].rstrip(\"\\n\")))\n rgb_label = [i.rstrip(\"\\n\").split(\" \") for i in im.readlines()]\n\tlabel=[]\t\n\tfor i in rgb_label:\n\t\tlabel+=[int(j) for j in i]\n\tlabel=np.array(label).reshape(720,960)\n\tlabel[label==-1]=12\n\t#print(np.unique(label))\n #label = label[np.newaxis, ...]\n return label", "def get_labels():\n return [name for name in os.listdir(os.getcwd() + '/assets/images') if name != 'test']", "def get_image_path(image_lists, label_name, index, image_dir, category):\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n \n if not category_list:\n tf.logging.fatal('Label %s has no images in the category %s.',\n label_name, category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n \n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path", "def file_reader(image_file, label_file):\n\n image = im.imread(image_file)\n\n with open(label_file, \"r\") as file:\n label = float(file.read())\n\n return image, label", "def get_label(self, key):\n return self.labels.get(key, None)", "def __get_label(file_path, fruits):\n try:\n root, name, fruit, location, time, filename = r\"{}\".format(file_path).split(\"\\\\\")\n if fruit.lower() not in fruits:\n return \"Unknown fruit\"\n return fruit.lower()\n except ValueError:\n for fruit in fruits:\n if fruit in file_path.lower():\n return fruit\n\n return \"Unknown 
fruit\" # if we got here so we don't know what is the label", "def get_label(id):\n return if_found(dao.get_label(id))", "def label(image,**kw):\n # default connectivity in OpenCV: 8 (which is equivalent to...)\n # default connectivity in scikit-image: 2\n n, labels = cv2.connectedComponents(image.astype(uint8), connectivity=4)\n #n, labels = cv2.connectedComponentsWithAlgorithm(image.astype(uint8), connectivity=4, ltype=2, ccltype=cv2.CCL_DEFAULT)\n return labels, n-1\n # try: return measurements.label(image,**kw)\n # except: pass\n # types = [\"int32\",\"uint32\",\"int64\",\"uint64\",\"int16\",\"uint16\"]\n # for t in types:\n # try: return measurements.label(array(image,dtype=t),**kw)\n # except: pass\n # # let it raise the same exception as before\n # return measurements.label(image,**kw)", "def _get_label(self):\n return self.label", "def get_imagenet_label(index):\n global _CLASS_INDEX\n if _CLASS_INDEX is None:\n with open(os.path.join(os.path.dirname(__file__), '../resources/imagenet_class_index.json')) as f:\n _CLASS_INDEX = json.load(f)\n return _CLASS_INDEX[str(index)][1]", "def _get_label(cls, file_name):\n if cls == \"neg\":\n return \"0\"\n else:\n return \"1\"\n # reg = _REGEX_\n # rmtch = reg.match(file_name)\n # if rmtch:\n # return rmtch.groupdict()[\"label\"]\n # else:\n # return \"unknown_positive\"" ]
[ "0.8130192", "0.7271338", "0.72610754", "0.72576123", "0.71734583", "0.7101198", "0.7082114", "0.707369", "0.7006125", "0.6975027", "0.6914926", "0.6888219", "0.6779321", "0.66797745", "0.66453075", "0.66104287", "0.653919", "0.6511472", "0.65014905", "0.6453433", "0.64453256", "0.6411255", "0.6396673", "0.638939", "0.63611597", "0.6343702", "0.6291641", "0.62749964", "0.62747717", "0.6268842" ]
0.85277015
0
Creates a label df with all labels concatenated.
def create_label_df(path):
    image_files = get_images(path)
    labels = [get_label(p) for p in image_files]
    label_df = pd.DataFrame(labels)
    label_df.fillna(" ", inplace=True)
    return label_df
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def add_label_counts(labels_df):\n # ---------------------------------------------------------------------\n # label count: count total number of occurences of a label in all label sources\n projects = ['food', 'internet', 'technology', 'media']\n final = []\n for project in projects:\n final.append(pd.read_csv(\n \"data/\" + project + \"/hierarchical_category_names.csv\"))\n final.append(\n pd.read_csv(\"data/\" + project + \"/keyphrases_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/keyword_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/lda_label_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/link_names.csv\"))\n # final.append(pd.read_csv(\"data/\" + project + \"/lda_label_names.csv\"))\n\n final = pd.concat(final)\n counts = final.groupby([\"label\"]).size().reset_index(name=\"label_count\")\n\n # ---------------------------------------------------------------------\n # label count: count total number of occurences of a label wihtin current project\n # Todo: discuss with Shilad which to use\n counts2 = labels_df.groupby([\"label\"]).size().reset_index(name=\"label_count_project\")\n with_count_all = pd.merge(labels_df, counts, on='label')\n return pd.merge(with_count_all, counts2, on='label')", "def get_labels(df):\n labels = []\n for i in df.index:\n label = sample_label_from_sample_name(i)\n labels.append(label)\n return labels", "def get_labels_df():\n labels_df = pd.read_csv('data/train/truth_train.csv', header=None)\n return labels_df", "def labeling_func(df_clus):\n\n df_all_labeled = df_all_columns.copy()\n df_all_labeled['Clus_label'] = df_clus['Clus_label'].copy()\n df_all_labeled['Clus_label']= df_all_labeled['Clus_label'].astype(int)\n for i in range(0, clus_params['n_components']):\n df_all_labeled['Prob_L'+str(i)] = df_clus['Prob_L'+str(i)].copy()\n\n return df_all_labeled", "def df():\n path, _ = os.path.split(os.path.abspath(__file__))\n project_path = os.path.join(path, os.pardir, os.pardir)\n\n values_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_values.csv\")\n labels_path = os.path.join(project_path, \"data\", \"raw\", \"pumps_train_labels.csv\")\n\n train = pd.read_csv(values_path, index_col='id', parse_dates=[\"date_recorded\"])\n labels = pd.read_csv(labels_path, index_col='id')\n\n return train.join(labels)", "def fix_label(df):\n df_label = df['OVERALL_DIAGNOSIS']\n\n df_label.replace({0: -1, 1: 1}, inplace=True)\n df = df.drop(['OVERALL_DIAGNOSIS'], axis=1)\n df = pd.concat([df_label, df], axis=1)\n df.columns.values[0] = \"label\"\n return df", "def __get_labels(self):\n\n uncertain_pairs_index = self.__query_pairs()\n\n to_label_raw = self.all_raw_data.loc[uncertain_pairs_index]\n to_label_features = self.all_features.loc[uncertain_pairs_index]\n\n # Remove uncertain pairs from the candidate pool\n self.all_features.drop(uncertain_pairs_index, axis=0, inplace=True)\n\n labels_list = []\n for index, row in to_label_raw.iterrows():\n\n print(\"\\n{0:30}\\t{1}\\n{2:30}\\t{3}\\n{4:30}\\t{5}\\n{6:30}\\t{7}\\n\".format(row.name_a, row.name_b,\n row.address_a, row.address_b,\n row.zip_a, row.zip_b,\n row.city_a, row.city_b))\n\n\n label = self.__user_input(\"Is this a match? 
(0/1)\")\n labels_list.append((index, label))\n\n labels_index = [index for index, label in labels_list]\n labels_values = [label for index, label in labels_list]\n\n # Create dataframe with index and labels\n add_labels = pd.Series(labels_values, index=labels_index, name='label')\n\n # Union the new training set to the full training set\n self.labeled_features = pd.concat([self.labeled_features, to_label_features], axis = 0, ignore_index=False)\n self.labeled_labels = pd.concat([self.labeled_labels, add_labels], axis = 0, ignore_index=False)\n\n return self", "def labels(self) -> pd.Series:\n return self.data.apply(to_label, axis=1)", "def consolidate_labels(labels):\n return map(RNN_model.consolidate_label , labels)", "def assign_random_labels(dataset: pd.DataFrame, n_classes: int) -> pd.DataFrame:\n labels = np.zeros(shape=(len(dataset), n_classes))\n labels = pd.DataFrame(labels, columns=[\"l{}\".format(i) for i in range(n_classes)])\n for label in labels.values:\n for cla in range(n_classes):\n label[cla] = random.randint(0, 1)\n\n return labels", "def label_extraction(self) -> None:\n self.df[\"label\"] = self.df[\"y\"]", "def create_labels(filepath):\r\n \r\n filelist = os.listdir(filepath)\r\n columns = ['filename','label']\r\n label_df = pd.DataFrame(columns = columns)\r\n count = 0\r\n col1 = []\r\n col2 = []\r\n \r\n for file in filelist:\r\n \r\n name = file[:-4]\r\n imagename = name+'.png'\r\n absolute_path = os.path.join(filepath,file)\r\n \r\n f = open(absolute_path,\"r\")\r\n classname = f.read(3).split(\" \")\r\n print(classname)\r\n print(classname[0])\r\n \r\n col1.append(imagename)\r\n col2.append(classname[0])\r\n count += 1\r\n \r\n \r\n label_df = pd.DataFrame({'filename': col1, 'label': col2}) \r\n return label_df", "def build_next_states_labeled_df(self, combined_df):\n if self.next_states_labeled_df is None:\n # label once for all new sents\n self.next_states_labeled_df = self.label_dataframe_with_expert(combined_df, self.col_names)\n return self.next_states_labeled_df", "def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames", "def label_encode(df):\n\n X = df.copy()\n for colname in X.select_dtypes([\"category\"]):\n X[colname] = X[colname].cat.codes\n return X", "def consolidate_labels(labels):\n return list(map(RNNOIE_model.consolidate_label , labels))", "def _label_encoding(self):\n for feat in self.cat_feats:\n if self.train:\n lbl = preprocessing.LabelEncoder()\n lbl.fit(self.dataframe[feat].values)\n self.dataframe_d_copy.loc[:,feat] = lbl.transform(self.dataframe[feat].values)\n self.label_encoders[feat] = lbl\n else:\n lbl = self.encoders[feat]\n self.dataframe_d_copy.loc[:,feat] = lbl.transform(self.dataframe[feat].values)\n \n if self.train:\n encoder_path = f\"{self.output_path}/_label_encoder.pkl\"\n self.cat_feats_cfg['encoder_path'] = encoder_path\n joblib.dump(self.label_encoders, encoder_path)\n \n return self.dataframe_d_copy", "def to_labels(frame_labels: np.ndarray, labelmap: dict) -> str:\n frame_labels = row_or_1d(frame_labels)\n\n onset_inds = np.diff(frame_labels, axis=0).astype(bool)\n onset_inds = np.insert(onset_inds, 0, True)\n\n labels = frame_labels[onset_inds]\n\n # remove 'unlabeled' label\n if \"unlabeled\" in labelmap:\n labels = labels[labels != labelmap[\"unlabeled\"]]\n\n if len(labels) < 1: # if removing all the 
'unlabeled' leaves nothing\n return \"\"\n\n # only invert mapping and then map integer labels to characters\n inverse_labelmap = dict((v, k) for k, v in labelmap.items())\n labels = labels.tolist()\n labels = [inverse_labelmap[label] for label in labels]\n\n return \"\".join(labels)", "def label_dataframe_with_expert(sent_df, col_names, unlabeled_df_labeled=None):\n # type: (DataFrame, ColumnNames, DataFrame) -> DataFrame\n unlabeled_df = sent_df[sent_df[col_names.tag].isnull()].copy(deep=True)\n if unlabeled_df_labeled is None:\n from ResearchNLP.z_experiments.experiment_util import label_df_with_expert\n unlabeled_df_labeled = label_df_with_expert(unlabeled_df, col_names, print_status=False)\n # assuming unlabeled_df_labeled is meant for the end of $sent_df\n unlabeled_df_labeled_st = len(sent_df) - len(unlabeled_df_labeled)\n df_len = len(sent_df)\n for un_idx in unlabeled_df.index:\n assert unlabeled_df_labeled_st <= un_idx < df_len, \\\n \"$unlabeled_df_labeled should hold info for the end of the dataframe\"\n labeled_sent_df = sent_df.copy(deep=True)\n for idx in unlabeled_df.index: # set all new labels\n labeled_sent_df.iloc[idx] = unlabeled_df_labeled.iloc[idx - unlabeled_df_labeled_st].copy(deep=True)\n return labeled_sent_df", "def test_generate_df_with_label(self):\n\n data_df = pyjstat.generate_df(self.oecd_datasets['oecd'], 'label')\n line_thirty = ['Unemployment rate', 'Belgium', 2009, 7.891892855]\n dimensions = pyjstat.get_dimensions(self.oecd_datasets['oecd'],\n 'label')\n self.assertTrue(set(data_df.columns.values[:-1]) ==\n set(dimensions[1]))\n self.assertTrue(set(data_df.iloc[30].values) ==\n set(line_thirty))", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def transform_labels(self, labels=None):\n encoded_labels = pandas.DataFrame([], columns=['low', 'medium', 'high'])\n encoded_labels.loc[:, 'low'] = 1.0 * (labels['damage_grade'].values == 1)\n encoded_labels.loc[:, 'medium'] = 1.0 * (labels['damage_grade'].values == 2)\n encoded_labels.loc[:, 'high'] = 1.0 * (labels['damage_grade'].values == 3)\n return encoded_labels", "def get_path_and_label(self):\n DATA_SIZE = 700\n category = os.listdir(os.path.join(self.root, 'NWPU-RESISC45'))\n image_path = []\n label = []\n for cat in category:\n cat_enc = self.class_enc[cat]\n label += [cat_enc] * DATA_SIZE\n for num in range(1, DATA_SIZE+1):\n filename = cat + '_' + str(num).zfill(3) + '.jpg'\n image_path += [os.path.join(self.root,\n 'NWPU-RESISC45', cat, filename)]\n df = pd.DataFrame({'image': image_path, 'label': label})\n\n return df", "def generate_labels():\n label_set = set([])\n for data in load_data():\n label = data.split(' ', 1)[0]\n label_set.add(label)\n labels = list(label_set)\n labels.sort()\n return labels", "def label_encoder(df, col_names: list) -> pd.DataFrame:\n le = LabelEncoder()\n for col_name in col_names:\n df.loc[:, col_name] = le.fit_transform(df[col_name])\n return df", "def transpose_labels(df, sort=False):\n label = 'gtruth'\n temp_gtruth = df[df['label'] == 'gtruth']\n for sample_method_to_test in ['lab', 'pier']:\n temp_gtruth = temp_gtruth \\\n .rename({\n f'{sample_method_to_test} total abundance': f'{sample_method_to_test} {label} total abundance',\n f'{sample_method_to_test} raw count': f'{sample_method_to_test} {label} raw count',\n f'{sample_method_to_test} nrmlzd raw count': f'{sample_method_to_test} {label} nrmlzd raw count',\n f'{sample_method_to_test} relative abundance': f'{sample_method_to_test} {label} relative abundance',\n 
f'{sample_method_to_test} cells/mL': f'{sample_method_to_test} {label} cells/mL'},\n axis=1)\n\n temp_gtruth = temp_gtruth.drop('label', axis=1)\n\n label = 'predicted'\n temp_pred = df[df['label'] == 'prediction']\n for sample_method_to_test in ['lab', 'pier']:\n temp_pred = temp_pred \\\n .rename({\n f'{sample_method_to_test} total abundance': f'{sample_method_to_test} {label} total abundance',\n f'{sample_method_to_test} raw count': f'{sample_method_to_test} {label} raw count',\n f'{sample_method_to_test} nrmlzd raw count': f'{sample_method_to_test} {label} nrmlzd raw count',\n f'{sample_method_to_test} relative abundance': f'{sample_method_to_test} {label} relative abundance',\n f'{sample_method_to_test} cells/mL': f'{sample_method_to_test} {label} cells/mL'},\n axis=1)\n temp_pred = temp_pred.drop('label', axis=1)\n\n merge_col = ['class', 'datetime', 'sampling time']\n micro_col = [col for col in df.columns if col.startswith('micro')]\n if all(mc in df.columns for mc in micro_col):\n merge_col += micro_col\n\n concat = temp_pred.merge(temp_gtruth, on=merge_col)\n\n # sort dataframe\n if sort:\n col = sorted(concat.columns)\n concat = concat[col[:2] + [col[-1]] + col[2:-1]]\n\n return concat", "def add_labels(df, binary=True, DELAY_THRESHOLD=20, categorical=False):\n\n def delay_class(minutes):\n if minutes <= 5:\n return 0\n if 5 < minutes <= 20:\n return 1\n if 20 < minutes <= 60:\n return 2\n if 60 < minutes <= 120:\n return 3\n if 120 < minutes:\n return 4\n else:\n return None\n\n if binary and not categorical:\n # add the target label \"binary: delayed (positive) not-delayed (negative)\" based on the threshold in minutes\n df['DELAYED'] = df['DEP_DELAY'].apply(lambda x: 1 if x >= DELAY_THRESHOLD else 0)\n\n # balance the data (same number of samples for delayed / not delayed flights)\n delayed = df[df['DELAYED'] == 1].copy()\n no_delay = df[df['DELAYED'] == 0][:delayed.shape[0]].copy()\n\n # concat into one dateframe\n data = delayed.append(no_delay, ignore_index=True)\n # logging\n percentage = delayed_percentage(df, DELAY_THRESHOLD)\n print('{:.2f}% of the total flights were delayed {} minutes or more.'.format(percentage, DELAY_THRESHOLD))\n\n del delayed, no_delay, df # release some memory\n\n elif categorical:\n df['DELAY_CLASS'] = df['DEP_DELAY'].apply(lambda row: delay_class(row))\n counts = df['DELAY_CLASS'].value_counts()\n m = min(counts)\n c0 = df[df['DELAY_CLASS'] == 0][:m].copy()\n c1 = df[df['DELAY_CLASS'] == 1][:m].copy()\n c2 = df[df['DELAY_CLASS'] == 2][:m].copy()\n c3 = df[df['DELAY_CLASS'] == 3][:m].copy()\n c4 = df[df['DELAY_CLASS'] == 4][:m].copy()\n data = c0.append([c1, c2, c3, c4])\n data['DELAY_CLASS'] = data['DELAY_CLASS'].astype(int)\n del c0, c1, c2, c3, c4 # release memory\n else:\n raise('either of binary or categorical must be true')\n\n # shuffle dataframe\n data = data.sample(frac=1).reset_index(drop=True)\n\n return data", "def _get_classify_labels(df):\n labels = np.ones((len(df), 1), dtype=dtype) * 2\n labels[df['A-coref']] = 0\n labels[df['B-coref']] = 1\n return labels", "def labels_all(self):\n return self._labels_all" ]
[ "0.6601174", "0.64376587", "0.62578154", "0.6249624", "0.6200135", "0.6038482", "0.599989", "0.59783006", "0.59754735", "0.5972586", "0.5940923", "0.5933921", "0.5933152", "0.5904", "0.58979785", "0.5895999", "0.58786684", "0.58408", "0.5820154", "0.5800604", "0.5785483", "0.57651937", "0.5763189", "0.57589984", "0.57496536", "0.57231724", "0.5714263", "0.571193", "0.5710738", "0.57037467" ]
0.6924985
0
Project managers can replace completion status of the project
def update_project_status(project_id):
    completion_status = request.get_json()['completion_status']
    project = Project.query.filter_by(id=project_id).first()
    if not project:
        return {
            'success': False,
            'message': f"No project with the specified id {project_id} found.",
        }

    else:
        if is_project_manager(project, g.user):
            project.completion_status = completion_status
            db_session.add(project)
            db_session.commit()
            return {
                'success': True,
                'result': task_schema.dump(project),
                'message': f"Successfully Updated the Completion Status of {project.name}."
            }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repo_refresh_for_unfinished():\n project_list = Project.objects()\n crawl_list = []\n for repo in project_list:\n if repo.analyser_progress != \"100%\":\n crawl_list.append(repo.project_name)\n analyser.add_repos(current_user.username, crawl_list)\n flash('Refresh for unfinished successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def test_update_project(self):\n pass", "def test_update_project(self):\n pass", "def project():", "def project():", "def project():", "def completion() -> None:", "def project_refresh(project_name):\n if not db_find_project(project_name):\n abort(404)\n analyser.add_repos(current_user.username, [project_name])\n return redirect(url_for('main.admin_manage'))", "def approve_project(cls, project):\n project.status = Project.APPROVED\n project.save()", "def status_project():\n remote('glogg -n 20 && echo \"\" && git status')", "def stop_modify_project(update, context):\n context.user_data[START_OVER] = True\n get_list_projects(update, context)\n\n return END", "def mark_completed(self,status):\r\n if status == \"r\":\r\n self.status = \"c\"#It is to test the mark complete function in the test_book.py, otherwise this program works fine in the main.py\r\n return True\r\n elif status == \"c\":\r\n return False", "def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")", "def test_replace_project(self):\n pass", "async def status(self, ctx, project_name: str) -> discord.Message:\n if not ctx.projects.find_project(project_name):\n await ctx.send(\"This project doesn't exist.\")\n return\n progress_bar = ctx.projects.project_progress_bar(project_name)\n if not progress_bar:\n progress_bar = self.empty_progress_bar\n await ctx.send(progress_bar)", "def poll_advanced(self):\r\n osName = platform.system()\r\n\r\n ## Check if user updated project name\r\n try:\r\n ## Check if user updated project name\r\n checkName = self.widgetList[3].get()\r\n if checkName != self.newProj.name:\r\n if kT.check_proj_name(checkName):\r\n self.newProj.name = checkName\r\n else:\r\n self.newProj.name = None\r\n if self.prevName != checkName:\r\n tkMessageBox.showinfo(\"Invalid Project Name\",\\\r\n \"No spaces or special characters.\")\r\n self.prevName = checkName\r\n kT.debug_log(\"Invalid name\")\r\n except AttributeError:\r\n kT.debug_log(\"AttributeError\", sys.exc_info()[2])\r\n return\r\n\r\n self._retLoop = self.after(250, self.poll_advanced)", "def status(self,project_dir):\n \n if \"towercrane\" not in os.listdir(project_dir):\n print('(!) 
No project has been initialized yet.\\n => you can use \"towercrane init\" to start a new project.\\n => Or it might be because you have lost the \"towercrane config file\" ')\n \n elif \"towercrane\" in os.listdir(project_dir):\n TowercraneConfig = read_config(project_dir)\n project, files = self.db.get_project(TowercraneConfig[\"projectkey\"])\n files_table = tabulate([[file[1],file[0],file[2],file[-1]] for file in files], headers=['File Name', 'File Key','Size','status'], tablefmt='orgtbl')\n print(f'project:\"{TowercraneConfig[\"project_name\"]}\" with projectkey: \"{TowercraneConfig[\"projectkey\"]}\"\\nFiles added to the project: \\n\\n{files_table}')", "def project_refresh_all():\n project_list = Project.objects()\n analyser.add_repos(current_user.username, [repo.project_name for repo in project_list])\n flash('Refresh all successfully!', 'success')\n return redirect(url_for('main.admin_manage'))", "def project(self, value):\n\n if self._project != value:\n self._project = value\n self._update_page()", "def change_status(self):\n if self.status == 'in progress':\n self.status = 'done'\n return self.status\n elif self.status == 'done':\n self.status = 'in progress'\n self.eisenhower_priority()\n return self.status", "def uncomplete(self):\n ### TODO: needs test code for code coverage!\n ## (it has been tested through the calendar-cli test code)\n if not hasattr(self.vobject_instance.vtodo, \"status\"):\n self.vobject_instance.vtodo.add(\"status\")\n self.vobject_instance.vtodo.status.value = \"NEEDS-ACTION\"\n if hasattr(self.vobject_instance.vtodo, \"completed\"):\n self.vobject_instance.vtodo.remove(self.vobject_instance.vtodo.completed)\n self.save()", "def update_status(self):\n\n self.num_made = self.num_made + 1\n print('Organisms left for {}: {} '.format(\n self.name, self.number - self.num_made))\n if self.num_made == len(self.files):\n self.is_finished = True", "def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status", "def update_project(self, name):\n self._log.info(\"Updating project: {}\".format(name))\n if name in self.projects:\n pass\n else:\n self.add_project(name)", "def __ensure_project(self, project_name):\n if project_name not in self.history:\n self.history[project_name] = {}\n self.history[project_name]['opened'] = []\n self.history[project_name]['closed'] = []", "def do_project_update(cs, args):\n raise NotImplementedError", "def mark_as_done(self):\n self.status = \"DONE\"", "def test_patch_project(self):\n pass", "def monitor_project_build(self, project_name):\n pass", "def __gitBisectReset(self):\n pfile = self.project.getProjectFile()\n lastModified = QFileInfo(pfile).lastModified().toString()\n shouldReopen = (\n self.vcs.gitBisect(self.project.getProjectPath(), \"reset\") or\n QFileInfo(pfile).lastModified().toString() != lastModified\n )\n if shouldReopen:\n res = E5MessageBox.yesNo(\n self.parent(),\n self.tr(\"Bisect\"),\n self.tr(\"\"\"The project should be reread. 
Do this now?\"\"\"),\n yesDefault=True)\n if res:\n self.project.reopenProject()" ]
[ "0.63556254", "0.6097139", "0.6097139", "0.6010299", "0.6010299", "0.6010299", "0.59843576", "0.59797865", "0.5946904", "0.5905803", "0.58927155", "0.5876474", "0.5854564", "0.5852657", "0.583997", "0.58357406", "0.5791279", "0.5755392", "0.57458824", "0.5745145", "0.57081807", "0.5672598", "0.56663823", "0.56646776", "0.5643562", "0.56142545", "0.5598258", "0.558944", "0.55480677", "0.55411327" ]
0.64196265
0
Get list of tasks associated with a project
def get_tasks_list(project_id):
    project = Project.query.filter_by(id=project_id).first()
    if not project:
        return {
            'success': False,
            'message': f"No project with the specified id {project_id} found.",
        }

    else:
        permission = has_project_permission(project, g.user)
        return jsonify(
            {
                "success": True,
                "result": {
                    'created_tasks': tasks_schema.dump(Task.query.filter_by(created_by_id = g.user.id).all()),
                    'tasks_you_work_on': tasks_schema.dump(g.user.tasks).all(),
                    'all': tasks_schema.dump(Task.query.filter(or_(
                        Task.created_by_id==g.user.id,
                        Task.project_id==g.user.project.id
                    )).all()),
                },
                "message": "Successfully fetched all tasks.",
            }
        )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tasks_of_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE project_id=? ORDER BY project_order\", (project_id,))\n return res.fetchall()", "def getProjectTasks(self, pid, archived=False):\n return self.request(Endpoints.PROJECTS + '/{0}'.format(pid) + '/tasks')", "def get_tasks(self):\n return self.tasks.all()", "def get_tasks(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/tasks\".format(project.id,\n story.id)\n tasks = self._request(\"get\", resource)\n\n for task in tasks:\n ret_val.append(Task(task))\n\n return ret_val", "def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)", "def get_tasks(taskid_list, module):\n tasks = module.client.api.get_tasks_by_status('Pending')\n task_list = list()\n for task in tasks:\n if task['workOrderId'] in taskid_list:\n task_list.append(task)\n return task_list", "def find_tasks(\n self,\n task_name: Optional[str] = None,\n project_id: Optional[str] = None,\n parent_task_id: Optional[str] = None,\n ) -> List[Task]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from tasks\n WHERE (?1 IS NULL OR task_name = ?1)\n AND (?2 IS NULL OR project_id = ?2)\n AND (?3 IS NULL OR parent_task_id = ?3)\n \"\"\",\n (task_name, nonesafe_int(project_id), nonesafe_int(parent_task_id)),\n )\n rows = c.fetchall()\n return [\n Task(self, str(r[\"task_id\"]), row=r, _used_new_call=True) for r in rows\n ]", "def get(self):\n\n return task_service.get_tasks()", "def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()", "def get_tasks_list(self):\n return self.task_controller.get_list()", "def get_unique_project_list(self) -> List[str]:\n return self.tasks.get_project_list()", "def get_tasks(self):\n return self.tasks", "def get_all_tasks(self):\n \n sql = \"select * from tasks;\"\n return self._query_all(sql)", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def tasks_in_project(request, project):\n return project.task_set.filter(user=request.user).exclude(folder='trash')", "def get_task_list(self):\n raise NotImplementedError()", "def get_all_projects_tasks(dump: Optional[Union[bool, str]] = None,\n get_predictions_instead: bool = False):\n\n @ray.remote\n def _iter_projects(proj_id, get_preds_instead=get_predictions_instead):\n if get_preds_instead:\n _tasks = get_tasks_from_mongodb(proj_id,\n dump=dump,\n get_predictions=True)\n else:\n _tasks = get_tasks_from_mongodb(proj_id)\n for task in _tasks:\n task.pop('_id')\n return _tasks\n\n project_ids = get_project_ids_str().split(',')\n\n futures = []\n for project_id in project_ids:\n futures.append(_iter_projects.remote(project_id))\n\n tasks = []\n for future in tqdm(futures):\n tasks.append(ray.get(future))\n\n if dump:\n with open(dump, 'w') as j:\n json.dump(sum(tasks, []), j)\n\n return sum(tasks, [])", "async def list_tasks():", "def get(self, project_id):\n try:\n pagination_args = get_pagination_args(request)\n except ArgumentError as e:\n return {'message': e.message}, 500\n\n limit = pagination_args['limit'] if 'limit' in pagination_args else self.DEFAULT_LIMIT\n offset = 
pagination_args['offset'] if 'offset' in pagination_args else self.DEFAULT_OFFSET\n\n tasks = backend.filter(Task, {'project.pk': request.project.pk},\n include=('project',), only=TaskDetails.export_fields, raw=True\n ).sort('created_at', -1)\n\n return {'tasks': [TaskDetails.export(task) for task in tasks[offset:offset + limit]]}, 200", "def get_tasks(self):\n if self.tasks_url:\n resp = self._api.list_tasks(url=self.tasks_url)\n\n else:\n resp = self._api.list_tasks(job_id=self.id)\n\n if resp.success:\n self.tasks = [Task(self._api, self.id, **task_def)\n for task_def in resp.result]\n\n return self.tasks\n\n else:\n raise resp.result", "def get_tasks(self):\n return self.task_collection", "def view_tasks():\n task_list = []\n incomplete_task_list = Tasks.objects.filter(is_complete=False)\n for task in incomplete_task_list:\n tasks = [] #create data structure\n tasks.append(task.id) #add ID \n tasks.append(task.task_text) #add text\n task_list.append(tasks) #append data structure\n\n return task_list", "def get_tasks(self):\n return self.stn.get_tasks()", "def query_project_tasks(self, project_data):\n\n # Get project ID.\n project_id = project_data[0][0]\n query = \"select task_datest, task_dateend, task_info, skill_descrpt, \" \\\n \"TS_Qty \" \\\n \"from skill, task_skills, task \" \\\n \"where task_skills.task_id = task.task_id \" \\\n \"and proj_id = '{}' \" \\\n \"and task_skills.skill_id = skill.skill_id \" \\\n \"order by task_datest\".format(project_id)\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def task_list(self) -> List[\"Task\"]: # noqa: F821\n return list(self.tasks.values())", "def get_all():\n return list(tasks.find({}))", "def get_all_tasks(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT task FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn [tup[0] for tup in tup_list]", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks", "def all_tasks(request):\n return Task.objects.select_related('project').filter(user=request.user).exclude(folder='trash')" ]
[ "0.8217871", "0.7975788", "0.75386", "0.7525342", "0.7506066", "0.74947053", "0.7418993", "0.7322707", "0.7289081", "0.7264354", "0.72270566", "0.7187831", "0.71321934", "0.711599", "0.7102491", "0.7091863", "0.7089866", "0.70818645", "0.7079432", "0.70347834", "0.7004059", "0.69971246", "0.6995982", "0.6969395", "0.6964492", "0.6952018", "0.69279855", "0.690952", "0.69059634", "0.68921155" ]
0.8169617
1
User with permissions can replace completion status of task if it exists and is part of the project
def update_task_status(project_id, task_id):
    completion_status = request.get_json()['completion_status']
    project = Project.query.filter_by(id=project_id).first()
    if not project:
        return {
            'success': False,
            'message': f"No project with the specified id {project_id} found.",
        }

    else:
        permission = has_project_permission(project, g.user)
        task = Task.query.filter_by(id=task_id).first()
        if not task:
            abort(404, f'There is no task with ID of {task_id}.')
        if task:
            task.completion_status = completion_status
            db_session.add(task)
            db_session.commit()
            return {
                'success': True,
                'result': task_schema.dump(task),
                'message': f"Successfully Updated the Completion Status of {task.name}."
            }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def task_status():\n pass", "def upload_priviledge(request):\n try:\n check = UserProfile.objects.filter(contributor = 1)\n uncheck = UserProfile.objects.filter(contributor = 0)\n \n i = UserProfile.objects.filter(contributor=1).count()\n k = UserProfile.objects.filter(contributor = 0).count()\n \n j = 0\n while j < i:\n c = check[j]\n usr = User.objects.get(username=c.user)\n perm_id = Permission.objects.get(codename = 'add_task')\n if usr.has_perm('translation.add_task'):\n pass\n else:\n usr.user_permissions.add(perm_id)\n usr.save()\n j += 1\n j = 0\n while j < k:\n u = uncheck[j]\n usr = User.objects.get(username=u.user)\n if not usr.has_perm('translation.add_task'):\n pass\n else:\n usr.user_permissions.remove(perm_id)\n usr.save()\n j += 1\n \n data = {'msg':''}\n messages.success(request, \"User's upload priviledge updated successfully.\")\n return render_to_response('my_admin_tools/menu/background_task.html',data,context_instance=RequestContext(request))\n except:\n msg = traceback.format_exc()\n data = {'msg':msg}\n messages.error(request, \"Update user's upload priviledge failed.\")\n return render_to_response('my_admin_tools/menu/background_task.html',data,context_instance=RequestContext(request))", "def check_indicator_files(tasks):\n\n for task in tasks:\n if task[\"status\"]==\"unknown\":\n if os.path.exists(task[\"result\"]):\n task[\"status\"]=\"previously completed\"\n else:\n task[\"status\"]=\"to do\"\n return", "def test_allowed_if_in_task(self):\n\n @task_or_superuser_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n request = self.factory.get(\"/\")\n request.META[_TASK_NAME_HEADER] = \"test\"\n\n response = view(request)\n self.assertEqual(response.status_code, 200)", "def post(self):\n task = self.params.task\n task.completed = not task.completed\n task.put()\n render_json(self, obj=task.as_json())", "def update_project_status(project_id):\n completion_status = request.get_json()['completion_status']\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n project.completion_status = completion_status\n db_session.add(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': task_schema.dump(project),\n 'message': f\"Successfully Updated the Completion Status of {project.name}.\"\n }", "def test_set_task_complete_view(self):\n self.task.status = Task.STATUS_CHOICES.ready_for_review\n self.task.save()\n pk = self.task.pk\n url = reverse('set_task_complete', kwargs={'pk': pk})\n response = self.client.post(url)\n self.assertEqual(response.status_code, 403)\n task = Task.objects.get(pk=pk)\n self.assertIsNone(task.reviewed_by)\n self.assertEqual(task.status, Task.STATUS_CHOICES.ready_for_review)\n # Create a staff user and login as staff user\n staff_user = self.create_user(username='staff_user',\n password='password',)\n staff_user.is_staff = True\n staff_user.save()\n self.client.login(username='staff_user', password='password')\n\n response = self.client.post(url)\n self.assertEqual(response.status_code, 302)\n task = Task.objects.get(pk=pk)\n self.assertEqual(task.reviewed_by, staff_user)\n self.assertEqual(task.status, Task.STATUS_CHOICES.complete)", "def not_complete(request):\n print(\"not_complete method in tutor_helper.py\")\n if user_auth(request):\n user = User.objects.get(email=request.user.email)\n print(\"\\t\", user)\n current_user = 
UserInformation.objects.get(user=user)\n if current_user.current_main_set is None:\n return False\n if current_user.completed_sets is not None:\n if current_user.current_main_set not in current_user.completed_sets.all():\n print(\"not complete\")\n print(current_user.current_main_set)\n return True\n else:\n if current_user.completed_sets is None:\n return True\n return False", "def assign_task(user_name, task_name, work_server_ip):\r\n\r\n database_handler.update_records(\"current_tasks\",\r\n {\"server_ip\": work_server_ip, \"Task_status\": TaskStatusNames.in_progress.value},\r\n condition=\"Task_name=$? and user_name=$?\", code_args=[task_name, user_name])", "def test_project_user_membership_awaiting_authorisation_status(self):\n self.membership.status = ProjectUserMembership.AWAITING_AUTHORISATION\n self.assertTrue(self.membership.awaiting_authorisation())\n\n self.membership.status = ProjectUserMembership.AUTHORISED\n self.assertFalse(self.membership.awaiting_authorisation())", "def uncomplete(self):\n ### TODO: needs test code for code coverage!\n ## (it has been tested through the calendar-cli test code)\n if not hasattr(self.vobject_instance.vtodo, \"status\"):\n self.vobject_instance.vtodo.add(\"status\")\n self.vobject_instance.vtodo.status.value = \"NEEDS-ACTION\"\n if hasattr(self.vobject_instance.vtodo, \"completed\"):\n self.vobject_instance.vtodo.remove(self.vobject_instance.vtodo.completed)\n self.save()", "def set_task_in_progress(self):\n\n tasks = self._get_all_tasks()\n\n task_id = tasks[self.tasks_view.currentRow()].Id\n\n self.tasks_flow.set_status(task_id, 1)\n\n # Refresh the table\n self.write_tasks_table()", "def user_requested_access(user):\r\n user = CourseCreator.objects.get(user=user)\r\n if user.state != CourseCreator.GRANTED:\r\n user.state = CourseCreator.PENDING\r\n user.save()", "def _set_current_task_status_on_exit():\n global current\n global taskdb\n\n if current.task is not None \\\n and taskdb is not None \\\n and current.task.status != tasks.COMPLETE:\n current.task.status = tasks.ERROR\n taskdb.put(current.task.task_name, current.task)", "def updateTask(task):\n # First check to see if task exists\n detailed_ticket = jutdaapi.get_detailed_ticket(task._ticket_id)\n if not detailed_ticket:\n print 'task does not exist yet'\n return False\n # If so, check that things have actually changed (diff edited and orig)\n database_task = ticketToTask(detailed_ticket)\n if task._orig == task:\n return 'no changes to make'\n return True\n # If so, check that no one else has made changes (diff orig and database)\n if not database_task == task._orig:\n print 'task has changed in database; refresh task!'\n return False\n #priority = (task.priority + 2) / 2\n priority = task.priority\n if task.assigner not in ['no one', 'Unassigned']:\n title = '('+task.assigner.title()+') '+task.name\n #if task.name[-1] == ' ':\n # title = task.name + 'for: '+task.assigner.title()\n #else:\n # title = task.name + ' for: '+task.assigner.title()\n else:\n title = task.name\n description = task.description\n #if task.assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+task.assigner+'\"/>'\n if 't' not in task.id:\n description += '<tasktrackermeta id=\"'+task.id+'\"/>'\n return jutdaapi.edit_ticket(task._ticket_id, title=title, queue=None, submitter_email=None,\n description=description, priority=priority)", "def mark_as_in_progress(self, task):\n raise NotImplementedError('')", "async def access(cls, entry: \"TaskEntry\"):\n return True", "def 
is_task_stagnant(task):", "def mark_completed(self,status):\r\n if status == \"r\":\r\n self.status = \"c\"#It is to test the mark complete function in the test_book.py, otherwise this program works fine in the main.py\r\n return True\r\n elif status == \"c\":\r\n return False", "def test_check_contributing_state_completed_user_not_contributed(self):\r\n app = AppFactory.create()\r\n task = TaskFactory.create(app=app, n_answers=2)\r\n TaskRunFactory.create_batch(2, task=task)\r\n user = UserFactory.create()\r\n\r\n contributing_state = helpers.check_contributing_state(app_id=app.id,\r\n user_id=user.id)\r\n\r\n assert task.state == 'completed', task.state\r\n assert contributing_state == 'completed', contributing_state", "def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')", "def test_user_with_no_more_tasks_find_volunteers_project_completed(self):\r\n\r\n self.register()\r\n user = User.query.first()\r\n app = AppFactory.create(owner=user)\r\n task = TaskFactory.create(app=app, n_answers=1)\r\n taskrun = TaskRunFactory.create(task=task, user=user)\r\n res = self.app.get('/app/%s/newtask' % app.short_name)\r\n\r\n assert task.state == 'completed', task.state\r\n message = \"Sorry, you've contributed to all the tasks for this project, but this project still needs more volunteers, so please spread the word!\"\r\n assert message not in res.data\r\n self.signout()", "def custom_wait_for_completion(task_description, output):\n state = 'UNSUBMITTED'\n while not (state == 'COMPLETED' or state =='FAILED'):\n output.add_live_msg(ms.STATUS.format(state))\n time.sleep(5)\n \n #search for the task in task_list\n for task in task_description:\n current_task = gs.isTask(task)\n if current_task:\n state = current_task.state\n if state == 'RUNNING' or state == 'FAILED': \n break\n \n return state", "async def done(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n lst.remove(member.id)\r\n await self.config.guild(ctx.guild).neededlist.set(lst)\r\n await self.config.member(member).clear()\r\n await ctx.send(\"Removed member from pending list\")\r\n\r\n else:\r\n await ctx.send(\"Member not in the pending list\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")", "def test_24_get_specific_completed_task_user(self):\r\n\r\n #model.rebuild_db()\r\n with self.flask_app.app_context():\r\n self.create()\r\n self.register()\r\n\r\n user = db.session.query(User)\\\r\n .filter(User.name == self.user.username)\\\r\n .first()\r\n app = db.session.query(App).first()\r\n task = db.session.query(Task)\\\r\n .filter(App.id == app.id)\\\r\n .first()\r\n for i in range(10):\r\n task_run = TaskRun(app_id=app.id, task_id=task.id, user_id=user.id,\r\n info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n #self.app.get('api/app/%s/newtask' % app.id)\r\n\r\n ntask = Task(id=task.id, state='completed')\r\n #self.signin()\r\n assert ntask not in db.session\r\n 
db.session.merge(ntask)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),\r\n follow_redirects=True)\r\n msg = 'You have already participated in this task'\r\n assert msg in res.data, res.data\r\n assert 'Try with another one' in res.data, res.data\r\n self.signout()", "def mark_as_done(self, task):\n raise NotImplementedError('')", "def _update_instructor_task(instructor_task, task_result):\r\n # Pull values out of the result object as close to each other as possible.\r\n # If we wait and check the values later, the values for the state and result\r\n # are more likely to have changed. Pull the state out first, and\r\n # then code assuming that the result may not exactly match the state.\r\n task_id = task_result.task_id\r\n result_state = task_result.state\r\n returned_result = task_result.result\r\n result_traceback = task_result.traceback\r\n\r\n # Assume we don't always save the InstructorTask entry if we don't have to,\r\n # but that in most cases we will update the InstructorTask in-place with its\r\n # current progress.\r\n entry_needs_updating = True\r\n entry_needs_saving = False\r\n task_output = None\r\n\r\n if instructor_task.task_state == PROGRESS and len(instructor_task.subtasks) > 0:\r\n # This happens when running subtasks: the result object is marked with SUCCESS,\r\n # meaning that the subtasks have successfully been defined. However, the InstructorTask\r\n # will be marked as in PROGRESS, until the last subtask completes and marks it as SUCCESS.\r\n # We want to ignore the parent SUCCESS if subtasks are still running, and just trust the\r\n # contents of the InstructorTask.\r\n entry_needs_updating = False\r\n elif result_state in [PROGRESS, SUCCESS]:\r\n # construct a status message directly from the task result's result:\r\n # it needs to go back with the entry passed in.\r\n log.info(\"background task (%s), state %s: result: %s\", task_id, result_state, returned_result)\r\n task_output = InstructorTask.create_output_for_success(returned_result)\r\n elif result_state == FAILURE:\r\n # on failure, the result's result contains the exception that caused the failure\r\n exception = returned_result\r\n traceback = result_traceback if result_traceback is not None else ''\r\n log.warning(\"background task (%s) failed: %s %s\", task_id, returned_result, traceback)\r\n task_output = InstructorTask.create_output_for_failure(exception, result_traceback)\r\n elif result_state == REVOKED:\r\n # on revocation, the result's result doesn't contain anything\r\n # but we cannot rely on the worker thread to set this status,\r\n # so we set it here.\r\n entry_needs_saving = True\r\n log.warning(\"background task (%s) revoked.\", task_id)\r\n task_output = InstructorTask.create_output_for_revoked()\r\n\r\n # save progress and state into the entry, even if it's not being saved:\r\n # when celery is run in \"ALWAYS_EAGER\" mode, progress needs to go back\r\n # with the entry passed in.\r\n if entry_needs_updating:\r\n instructor_task.task_state = result_state\r\n if task_output is not None:\r\n instructor_task.task_output = task_output\r\n\r\n if entry_needs_saving:\r\n instructor_task.save()", "def show_tasks_status(user, tasks):\n employee_name = user[0]['username']\n all_tasks = tasks\n completed = 0\n title_completed_tasks = ''\n for task in all_tasks:\n if task['completed'] is True:\n completed += 1\n title_completed_tasks += '\\t ' + task['title'] + '\\n'\n print('Employee {} is done with tasks({}/{}):'\n .format(employee_name, completed, 
len(all_tasks)))\n print(title_completed_tasks, end='')", "def change_task(self):\n sel_task = self.find_task()\n if sel_task is False:\n return\n\n # We have a valid task, let's change it.\n self.clear_screen()\n self.display_task(sel_task)\n print \"\\n'd': Mark this task done\"\n print \"'t': Change tags of this task\"\n print \"'x': Remove this task permanently (cannot be undone)\"\n print \"'c': Cancel and return to main menu.\"\n selection = None\n\n # Continue until user cancels\n while selection != 'c':\n selection = raw_input(\n \"Enter command for selected task > \").strip().lower()\n\n if selection == 'd':\n sel_task.mark_done(self.user)\n self.current_collection.archive()\n break\n\n if selection == 't':\n user_input = raw_input(\n \"Overwrite existing tags? y/n > \"\n ).strip().lower()\n if user_input in ('y', 'yes'):\n del sel_task.tags\n user_tags = raw_input(\n \"Enter new tags (comma separated) (optional). > \")\n sel_task.tags = [\n tag.strip() for tag in user_tags.split(',')]\n break\n\n if selection == 'x':\n if raw_input(\"Delete this task? y/n > \") in ('y', 'Y'):\n delete = self.current_collection.delete(sel_task)\n if delete:\n raw_input(\"Task deleted. Press Enter\")\n break\n else:\n raw_input(\"Task not deleted. Try again.\")\n continue\n else:\n print \"Please enter valid command.\"\n return", "def _add_task_action(self, task):\n if not task.is_alive():\n return" ]
[ "0.61753845", "0.60224724", "0.60031533", "0.5881787", "0.5879694", "0.5829092", "0.5815816", "0.5802099", "0.5792718", "0.57676065", "0.57620394", "0.5757087", "0.574239", "0.5696005", "0.56877255", "0.5670999", "0.56441444", "0.56272525", "0.5616785", "0.56102926", "0.5608175", "0.559789", "0.5581435", "0.5563056", "0.5559478", "0.5507551", "0.55045074", "0.54986686", "0.54895645", "0.5487179" ]
0.62761444
0
Inherit receive_voucher method to create journal entry when the operation is treasury feeding
def receive_voucher(self, cr, uid, ids, context=None):
    super(account_voucher, self).receive_voucher(cr, uid, ids, context=context)
    if self.browse(cr, uid, ids, context=context)[0].operation_type == 'treasury':
        self.action_move_line_create(cr, uid, ids, context=context)
    return self.write(cr, uid, ids, {'state': 'receive'}, context=context)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ticket_created(self, ticket):", "def on_submit(self):\n\n\t\tfor accounting_entry in self.get('accounting_entries'):\n\t\t\tledger_entry_doc = frappe.get_doc({\n\t\t\t\t'doctype': 'Ledger Entry',\n\t\t\t\t'posting_date': self.posting_date,\n\t\t\t\t'account': accounting_entry.account,\n\t\t\t\t'debit': accounting_entry.debit,\n\t\t\t\t'credit': accounting_entry.credit,\n\t\t\t\t'voucher_type': 'Journal Entry',\n\t\t\t\t'voucher_number': self.name,\n\t\t\t\t'company': self.company\n\t\t\t})\n\t\t\tledger_entry_doc.insert()", "def _setup_ledger_entry(\n entity_from,\n entity_to,\n currency='cur',\n amount=100.30,\n is_issued=False,\n action=None,\n transaction=None\n):\n ledger_rec = Ledger(\n entity_from=entity_from,\n entity_to=entity_to,\n currency=currency,\n amount=amount,\n is_issued=is_issued,\n action=action,\n transaction=transaction\n )\n ledger_rec.save()\n\n return ledger_rec", "def done(self, cr, uid, ids, context=None):\n \n voucher_obj = self.pool.get('account.voucher')\n voucher_line_obj = self.pool.get('account.voucher.line')\n admin_affairs_model_obj = self.pool.get('admin.affairs.model')\n affairs_account_obj = self.pool.get('admin_affairs.account') \n model_id = admin_affairs_model_obj.search(cr, uid, [('model','=','environment.and.safety')], context=context)[0] \n affairs_account = affairs_account_obj.search(cr, uid, [('model_id','=',model_id)], context=context)\n if not affairs_account:\n raise osv.except_osv(_('Warning !'), _('Please insert account configuration For Environment and safety'))\n affairs_account_id = affairs_account[0]\n \n affairs_account_record = affairs_account_obj.browse(cr, uid, affairs_account_id,context=context) \n for record in self.browse(cr, uid, ids, context=context):\n if not record.allowances_lines_after :\n raise osv.except_osv(_('Partner Amount !'), _('Sorry no partner Amount After Rate To Transfer!'))\n notes = _(\"Enviroment and Safety allowances Contract: %s\")%(record.name)\n \n journal_id = affairs_account_record.journal_id\n analytic_id = affairs_account_record.analytic_id\n account_id = affairs_account_record.account_id\n\n\t\t# Creating Voucher / Ratitication\n voucher_id = voucher_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'type': 'ratification',\n 'date': time.strftime('%Y-%m-%d'),\n 'partner_id': record.partner_id.id,\n 'journal_id': journal_id and journal_id.id , \n 'state': 'draft',\n\t\t\t\t\t 'notes':record.notes,\n\t\t\t\t\t 'narration':notes ,\n \t 'company_id':record.company_id.id,\n })\n \t# Creating Voucher / Ratitication Lines\n for line in record.allowances_lines_after:\n '''account_id =line.category_id.account_id\n if not account_id:\n account_id = line.category_id.parent_id.account_id\n \n if not account_id:\n account_id = affairs_account_record.account_id \n\n if not account_id:\n raise osv.except_osv(_('Invalid action !'), _('Please insert Account configuration For Environment and safety Service')) ''' \n \n account_analytic_id =line.category_id.analytic_id\n if not account_analytic_id:\n account_analytic_id = line.category_id.parent_id.analytic_id \n \n if not account_analytic_id:\n account_analytic_id = affairs_account_record.analytic_id\n \n vocher_line_id = voucher_line_obj.create(cr, uid, {\n 'amount': record.amount_total,\n 'voucher_id': voucher_id,\n\t\t\t\t\t 'account_id':account_id and account_id.id,\n\t\t\t\t\t 'account_analytic_id':account_analytic_id and account_analytic_id.id ,\n 'type': 'dr',\n 'name':'environment and Safety allowances :' + record.name,\n })\n\t\t\n\t\t# Selecting 
Voucher Number / Refernece \n\n voucher_number = self.pool.get('account.voucher').browse(cr,uid,voucher_id)\n\n copy_attachments(self,cr,uid,[record.id],'services.contracts.archive',voucher_id,'account.voucher', context)\n self.write(cr, uid, ids, {'state':'done','transfer':True,'voucher_no':voucher_number.number}) \n return True", "def create_voucher(self, amount, expires=None, message='', gid=None):\n expires = absdatetime(expires, default=self.EXP_VOUCHER).isoformat()\n voucher = self.request('post', 'vouchers/', json.dumps({\n 'amount': amount,\n 'until': expires,\n 'message': message,\n 'gid': gid\n }))\n return {\n 'raw': voucher,\n 'urls': {\n 'redirect': urljoin(self.usr_frontend, '/vouchers/%s' % voucher['vid_encoded']),\n 'popup': urljoin(self.usr_frontend, '/popup/vouchers/%s' % voucher['vid_encoded'])\n }\n }", "def Ticket(ticket):\n try:\n data = ticket_module.verify(ticket)\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def create(self, *args, **kwargs):\n\n if not args and not kwargs:\n raise Exception('attributes for Voucher are missing')\n\n initial_attributes = args[0] if args else kwargs\n attributes = dict((k, v) for k, v in initial_attributes.items())\n attributes.update({'service': self.SERVICE})\n _, _, voucher = self.http_client.post(\"/vouchers\", body=attributes)\n return voucher", "def report(self, trade, is_entry):\n pass", "def new_check(self, cr, uid, ids, context=None):\n voucher_pool = self.pool.get('account.voucher')\n move = self.pool.get('account.move').browse(cr, uid, context.get('active_id',[]), context=context)\n cr.execute(\"SELECT COALESCE(sum(credit),0) amount,ml.partner_id,COALESCE(date_maturity,%s) date_maturity,ml.id id \" \\\n \"FROM account_move_line ml INNER JOIN account_move m ON m.id = ml.move_id \" \\\n \"INNER JOIN account_account acc ON acc.id = ml.account_id INNER JOIN account_account_type acc_type ON acc_type.id = acc.user_type \" \\\n \"WHERE m.id = %s AND ml.credit > 0 AND acc.type = 'liquidity' GROUP BY ml.partner_id,date_maturity,ml.id\",(move.date,str(move.id),))\n suppliers = cr.dictfetchall()\n voucher_id = False\n for supplier in suppliers:\n voucher = {\n 'account_id':move.journal_id.default_credit_account_id.id,\n 'company_id':move.company_id.id,\n 'period_id':move.period_id.id,\n 'date':move.date,\n 'amount':supplier['amount'],\n 'journal_id':move.journal_id.id,\n 'pay_journal_id':move.journal_id.id,\n 'move_id':int(move.id),\n 'ref': move.name,\n 'partner_id':supplier['partner_id'],\n 'amount_in_word':amount_to_text_ar(supplier['amount'], 'ar'),\n 'type':'payment',\n 'allow_check':1,\n 'chk_status':True,\n 'date_due':supplier['date_maturity']\n }\n voucher_id = voucher_pool.create(cr, uid, voucher, context=context)\n voucher_pool.write(cr, uid, voucher_id, {'state': 'posted'}, context=context)\n self.write(cr, uid, ids, {'payment_id':voucher_id}, context=context)\n return voucher_id", "def create(self, cr, uid, vals, context=None):\n if ('name' not in vals) or (vals.get('name')=='/'):\n vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'payment.enrich')\n \n #for the case of the solidarity box request\n if 'amount' not in vals:\n vals['amount']=self.pool.get('enrich.category').read(cr, uid, vals['enrich_category'], ['amount'], context=context)['amount']\n\n return super(payment_enrich, self).create(cr, uid, vals, 
context)", "def button_fac_cob_ent(self):\n invoice = self._fac_ent()\n\n # pagar la factura\n # hacer configuracion para modificar esto\n receipt_obj = self.env['account.voucher.receiptbook']\n receipt = receipt_obj.search([('name', 'like', 'Recibos')], limit=1)\n\n journal = self.journal_id\n res = invoice.invoice_pay_customer()\n context = res['context']\n\n account_voucher_obj = self.env['account.voucher']\n voucher = account_voucher_obj.create({\n 'partner_id': context['default_partner_id'],\n 'journal_id': journal.id,\n 'account_id': journal.default_debit_account_id.id,\n 'type': context['type'],\n 'amount': context['default_amount'],\n 'net_amount': context['default_amount'],\n 'receiptbook_id': receipt.id,\n 'company_id': self.env.user.company_id.id\n })\n voucher.signal_workflow('proforma_voucher')\n\n account_move_line_obj = self.env['account.move.line']\n\n # obtener un recordser vacio\n lines2rec = account_move_line_obj.browse()\n\n # obtener las lineas a conciliar de facturas\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', invoice.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n # obtener las lineas a conciliar de pagos\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', voucher.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n period_obj = self.env['account.period']\n period = period_obj.find()\n\n # reconciliar las lineas de factura con pagos\n lines2rec.reconcile('manual',\n journal.default_debit_account_id.id, # writeoff_acc_id\n period.id, # writeoff_period_id,\n journal.id) # writeoff_journal_id)\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def obj_create(self, bundle, **kwargs):\n logger.info(\"Creating a new acknowledgement...\")\n #Create the object\n bundle.obj = Acknowledgement()\n #hydrate\n bundle = self.full_hydrate(bundle)\n \n #Set the customer\n try:\n logger.info(\"Setting customer...\")\n bundle.obj.customer = Customer.objects.get(pk=bundle.data[\"customer\"][\"id\"])\n bundle.obj.discount = bundle.obj.customer.discount\n except:\n logger.error(\"Customer with ID {0} could not be found.\".format(bundle.data['customer']['id']))\n raise\n \n #Set the employee\n try:\n logger.info(\"Setting employee...\")\n bundle.obj.employee = bundle.request.user\n except User.DoesNotExist:\n logger.error(\"User with ID {0} could not be found\".format(bundle.data['employee']['id']))\n raise\n except KeyError:\n logger.critical(\"Missing employee ID.\")\n raise\n \n #Set Status\n bundle.obj.status = \"ACKNOWLEDGED\"\n \n #Set the project or create a new one\n if \"project\" in bundle.data:\n try:\n project = Project.objects.get(pk=bundle.data['project']['id'])\n except KeyError, Project.DoesNotExist:\n try:\n project = Project()\n project.codename = bundle.data['project']['codename']\n project.save()\n except KeyError:\n project = None\n \n bundle.obj.project = project\n \n #Create items without saving them \n logger.info(\"Creating items...\")\n self.items = [Item.create(acknowledgement=bundle.obj,\n commit=False,\n **product) for product in bundle.data[\"items\"]]\n \n #Calculate the total price\n logger.info(\"Calculating balance of the order...\")\n bundle.obj.calculate_totals(self.items)\n bundle = self.save(bundle)\n \n 
#Save the items\n logger.info(\"Saving the items to the database...\")\n for item in self.items:\n item.acknowledgement = bundle.obj\n item.save()\n \n log_message = \"Ack {0} created on {1}. Schedule to be delivered on {1}\"\n log_message = log_message.format(bundle.obj.id,\n bundle.obj.time_created.strftime('%B %d, %Y'),\n bundle.obj.delivery_date.strftime('%B %d, %Y'))\n log = Log(message=log_message,\n delivery_date=bundle.obj.delivery_date,\n acknowledgement=bundle.obj)\n log.save()\n #Create and upload the pdfs to the \n #S3 system. The save the pdfs as\n #Attributes of the acknowledgement\n logger.info(\"Creating PDF documents...\")\n bundle.obj.create_and_upload_pdfs()\n \n \n #Add the url of the pdf to the outgoing data\n #only for when an acknowledgement is create\n try:\n ack = bundle.obj.acknowledgement_pdf\n production = bundle.obj.production_pdf\n bundle.data['pdf'] = {'acknowledgement': ack.generate_url(),\n 'production': production.generate_url()}\n except AttributeError: \n logger.warn('Missing acknowledgement or production pdf')\n \n #Conditionally email ack to Decoroom\n if \"decoroom\" in bundle.obj.customer.name.lower():\n try:\n logger.info(\"Emailing Decoroom Co., Ltd. the order details...\")\n bundle.obj.email_decoroom()\n except Exception as e:\n logger.error(\"Unable to mail decoroom.\")\n logger.error(e)\n \n \n \n logger.info(u\"Acknowledgement #{0} created for {1}\".format(bundle.obj.id, \n bundle.obj.customer.name)) \n return bundle", "def __init__(self, donorReference='', kind=\"other\", receiverReference='', serviceUnitsError=0.0, diverseReference='', serviceUnitsEnergy=0.0, reversedId='', PricingStructure=None, line=None, UserAttributes=None, AuxiliaryAccount=None, VendorShift=None, Receipt=None, Meter=None, CustomerAccount=None, CashierShift=None, *args, **kw_args):\n #: Reference to the entity that is the source of 'amount' (for example: customer for token purchase; or supplier for free issue token).\n self.donorReference = donorReference\n\n #: Kind of transaction. 
Values are: \"other\", \"serviceChargePayment\", \"accountPayment\", \"tokenSalePayment\", \"tokenCancellation\", \"taxChargePayment\", \"tokenExchange\", \"tokenGrant\", \"diversePayment\", \"auxiliaryChargePayment\", \"meterConfigurationToken\", \"tokenFreeIssue\", \"transactionReversal\"\n self.kind = kind\n\n #: Reference to the entity that is the recipient of 'amount' (for example, supplier for service charge payment; or tax receiver for VAT).\n self.receiverReference = receiverReference\n\n #: Number of service units not reflected in 'serviceUnitsEnergy' due to process rounding or truncating errors.\n self.serviceUnitsError = serviceUnitsError\n\n #: Formal reference for use with diverse payment (traffic fine for example).\n self.diverseReference = diverseReference\n\n #: Actual amount of service units that is being paid for.\n self.serviceUnitsEnergy = serviceUnitsEnergy\n\n #: (if 'kind' is transactionReversal) Reference to the original transaction that is being reversed by this transaction.\n self.reversedId = reversedId\n\n self._PricingStructure = None\n self.PricingStructure = PricingStructure\n\n self.line = line\n\n self._UserAttributes = []\n self.UserAttributes = [] if UserAttributes is None else UserAttributes\n\n self._AuxiliaryAccount = None\n self.AuxiliaryAccount = AuxiliaryAccount\n\n self._VendorShift = None\n self.VendorShift = VendorShift\n\n self._Receipt = None\n self.Receipt = Receipt\n\n self._Meter = None\n self.Meter = Meter\n\n self._CustomerAccount = None\n self.CustomerAccount = CustomerAccount\n\n self._CashierShift = None\n self.CashierShift = CashierShift\n\n super(Transaction, self).__init__(*args, **kw_args)", "def landlord_button_deposite_received(self):\n payment_id = False\n acc_pay_form = self.env.ref(\n 'account.view_account_payment_form')\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'sale')], limit=1)\n payment_obj = self.env['account.payment']\n payment_method_id = self.env.ref(\n 'account.account_payment_method_manual_in')\n for tenancy_rec in self:\n if tenancy_rec.acc_pay_dep_rec_id and \\\n tenancy_rec.acc_pay_dep_rec_id.id:\n return {\n 'view_type': 'form',\n 'view_id': acc_pay_form.id,\n 'view_mode': 'form',\n 'res_model': 'account.payment',\n 'res_id': tenancy_rec.acc_pay_dep_rec_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }\n if tenancy_rec.deposit == 0.00:\n raise Warning(_('Please Enter Deposit amount.'))\n if tenancy_rec.deposit < 0.00:\n raise Warning(\n _('The deposit amount must be strictly positive.'))\n vals = {\n 'partner_id': tenancy_rec.property_owner_id.parent_id.id,\n 'partner_type': 'customer',\n 'journal_id': account_jrnl_obj.id,\n 'payment_type': 'inbound',\n 'communication': 'Deposit Received',\n 'tenancy_id': tenancy_rec.id,\n 'amount': tenancy_rec.deposit,\n 'property_id': tenancy_rec.property_id.id,\n 'payment_method_id': payment_method_id.id\n }\n payment_id = payment_obj.create(vals)\n return {\n 'view_mode': 'form',\n 'view_id': acc_pay_form.id,\n 'view_type': 'form',\n 'res_id': payment_id and payment_id.id,\n 'res_model': 'account.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'current',\n 'domain': '[]',\n 'context': {\n 'close_after_process': True,\n }\n }", "def call_transfer_fund(self):\n ## 1) Create expense line for current student\n ## 2) Create Deposite lines for oney transfer student\n\n ## 1\n student_pool = self.env['op.student']\n partner_obj = self.env['res.partner']\n employee_pool = 
self.env['hr.employee']\n\n if not self.pin_varification:\n raise except_orm(_('Warning!'),\n _(\"Enter Valid PIN to proceed!\"))\n\n\n student_id = student_pool.search([('user_id', '=', self._uid)])\n\n ## Validate Enter PIN\n if student_id:\n self.validate_current_user_pin(student_id)\n\n expense_vals = {\n 'name': student_id.id,\n 'amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s\" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n 'create_invoice': False,\n # 'student_id': student_id.id,\n }\n\n student_expenses_id = self.env['student.expenses'].sudo().create(expense_vals)\n self.total_expense_balance = student_id.stud_balance_amount\n\n ## Get employee form account id\n employee_id = employee_pool.sudo().search([('ean13', '=', self.account_no)])\n\n ## Search EMployee By Employee ID\n search_by_id_employee_id = employee_pool.sudo().search([('identification_id', '=', self.account_no)])\n\n ## Search by student matrix ID\n search_by_id_student_id = student_pool.sudo().search([('gr_no', '=', self.account_no)])\n\n if not self.account_no:\n ## Logic for search by User Name\n employee_id = self.pass_employee_id.sudo()\n student_id = self.pass_student_id.sudo()\n else:\n ## Get partner form account id\n student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if student_id:\n deposite_vals = {\n 'name': student_id.id,\n # 'amount': self.amount_to_transfer,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n }\n student_deposite_id = self.env['student.deposits'].sudo().create(deposite_vals)\n if not self.account_no:\n trans_student_id = student_id.sudo()\n else:\n trans_student_id = student_pool.sudo().search([('ean13', '=', self.account_no)])\n if trans_student_id:\n self.total_deposite_balance = trans_student_id.stud_balance_amount\n elif employee_id:\n deposite_vals = {\n 'name': employee_id.id,\n 'employee_id': employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = employee_id.available_balance\n\n elif search_by_id_employee_id:\n deposite_vals = {\n 'name': search_by_id_employee_id.id,\n 'employee_id': search_by_id_employee_id.identification_id,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n employee_deposite_id = self.env['employee.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_employee_id.available_balance\n\n elif search_by_id_student_id:\n deposite_vals = {\n 'name': search_by_id_student_id.id,\n 'employee_id': search_by_id_student_id.gr_no,\n 'paid_amount': self.amount_transfer,\n 'date': datetime.datetime.now(),\n 'create_invoice': True,\n 'source': \"Transfer Amount of %s to account no %s (%s) on date %s - %s \" % (self.amount_transfer, self.account_no, self.name, datetime.datetime.now(), self.description),\n }\n student_deposite_id = 
self.env['student.deposits'].sudo().create(deposite_vals)\n self.total_deposite_balance = search_by_id_student_id.stud_balance_amount\n\n # return True\n compose_form = self.env.ref('deposite_management.transfer_confirmation_popup_view', False)\n\n try:\n template_id = self.env.ref('deposite_management.email_template_student_fund_transfer', False)\n except ValueError:\n template_id = False\n values = self.env['email.template'].generate_email(template_id.id, self.id)\n\n ## Append Student email id to send mail\n if values and 'email_to' in values:\n values['email_to'] = student_id.sudo().email\n mail_id = self.env['mail.mail'].sudo().create(values)\n if mail_id:\n mail_send_id = mail_id.send()\n\n try:\n template_id_new = self.env.ref('deposite_management.email_template_student_fund_transfer_self_notification', False)\n except ValueError:\n template_id_new = False\n values_new = self.env['email.template'].generate_email(template_id_new.id, self.id)\n ## Append email id to send mail\n if values_new and 'email_to' in values_new:\n if student_id and trans_student_id:\n values_new['email_to'] = trans_student_id.email\n elif employee_id:\n values_new['email_to'] = employee_id.sudo().work_email\n mail_id_new = self.env['mail.mail'].sudo().create(values_new)\n if mail_id_new:\n mail_send_id = mail_id_new.send()\n ## return wizard after click on Fund Transfer Button\n return {\n 'name': _('Fund Transfer Done'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'fund.confirmation.msg',\n 'view_id': compose_form.id,\n 'target': 'new',\n }", "def pre_approve(self, cr, uid, ids, context={}):\n \tfor voucher in self.browse(cr, uid, ids, context=context):\n \t if not voucher.department_id.analytic_account_id:\n \t raise osv.except_osv(_('Configration Check!'), _(\"Please add cost center for your department!\"))\n \t periods = self.pool.get('account.period').search(cr, uid, [('date_start','<=',voucher.date),('date_stop','>=',voucher.date),('company_id','=',voucher.company_id.id)], context=context)\n\n\n res=0.0\n if voucher.purpose:\n if not voucher.purpose.account_id: raise osv.except_osv(_('Warning!'), _('Please configure account for this purpose!')) \n voucher_line = {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': voucher.amount,\n \t\t'amount': voucher.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': voucher.department_id.analytic_account_id and voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id,\n \t }\n new_amount = res and res or voucher.amount \n voucher_line.update({'amount':new_amount,'untax_amount':new_amount})\n \t if voucher.line_ids :\n for line in voucher.line_ids:\n \t\t self.pool.get('account.voucher.line').write(cr, uid, line.id, {\n \t\t'voucher_id': voucher.id,\n \t\t'partner_id': voucher.partner_id.id,\n \t\t'untax_amount': res or line.amount,\n \t\t'amount': line.amount,\n 'name': voucher.narration,\n \t\t'type': 'dr',\n \t\t'account_analytic_id': line.account_analytic_id and line.account_analytic_id.id or voucher.department_id.analytic_account_id.id,\n 'account_id': voucher.purpose.account_id.id or line.account_id.id,\n \t }, context=context)\n \t else:\n\n \t\t new_voucher_line = self.pool.get('account.voucher.line').create(cr, uid, voucher_line, context=context)\n context.update({'purchase':True})\n self.create_budget_confirmation(cr, uid, [voucher.id], context)\n \tself.write(cr, uid, ids,{'state': 
'preapprove','type':'purchase','ratification':True}, context=context)\n #cxt = context.copy()\n #cxt.update({'type':'ratification'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approved'}, context=context)\n\n \t'''self.write(cr, uid, ids, {'state': 'preapprove'})\n if not super(account_voucher, self).create_budget_confirmation(cr, uid, ids, context=context):\n self.write(cr, uid, ids, {'state': 'approve','type':'purchase','ratification':True}, context=context)'''\n return True", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # 
Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def loan(self):\n self.rent_date = datetime.datetime.now()\n self.back_date = datetime.datetime.now() + datetime.timedelta(14)\n self.book.loan()\n self.book.save()\n self.save()", "def exchange_payment(self, cr, uid, ids, context=None):\n data = self.browse(cr, uid, ids, context=context)[0]\n check_log_pool = self.pool.get('check.log')\n sequence_pool = self.pool.get('ir.sequence')\n move_pool = self.pool.get('account.move') \n move_line_pool = self.pool.get('account.move.line')\n\n voucher_obj = self.pool.get('account.voucher')\n old_voucher_ids = voucher_obj.search(cr, uid, [('move_id', '=', context['active_id'])], context=context)\n old_chk_log_ids = check_log_pool.search(cr,uid,[('name','in',old_voucher_ids),('status','=','active')], context=context)\n '''if chk_log_ids:\n check_log_pool.write(cr, uid, chk_log_ids, {'status':'delete','deleted':True},context=context)'''\n if old_chk_log_ids:\n raise osv.except_osv(_('Warning'), _('This move have already exchanged'))\n voucher_id = self.check_move_data(cr, uid, ids, context=context)\n if not voucher_id:\n raise osv.except_osv(_('Warning'), _('The account in credit lines must be of type liquidity'))\n if data.new_no and voucher_id:\n move = move_pool.browse(cr, uid, context['active_id'], context=context)\n journal_id=move and move.journal_id\n if self._exchange_journal_seq(journal_id, context=context):\n chk_log_ids = check_log_pool.search(cr,uid,[('status','=','active')], context=context)\n sequence_pool.write(cr, uid, [journal_id.check_sequence.id], {'number_next_actual':data.new_no}, context=context)\n next_seq = sequence_pool.get_id(cr, uid, journal_id.check_sequence.id, context=context)\n lines = move_line_pool.search(cr, uid,[('move_id','=',context['active_id'])], context=context)\n line = move_line_pool.browse(cr, uid, lines, context=context)[0]\n check_log_pool.create(cr, uid,{'name': voucher_id, 'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id,'company_id':move.company_id.id}, context=context)\n #check_log_pool.create(cr, uid,{'partner_id':line.partner_id.id,'date_due':move.date,'status': 'active', 'check_no': next_seq, 'journal_id':journal_id.id,'company_id':move.company_id.id}, context=context)\n move_pool.write(cr, uid,[context['active_id']], {'ref' : next_seq or ' '}, context=context)\n move_line_pool.write(cr, uid,lines, {'ref' : next_seq or ' '}, context=context)\n return {'type':'ir.actions.act_window_close'}", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = 
po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def post(self, request): # FIRST EXAMPLE\n model = self._create_booking(\n request=request) # when _create_booking is invoked, historio Client will log model\n print('save me')", "def CreateTransaction(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def prepare_ticket(self, req, ticket, fields, actions):", "def AdminTicket(ticket):\n try:\n data, = xmlrpclib.loads(ticket)[0]\n name = data['slivers'][0]['name']\n if data != None:\n deliver_ticket(data)\n logger.log('api_calls: Admin Ticket delivered for %s' % name)\n Create(database.db.get(name))\n except Exception, err:\n raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))", "def ticket_created(self, ticket):\n if 'ticket' not in self.sources:\n return\n gnp = GrowlNotificationPacket(notification='ticket',\n title='Ticket #%d created' % ticket.id,\n description=self._ticket_repr(ticket))\n gs = GrowlSender(self.env)\n gs.notify(self._get_hosts('ticket'), gnp)", "def test_add_journal_entry(self):\n url = reverse('journal')\n data = {\n 'game': {\n 'id': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'coverId': self.game.cover_id,\n 'backdropId': self.game.backdrop_id\n },\n 'date': '2019-06-28',\n 'review': 'cool game',\n 'spoilers': False,\n 'liked': True,\n 'rating': 5,\n 'entry_type': 'Finished',\n 'platform': 'PC',\n }\n response = self.client.post(url, data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def processChit(self, serder, sigers):\n # fetch pre, dig,seal to process\n ked = serder.ked\n pre = serder.pre\n sn = 
self.validateSN(ked)\n\n # Only accept receipt if for last seen version of receipted event at sn\n ldig = self.db.getKeLast(key=snKey(pre=pre, sn=sn)) # retrieve dig of last event at sn.\n seal = SealEvent(**ked[\"a\"])\n if self.pre and self.pre == seal.i: # own chit\n if self.pre == pre: # skip own chits of own events\n raise ValidationError(\"Own pre={} chit of own event {}.\"\n \"\".format(self.pre, ked))\n if not self.local: # skip own chits of nonlocal events\n raise ValidationError(\"Own pre={} seal in chit of nonlocal event \"\n \"{}.\".format(self.pre, ked))\n\n if ldig is not None and seal.i in self.kevers: # verify digs match last seen and receipt dig\n # both receipted event and receipter in database\n # so retreive\n ldig = bytes(ldig).decode(\"utf-8\")\n\n # retrieve event by dig assumes if ldig is not None that event exists at ldig\n dgkey = dgKey(pre=pre, dig=ldig)\n lraw = bytes(self.db.getEvt(key=dgkey)) # retrieve receipted event at dig\n # assumes db ensures that raw must not be none because ldig was in KE\n lserder = Serder(raw=lraw) # deserialize event raw\n\n if not lserder.compare(dig=ked[\"d\"]): # stale receipt at sn discard\n raise ValidationError(\"Stale receipt at sn = {} for rct = {}.\"\n \"\".format(ked[\"s\"], ked))\n\n # retrieve dig of last event at sn of receipter.\n sdig = self.db.getKeLast(key=snKey(pre=seal.i, sn=int(seal.s, 16)))\n if sdig is None:\n # receipter's est event not yet in receipter's KEL\n # receipter's seal event not in receipter's KEL\n self.escrowVREvent(serder, sigers, seal, dig=ked[\"d\"])\n raise UnverifiedTransferableReceiptError(\"Unverified receipt: \"\n \"missing establishment event of transferable \"\n \"validator, receipt={}.\".format(ked))\n\n # retrieve last event itself of receipter\n sraw = self.db.getEvt(key=dgKey(pre=seal.i, dig=bytes(sdig)))\n # assumes db ensures that sraw must not be none because sdig was in KE\n sserder = Serder(raw=bytes(sraw))\n if not sserder.compare(dig=seal.d): # seal dig not match event\n raise ValidationError(\"Bad chit seal at sn = {} for rct = {}.\"\n \"\".format(seal.s, ked))\n\n verfers = sserder.verfers\n if not verfers:\n raise ValidationError(\"Invalid seal est. 
event dig = {} for \"\n \"receipt from pre ={} no keys.\"\n \"\".format(seal.d, seal.i))\n\n # convert sn in seal to fully qualified SeqNumber 24 bytes, raw 16 bytes\n sealet = seal.i.encode(\"utf-8\") + Seqner(sn=int(seal.s, 16)).qb64b + seal.d.encode(\"utf-8\")\n\n for siger in sigers: # verify sigs\n if siger.index >= len(verfers):\n raise ValidationError(\"Index = {} to large for keys.\"\n \"\".format(siger.index))\n\n siger.verfer = verfers[siger.index] # assign verfer\n if siger.verfer.verify(siger.raw, lserder.raw): # verify sig\n # good sig so write receipt quadruple to database\n quadruple = sealet + siger.qb64b\n self.db.addVrc(key=dgkey, val=quadruple) # dups kept\n\n else: # escrow either receiptor or receipted event not yet in database\n self.escrowVREvent(serder, sigers, seal, dig=ked[\"d\"])\n raise UnverifiedTransferableReceiptError(\"Unverified receipt: \"\n \"missing associated event for transferable \"\n \"validator receipt={}.\".format(ked))", "def create_landlord_invoice(self):\n if self.tenancy_id.is_landlord_rent:\n account_jrnl_obj = self.env['account.journal'].search(\n [('type', '=', 'purchase')], limit=1)\n inv_lines_values = {\n # 'origin': 'tenancy.rent.schedule',\n 'name': 'Rent Cost for' + self.tenancy_id.name,\n 'quantity': 1,\n 'price_unit': self.amount or 0.00,\n 'account_id':\n self.tenancy_id.property_id.account_depreciation_expense_id.id or False,\n 'analytic_account_id': self.tenancy_id.id or False,\n }\n owner_rec = self.tenancy_id.property_owner_id\n invo_values = {\n 'partner_id': self.tenancy_id.property_owner_id.id or False,\n 'type': 'in_invoice',\n 'invoice_line_ids': [(0, 0, inv_lines_values)],\n 'property_id': self.tenancy_id.property_id.id or False,\n 'invoice_date': self.start_date or False,\n # 'account_id': owner_rec.property_account_payable_id.id,\n # 'schedule_id': self.id,\n 'new_tenancy_id': self.tenancy_id.id,\n 'journal_id': account_jrnl_obj.id or False\n }\n\n acc_id = self.env['account.move'].with_context({'default_type': 'in_invoice'}).create(invo_values)\n self.write({'invc_id': acc_id.id, 'inv': True})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': self._context,\n }", "def post(self, amount, other_account, description, self_memo=\"\", other_memo=\"\", datetime=None):\r\n\r\n #Note: debits are always positive, credits are always negative. They should be negated before displaying\r\n #(expense and liability?) 
accounts\r\n tx = self._new_transaction()\r\n\r\n if datetime:\r\n tx.t_stamp = datetime\r\n #else now()\r\n\r\n tx.description = description\r\n tx.save()\r\n\r\n a1 = self._make_ae(self._DEBIT_IN_DB()*amount, self_memo, tx)\r\n a1.save()\r\n a2 = other_account._make_ae(-self._DEBIT_IN_DB()*amount, other_memo, tx)\r\n a2.save()\r\n\r\n return (a1,a2)", "def action_budget_create(self, cr, uid, ids, context=None):\n payment_term_obj = self.pool.get('account.payment.term')\n for porder in self.browse(cr, uid, ids, context=context):\n period = self.pool.get('account.period').find(cr,uid,porder.date_order, context = context)[0] \n result = []\n confirmation_dict={\n 'reference': porder.name,\n 'period_id': period,\n 'partner_id':porder.partner_id.id,\n 'amount': porder.amount_total,\n 'note':'',\n 'date':porder.date_order,\n 'type':'purchase'}\n\n for line in porder.order_line:\n confirmation_ids=[]\n account_id = self._choose_account_from_po_line(cr, uid, line, context=context)\n notes = _(\"Purchase Approval: %s \\nDescription: %s.\\nDate: %s \\nProducts: %s \") % (porder.name , porder.notes , porder.date_order , line.name )\n\n result= payment_term_obj.compute(cr, \n uid, porder.payment_term_id.id, line.price_subtotal,porder.date_order or False, context=context)\n for r in result:\n confirmation_dict.update(\n {'date':r[0],\n 'amount':r[1],\n 'note':notes,\n 'name':'/',\n 'general_account_id': account_id,\n 'account_analytic_id': line.account_analytic_id.id or False,\n })\n confirmation_id = self.pool.get('account.budget.confirmation').create(cr, uid, confirmation_dict)\n confirmation_ids.append(confirmation_id)\n line.write({'confirmation_ids':[(6, 0, confirmation_ids)] ,'state': 'waiting_budget'})\n self.write(cr, uid, ids, {'state': 'waiting_budget'})\n return True" ]
[ "0.6029652", "0.5823393", "0.57141507", "0.5677925", "0.56722486", "0.5605178", "0.55741674", "0.5452405", "0.5438415", "0.5426153", "0.53734416", "0.5366408", "0.5342838", "0.53389865", "0.5308819", "0.5275973", "0.5271341", "0.52083504", "0.51759064", "0.51672345", "0.5151666", "0.5123002", "0.51097095", "0.50944513", "0.5059341", "0.5058299", "0.5057641", "0.5057458", "0.5045237", "0.5041119" ]
0.6805315
0
Returns the edit distance of x to y, where x and y are two strings. The edit distance is the minimum number of operations, among insertion, deletion and substitution, that we need to turn x into y. If return_matrix = True, the matrix used to calculate the edit distance is returned, instead of the edit distance. This algorithm uses a dynamic programming solution.
def min_edit_distance(x: str, y: str, return_matrix: bool = False) -> object: m = _get_edit_distance_matrix(x, y) for i in range(1, len(x) + 1): for j in range(1, len(y) + 1): # How do we obtain the m[i][j] value? # We need to look at three positions while iterating: # 1. m[i - 1][j -1] # 2. m[i][j - 1] # 3. m[i - 1][j] # x[i - 1] and y[j - 1] are the characters. # Note: i and j start from 1. # If the characters are equal, we don't need to perform any of the # operations: insertion, deletion or substitution, and the minimum # edit distance to convert x[i - 1] to y[j - 1] is the same as the # one to convert x[i] to s[j], because, as stated above, x[i - 1] # and y[j - 1] are equal, so we don't have to perform any other # operation. if x[i - 1] == y[j - 1]: m[i][j] = m[i - 1][j - 1] else: m[i][j] = min(m[i - 1][j - 1] + 1, m[i - 1] [j] + 1, m[i][j - 1] + 1) return m[len(x)][len(y)] if not return_matrix else m
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_distance(x, y):\n\n global recursion_depth\n global num_function_calls\n recursion_depth += 1\n num_function_calls += 1\n indent = \" \" * recursion_depth\n print(\"%sBEGIN edit_distance(\\\"%s\\\", \\\"%s\\\")\" % (indent, x, y))\n n = len(x)\n m = len(y)\n if n == 0:\n ed = m\n elif m == 0:\n ed = n\n else:\n ed1 = edit_distance(x, y[0:m-1]) + 1\n ed2 = edit_distance(x[0:n-1], y) + 1\n ed3 = edit_distance(x[0:n-1], y[0:m-1]) + (1 if x[-1] != y[-1] else 0)\n ed = min(ed1, ed2, ed3)\n print(\"%sEND edit_distance(\\\"%s\\\", \\\"%s\\\")\" % (indent, x, y))\n recursion_depth -= 1\n return ed", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n else:\n if str1[row - 1] == str2[col - 1]:\n dp_table[row][col] = dp_table[row - 1][col - 1]\n else:\n replace = dp_table[row - 1][col - 1]\n insert = dp_table[row][col - 1]\n delete = dp_table[row - 1][col]\n dp_table[row][col] = min(replace, insert, delete) + 1\n \n return dp_table[rows-1][cols-1]", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n \n elif str1[row-1] == str2[col-1]:\n dp_table[row][col] = dp_table[row-1][col-1]\n \n else:\n insert = dp_table[row-1][col]\n delete = dp_table[row][col-1]\n replace = dp_table[row-1][col-1]\n \n dp_table[row][col] = min(insert, delete, replace) + 1\n\n print(dp_table)\n return dp_table[-1][-1]", "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]", "def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix", "def edit_distance(s1: str, s2: str) -> int:\n # dp[a][b] is the edit distance between s1[:a] and s2[:b]\n dp = [[0 for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n dp[i][j] = 0\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n # The two base cases: the empty string compared to another string\n # alway has the edit distance of the length of the other string,\n # because you just insert all of the characters from the other\n # string\n if i == 0:\n dp[i][j] = j\n elif j == 0:\n dp[i][j] = i\n # If the characters are equal, we don't add anything to the edit\n # distance\n elif s1[i - 1] == s2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n # We have 3 cases when the characters aren't equal: we have an\n # insertion, a deletion, or a substitution.\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1],\n dp[i - 1][j - 1]) + 1\n print(dp)\n return dp[-1][-1]", "def distances(a, b):\n # generating matrix\n matrix = [[(0, None) for x in range(len(b) + 1)] for y in range(len(a) + 1)]\n\n # base case\n for i in range(1, 
len(a) + 1):\n matrix[i][0] = (i, Operation.DELETED)\n for j in range(1, len(b) + 1):\n matrix[0][j] = (j, Operation.INSERTED)\n\n # fill in matrix with tuples (cost, operation)\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n # edit distance algorithm\n # costs for deletion, insertion and substitution\n delete_cost = matrix[i - 1][j][0] + 1\n insert_cost = matrix[i][j - 1][0] + 1\n substitute_cost = matrix[i - 1][j - 1][0]\n if a[i - 1] != b[j - 1]:\n substitute_cost += 1\n\n # edit distance is min cost of deletion, insertion, substitution\n if delete_cost < insert_cost and delete_cost < substitute_cost:\n matrix[i][j] = (delete_cost, Operation.DELETED)\n elif insert_cost < substitute_cost:\n matrix[i][j] = (insert_cost, Operation.INSERTED)\n else:\n matrix[i][j] = (substitute_cost, Operation.SUBSTITUTED)\n\n return matrix", "def editDistance(l1, l2):\n cache = [[None for i in range(len(l2) + 1)] for j in range(len(l1) + 1)]\n \n for row in range(len(l1) + 1):\n for col in range(len(l2) + 1):\n if row == 0 and col == 0:\n cache[row][col] = 0\n elif col == 0:\n cache[row][col] = row\n elif row == 0:\n cache[row][col] = col\n elif l1[row - 1] == l2[col - 1]:\n cache[row][col] = cache[row - 1][col - 1]\n else:\n a = cache[row - 1][col]\n b = cache[row][col - 1]\n c = cache[row - 1][col - 1]\n cache[row][col] = min(a, b, c) + 1\n\n return findResult(l1, l2, cache)", "def edit_distance(s1, s2, transpositions=False):\r\n # set up a 2-D array\r\n len1 = len(s1)\r\n len2 = len(s2)\r\n lev = _edit_dist_init(len1 + 1, len2 + 1)\r\n\r\n # iterate over the array\r\n for i in range(len1):\r\n for j in range(len2):\r\n _edit_dist_step(lev, i + 1, j + 1, s1, s2, transpositions=transpositions)\r\n return lev[len1][len2]", "def edit_distance_dp(str_1, m, str_2, n):\n # table for storing sub-problems\n sub = [[0 for i in range(n + 1)] for j in range(m + 1)] # padded for empty cases\n\n # fill table\n for i in range(m + 1):\n for j in range(n + 1):\n if i == 0:\n # str_1 is empty, or we have not selected any substring of it\n sub[i][j] = j # the difference is all of str_2, equivalent to len(str_2) = j removals\n elif j == 0:\n # str_2 is empty, or we have not selected any substring of it\n sub[i][j] = i # the difference is all of str_1, equivalent to len(str_1_ = i removals\n elif str_1[i - 1] == str_2[j - 1]:\n # last chars are equal, so no edits needed; continue for sub-problems\n sub[i][j] = sub[i - 1][j - 1]\n else:\n # last chars are not equal, solve for all 3 subproblems\n insert_char = sub[i][j - 1]\n remove_char = sub[i - 1][j]\n replace_char = sub[i - 1][j - 1]\n sub[i][j] = 1 + min(insert_char, remove_char, replace_char)\n\n return sub[m][n] # solution lies in last cell", "def edit_distance(str1, str2):\n\n if not str1:\n return len(str2)\n if not str2:\n return len(str1)\n\n DP = [[-1 for __ in str2] for ___ in str1]\n DP[0][0] = 0 if str1[0] == str2[0] else 1\n\n\n for x, let1 in enumerate(str1):\n startat = 0\n if x == 0:\n startat = 1\n for y, let2 in enumerate(str2[startat:], startat):\n minimum = float('inf')\n if x != 0:\n minimum = min(DP[x-1][y] + 1, minimum)\n if y != 0:\n minimum = min(DP[x-1][y-1] + (0 if let1 == let2 else 1), minimum)\n if y != 0:\n minimum = min(DP[x][y-1] + 1, minimum)\n\n DP[x][y] = minimum\n\n return DP[len(str1) - 1][len(str2) - 1]", "def edit_distance_dp(str1, str2):\r\n rows = len(str1) + 1\r\n cols = len(str2) + 1\r\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\r\n\r\n # TODO: Fill in the table using a nested for loop.\r\n\r\n 
return dp_table[rows-1][cols-1]", "def extended_min_edit_distance(x: str, y: str) -> tuple:\n m = _get_edit_distance_matrix(x, y)\n\n o = _get_coordinates_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n\n coordinates = (i - 1, j - 1)\n\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n _min = -1\n if m[i][j - 1] + 1 < m[i - 1][j] + 1:\n _min = m[i][j - 1] + 1\n coordinates = (i, j - 1)\n else:\n _min = m[i - 1][j] + 1\n coordinates = (i - 1, j)\n\n if m[i - 1][j - 1] + 1 < _min:\n _min = m[i - 1][j - 1] + 1\n coordinates = (i - 1, j - 1)\n\n m[i][j] = _min\n o[i][j] = coordinates\n\n return m[len(x)][len(y)], o", "def get_levenshtein_distance(a, b):\r\n n, m = len(a), len(b)\r\n if n > m:\r\n # Make sure n <= m, to use O(min(n,m)) space\r\n a, b = b, a\r\n n, m = m, n\r\n current_row = range(n+1) # Keep current and previous row, not entire matrix\r\n\r\n for i in range(1, m+1):\r\n previous_row, current_row = current_row, [i]+[0]*n\r\n for j in range(1, n+1):\r\n add, delete, change = previous_row[j]+1, current_row[j-1]+1, previous_row[j-1]\r\n if a[j-1] != b[i-1]:\r\n change += 1\r\n current_row[j] = min(add, delete, change)\r\n return current_row[n]", "def editing_distance(str1: str, str2: str) -> int:\r\n if not str1 and not str2:\r\n return 0\r\n if not str1:\r\n return len(str2)\r\n if not str2:\r\n return len(str1)\r\n if str1[0] == str2[0]:\r\n return min(editing_distance(str1[1::], str2[1::]), 1 + editing_distance(str1, str2[1::]),\r\n 1 + editing_distance(str1[1::], str2))", "def edit_distance(str1, str2):\n if len(str1) == 0 or len(str2) == 0:\n return max(len(str1), len(str2))\n if str1[-1] == str2[-1]:\n return edit_distance(str1[:-1], str2[:-1])\n insert = edit_distance(str1, str2[:-1])\n delete = edit_distance(str1[:-1], str2)\n replace = edit_distance(str1[:-1], str2[:-1])\n return min(insert, delete, replace) + 1", "def line_edits(s1, s2):\n l1 = s1.splitlines()\n l2 = s2.splitlines()\n \n result = editDistance(l1, l2)\n \n result = result[::-1]\n \n return result", "def edit_distance(str_1, str_2):\n return edit_distance_dp(str_1, len(str_1), str_2, len(str_2))", "def distance(a: object, b: object) -> object:\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a, b = b, a\n n, m = m, n\n\n current_row = range(n + 1) # Keep current and previous row, not entire matrix\n for i in range(1, m + 1):\n previous_row, current_row = current_row, [i] + [0] * n\n for j in range(1, n + 1):\n add, delete, change = previous_row[j] + 1, current_row[j - 1] + 1, previous_row[j - 1]\n if a[j - 1] != b[i - 1]:\n change += 1\n current_row[j] = min(add, delete, change)\n\n return current_row[n]", "def leveinshtein_distance(source,target):\r\n\t#Step 1\r\n\ts_len=len(source)\r\n\tt_len=len(target)\r\n\tcost=0\r\n\tif(s_len==0):\r\n\t\treturn t_len\r\n\tif(t_len==0):\r\n\t\treturn s_len\r\n\tprint(\"Dimensions:\\n\\tN:%d\\n\\tM:%d\"%(s_len,t_len))\r\n\t#Step 2\r\n\tmatrix=[[0 for _ in range(0,t_len+1)] for _ in range(0, s_len+1)]\r\n\t#Initialize first row 0..s_len\r\n\tfor idx in range(0,s_len+1):\r\n\t\tmatrix[idx][0]=idx\r\n\t#Initialize the first column 0..t_len\r\n\tfor idx in range(0, t_len+1):\r\n\t\tmatrix[0][idx]=idx\r\n\tprint(\"===Original===\")\r\n\tprint_matrix(matrix,source,target)\r\n\t#Step 3\r\n\tfor i in range(1,s_len+1):\r\n\t\tch=source[i-1]\r\n\t\t#print(ch)\r\n\t\t#Step 4\r\n\t\tfor j in range(1,t_len+1):\r\n\t\t\t#print(\">%s\"%target[j-1])\r\n\t\t\t#Step 5\r\n\t\t\tif 
ch==target[j-1]:\r\n\t\t\t\tcost=0\r\n\t\t\telse:\r\n\t\t\t\tcost=1\r\n\t\t\t#Step 6\r\n\t\t\t\r\n\t\t\t#print(\"(i,j)=>(%d,%d)\"%(i,j))\r\n\t\t\t#print(matrix[i][j])\r\n\t\t\tmatrix[i][j]=minimum(\r\n\t\t\t\tmatrix[i-1][j]+1,\r\n\t\t\t\tmatrix[i][j-1]+1,\r\n\t\t\t\tmatrix[i-1][j-1]+cost\r\n\t\t\t)\r\n\tprint(\"===Final Matrix===\")\r\n\tprint_matrix(matrix,source,target)\r\n\treturn matrix[s_len-1][t_len-1]", "def edit_distance(str1, str2):\r\n pass", "def calculate_edit_distance(str1, str2, pos1, pos2):\n \n result = None\n \n # If either of the strings is an empty string, return the length\n # of the other string. \n if pos1 == 0:\n result = pos2\n elif pos2 == 0:\n result = pos1\n \n # Check if the last character of the strings are identical. If\n # they are, move on to the next character.\n elif str1[pos1-1] == str2[pos2-1]:\n result = calculate_edit_distance(str1, str2, pos1-1, pos2-1)\n\n # If the last characters are not the same, one character is\n # different between these two strings at the pos 1 and 2. Move on\n # to the next character, and add one to the distance.\n else:\n # Iteratively, find which case holds true. The options are:\n # - insertion in string1\n # - deletion in string1\n # - substitution between strings 1 and 2 at pos1 and pos2.\n # Choose the minimum of the three cases.\n result = 1 + min(calculate_edit_distance(str1, str2, pos1, pos2-1),\n calculate_edit_distance(str1, str2, pos1-1, pos2),\n calculate_edit_distance(str1, str2, pos1-1, pos2-1))\n \n return result", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def minDistance(self, word1: str, word2: str) -> int: \n len_one = len(word1)\n len_two = len(word2)\n\n # Create matrix which will keep a running count of the minimum number\n # of edits needed \n dp = [[0 for c in range(len_one + 1)] for r in range(len_two + 1)]\n\n # In this case, the rows correspond to the letters of word2\n # while the columns correspond to the letters of word1\n for i in range(0, len_two + 1):\n for j in range(0, len_one + 1):\n # The first row column should just be a linear increasing\n # function of j. It is the equivalent of saying starting \n # from nothing, how many edits must be made to have a string\n # of length j\n if j == 0:\n dp[i][j] = i\n # Same for i. 
See the example matrix.\n elif i == 0:\n dp[i][j] = j\n else:\n # need i - 1 and j - 1, otherwise an index errror will occur.\n # Remember that our matrix is of size len_one + 1)] for r in range(len_two + 1\n if word2[i - 1] == word1[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1])\n\n return dp[-1][-1]", "def edit_distance(left, right):\n similarities = np.zeros((len(left) + 1, len(right) + 1), dtype=np.int32)\n similarities[:, 0] = range(len(left) + 1)\n similarities[0, :] = range(len(right) + 1)\n\n for l in range(1, len(left) + 1):\n for r in range(1, len(right) + 1):\n sub_cost = 0 if left[l - 1] == right[r - 1] else 1\n similarities[l][r] = min(similarities[l - 1][r] + 1,\n similarities[l][r - 1] + 1,\n similarities[l - 1][r - 1] + sub_cost)\n return similarities[len(left), len(right)]", "def distance_matrix(self, x, y, keyboard_weight=None):\r\n # create distance matrix\r\n size_x = len(x) + 1\r\n size_y = len(y) + 1\r\n dist_matrix = np.zeros((size_x, size_y))\r\n for i in range(size_x):\r\n dist_matrix[i, 0] = i\r\n for j in range(size_y):\r\n dist_matrix[0, j] = j\r\n\r\n ## fill distance matrix\r\n # no keyboard weight\r\n if not keyboard_weight:\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n subs = dist_matrix[i-1, j-1] + 1\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # manhattan keyboard weight\r\n elif keyboard_weight == \"manhattan\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.manhattan_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # euclidean keyboard weight\r\n elif keyboard_weight == \"euclidean\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.euclidean_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n \r\n return dist_matrix", "def edit_distance(str1, str2, reconstruct_answer=False, method=alignments.Levinshtein(),\n swap_case_on_mismatch=True):\n method = alignments.Levinshtein() if method is None else method\n return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)", "def distance(self, x, y, keyboard_weight=None):\r\n dist_matrix = self.distance_matrix(x, y, keyboard_weight)\r\n return dist_matrix[-1, -1]", "def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n 
insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]", "def dameraulevenshtein(seq1, seq2):\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]" ]
[ "0.72378236", "0.68309796", "0.6778071", "0.6751621", "0.6589623", "0.6551195", "0.6455476", "0.6449539", "0.64448684", "0.6433347", "0.6388333", "0.6365304", "0.6345468", "0.63007766", "0.61905324", "0.61462915", "0.6144506", "0.61297476", "0.60540056", "0.60415614", "0.6036047", "0.5982973", "0.5933078", "0.59289825", "0.5925237", "0.58530843", "0.5840588", "0.58294874", "0.58256525", "0.58223283" ]
0.80208546
0
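
A minimal standalone sketch of the unit-cost dynamic-programming edit distance that the query and document of the row above describe; the function name, the explicit base-case loops, and the "kitten"/"sitting" check are illustrative additions, not part of the dataset row.

def edit_distance(x: str, y: str) -> int:
    # m[i][j] holds the edit distance between the prefixes x[:i] and y[:j].
    m = [[0] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i in range(len(x) + 1):
        m[i][0] = i          # turning x[:i] into "" takes i deletions
    for j in range(len(y) + 1):
        m[0][j] = j          # turning "" into y[:j] takes j insertions
    for i in range(1, len(x) + 1):
        for j in range(1, len(y) + 1):
            if x[i - 1] == y[j - 1]:
                m[i][j] = m[i - 1][j - 1]           # characters match: no operation needed
            else:
                m[i][j] = 1 + min(m[i - 1][j - 1],  # substitution
                                  m[i - 1][j],      # deletion from x
                                  m[i][j - 1])      # insertion into x
    return m[len(x)][len(y)]

assert edit_distance("kitten", "sitting") == 3
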
Returns a tuple whose first item is the minimum edit distance, and the second item is a list of lists containing the instructions (in the "language of coordinates") to convert a string to another.
def extended_min_edit_distance(x: str, y: str) -> tuple: m = _get_edit_distance_matrix(x, y) o = _get_coordinates_matrix(x, y) for i in range(1, len(x) + 1): for j in range(1, len(y) + 1): coordinates = (i - 1, j - 1) if x[i - 1] == y[j - 1]: m[i][j] = m[i - 1][j - 1] else: _min = -1 if m[i][j - 1] + 1 < m[i - 1][j] + 1: _min = m[i][j - 1] + 1 coordinates = (i, j - 1) else: _min = m[i - 1][j] + 1 coordinates = (i - 1, j) if m[i - 1][j - 1] + 1 < _min: _min = m[i - 1][j - 1] + 1 coordinates = (i - 1, j - 1) m[i][j] = _min o[i][j] = coordinates return m[len(x)][len(y)], o
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_distance(str1, str2):\r\n pass", "def build_min_edit_instructions(x: str, y: str, o: list) -> list:\n\n instructions = [] # List for the instructions.\n\n c = (len(x), len(y)) # Initial coordinates for o.\n\n while c != (0, 0):\n # 3 cases:\n # 1. Go diagonally (to the left) => replace, if characters are different\n # 2. Go left => remove from the first string\n # 3. Go up => remove from the second string\n\n next_c = o[c[0]][c[1]]\n\n # Case 1.\n if next_c[0] < c[0] and next_c[1] < c[1]:\n\n if x[c[0] - 1] != y[c[1] - 1]:\n instructions.append(\"Replace char at index \" + str(c[0] - 1) +\n \" (\" + x[c[0] - 1] + \") from '\" + x +\n \"' with char at index \" + str(c[1] - 1) +\n \" (\" + y[c[1] - 1] + \") from '\" + y + \"'.\")\n # Case 3.\n elif next_c[0] == c[0] and next_c[1] < c[1]:\n instructions.append(\"Insert into '\" + x + \"' at index \" +\n str(c[1] - 1) + \" char at index \" +\n str(c[1] - 1) + \" (\" + y[c[1] - 1] + \") from '\"\n + y + \"'.\")\n\n # Case 2.\n else: # next_c[0] < c[0] and next_c[1] == c[1]\n instructions.append(\"Delete from '\" + x + \"' char at index \" +\n str(c[0] - 1) + \" (\" + x[c[0] - 1] + \").\")\n\n c = next_c\n\n instructions.reverse()\n return instructions", "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]", "def minimum_edit_distance(seq1,seq2):\n if len(seq1) > len(seq2):\n seq1,seq2 = seq2,seq1\n distances = range(len(seq1) + 1)\n for index2,char2 in enumerate(seq2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(seq1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]", "def tile_coordinates(text):\n UL = (text[1]), (text[2]) # Upper Left\n UR = (text[3]), (text[2]) # Upper Right\n LR = (text[3]), (text[4]) # Lower Right\n LL = (text[1]), (text[4]) # Lower Left\n coordinates = (UL, UR, LR, LL)\n return text[0], [tuple(float(x) for x in xs) for xs in coordinates]", "def line_edits(s1, s2):\n l1 = s1.splitlines()\n l2 = s2.splitlines()\n \n result = editDistance(l1, l2)\n \n result = result[::-1]\n \n return result", "def min_edit_distance(x: str, y: str, return_matrix: bool = False) -> object:\n m = _get_edit_distance_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n # How do we obtain the m[i][j] value?\n # We need to look at three positions while iterating:\n # 1. m[i - 1][j -1]\n # 2. m[i][j - 1]\n # 3. 
m[i - 1][j]\n\n # x[i - 1] and y[j - 1] are the characters.\n\n # Note: i and j start from 1.\n\n # If the characters are equal, we don't need to perform any of the\n # operations: insertion, deletion or substitution, and the minimum\n # edit distance to convert x[i - 1] to y[j - 1] is the same as the\n # one to convert x[i] to s[j], because, as stated above, x[i - 1]\n # and y[j - 1] are equal, so we don't have to perform any other\n # operation.\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n m[i][j] = min(m[i - 1][j - 1] + 1, m[i - 1]\n [j] + 1, m[i][j - 1] + 1)\n\n return m[len(x)][len(y)] if not return_matrix else m", "def edit_distance(str_1, str_2):\n return edit_distance_dp(str_1, len(str_1), str_2, len(str_2))", "def edit_distance(str1, str2):\n\n if not str1:\n return len(str2)\n if not str2:\n return len(str1)\n\n DP = [[-1 for __ in str2] for ___ in str1]\n DP[0][0] = 0 if str1[0] == str2[0] else 1\n\n\n for x, let1 in enumerate(str1):\n startat = 0\n if x == 0:\n startat = 1\n for y, let2 in enumerate(str2[startat:], startat):\n minimum = float('inf')\n if x != 0:\n minimum = min(DP[x-1][y] + 1, minimum)\n if y != 0:\n minimum = min(DP[x-1][y-1] + (0 if let1 == let2 else 1), minimum)\n if y != 0:\n minimum = min(DP[x][y-1] + 1, minimum)\n\n DP[x][y] = minimum\n\n return DP[len(str1) - 1][len(str2) - 1]", "def edit_distance(s1: str, s2: str) -> int:\n # dp[a][b] is the edit distance between s1[:a] and s2[:b]\n dp = [[0 for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n dp[i][j] = 0\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n # The two base cases: the empty string compared to another string\n # alway has the edit distance of the length of the other string,\n # because you just insert all of the characters from the other\n # string\n if i == 0:\n dp[i][j] = j\n elif j == 0:\n dp[i][j] = i\n # If the characters are equal, we don't add anything to the edit\n # distance\n elif s1[i - 1] == s2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n # We have 3 cases when the characters aren't equal: we have an\n # insertion, a deletion, or a substitution.\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1],\n dp[i - 1][j - 1]) + 1\n print(dp)\n return dp[-1][-1]", "def retupleize_geo_strings(value):\n if not value:\n return value\n elif \"(\" not in value:\n return value\n try:\n # Is this a dirty, dirty hack, or inspiration?\n # Location is retrieved as a string from the database\n # The alternative is to retrieve and process the\n # entire activity dataset...\n return eval(value)\n except NameError:\n # Not everything with a parentheses is a tuple.\n return value", "def edit_distance_dp(str1, str2):\r\n rows = len(str1) + 1\r\n cols = len(str2) + 1\r\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\r\n\r\n # TODO: Fill in the table using a nested for loop.\r\n\r\n return dp_table[rows-1][cols-1]", "def edit_distance(s1, s2, transpositions=False):\r\n # set up a 2-D array\r\n len1 = len(s1)\r\n len2 = len(s2)\r\n lev = _edit_dist_init(len1 + 1, len2 + 1)\r\n\r\n # iterate over the array\r\n for i in range(len1):\r\n for j in range(len2):\r\n _edit_dist_step(lev, i + 1, j + 1, s1, s2, transpositions=transpositions)\r\n return lev[len1][len2]", "def calculate_edit_distance(str1, str2, pos1, pos2):\n \n result = None\n \n # If either of the strings is an empty string, return the length\n # of the other string. 
\n if pos1 == 0:\n result = pos2\n elif pos2 == 0:\n result = pos1\n \n # Check if the last character of the strings are identical. If\n # they are, move on to the next character.\n elif str1[pos1-1] == str2[pos2-1]:\n result = calculate_edit_distance(str1, str2, pos1-1, pos2-1)\n\n # If the last characters are not the same, one character is\n # different between these two strings at the pos 1 and 2. Move on\n # to the next character, and add one to the distance.\n else:\n # Iteratively, find which case holds true. The options are:\n # - insertion in string1\n # - deletion in string1\n # - substitution between strings 1 and 2 at pos1 and pos2.\n # Choose the minimum of the three cases.\n result = 1 + min(calculate_edit_distance(str1, str2, pos1, pos2-1),\n calculate_edit_distance(str1, str2, pos1-1, pos2),\n calculate_edit_distance(str1, str2, pos1-1, pos2-1))\n \n return result", "def stringTimeToTuple_NEW(st):\n st, ms = split(st, '.')\n y, m, d, h, n, s = split(st, '_')\n return y,m,d,h,n,s,ms", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n else:\n if str1[row - 1] == str2[col - 1]:\n dp_table[row][col] = dp_table[row - 1][col - 1]\n else:\n replace = dp_table[row - 1][col - 1]\n insert = dp_table[row][col - 1]\n delete = dp_table[row - 1][col]\n dp_table[row][col] = min(replace, insert, delete) + 1\n \n return dp_table[rows-1][cols-1]", "def _point_as_tuple(input_string: str) -> _Tuple[float]:\n out = tuple(float(coordinate) for coordinate in input_string.split(','))\n if len(out) == DIMENSIONS:\n return out\n raise TypeError", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def _prepare_to_convert(coordinates: str) -> tuple:\n degrees, minutes, seconds = True, True, True\n\n if coordinates == coordinates.replace(\"°\", \" \"): degrees = False\n if coordinates == coordinates.replace(\"′\", \" \"): minutes = False\n if coordinates == coordinates.replace(\"″\", \" \"): seconds = False\n\n coordinates = coordinates.replace(\"°\", \" \").replace(\"′\", \" \").replace(\"″\", \" \").split(\" \")\n del (coordinates[-1])\n\n if seconds is False: coordinates.append(0)\n if minutes is False: coordinates.insert(0, 1)\n if degrees is False: coordinates.insert(0, 0)\n\n for i in range(len(coordinates)):\n coordinates[i] = float(coordinates[i])\n return tuple(coordinates)", "def __coding_coordinate(self):\n region1 = self.long_side_len\n region2 = self.short_side_len\n length = len(self.seq)\n if self.direction == '+':\n a_s = 0\n a_e = region2\n b_s = self.length - region1\n b_e = self.length - 1\n elif self.direction == '-':\n a_s = 0\n a_e = region1\n b_s = self.length - region2\n b_e = self.length - 1\n return (a_s, a_e, b_s, b_e)", "def _AN_to_coords(self, move: str):\n\n orig_move = move\n\n extra_info = \"\"\n\n # remove all characters that don't matter when parsing\n for pointless_char in \"x+#\":\n move = move.replace(pointless_char, \"\")\n\n # Handle castling\n if CASTLE_QUEENSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 2), CASTLE_QUEENSIDE\n elif CASTLE_KINGSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 6), CASTLE_KINGSIDE\n\n # Pawn promotion\n if move[-2] == \"=\":\n extra_info = move[-1] if self.white_to_move else move[-1].lower()\n move = move[:-2]\n\n # 
Destination of move, this is the only guaranteed substring in the move\n dest_str = move[-2:]\n dest = State._EAN_coords_to_board_coords(dest_str)\n move = move[:-2]\n\n # Deduce what piece actually made the move, if there is no shown there is no pawn\n # Note in AN pieces are always uppercase and location is lowercase,\n # so this makes it simple to check if we have a piece or a location\n piece = \"P\"\n if move and move[0].isupper():\n piece = move[0]\n move = move[1:]\n if not self.white_to_move:\n piece = piece.lower()\n\n # At this point the only info the move should contain is a hint on where the piece is coming from\n loc_hint = move\n\n possible_moves = self.get_all_moves()\n possible_moves = filter(lambda x: dest_str in x, possible_moves) # Filter to only moves that land on the right destination\n possible_moves = list(filter(lambda x: loc_hint in x[0:2], possible_moves)) # Filter to only moves that match the hint in the algebraic notation\n for possible_move in possible_moves:\n row, col = State._EAN_coords_to_board_coords(possible_move[0:2])\n if self.board[row][col] == piece:\n return (row, col), dest, extra_info\n\n raise ValueError(\"Algebraic notation parsing failed, no valid move found matching the given move \" + orig_move\n + \" with board state\\n\" + str(self))", "def __getCoordinateListForString(self, string, firstCharacter):\r\n coordinateList = []\r\n charactersEntered = 0\r\n breakReason = None\r\n\r\n statusDict = self.getVKBStatus()\r\n\r\n for i in range(len(string)):\r\n # Get coordinates for keys as long the case needs to be changed. FIXME! add switchCase to loop\r\n # no need to care about case in Email layout with other than alphabet characters\r\n if string[i] in self.__layoutMaps[self.currentLayout].keys() and \\\r\n (self.__layoutMaps[self.currentLayout][string[i]][1]&self.currentCase):\r\n coords, case, repeat = self.__layoutMaps[self.currentLayout][string[i]]\r\n if self.currentLayout&self.LAYOUT_ITUT and len(coordinateList) and coords==coordinateList[-1]:\r\n breakReason = 'delay'\r\n break # To add some delay between characters on same button\r\n for r in range(repeat):\r\n coordinateList.append(coords)\r\n if self.currentCase==self.CASE_UPPER and not self.capsLockOn:\r\n self.currentCase = self.CASE_LOWER\r\n charactersEntered +=1\r\n # In common-noun mode, VKB changes to uppercase after dot character so break loop.\r\n if string[i]=='.' and statusDict['minor-mode']=='common-noun' and not self.capsLockOn:\r\n self.currentCase = self.CASE_UPPER\r\n break\r\n else:\r\n breakReason = 'case'\r\n break\r\n\r\n return (coordinateList,charactersEntered,breakReason)", "def diffWaysToCompute(self, input):\n ops = {'+': lambda a,b: a+b,\n '-': lambda a, b: a-b,\n '*': lambda a, b: a*b}\n\n def _ways(string):\n res = []\n if not string:\n return res\n\n for i, c in enumerate(string):\n if c in \"+-*\":\n left = _ways(string[0:i])\n right = _ways(string[i + 1:])\n res += [ops[c](l, r) for l, r in itertools.product(left, right)]\n if not res:\n res.append(int(string))\n return res\n\n return _ways(input)", "def _parse_string_coords(*args, which='x', **kwargs):\n # NOTE: Why FixedLocator and not IndexLocator? The latter requires plotting\n # lines or else error is raised... very strange.\n # NOTE: Why IndexFormatter and not FixedFormatter? 
The former ensures labels\n # correspond to indices while the latter can mysteriously truncate labels.\n res = []\n for arg in args:\n arg = _to_arraylike(arg)\n if _is_string(arg) and arg.ndim > 1:\n raise ValueError('Non-1D string coordinate input is unsupported.')\n if not _is_string(arg):\n res.append(arg)\n continue\n idx = np.arange(len(arg))\n kwargs.setdefault(which + 'locator', mticker.FixedLocator(idx))\n kwargs.setdefault(which + 'formatter', pticker._IndexFormatter(_to_ndarray(arg))) # noqa: E501\n kwargs.setdefault(which + 'minorlocator', mticker.NullLocator())\n res.append(idx)\n return *res, kwargs", "def parseTupleList(self,string):\r\n string = string.replace(\"[\",\"\")\r\n string = string.replace(\"),\",\"*\")\r\n string = string.replace(\"(\", \"\")\r\n string = string.replace(\")\", \"\")\r\n string = string.replace(\"]\", \"\")\r\n string = string.split(\"*\")\r\n for i in xrange(len(string)):\r\n string[i] = string[i].split(\",\")\r\n for i in xrange(len(string)):\r\n for j in xrange(len(string[i])):\r\n string[i][j] = int(string[i][j])\r\n string[i] = tuple(string[i])\r\n return string", "def get_string_info(string):\n line_count = 1\n column_count = 1\n for char in string:\n if char == '\\n':\n column_count = 1\n line_count += 1\n else:\n column_count += 1\n return Coords(line_count, column_count, len(string))", "def _l_distance(first_string, second_string):\n if len(first_string) > len(second_string):\n first_string, second_string = second_string, first_string\n distances = list(range(len(first_string) + 1))\n for index2, char2 in enumerate(second_string):\n new_distances = [index2 + 1]\n for index1, char1 in enumerate(first_string):\n if char1 == char2:\n new_distances.append(distances[index1])\n else:\n new_distances.append(1 + min((distances[index1], distances[index1 + 1], new_distances[-1])))\n distances = new_distances\n return distances[-1]", "def convert_instruction(instruction: str) -> Tuple[int, int, int]:\n\t# NOOP\n\tif match := NOOP_REGEX.match(instruction):\n\t\tinstruction_type = 0\n\t# ADD\n\telif match := ADD_REGEX.match(instruction):\n\t\tinstruction_type = 1\n\t# MINUS\n\telif match := MINUS_REGEX.match(instruction):\n\t\tinstruction_type = 2\n\t# GOTO\n\telif match := GOTO_REGEX.match(instruction):\n\t\tinstruction_type = encode_label(match.group(\"TARGET\")) + 2\n\t# No match\n\telse:\n\t\traise ValueError(f\"Unrecognized instruction: {instruction}\")\n\t# get a and c from the label and variable capture groups\n\tlabel = encode_label(match.group(\"LABEL\"))\n\tvariable = encode_var(match.group(\"VAR\")) - 1\n\treturn label, instruction_type, variable", "def makepts(str):\n\tastr = str.replace(' ','').split('-')\n\tdef fromstring(strCoords):\n\t\tcoords = strCoords.split(',')\n\t\treturn [float(coords[0]), float(coords[1])]\n\treturn [ fromstring(strCoords) for strCoords in astr]", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n \n elif str1[row-1] == str2[col-1]:\n dp_table[row][col] = dp_table[row-1][col-1]\n \n else:\n insert = dp_table[row-1][col]\n delete = dp_table[row][col-1]\n replace = dp_table[row-1][col-1]\n \n dp_table[row][col] = min(insert, delete, replace) + 1\n\n print(dp_table)\n return dp_table[-1][-1]" ]
[ "0.59002924", "0.5818325", "0.58050925", "0.5697112", "0.5696955", "0.56715965", "0.5608784", "0.56067246", "0.5575531", "0.54917693", "0.54911697", "0.5451414", "0.5377977", "0.5371696", "0.53555745", "0.53307635", "0.5321868", "0.53198034", "0.5303609", "0.5267929", "0.5261673", "0.5252241", "0.5249255", "0.5238306", "0.52321035", "0.52295804", "0.5219167", "0.5197007", "0.51920664", "0.5162644" ]
0.5924839
0
Interprets the coordinates o (returned as the second item of the tuple returned by extended_min_edit_distance) and creates a comprehensible list of instructions. The indices mentioned in the instructions are with respect to the original strings x and y, and not with respect to the strings after a modification has been applied, i.e. after each instruction.
def build_min_edit_instructions(x: str, y: str, o: list) -> list:
    instructions = []  # List for the instructions.

    c = (len(x), len(y))  # Initial coordinates for o.

    while c != (0, 0):
        # 3 cases:
        # 1. Go diagonally (to the left) => replace, if characters are different
        # 2. Go left => remove from the first string
        # 3. Go up => remove from the second string

        next_c = o[c[0]][c[1]]

        # Case 1.
        if next_c[0] < c[0] and next_c[1] < c[1]:
            if x[c[0] - 1] != y[c[1] - 1]:
                instructions.append("Replace char at index " + str(c[0] - 1) +
                                    " (" + x[c[0] - 1] + ") from '" + x +
                                    "' with char at index " + str(c[1] - 1) +
                                    " (" + y[c[1] - 1] + ") from '" + y + "'.")
        # Case 3.
        elif next_c[0] == c[0] and next_c[1] < c[1]:
            instructions.append("Insert into '" + x + "' at index " +
                                str(c[1] - 1) + " char at index " +
                                str(c[1] - 1) + " (" + y[c[1] - 1] + ") from '"
                                + y + "'.")
        # Case 2.
        else:  # next_c[0] < c[0] and next_c[1] == c[1]
            instructions.append("Delete from '" + x + "' char at index " +
                                str(c[0] - 1) + " (" + x[c[0] - 1] + ").")

        c = next_c

    instructions.reverse()
    return instructions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extended_min_edit_distance(x: str, y: str) -> tuple:\n m = _get_edit_distance_matrix(x, y)\n\n o = _get_coordinates_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n\n coordinates = (i - 1, j - 1)\n\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n _min = -1\n if m[i][j - 1] + 1 < m[i - 1][j] + 1:\n _min = m[i][j - 1] + 1\n coordinates = (i, j - 1)\n else:\n _min = m[i - 1][j] + 1\n coordinates = (i - 1, j)\n\n if m[i - 1][j - 1] + 1 < _min:\n _min = m[i - 1][j - 1] + 1\n coordinates = (i - 1, j - 1)\n\n m[i][j] = _min\n o[i][j] = coordinates\n\n return m[len(x)][len(y)], o", "def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix", "def min_edit_distance(x: str, y: str, return_matrix: bool = False) -> object:\n m = _get_edit_distance_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n # How do we obtain the m[i][j] value?\n # We need to look at three positions while iterating:\n # 1. m[i - 1][j -1]\n # 2. m[i][j - 1]\n # 3. m[i - 1][j]\n\n # x[i - 1] and y[j - 1] are the characters.\n\n # Note: i and j start from 1.\n\n # If the characters are equal, we don't need to perform any of the\n # operations: insertion, deletion or substitution, and the minimum\n # edit distance to convert x[i - 1] to y[j - 1] is the same as the\n # one to convert x[i] to s[j], because, as stated above, x[i - 1]\n # and y[j - 1] are equal, so we don't have to perform any other\n # operation.\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n m[i][j] = min(m[i - 1][j - 1] + 1, m[i - 1]\n [j] + 1, m[i][j - 1] + 1)\n\n return m[len(x)][len(y)] if not return_matrix else m", "def generatecoordinates(self, x, y):\n entry = []\n for u, v in zip(x, y):\n if u == \"_\":\n entry.append(v)\n else:\n entry.append(v-1)\n\n return entry", "def list_posns(lot, x, y):\n return [position(t, x, y) for t in lot]", "def extensions(self):\n def swap(marker, mx, x2, my, y2):\n \"\"\"\n If proper conditions are met, jump over the peg depending on the\n condition\n @param marker: map, list of list\n @param mx: Original x coordinate\n @param x2: Replacement x coordinate\n @param my: Original y coordinate\n @param y2: Replacement y coordinate\n @return: list[list[str]]\n \"\"\"\n # creates a deep copy\n # each if statement checks whether to move the piece N S E W by\n # comparing the current coordinates and the new coordinates\n map = [x[:] for x in marker]\n map[my][mx], map[y2][x2] = map[y2][x2], map[my][mx]\n if my < y2:\n map[my+1][mx] = \".\"\n elif my > y2:\n map[my-1][mx] = \".\"\n elif mx < x2:\n map[my][mx+1] = \".\"\n else:\n map[my][mx-1] = \".\"\n return map\n\n def legal_move(marker, x, y, direction):\n \"\"\"\n Checks if there is a potential move at the direction of\".\"\n coordinate\n @param marker: map of the board\n @param x: x coordinate\n @param y: y coordinate\n @param direction : North South East West of the \".\"\n @return: boolean\n \"\"\"\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < 
len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == '*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False\n\n combos = []\n # For loops go through the coordinates\n # each if statement checks and appends the new scenario\n # iff there is a legal move available\n for y in range(len(self._marker)):\n for x in range(len(self._marker[0])):\n if self._marker[y][x] == '.':\n if legal_move(self._marker, x, y, 'N'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y-2), self._marker_set))\n if legal_move(self._marker, x, y, 'S'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y+2), self._marker_set))\n if legal_move(self._marker, x, y, 'W'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x-2, y, y), self._marker_set))\n if legal_move(self._marker, x, y, 'E'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x+2, y, y), self._marker_set))\n return combos", "def evolve_assuming_no_enemy_and_get_origin_and_target_and_move(self, moves_as_yx_coordinates_direction_list):\n origin_target_and_moves = []\n for location, direction in moves_as_yx_coordinates_direction_list:\n y, x = location\n if direction is STILL:\n self.strength[y, x] += self.prod[y, x]\n continue\n new_x, new_y = self.get_new_coordinates(x, y, direction)\n origin_target_and_moves.append((location, (new_y, new_x), direction))\n if self.owners[(new_y, new_x)] == self.playerID:\n self.strength[new_y, new_x] += self.strength[y, x]\n self.strength[y, x] = 0\n elif self.strength[y, x] < self.strength[new_y, new_x]:\n self.strength[new_y, new_x] -= self.strength[y, x]\n else: # site gets overtaken!\n self.strength[new_y, new_x] = self.strength[y, x] - self.strength[new_y, new_x]\n self.owners[new_y, new_x] = self.playerID\n self.strength[y, x] = 0\n if self.strength[(new_y, new_x)] > 255:\n self.strength[(new_y, new_x)] = 255\n return origin_target_and_moves", "def i_coords(self):\n ref_x = np.arange(-self.ref_w / 2, self.ref_w / 2 + 0.002, 0.002)\n\n if self.ref_shape == 'c': # Curved reflector\n dist_coords1 = [(ref_x[i], pos_on_semicircle(ref_x[i], self.R, self.c_xy)) for i in range(self.I)]\n dist_coords2 = [(ref_x[i + 1], pos_on_semicircle(ref_x[i + 1], self.R, self.c_xy)) for i in range(self.I)]\n a_i = [distance(dist_coords1[i], dist_coords2[i]) for i in range(self.I)]\n\n cx_i = [ref_x[i] + (ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n cy_i = [pos_on_semicircle(x, self.R, self.c_xy) for x in cx_i]\n i_coords = list(zip(cx_i, cy_i))\n else: # Flat reflector\n a_i = [(ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n cx_i = [ref_x[i] + (ref_x[i + 1] - ref_x[i]) / 2 for i in range(self.I)]\n i_coords = [(x, self.h) for x in cx_i]\n d = {'ref_x': ref_x, 'A_i': a_i, 'I_coords': i_coords, 'cx_i': cx_i}\n\n return d", "def _generate_relative_location_action(ui_object_list, ui_v_dist, ui_h_dist):\n action_list = []\n for object_idx, ui_object in enumerate(ui_object_list):\n if object_idx > ui_v_dist.shape[0]:\n assert False, ('ui_object_idx %d out of virtical distance bound %d' %\n (object_idx, ui_v_dist.shape[0]))\n if object_idx > ui_h_dist.shape[0]:\n assert False, ('ui_object_idx %d out of horizontal distance bound %d' %\n (object_idx, ui_h_dist.shape[0]))\n\n if _valid_clickable_object(ui_object) or _valid_typable_object(ui_object):\n neighbor_dict = _get_single_direction_neighbors(object_idx, ui_v_dist,\n ui_h_dist)\n for neighbor_context, neighbor_index in 
neighbor_dict.items():\n neighbor_object = ui_object_list[neighbor_index]\n if _valid_object_with_name(neighbor_object):\n for neighbor_context_str in neighbor_context.value:\n action_list.extend(\n _generate_relative_location_rule_action(ui_object, object_idx,\n neighbor_object,\n neighbor_context_str))\n return action_list", "def _AN_to_coords(self, move: str):\n\n orig_move = move\n\n extra_info = \"\"\n\n # remove all characters that don't matter when parsing\n for pointless_char in \"x+#\":\n move = move.replace(pointless_char, \"\")\n\n # Handle castling\n if CASTLE_QUEENSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 2), CASTLE_QUEENSIDE\n elif CASTLE_KINGSIDE in move:\n row = self._get_castling_row()\n return (row, 4), (row, 6), CASTLE_KINGSIDE\n\n # Pawn promotion\n if move[-2] == \"=\":\n extra_info = move[-1] if self.white_to_move else move[-1].lower()\n move = move[:-2]\n\n # Destination of move, this is the only guaranteed substring in the move\n dest_str = move[-2:]\n dest = State._EAN_coords_to_board_coords(dest_str)\n move = move[:-2]\n\n # Deduce what piece actually made the move, if there is no shown there is no pawn\n # Note in AN pieces are always uppercase and location is lowercase,\n # so this makes it simple to check if we have a piece or a location\n piece = \"P\"\n if move and move[0].isupper():\n piece = move[0]\n move = move[1:]\n if not self.white_to_move:\n piece = piece.lower()\n\n # At this point the only info the move should contain is a hint on where the piece is coming from\n loc_hint = move\n\n possible_moves = self.get_all_moves()\n possible_moves = filter(lambda x: dest_str in x, possible_moves) # Filter to only moves that land on the right destination\n possible_moves = list(filter(lambda x: loc_hint in x[0:2], possible_moves)) # Filter to only moves that match the hint in the algebraic notation\n for possible_move in possible_moves:\n row, col = State._EAN_coords_to_board_coords(possible_move[0:2])\n if self.board[row][col] == piece:\n return (row, col), dest, extra_info\n\n raise ValueError(\"Algebraic notation parsing failed, no valid move found matching the given move \" + orig_move\n + \" with board state\\n\" + str(self))", "def coordinates(self):\n logging.debug('Get coordinates from text')\n result = []\n blocks = self.del_comm(blocks=True)\n coor = re.compile('[FXYZ][+-]?[0-9]+(\\.[0-9]+)?')\n for line in blocks:\n coord_line = False\n comm = line.split()\n temp = []\n for c in comm:\n if c == 'G1':\n coord_line = True\n if coord_line and coor.match(c):\n temp.append(c)\n if temp:\n result.append(temp)\n return result", "def get_instructions(self) -> str:\n instructions = \"Players take turns to occupy available positions \" \\\n \"on the \" \\\n \"board. Once half or more of a ley-line has been \" \\\n \"occupied\" \\\n \"one player, that ley-line is entirely captured by \" \\\n \"said player. 
The winner is the person who captures \" \\\n \"half\" \\\n \"or more of the ley-lines first.\"\n return instructions", "def compress(script):\n\n DEBUG = 0\n \n CES = []\n which = ''\n start = -1\n stop = -1\n for edit in script:\n command, position = edit\n\n startrun = 0\n endrun = 0\n\n if which == '':\n startrun = 1\n\n elif command == 'i':\n if which == 'd':\n endrun = 1\n else:\n if stop != position - 1:\n endrun = 1\n else:\n stop = stop + 1\n\n elif command == 'd':\n if which == 'i':\n endrun = 1\n else:\n if start != position:\n endrun = 1\n else:\n stop = stop + 1\n else:\n raise \"Unknown Command\", \"Unexpected command %s\" % command\n\n if endrun:\n CES.append( ( which, start, stop ) )\n startrun = 1\n if startrun:\n which = command\n start = position\n stop = position\n\n # End last run\n if which != '':\n CES.append( ( which, start, stop ) )\n\n if DEBUG:\n print \"Compressed edit script: \"\n print CES\n\n return CES", "def ia_reflexion(data_ia, data_map):\n ia = data_ia['ia_id']\n enemy = data_ia['enemy_id']\n commands = {}\n\n new_positions = []\n moved_units = []\n\n for ia_unit in data_ia[ia]:\n unit_has_attacked = False\n unit_targets = []\n\n for enemy_unit in data_ia[enemy]:\n # Find each possible target for the Dwarves.\n if data_ia[ia][ia_unit][0] == 'D':\n if (ia_unit[0] - 1) <= enemy_unit[0] <= (ia_unit[0] + 1) and (ia_unit[1] - 1) <= enemy_unit[1] <= (ia_unit[1] + 1):\n # Add the unit to the target list.\n unit_targets.append(enemy_unit)\n\n # Find each possible target for the Elves - ATTACK\n else:\n for i in range(2):\n if (ia_unit[0] - (1 + i)) <= enemy_unit[0] <= (ia_unit[0] + (1 + i)) and (ia_unit[1] - (1 + i)) <= enemy_unit[1] <= (ia_unit[1] + (1 + i)):\n # Add the unit to the target list.\n unit_targets.append(enemy_unit)\n\n # Find the weakest units.\n if unit_targets:\n target = unit_targets[0]\n for enemy_unit in unit_targets:\n if data_ia[enemy][enemy_unit][0] == 'D' or data_ia[enemy][enemy_unit][1] < data_ia[enemy][target][1]:\n target = enemy_unit\n\n # Write the attack.\n commands[data_ia[ia][ia_unit][2]] = [ia_unit, ' -a-> ', target]\n unit_has_attacked = True\n\n # Find the weakest of all enemy's units - MOVE\n if not unit_has_attacked:\n target_list = data_ia[enemy].keys()\n target = target_list[0]\n\n for enemy_unit in data_ia[enemy]:\n if data_ia[enemy][enemy_unit][0] == 'D' or data_ia[enemy][enemy_unit][1] < data_ia[enemy][target][1]:\n target = enemy_unit\n\n target_cell = [ia_unit[0], ia_unit[1]]\n # Move on Y axis\n if target and abs(ia_unit[1] - target[1]) > abs(ia_unit[0] - target[0]) and 1 <= ia_unit[0] <= data_map['map_size'] and 1 <= ia_unit[1] <= data_map['map_size']:\n if ia_unit[1] > target[1]:\n target_cell[1] -= 1\n else:\n target_cell[1] += 1\n # Move on X axis\n elif target and 1 <= ia_unit[0] <= data_map['map_size'] and 1 <= ia_unit[1] <= data_map['map_size']:\n if ia_unit[0] > target[0]:\n target_cell[0] -= 1\n else:\n target_cell[0] += 1\n\n new_target = False\n # Check if he can move on the targeted position.\n enemy_positions = data_ia[enemy].keys()\n ia_positions = data_ia[ia].keys()\n for units in moved_units:\n del ia_positions[ia_positions.index(units)]\n\n # If the units can't move, find another free cell.\n if target_cell in (new_positions or enemy_positions or ia_positions):\n new_target_cells = []\n for line in range(target_cell[0] - 1, target_cell[0] + 2):\n for column in range(target_cell[1] - 1, target_cell[1] + 2):\n\n # Append the possible free cell to the list.\n if (line, column) not in (new_positions or 
enemy_positions or ia_positions):\n new_target_cells.append((line, column))\n\n # Choose the nearest free cell.\n if new_target_cells:\n new_target = new_target_cells[0]\n for cell in new_target_cells:\n if abs(ia_unit[0] - cell[0]) + abs(ia_unit[1] - cell[1]) < abs(ia_unit[0] - new_target[0]) + abs(ia_unit[1] - new_target[1]):\n new_target = new_target_cells[new_target_cells.index(cell)]\n\n # Save the new target in the correct variable.\n if new_target:\n target_cell = new_target\n\n # Write the move\n if target_cell != ia_unit:\n commands[data_ia[ia][ia_unit][2]] = [ia_unit, ' -m-> ', target_cell]\n new_positions.append(target_cell)\n moved_units.append(ia_unit)\n\n return commands", "def build_ops_matrix_and_ws(s, t, min_m):\n\n # init edit vars\n working_string = s\n edits = [s]\n\n rows = len(min_m) - 1\n cols = len(min_m[0]) - 1\n\n # init ops matrix with spaces in each cell, except (0,0)\n ops = [[\" \" for x in range(cols + 1)] for x in range(rows + 1)]\n ops[0][0] = \"0\"\n\n col = 0\n row = 0\n\n while True:\n\n # bail out if we are in the corner\n if row == rows and col == cols:\n break\n\n # we are not at a matrix boundary\n if row != rows and col != cols:\n\n # down - delete\n if str(min_m[row + 1][col]).strip() != \"\":\n ops[row + 1][col] = \"D\"\n\n # manipulate working string with implied edit ops\n if col == 0:\n working_string = working_string[1:]\n elif col == cols:\n working_string = working_string[:col]\n else:\n working_string = working_string[:col] + working_string[col + 1:]\n edits.append(working_string + \" <- delete '\" + str(s[row]) + \"' pos: \" + str(col))\n\n # move current cell\n if row < rows:\n row += 1\n\n # right - insert\n elif str(min_m[row][col + 1]).strip() != \"\":\n ops[row][col + 1] = \"I\"\n\n # manipulate working string with implied edit ops\n if col == 0:\n working_string = str(t[col]) + working_string\n elif col == cols:\n working_string = working_string + str(t[col])\n else:\n working_string = working_string[:row - 1] + str(t[col]) + working_string[row - 1:]\n edits.append(working_string + \" <- insert '\" + str(t[col]) + \"' pos: \" + str(col))\n\n # move current cell\n if col < cols:\n col += 1\n\n # diagonal - sub\n else:\n ops[row + 1][col + 1] = \"S\"\n\n # manipulate working string with implied edit ops\n if col == cols:\n working_string = working_string[:row] + str(t[col])\n else:\n working_string = working_string[:col] + str(t[col]) + working_string[col + 1:]\n edits.append(working_string + \" <- substitute '\" + str(t[col]) + \"' pos: \" + str(col))\n\n # move current cell\n if row < rows:\n row += 1\n if col < cols:\n col += 1\n\n # if at matrix edge, can only move down\n elif col == cols:\n # down - delete\n if str(min_m[row + 1][col]).strip() != \"\":\n ops[row + 1][col] = \"D\"\n\n # manipulate working string with implied edit ops\n if row == 0:\n working_string = working_string[1:]\n elif col == cols:\n working_string = working_string[:col]\n else:\n working_string = working_string[:row] + working_string[row + 1:]\n edits.append(working_string + \" <- delete '\" + str(s[row]) + \"' pos: \" + str(col))\n\n # update current cell\n if row < rows:\n row += 1\n\n # must be at row boundary, can only move right\n else:\n # right - insert\n if str(min_m[row][col + 1]).strip() != \"\":\n ops[row][col + 1] = \"I\"\n\n # manipulate working string with implied edit ops\n if col == 0:\n working_string = str(t[col]) + working_string\n elif col == cols:\n working_string = working_string + str(t[col])\n else:\n working_string = 
working_string[:row] + str(t[col]) + working_string[row:]\n edits.append(working_string + \" <- insert '\" + str(t[col]) + \"' pos: \" + str(col))\n\n # update current cell\n if col < cols:\n col += 1\n\n return edits, ops", "def _derive_modifiers(self, tx):\n return [tx + str(pixel) for pixel in self.adjacent_pixels]", "def tex_coords(top, bottom, n, e, s, w):\n top = tex_coord(*top)\n bottom = tex_coord(*bottom)\n n = tex_coord(*n)\n e = tex_coord(*e)\n s = tex_coord(*s)\n w = tex_coord(*w)\n result = []\n result.extend(top)\n result.extend(bottom)\n result.extend(n)\n result.extend(e)\n result.extend(s)\n result.extend(w)\n return result", "def reverse(script):\n\n offset = 0\n RCES = [] # Reversed, compressed edit script\n\n for edit in script:\n command, start, stop = edit\n if command == \"i\":\n RCES.append( ('d', start + offset, stop + offset) )\n offset = offset - (stop - start) - 1\n elif command == \"d\":\n RCES.append( ('i', start + offset, stop + offset) )\n offset = offset + (stop - start) + 1\n else:\n raise \"Unknown Command\", \"Unexpected command %s\" % command\n\n return RCES", "def xy2ind(self, x, y):\n return self.sub2ind(*self.xy2sub(x, y))", "def get_correct_coords(start_x=0,viewing_distance=12.0,field_height=10,field_width=10,pixel_width=0.282,pixel_height=0.282,**config):\n \n x = (start_x + np.arange(np.ceil(-field_width/2.0),np.ceil(field_width/2.0),1))*pixel_width\n y = np.arange(np.ceil(-field_height/2.0),np.ceil(field_height/2.0),1)*pixel_height\n x,y = np.meshgrid(x,y)\n coords = np.vstack((x.ravel(),y.ravel())).T\n return coords", "def handle_instructions(instructions):\n row_instructions = instructions[0:7]\n column_instructions = instructions[7:10]\n row = bisect(row_instructions, (0, 127), \"F\", \"B\")\n column = bisect(column_instructions, (0, 7), \"L\", \"R\")\n return row, column", "def extract(input_data: str) -> list:\n instructions = list()\n for instruction in input_data.split('\\n'):\n op, arg = instruction.split(' ')\n arg = int(arg)\n assert op in ('acc', 'jmp', 'nop')\n instructions.append(Instruction(op, arg))\n return instructions", "def condense_coords(matches):\n x = []\n y = []\n for m in matches:\n x += m['matches']['p'][0]\n x += m['matches']['q'][0]\n y += m['matches']['p'][1]\n y += m['matches']['q'][1]\n coords = np.transpose(np.vstack((np.array(x), np.array(y))))\n return coords", "def get_topleft_coords(self, label):\n return map(lambda coord: tuple(coord), self.coords[label])", "def find_obstacle_loc(self, obstacle_list):\n\n x_obst = []\n y_obst = []\n #x_obst_append = x_obst.append\n #y_obst_append = y_obst.append\n locs = []\n\n for x in obstacle_list:\n if x < self.width:\n x_obst.append(x*self.resolution + self.resolution/2)\n else:\n x_obst.append((x % self.width)*self.resolution + self.resolution/2)\n\n for y in obstacle_list:\n y_obst.append((y/self.width)*self.resolution + self.resolution/2)\n\n locs = map(lambda x: x, zip(x_obst, y_obst))\n\n return(locs)", "def get_positions(wire):\n x = 0\n y = 0\n positions = [(0, 0)]\n\n for instruction in wire:\n direction = instruction[0]\n dist = int(instruction[1:])\n if direction == \"R\":\n for pos in range(1, dist+1):\n positions.append((x + pos, y))\n x += dist\n elif direction == \"L\":\n for pos in range(1, dist+1):\n positions.append((x - pos, y))\n x -= dist\n elif direction == \"U\":\n for pos in range(1, dist + 1):\n positions.append((x, y + pos))\n y += dist\n elif direction == \"D\":\n for pos in range(1, dist + 1):\n positions.append((x, y - pos))\n y -= dist\n 
else:\n raise ValueError(\"Direction not recognised\")\n\n return positions", "def research_pos(self, map_list, character): \n list_pos = []\n for y in range(15): \n for x, c in enumerate(map_list[y]):\n if character in c and c == character:\n list_pos.append((x*50, y*50)) \n return list_pos", "def tile_coordinates(text):\n UL = (text[1]), (text[2]) # Upper Left\n UR = (text[3]), (text[2]) # Upper Right\n LR = (text[3]), (text[4]) # Lower Right\n LL = (text[1]), (text[4]) # Lower Left\n coordinates = (UL, UR, LR, LL)\n return text[0], [tuple(float(x) for x in xs) for xs in coordinates]", "def compute_start_end_points(linestrings):\n starts = []\n stops = []\n for ls in linestrings:\n pt = Point(ls.coords[0])\n starts.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n pt = Point(ls.coords[-1])\n stops.append(round(CONUS[\"poly\"].exterior.project(pt), 2))\n return starts, stops", "def get_instructions(self):\n tmp_ins = []\n idx = 0\n for i in self.method.get_instructions():\n if idx >= self.start and idx < self.end:\n tmp_ins.append(i)\n\n idx += i.get_length()\n return tmp_ins" ]
[ "0.6467284", "0.5889748", "0.5710616", "0.5507128", "0.53792125", "0.5310419", "0.5272096", "0.52095956", "0.5205697", "0.5166738", "0.5116879", "0.50985277", "0.50777525", "0.50473917", "0.5031812", "0.5012823", "0.5010603", "0.5003589", "0.49800825", "0.49780247", "0.49609813", "0.4951677", "0.49447465", "0.49439132", "0.49330738", "0.49314967", "0.49303603", "0.49252582", "0.4897878", "0.48747456" ]
0.805715
0
User-friendly approach which lets the user manually select the area corresponding to the vertical line. If not correctly selected the first time, the user can redo this procedure. Close plots manually to continue the procedure. Type Y for 'yes' or N for 'no' when deciding if the area is good.
def select_vert(img):
    # Local variable which breaks loop if area of interest is selected well
    OK = False

    # Main while-loop
    while OK == False:
        # Plot image
        fig, ax = plt.subplots(figsize=(10, 10))
        ax.imshow(img, cmap="gray")

        # Let user specify points
        coord = np.asarray(plt.ginput(4, show_clicks=True))
        p = Polygon(coord, linewidth=1, edgecolor='r', facecolor='none')
        plt.gca().add_artist(p)

        # Include area of interest in plot
        plt.draw()
        plt.show()

        # Ask user to accept or reject the proposed area of interest
        val = input("Is the region correct ([Y]/n)?\n")

        # Break if OK, re-do if not
        if val == "Y" or val == "":
            OK = True

    """
    Creates a mask which marks the vertical line based on the coordinates
    given by the user.
    """
    x, y = np.meshgrid(np.arange(img.shape[0]), np.arange(img.shape[1]), indexing='xy')
    x, y = x.flatten(), y.flatten()
    pts = np.vstack((x,y)).T
    pts_t = tuple(map(tuple, pts))
    mask = np.ones((img.shape[0],img.shape[1]))
    for (x,y) in pts_t:
        if p.get_path().contains_point((x,y)):
            mask[y][x] = 0

    # Return mask which is the area of interest with value 1, 0 else
    return mask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_select_treatment_area(self):\n self.txt_shoulder.hide()\n self.txt_arm.hide()\n self.txt_thigh.hide()\n self.line_shoulder.hide()\n self.line_arm.hide()\n self.line_thigh.hide()", "def OnMouseOut( self, event ):\n self.whichChoice = 0\n event.context.triggerRedraw(1)", "def choose_ROI(self):\n self.dialog.show()", "def on_click(event):\n ax = event.inaxes\n \n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n \n if self.current_plot == 'single':\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n self.ax_zoomed = True\n self.current_ax = ax\n ax.set_position([0.1, 0.05, 0.85, 0.80])\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n \n for axis in self.sp_fig.axes:\n if axis is not ax:\n axis.set_visible(False)\n \n except ValueError:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if self.ax_zoomed:\n self.ax_zoomed = False\n #self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n elif self.current_plot == 'multi':\n if ax is None:\n # Occurs when a region not in an axis is clicked...\n return\n if event.button is 1:\n if not self.ax_zoomed:\n # Change over to a single baseline plot\n try:\n ant1, ant2 = ax.get_title().split(\" \")\n except:\n ant1 = int(ax.get_title().strip('Tile').strip('Antenna').strip('Stand'))\n ant2 = ant1 \n try:\n self.spin_ref_ant.setValue(int(ant1))\n self.spin_ref_ant2.setValue(int(ant2))\n self.plot_select.setCurrentIndex(0)\n self.current_plot = 'single'\n \n self.updatePlot()\n except:\n raise\n self.sp_fig.canvas.mpl_disconnect(self.fig_connect)\n \n elif event.button is 3:\n if not self.ax_zoomed:\n ax.set_position([0.1, 0.1, 0.85, 0.85])\n # TODO: fix labelling of zoom plots\n ax.set_xlabel(\"Frequency\")\n #ax.set_ylabel(\"Time\")\n self.orig_position = ax.get_position()\n for axis in event.canvas.figure.axes:\n # Hide all the other axes...\n if axis is not ax:\n axis.set_visible(False)\n self.ax_zoomed=True\n else:\n self.updatePlot()\n \n else:\n # No need to re-draw the canvas if it's not a left or right click\n return\n \n event.canvas.draw()", "def option(self, event):\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()", "def area_circulo():\n diametro_o_radio = raw_input(\"Desea obtener area de circulo con 'radio' o 'diametro?: \")\n if diametro_o_radio == \"radio\":\n radio_1 = int(raw_input(\"introduce el valor del radio: \"))\n area_radio(radio_1)\n elif diametro_o_radio == \"diametro\":\n diametro_1 = int(raw_input(\"introduce el valor del diametro: \"))\n area_diametro(diametro_1)\n else:\n print \"introduzca un valor correcto, 'radio' o 'diametro'\"", "def yesButton(self):\n \n self.answer=\"yes\"\n self.top.destroy()", "def validate(self):\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def validate(self):\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def end(self, event):\n plt.close()", "def 
updateSelectionArea(self):\n self.machine.setSelectionArea(self.points,\n fill='hatch',\n color=self.machine.color)\n eventDict = prepareDrawingSignal('drawingProgress',\n 'polygon',\n self.points,\n self.machine.parameters)\n self.machine.plot.notify(**eventDict)", "def update(self):\n self.plot.draw()\n \n func=str(self.edit1b.currentText())\n if self.win.test()==0:\n x=np.linspace(0,10,200)\n elif self.win.test()==1:\n x=np.linspace(0,0.40,200)\n \n pattern1=r'Steel'\n pattern2=r'Aluminium'\n pattern3=r'[\\d]+'\n \n if (func!='Comparison Chart'):\n self.edit2b.setDisabled(False)\n self.edit3b.setDisabled(False)\n self.edit4b.setDisabled(False)\n if (func=='Quenched/Tempered Steel'):\n alpha = 0.0025\n elif (func=='Annealed Steel'):\n alpha = 0.01\n elif (func=='Steel (input Su)'):\n S = str(self.edit2b.text())\n if (self.win.test()==0):\n S = str(float(S)/6.895)\n alpha = notch.alpha(eval(S))\n elif (func=='Aluminium Alloy 356.0 as cast'):\n rho = 0.08\n elif (func=='Aluminium Alloy 6061'):\n rho = 0.025\n elif (func=='Aluminium Alloy 7075'):\n rho = 0.015\n elif (func=='Material dropdown'):\n pass\n \n y1=[]\n if re.search(pattern1,func):\n Su=notch.su_s(alpha)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsp(alpha,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsp(alpha,r,self.win.test())))\n elif re.search(pattern2,func):\n Su=notch.su_a(rho)\n if (self.win.test()==0):\n Su = Su*6.895\n for i in range(len(x)):\n y1.append(notch.nsn(rho,x[i],self.win.test()))\n y=np.asarray(y1)\n if (re.search(pattern3,str(self.edit3b.text()))):\n r=eval(str(self.edit3b.text()))\n self.edit4b.setText(str(notch.nsn(rho,r,self.win.test())))\n \n self.edit2b.setText(str(Su))\n func1 = 'Steel (Su='+str(self.edit2b.text())+')'\n if (func!='Steel (input Su)'):\n self.plot.redraw(x,y,func, self.xlabel)\n elif (func=='Steel (input Su)'):\n self.plot.redraw(x,y,func1, self.xlabel)\n \n elif (func=='Comparison Chart'):\n self.edit2b.setText(\"\")\n self.edit2b.setDisabled(True)\n self.edit3b.setText(\"\")\n self.edit3b.setDisabled(True)\n self.edit4b.setText(\"\")\n self.edit4b.setDisabled(True)\n self.plot.draw_comp(self.xlabel, self.win.test())", "def select_area(ev, x, y, _1, _2):\n global x_init, y_init, drawing, top_left, bottom_right, orig_img, img\n\n if ev == cv.EVENT_LBUTTONDOWN:\n drawing = True\n x_init = x\n y_init = y\n elif ev == cv.EVENT_MOUSEMOVE and drawing:\n draw_rect(img, x_init, y_init, x, y)\n elif ev == cv.EVENT_LBUTTONUP:\n drawing = False\n draw_rect(img, x_init, y_init, x, y)\n\n mask = prepare_mask(x, y)\n remove_from_scene(mask)", "def _quit_figure(event):\n\tif event.key == 'q':\n\t\tplt.close(event.canvas.figure)", "def roi_slough(self):\n print(\"controller - roi_slough!\")\n self.view.processing_gui.ask_zone_type(\"Slough\")", "def validate(self):\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.parent.fitmodel = self.model\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def correct(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[1] - self.data[2]\n self.pltw.updatePlot()\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.parent.updateUI()\n self.hide()", "def callback_handle_right_mouse_click(self, event):\n\n if self.variables.active_tool == 
TOOLS.DRAW_LINE_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.variables.actively_drawing_shape = False", "def enable_remove_plot(self):\n pass\n #if self.cb_plotpanel.GetCount() == 0:\n # self.bt_close_plot.Disable()\n #else:\n # self.bt_close_plot.Enable()", "def validate(self):\n self.parent.copyCurrentWinState(self.pltw)\n if self.incr:\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n else:\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2][::-1]\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.pltw.updatePlot()\n self.parent.updateUI()\n self.hide()", "def make_area_plots(df, x_input = \"Mean Predicted Avg\",\n y_input = \"Empirical Probability\"):\n\n df = df.copy()\n\n # Get the regularizer and reset coeff\n coeff = [float(i.split(\"evidence_new_reg_\")[1]) if \"evidence\" in i else i for i in df['method_name']]\n df[\"method_name\"] = coeff\n df[\"Data\"] = convert_dataset_names(df[\"dataset\"])\n df[\"Method\"] = df[\"method_name\"]\n\n trials = 'trial_number'\n methods = 'Method'\n\n # Make area plot\n uniq_methods = set(df[\"Method\"].values)\n method_order = sorted(uniq_methods,\n key=lambda x : x if isinstance(x, float) else -1)\n method_df = []\n datasets = set()\n for data, sub_df in df.groupby(\"Data\"):\n # Add datasets\n datasets.add(data)\n x_vals = sub_df[x_input]\n y_vals = sub_df[y_input]\n methods_sub = sub_df[\"Method\"]\n trials_sub= sub_df['trial_number']\n for method_idx, method in enumerate(method_order):\n # Now summarize these lines\n bool_select = (methods_sub == method)\n lines_y = y_vals[bool_select]\n lines_x = x_vals[bool_select]\n trials_temp = trials_sub[bool_select]\n areas = []\n # create area!\n for trial, line_x, line_y in zip(trials_sub, lines_x, lines_y):\n new_y = np.abs(np.array(line_y) - np.array(line_x))\n area = simps(new_y, line_x)\n to_append = {\"Area from parity\": area,\n \"Regularizer Coeff, $\\lambda$\": method,\n \"method_name\": method,\n \"Data\": data,\n \"Trial\" : trial}\n method_df.append(to_append)\n method_df = pd.DataFrame(method_df)\n method_df_evidence = method_df[[isinstance(i, float) for i in\n method_df['method_name']]].reset_index()\n method_df_ensemble = method_df[[\"ensemble\" in str(i) for i in\n method_df['method_name']]].reset_index()\n data_colors = {\n dataset : sns.color_palette()[index]\n for index, dataset in enumerate(datasets)\n }\n\n min_x = np.min(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n max_x= np.max(method_df_evidence[\"Regularizer Coeff, $\\lambda$\"])\n\n sns.lineplot(x=\"Regularizer Coeff, $\\lambda$\", y=\"Area from parity\",\n hue=\"Data\", alpha=0.8, data=method_df_evidence,\n palette = data_colors)\n\n for data, subdf in method_df_ensemble.groupby(\"Data\"):\n\n color = data_colors[data]\n area = subdf[\"Area from parity\"].mean()\n std = subdf[\"Area from parity\"].std()\n plt.hlines(area, min_x, max_x, linestyle=\"--\", color=color, alpha=0.8)\n\n ensemble_line = plt.plot([], [], color='black', linestyle=\"--\",\n label=\"Ensemble\")\n # Now make ensemble plots\n plt.legend(bbox_to_anchor=(1.1, 1.05))", "def plot_ideal_asa_bad_selected(self, env):\n print(f'Plotting ideal, ASA, and bad skill - selected runs - {env}')\n params = self.env_params[env]\n plt.figure()\n\n\n # Basic run\n self.draw_reward_range(\n self.data(env, 'asa')\n 
.filter_basic_runs(),\n env, color='black', label='Base run'\n )\n\n # With skills\n skills = [\n ('ideal', 'With ideal skill', '#2BAB2B'),\n ('asa', 'With ASA skill', 'royalblue'),\n ('bad', 'With bad skill', '#FF3333')\n ]\n for dataset, label, color in skills:\n self.draw_reward_range(\n self.data(env, dataset)\n .filter_seed_and_resumed_from(params['true_asa_runs'])\n .filter_itr_from(12)\n .append_prev_itr(),\n env, color=color, label=label\n )\n\n\n # Finalize\n y_label = y_label='Average discounted reward' if env == 'gw' else None\n self.tidy_plot(env, w=12, h=8, y_label=y_label)\n plt.legend(loc='lower right')\n self.show_save_plot(f'ideal-asa-bad-selected-{env}')", "def validate(self):\n self.parent.copyCurrentWinState(self.pltw)\n self.pltw.blklst[self.blkno][self.ypos] = self.data[2]\n self.pltw.dirty = True\n self.pltw.activecurv = self.cpos\n self.pltw.updatePlot()\n self.parent.smoothtyp = self.type\n self.parent.smoothFilter = self.nfilter\n self.parent.smoothpass = self.npass\n self.parent.updateUI()\n self.hide()", "def test_rectangle_close(self):\n before_b = \"\"\"\\\n before\n aaa bbb\n aaa bbb\n aaa bbb\n aaa bbb\n after\n \"\"\"\n after_b = \"\"\"\\\n before\n aaabbb\n aaabbb\n aaabbb\n aaabbb\n after\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.3\", \"5.6\"),\n after_sel=(\"2.3\", \"5.3\"),\n command_name=\"rectangle-close\",\n )", "def yesButton(self):\n \n self.answer=self.yesMessage.lower()\n self.top.destroy()", "def onMdiArea(self, subWin):\n plt = Plot.getPlot()\n if plt != subWin:\n self.updateUI()", "def cancel():\n global confirmation, output1, place_for_enter\n output1.delete(1.0, END)\n confirmation.after(1, confirmation.destroy)\n place_for_enter.delete(0, END)", "def refinement_func_area(tri_points, area):\r\n max_area = 0.005\r\n return bool(area > max_area)", "def ask_roi_confirmation(self, img_cv2_mask, img_cv2_roi, tissue, scale_factor, ring):\n print(\"controller - ask_roi_confirmation!\")\n self.pressure_img.close_all()\n #try:\n self.view.processing_gui.ask_roi_confirmation(img_cv2_mask, img_cv2_roi, tissue, scale_factor, ring)\n #except:\n #self.view.popupmsg(\"Alguna cosa ha fallat. Torna-ho a intentar!\")", "def decision(question):\n return click.confirm(question, show_default=True)" ]
[ "0.55472", "0.55037045", "0.54417837", "0.5397977", "0.53612226", "0.5340556", "0.5328645", "0.5273989", "0.5273989", "0.5203098", "0.51560444", "0.51241", "0.51219076", "0.5109118", "0.5108845", "0.5095289", "0.5091633", "0.50630957", "0.5059883", "0.50550723", "0.50508314", "0.5038442", "0.5012943", "0.49802715", "0.498012", "0.4963154", "0.49457586", "0.4906997", "0.49060947", "0.4894205" ]
0.6410726
0
Configure celery instance using config from Flask app
def configure_celery(app: Flask) -> Celery:
    TaskBase = celery_holder.celery.Task

    class ContextTask(TaskBase):
        abstract = True

        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)

    celery_holder.celery.conf.update(app.config)
    celery_holder.celery.Task = ContextTask
    return celery_holder.celery
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_celery(config_name):\n app = Flask(__name__)\n\n # apply configuration\n cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')\n app.config.from_pyfile(cfg)\n\n # Initialize aws client\n aws_client = boto3.Session(\n aws_access_key_id=app.config['AWS_ACCESS_KEY'],\n aws_secret_access_key=app.config['AWS_ACCESS_KEY_SECRET'],\n region_name=app.config['AWS_REGION']\n )\n\n # initialize extensions\n db.init_app(app)\n\n celery = Celery(\n app.import_name,\n broker=app.config['CELERY_BROKER_URL'],\n backend=app.config['CELERY_BACKEND_URL']\n )\n celery.conf.update(app.config)\n TaskBase = celery.Task\n\n class ContextTask(TaskBase):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n\n return celery, app, aws_client", "def make_celery(app: flask.app.Flask) -> Celery:\n\n celery = Celery(app.import_name)\n celery.config_from_object(GetConfig.configure('celery'))\n celery.conf.update(app.config)\n logger.info(\"Celery configurations: BROKER_URL= {} RESULT_BANKEND = {} \"\n \"\".format(celery.conf.get(\"BROKER_URL\"),\n celery.conf.get(\"CELERY_RESULT_BACKEND\")))\n\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n return celery", "def make_celery(app: Flask):\n\n c = Celery(\n app.import_name,\n backend=app.config[\"CELERY_RESULT_BACKEND\"],\n broker=app.config[\"CELERY_BROKER_URL\"],\n )\n c.conf.update(app.config)\n\n class ContextTask(c.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n # noinspection PyPropertyAccess\n c.Task = ContextTask\n return c", "def make_celery():\n\n celery_instance = Celery(\"celeryapp\",\n broker='amqp://guest@localhost//',\n backend='amqp://',\n include=['celeryapp.tasks'])\n celery_instance.config_from_object(settings.config)\n\n # # modify the TaskBase class\n # TaskBase = celery_instance.Task\n # class ContextTask(TaskBase):\n # abstract = True\n # def __call__(self, *args, **kwargs):\n # with app.app_context():\n # return TaskBase.__call__(self, *args, **kwargs)\n # celery_instance.Task = ContextTask\n return celery_instance", "def make_celery(app):\n celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])\n celery.conf.update(app.config)\n TaskBase = celery.Task\n\n class ContextTask(TaskBase):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n\n return celery", "def make_celery(app):\n celery = Celery(app.import_name)\n\n # Determine which Celery configuration to load:\n # The order is:\n # 1. `SM_CELERY_CONFIG` Environment Variable\n # 2. 
The default \"celeryconfig.py\"\n celery.config_from_object(get_celery_config_file())\n celery.conf.update(app.config)\n\n TaskBase = celery.Task\n\n class ContextTask(TaskBase):\n abstract = True\n\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return TaskBase.__call__(self, *args, **kwargs)\n\n celery.Task = ContextTask\n return celery", "def make_celery(app):\n from celery import Celery\n celery = Celery(app.name)\n celery.conf.update(app.config['CELERY_CONFIG'])\n\n class ContextTask(celery.Task):\n def __call__(self, *args, **kwargs):\n with app.app_context():\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n celery.autodiscover_tasks(['app'])\n return celery", "def create_app():\n app = Flask(__name__)\n bootstrap.init_app(app)\n # TODO Make the secret key an actual secret key\n app.config['SECRET_KEY'] = 'dev_key'\n app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'\n app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'\n celery = Celery(app.name, broker=app.config['CELERY_BROKER_URL'])\n celery.conf.update(app.config)\n\n main_blueprint = construct_blueprint(celery)\n app.register_blueprint(main_blueprint)\n\n return celery, app", "def create_celery_app(app: Flask) -> Celery:\n conf = app.config['FOCA'].jobs\n\n # Instantiate Celery app\n celery = Celery(\n app=__name__,\n broker=f\"pyamqp://{conf.host}:{conf.port}//\",\n backend=conf.backend,\n include=conf.include,\n )\n calling_module = ':'.join([stack()[1].filename, stack()[1].function])\n logger.debug(f\"Celery app created from '{calling_module}'.\")\n\n # Update Celery app configuration with Flask app configuration\n celery.conf['FOCA'] = app.config['FOCA']\n logger.debug('Celery app configured.')\n\n class ContextTask(celery.Task): # type: ignore\n # https://github.com/python/mypy/issues/4284)\n \"\"\"Create subclass of task that wraps task execution in application\n context.\n \"\"\"\n def __call__(self, *args, **kwargs):\n \"\"\"Wrap task execution in application context.\"\"\"\n with app.app_context(): # pragma: no cover\n return self.run(*args, **kwargs)\n\n celery.Task = ContextTask\n logger.debug(\"App context added to 'celery.Task' class.\")\n\n return celery", "def celery_config() -> Dict:\n with open(script_dir + 'config.yml', 'r') as yamlfile:\n cfg = yaml.load(yamlfile, Loader=yaml.SafeLoader)\n celery_cfg = cfg['celery']\n result = {\n 'main': celery_cfg['main'],\n 'broker': celery_cfg['broker_url'],\n 'backend': celery_cfg['backend_url'],\n }\n return result", "def make_celery(app_name: str = __name__) -> Celery:\n return Celery(app_name)", "def app(self):\n app = Flask('testapp')\n app.config.update({\n 'HADES_CELERY_APP_NAME': 'test',\n 'HADES_BROKER_URI': 'rpc://broker/',\n 'HADES_RESULT_BACKEND_URI': 'rpc://backend/',\n })\n return app", "def get_celery_config_file():\n return importlib.import_module(\"security_monkey.{}\".format(os.environ.get(\"SM_CELERY_CONFIG\", \"celeryconfig\")),\n \"security_monkey\")", "def __init__(self, config_path, setup_celery=True):\n _log.info(\"GNU MediaGoblin %s main server starting\", __version__)\n _log.debug(\"Using config file %s\", config_path)\n ##############\n # Setup config\n ##############\n\n # Open and setup the config\n global_config, app_config = setup_global_and_app_config(config_path)\n\n setup_crypto()\n\n ##########################################\n # Setup other connections / useful objects\n ##########################################\n\n # Setup Session Manager, not needed in celery\n 
self.session_manager = session.SessionManager()\n\n # load all available locales\n setup_locales()\n\n # Set up plugins -- need to do this early so that plugins can\n # affect startup.\n _log.info(\"Setting up plugins.\")\n setup_plugins()\n\n # Set up the database\n self.db = setup_database()\n\n # Register themes\n self.theme_registry, self.current_theme = register_themes(app_config)\n\n # Get the template environment\n self.template_loader = get_jinja_loader(\n app_config.get('local_templates'),\n self.current_theme,\n PluginManager().get_template_paths()\n )\n\n # Set up storage systems\n self.public_store, self.queue_store = setup_storage()\n\n # set up routing\n self.url_map = get_url_map()\n\n # set up staticdirector tool\n self.staticdirector = get_staticdirector(app_config)\n\n # Setup celery, if appropriate\n if setup_celery and not app_config.get('celery_setup_elsewhere'):\n if os.environ.get('CELERY_ALWAYS_EAGER', 'false').lower() == 'true':\n setup_celery_from_config(\n app_config, global_config,\n force_celery_always_eager=True)\n else:\n setup_celery_from_config(app_config, global_config)\n\n #######################################################\n # Insert appropriate things into mediagoblin.mg_globals\n #\n # certain properties need to be accessed globally eg from\n # validators, etc, which might not access to the request\n # object.\n #######################################################\n\n setup_globals(app=self)\n\n # Workbench *currently* only used by celery, so this only\n # matters in always eager mode :)\n setup_workbench()\n\n # instantiate application meddleware\n self.meddleware = [common.import_component(m)(self)\n for m in meddleware.ENABLED_MEDDLEWARE]", "def local_celery():\n click.echo('Start Celery on Machine')\n ret = subprocess.call(\n ['celery', 'worker', '-A', 'celery_worker.celery', '--loglevel=info', '-P', 'eventlet'])\n sys.exit(ret)", "def config_as(self, app_name):\n # Cast to target app\n # ====================================================================\n app_names = [ app_cls.__name__ for app_cls in LazyApp.AVAILABLE_APPS ]\n target_app_idx = app_names.index(app_name)\n target_app = LazyApp.AVAILABLE_APPS[target_app_idx]\n self.__class__ = target_app\n\n # Configure remote worker\n # ====================================================================\n # Tell remote worker to become target worker\n message = { 'action': 'worker', 'content': target_app.MATCHED_WORKER }\n self.send(message)\n\n # Use default worker config\n # ====================================================================\n response = self.recv()\n if response is None:\n raise Exception(\"Remote worker has been closed\")\n\n # Send changed worker config to server\n worker_config = response['content']\n message = { 'action': 'config', 'content': worker_config }\n self.send(message)\n\n # You're ready to go\n logger.info(f\"{self.__class__.__name__} has been configured properly\")", "def ready(self):\n # pylint: disable=import-outside-toplevel\n\n from ..analytics import tasks as analytics_tasks\n from ..auditing import tasks as auditing_tasks\n from ..datacenters import tasks as datacenters_tasks\n from ..globalsearch import tasks as meilisearch_tasks\n from ..objectives import tasks as objective_tasks\n from ..repos import tasks as repos_tasks\n from ..services import tasks as service_tasks\n\n # pylint: enable=import-outside-toplevel\n\n celery_app.add_periodic_task(timedelta(hours=1), repos_tasks.sync_repos)\n celery_app.add_periodic_task(timedelta(hours=1), 
repos_tasks.schedule_pulls)\n celery_app.add_periodic_task(timedelta(days=1), repos_tasks.sync_zoo_file)\n celery_app.add_periodic_task(\n timedelta(hours=1), service_tasks.schedule_sentry_sync\n )\n celery_app.add_periodic_task(\n timedelta(days=1), service_tasks.sync_sonarqube_projects\n )\n celery_app.add_periodic_task(\n timedelta(days=1), objective_tasks.schedule_objective_snapshots\n )\n celery_app.add_periodic_task(\n timedelta(days=1), auditing_tasks.take_issue_table_snapshots\n )\n celery_app.add_periodic_task(timedelta(days=1), auditing_tasks.cleanup_issues)\n celery_app.add_periodic_task(\n timedelta(days=1), analytics_tasks.take_dependency_snapshots\n )\n celery_app.add_periodic_task(\n timedelta(days=1), analytics_tasks.check_python_lib_licenses\n )\n celery_app.add_periodic_task(\n timedelta(days=1), datacenters_tasks.schedule_infra_mapping\n )\n celery_app.add_periodic_task(\n timedelta(hours=1), meilisearch_tasks.index_db_model_instances\n )\n celery_app.add_periodic_task(\n timedelta(minutes=30), meilisearch_tasks.index_openapi_definitions\n )", "def __init__(self):\n # BASE_DIR:///artifice/scraper/\n self.BASE_DIR = os.path.dirname(loc)\n\n # prototypes\n self._eth0 = '0.0.0.0'\n self._exposed_port = 8080\n self._db_name = 'site.db'\n self._redis_pword = 'password'\n self._redis_host = 'localhost'\n self._redis_port = 6379\n self._celery_broker_uname = 'michael'\n self._celery_broker_pword = 'michael123'\n self._celery_broker_host = 'localhost'\n self._celery_broker_virtual_host = 'michael_vhost'\n\n # flask\n self.TESTING = False\n self.URL_PREFIX = ''\n self.FLASK_PORT = self._exposed_port\n self.FLASK_HOST = '0.0.0.0'\n self.FLASK_DEBUG = False\n self.FLASK_USE_RELOADER = False\n self.FLASK_THREADED = True\n\n # logging\n self.LOG_FILE = 'flask.log'\n self.LOG_LEVEL = 'INFO'\n self.CELERY_LOG_LEVEL = 'ERROR'\n self.CELERY_LOG_FILE = 'celery.log'\n self.STDOUT = True\n\n # database\n self.DROP_TABLES = True\n self.SQLALCHEMY_TRACK_MODIFICATIONS = False\n self.SQLALCHEMY_DATABASE_URI = 'sqlite:///{}'.format(\n os.path.join(self.BASE_DIR, self._db_name))\n\n # redis\n self.REDIS_URL = 'redis://{}:@{}:{}/0'.format(\n self._redis_pword,\n self._redis_host,\n self._redis_port)\n self.REDIS_HIT_COUNTER = 'HIT_COUNTER'\n\n # defaults\n self.ARGS_DEFAULT_LIMIT = 10\n self.ARGS_DEFAULT_STATUS = ['READY', 'TASKED', 'DONE']\n\n self.SUPERVISOR_ENABLED = True\n self.SUPERVISOR_DEBUG = False\n self.SUPERVISOR_POLITE = 1\n\n # celery\n self.CELERY_WORKERS = 8\n self.CELERY_MODULE = 'background'\n self.CELERY_BROKER = 'amqp://{}:{}@{}/{}'.format(\n self._celery_broker_uname,\n self._celery_broker_pword,\n self._celery_broker_host,\n self._celery_broker_virtual_host)\n self.CELERY_BACKEND = 'rpc://'\n self.CELERY_INCLUDE = ['artifice.scraper.background.tasks']\n\n # endpoints\n self.URL_FOR_STATUS = 'http://{}:{}/status'.format(self._eth0, self._exposed_port)\n self.URL_FOR_QUEUE = 'http://{}:{}/queue'.format(self._eth0, self._exposed_port)\n self.URL_FOR_CONTENT = 'http://{}:{}/content'.format(self._eth0, self._exposed_port)", "def get_instance(cls, module_name):\n if module_name in cls.instance:\n return cls.instance[module_name]\n\n # Celery Broker\n celery_broker = 'amqp://{0}:{1}@{2}:{3}'.format(\n settings.DATABASES[module_name][\"rabbitmq\"][\"USER\"],\n settings.DATABASES[module_name][\"rabbitmq\"][\"PASSWORD\"],\n settings.DATABASES[module_name][\"rabbitmq\"][\"HOST\"],\n settings.DATABASES[module_name][\"rabbitmq\"][\"PORT\"]\n )\n\n # Celery App\n cls.instance[module_name] = 
Celery(\n \"{0}_{1}\".format(settings.CELERY_APP_NAME, module_name),\n broker=celery_broker,\n include=settings.CELERY_TASKS\n )\n\n # Celery Beat (Periodic Tasks)\n cls.instance[module_name].conf.CELERYBEAT_SCHEDULE = settings.CELERYBEAT_SCHEDULE\n\n # Routes\n cls.instance[module_name].conf.CELERY_ROUTES = settings.CELERY_ROUTES\n\n # Other settings\n cls.instance[module_name].conf.CELERY_ACCEPT_CONTENT = settings.CELERY_ACCEPT_CONTENT\n cls.instance[module_name].conf.CELERY_TIMEZONE = settings.CELERY_TIMEZONE\n cls.instance[module_name].conf.CELERY_RESULT_BACKEND = False\n cls.instance[module_name].conf.CELERY_IGNORE_RESULT = True\n cls.instance[module_name].conf.CELERY_SEND_EVENTS = False # Will not create celeryev.* queues\n cls.instance[module_name].conf.CELERY_EVENT_QUEUE_EXPIRES = 60 # Will delete all celeryev. queues without consumers after 1 minute.\n cls.instance[module_name].conf.CELERY_TASK_SERIALIZER = 'json'\n cls.instance[module_name].conf.CELERY_MESSAGE_COMPRESSION = 'gzip'\n\n return cls.instance[module_name]", "def ensure_conf(app):\n name = 'redbeat_conf'\n app = app_or_default(app)\n try:\n config = getattr(app, name)\n except AttributeError:\n config = RedBeatConfig(app)\n setattr(app, name, config)\n\n return config", "def init_app(self, app):\r\n\r\n app.config.setdefault('REDIS_URLS', {\r\n 'main': 'redis://localhost:6379/0',\r\n 'admin': 'redis://localhost:6379/1',\r\n })\r\n\r\n app.before_request(self.before_request)\r\n\r\n self.app = app", "def app():\n app = App(\n name=\"testapp\",\n processes=1,\n concurrency=4,\n prefetch_count=1,\n retry_backoff=lambda retries: 0.01,\n maintenance_interval=0.1,\n schedule_interval=0.1,\n heartbeat_interval=0.1,\n heartbeat_timeout=1,\n grace_period=1,\n )\n\n @app.task\n def example(n):\n if random.random() < 0.2:\n raise Chaos(\"Random task failure\")\n return n\n\n for i in range(100):\n example.delay(i)\n\n return app", "def main(global_config, **settings):\n setup_loggers(settings)\n LOGGER.info(\"Initiating weaver application\")\n\n # validate and fix configuration\n weaver_config = get_weaver_configuration(settings)\n settings.update({\"weaver.configuration\": weaver_config})\n\n # Parse extra_options and add each of them in the settings dict\n LOGGER.info(\"Parsing extra options...\")\n settings.update(parse_extra_options(settings.get(\"weaver.extra_options\", \"\")))\n\n # load requests options if found, otherwise skip\n LOGGER.info(\"Checking for request options file...\")\n req_file = get_weaver_config_file(settings.get(\"weaver.request_options\", \"\"),\n WEAVER_DEFAULT_REQUEST_OPTIONS_CONFIG,\n generate_default_from_example=False)\n if req_file:\n LOGGER.info(\"Loading request options...\")\n with open(req_file, \"r\") as f:\n settings.update({\"weaver.request_options\": yaml.safe_load(f)})\n else:\n LOGGER.warning(\"No request options found.\")\n\n # add default caching regions if they were omitted in config file\n if settings.get(\"weaver.celery\", False):\n LOGGER.info(\"Celery runner detected. Skipping cache options setup.\")\n else:\n LOGGER.info(\"Adding default caching options...\")\n setup_cache(settings)\n\n LOGGER.info(\"Setup celery configuration...\")\n local_config = Configurator(settings=settings)\n if global_config.get(\"__file__\") is not None:\n local_config.include(\"pyramid_celery\")\n local_config.configure_celery(global_config[\"__file__\"])\n local_config.include(\"weaver\")\n\n if settings.get(\"weaver.celery\", False):\n LOGGER.info(\"Celery runner detected. 
Skipping process registration.\")\n else:\n LOGGER.info(\"Registering builtin processes...\")\n register_builtin_processes(local_config)\n\n LOGGER.info(\"Registering WPS-1 processes from configuration file...\")\n wps_processes_file = get_settings(local_config).get(\"weaver.wps_processes_file\")\n register_wps_processes_from_config(wps_processes_file, local_config)\n\n return local_config.make_wsgi_app()", "def to_main():\n if env.is_staging:\n print \"Reverting back to PRODUCTION is not allowed for STAGING!\"\n return\n with cd(env.code_dir):\n run('ln -sf celeryconfig-production.py ./api/celeryconfig.py')\n restart_api()", "def sync_marathon_app():\n # Identify the hosts and ports of executing tasks\n try:\n c = MarathonClient(MARATHON_ROOT_URL)\n\n app = c.get_app(MARATHON_APP)\n\n container_port = MARATHON_APP_PORT\n\n port_index = None\n if app and app.container and app.container.docker and app.container.docker.port_mappings:\n for i in range(len(app.container.docker.port_mappings)):\n if container_port == app.container.docker.port_mappings[i].container_port:\n # Set port index to use for identifying the exposed port\n # that maps to internal container port\n port_index = i\n break\n\n if port_index is None:\n raise Exception('Unable to correlate container to host port.')\n\n instances = []\n for task in app.tasks:\n logging.info('Queuing configuration refresh of %s at %s:%s' %\n (task.id, task.host, task.ports[port_index]))\n instances.append('%s:%s' % (task.host, task.ports[port_index]))\n\n reload_config(instances)\n\n except MarathonError, ex:\n print 'Error making Marathon API call: %s' % ex.message", "def __init__(self):\n ignore_logger(\"airflow.task\")\n\n sentry_flask = FlaskIntegration()\n\n # LoggingIntegration is set by default.\n integrations = [sentry_flask]\n\n executor_class, _ = ExecutorLoader.import_default_executor_cls(validate=False)\n\n if executor_class.supports_sentry:\n from sentry_sdk.integrations.celery import CeleryIntegration\n\n sentry_celery = CeleryIntegration()\n integrations.append(sentry_celery)\n\n dsn = None\n sentry_config_opts = conf.getsection(\"sentry\") or {}\n if sentry_config_opts:\n sentry_config_opts.pop(\"sentry_on\")\n old_way_dsn = sentry_config_opts.pop(\"sentry_dsn\", None)\n new_way_dsn = sentry_config_opts.pop(\"dsn\", None)\n # supported backward compatibility with old way dsn option\n dsn = old_way_dsn or new_way_dsn\n\n unsupported_options = self.UNSUPPORTED_SENTRY_OPTIONS.intersection(sentry_config_opts.keys())\n if unsupported_options:\n log.warning(\n \"There are unsupported options in [sentry] section: %s\",\n \", \".join(unsupported_options),\n )\n\n sentry_config_opts[\"before_send\"] = conf.getimport(\"sentry\", \"before_send\", fallback=None)\n sentry_config_opts[\"transport\"] = conf.getimport(\"sentry\", \"transport\", fallback=None)\n\n if dsn:\n sentry_sdk.init(dsn=dsn, integrations=integrations, **sentry_config_opts)\n else:\n # Setting up Sentry using environment variables.\n log.debug(\"Defaulting to SENTRY_DSN in environment.\")\n sentry_sdk.init(integrations=integrations, **sentry_config_opts)", "def celery():\n try:\n subprocess.call(\n ['celery', 'multi', 'start', '2', '-A', 'celery_worker.celery', '--loglevel=DEBUG', '--autoscale=4,1',\n '-Ofair', '--logfile=celery_logs/celery-worker-%n.log', '--pidfile=celery_logs/celery-worker-%n.pid '\n , '-P', 'eventlet'])\n except Exception as e:\n click.echo('Exception occurred. 
Run code locally')", "def init_extensions(app: Flask):\n\n global celery\n celery = make_celery(app)\n csrf.init_app(app)\n db.init_app(app)\n lm.init_app(app)\n mail.init_app(app)\n migrate.init_app(app, db)", "def restart_celery():\n os.system('flask kill_celery')\n os.system('flask celery')", "def config():\n config_django()\n config_svisor()" ]
[ "0.77063173", "0.75819045", "0.7530678", "0.7459084", "0.7273119", "0.7219687", "0.71717805", "0.71142334", "0.695116", "0.6718956", "0.66823643", "0.6564181", "0.65242624", "0.62319493", "0.6199454", "0.61328864", "0.6120141", "0.6090478", "0.6005244", "0.5987801", "0.5875242", "0.58613914", "0.5836776", "0.5790785", "0.57870924", "0.57691747", "0.5727369", "0.57193935", "0.5648073", "0.56319255" ]
0.858661
0
Builds the computation graph of the mask head of Feature Pyramid Network.
def fpn_mask_graph(rois, feature_maps, image_meta, pool_size, num_classes, train_bn=True): # ROI Pooling # Shape: [batch, boxes, pool_height, pool_width, channels] x = modellib.PyramidROIAlign([pool_size, pool_size], name="roi_align_mask")([rois, image_meta] + feature_maps) # Conv layers x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv1")(x) x = KL.TimeDistributed(modellib.BatchNorm(), name='mrcnn_mask_bn1')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv2")(x) x = KL.TimeDistributed(modellib.BatchNorm(), name='mrcnn_mask_bn2')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv3")(x) x = KL.TimeDistributed(modellib.BatchNorm(), name='mrcnn_mask_bn3')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"), name="mrcnn_mask_conv4")(x) x = KL.TimeDistributed(modellib.BatchNorm(), name='mrcnn_mask_bn4')(x, training=train_bn) x = KL.Activation('relu')(x) x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"), name="mrcnn_mask_deconv")(x) x = KL.TimeDistributed(KL.Conv2D(1, (1, 1), strides=1, activation="sigmoid"), name="mrcnn_mask")(x) # Duplicate output for fg/bg detections x = KL.Concatenate(axis=-1)([x for i in range(num_classes)]) return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n # ROI Pooling\n # Shape: [batch, boxes, pool_height, pool_width, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x", "def build_graph(self, inputs, masks):\n with vs.variable_scope(\"SimpleSoftmaxLayer\"):\n\n # Linear downprojection layer\n logits = tf.contrib.layers.fully_connected(inputs, num_outputs=1, activation_fn=None) # shape (batch_size, seq_len, 1)\n logits = tf.squeeze(logits, axis=[2]) # shape (batch_size, seq_len)\n\n # Take softmax over sequence\n masked_logits, prob_dist = masked_softmax(logits, masks, 1)\n\n return masked_logits, prob_dist", "def build_graph(self, inputs, masks):\n with vs.variable_scope(\"SimpleSoftmaxLayer\"):\n\n # Linear downprojection layer\n logits = tf.contrib.layers.fully_connected(inputs, num_outputs=1, activation_fn=None) # shape (batch_size, seq_len, 1)\n logits = tf.squeeze(logits, axis=[2]) # shape (batch_size, seq_len)\n\n # Take softmax over sequence\n masked_logits, prob_dist = masked_softmax(logits, masks, 1)\n\n return masked_logits, prob_dist", "def build_graph(self, reps, context_mask):\n cx_len = context_mask.shape[1]\n with vs.variable_scope(self.scope):\n reps = tf.contrib.layers.fully_connected(reps,\n num_outputs=self.hidden_sz)\n logits_start, probdist_start = self._pred_start(reps, context_mask)\n end_reps = tf.concat([reps, tf.expand_dims(probdist_start, 2)], 2)\n # [batch_sz]: index of starting word\n start_idx = tf.argmax(probdist_start, 1)\n # # [batch_sz, context_length]: 1 if valid for end word else 0.1\n start_mask = 1 - 0.9 * tf.cast(tf.sequence_mask(start_idx, cx_len, dtype=tf.int32), tf.float32) \n # a position is valid for end work if both context mask and start mask are both 1\n\n logits_end, probdist_end = self._pred_end(end_reps, context_mask)\n logits_end = logits_end * start_mask\n probdist_end = probdist_end * start_mask\n return (logits_start, probdist_start, logits_end, probdist_end)", "def build_graph(self, values, values_mask, keys_mask, keys, use_mask=True):\n\n with vs.variable_scope(\"CoAttn\"):\n\n print('value_vec_size is: ', self.value_vec_size)\n print('num_values size is: ', values.shape[1])\n print('num_keys size is: ', keys.shape[1])\n print('value_vec_size is (key):', keys.shape[2])\n # Declare variable \n W = 
tf.get_variable(\"W\", shape = (self.value_vec_size, self.value_vec_size), \\\n initializer = tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", shape = (self.value_vec_size), initializer = tf.constant_initializer(0))\n\n # Compute projected question hidden states\n\n Q = tf.tanh(tf.tensordot(values, W, 1) + tf.reshape(b, [1, 1, self.value_vec_size])) # (batch_size, num_values, value_vec_size)\n\n\n Q = concat_sentinel('question_sentinel', Q, self.value_vec_size) # (batch_size, num_values, value_vec_size)\n # Q = tf.nn.dropout(Q, self.keep_prob)\n print('Q shape is: ', Q.shape)\n # sentinel = tf.get_variable(name='question_sentinel', shape=tf.shape(Q)[2], \\\n # initializer=tf.contrib.layers.xavier_initializer(), dtype = tf.float32)\n # sentinel = tf.tile(sentinel, [tf.shape(original_tensor)[0], 1, 1])\n # concat_tensor = tf.concat([original_tensor, sentinel], 2)\n\n print('Q shape is: ', Q.shape)\n D = keys # (batch_size, num_keys, value_vec_size)\n D = concat_sentinel('document_sentinel', D, self.value_vec_size)\n # D = tf.nn.dropout(D, self.keep_prob)\n\n # key = document, value = question here\n ### End your code here to implement 'Sentinel Vector'\n # Compute affinity matrix L\n L = tf.matmul(D, tf.transpose(Q, perm=[0, 2, 1])) # shape (batch_size, num_keys, num_values)\n\n # Compute Context-to-Question (C2Q) Attention, we obtain C2Q attention outputs\n if use_mask:\n print('tf.shape(values)[0] is: ', tf.shape(values)[0])\n print('tf.ones([tf.shape(values)[0], 1] is ', tf.ones([tf.shape(values)[0], 1], dtype=tf.int32))\n values_mask = tf.expand_dims(tf.concat([values_mask, tf.ones([tf.shape(values)[0], 1], dtype=tf.int32)], axis=1), 1)\n print \"value_mask shape:\", values_mask.shape\n print \"L shape:\", L.shape\n _, A_D = masked_softmax(L, mask=values_mask, dim=2) #(batch_size, num_keys, num_values)\n else:\n A_D = tf.nn.softmax(L, dim=-1)\n\n C2Q_Attn = tf.matmul(A_D, Q) # (batch_size, num_keys, value_vec_size)\n print('C2Q_Attn shapeis ', C2Q_Attn.shape)\n\n # Compute Question-to-Context (Q2C) Attention, we obtain Q2C attention outputs\n if use_mask:\n keys_mask = tf.expand_dims(tf.concat([keys_mask, tf.ones([tf.shape(keys)[0], 1], dtype=tf.int32)], axis=1), 1)\n print \"key_mask shape:\", keys_mask.shape\n print \"L shape:\", L.shape\n _, A_Q = masked_softmax(tf.transpose(L, perm=[0, 2, 1]), mask=keys_mask, dim=-1) # (batch_size, num_values, num_keys)\n else:\n A_Q = tf.nn.softmax(tf.transpose(L, perm=[0, 2, 1]), dim=2)\n\n Q2C_Attn = tf.matmul(A_Q, D) # (batch_size, num_values, key_vec_size)\n print('Q2C_Attn shapeis ', Q2C_Attn.shape)\n\n # Compute second-level attention outputs S\n S = tf.matmul(A_D, Q2C_Attn) # (batch_size, num_keys, value_vec_size)\n print('S size is: ', S.shape)\n\n # Concatenate C2Q_Attn and S:\n C_D = tf.concat([D, C2Q_Attn, S], 2) # (batch_size, num_keys, 3 * value_vec_size)\n # C_D = tf.nn.dropout(C_D, self.keep_prob)\n print('co_context size is: ', C_D.shape)\n\n # co_input = tf.concat([tf.transpose(D, perm = [0, 2, 1]), C_D], 1)\n # print('co_input size is: ', co_input.shape)\n size = int(self.value_vec_size)\n \n if self.device == 'gpu':\n bidirection_rnn = tf.contrib.cudnn_rnn.CudnnLSTM(1, size, 3*size, dropout=0.2, direction=cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION, dtype=tf.float32)\n C_D = tf.transpose(C_D, perm=[1, 0, 2])\n print 'C_D shape', C_D.shape\n input_h = tf.zeros([2, tf.shape(values)[0], size])\n input_c = tf.zeros([2, tf.shape(values)[0], size])\n params = tf.get_variable(\"RNN\", 
shape=(estimate_cudnn_parameter_size(3*self.value_vec_size, size, 2)),\n initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n \n U, _, _ = bidirection_rnn(C_D, input_h, input_c, params)\n#\n print 'U shape:', U.shape\n U = tf.transpose(U, perm=[1, 0, 2])\n\n else:\n (u_fw_out, u_bw_out), _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=DropoutWrapper(rnn_cell.BasicLSTMCell(size),input_keep_prob=self.keep_prob), cell_bw=DropoutWrapper(rnn_cell.BasicLSTMCell(size),input_keep_prob=self.keep_prob), \n inputs=C_D, dtype = tf.float32)\n U = tf.concat([u_fw_out, u_bw_out], 2)\n\n U = tf.nn.dropout(U[:,:-1, :], self.keep_prob)\n # U = tf.nn.dropout(U, self.keep_prob)\n print('U shape is: ', U.shape)\n \n return U,A_D,A_Q", "def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()", "def build_mask_head(cfg, input_shape):\n name = cfg.MODEL.ROI_MASK_HEAD.NAME\n return ROI_MASK_HEAD_REGISTRY.get(name)(cfg, input_shape)", "def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()", "def _build_graph(self):\n self._setup_placeholders()\n self._embed()\n self.p_emb = tf.concat([self.p_emb, tf.expand_dims(self.em, -1)], -1)\n self._encode()\n self._match()\n self._fuse()\n\n with tf.variable_scope('boundary'):\n self._decode()\n with tf.variable_scope('content'):\n self._content()\n with tf.variable_scope('verif'):\n self._verify()\n\n self._compute_loss()", "def build_graph(self, values, values_mask, keys_mask, keys, use_mask=True, sentinel=True):\n\n with vs.variable_scope(\"encoder_initialization\"):\n\n print('value_vec_size is: ', self.value_vec_size)\n print('num_values size is: ', values.shape[1])\n print('num_keys size is: ', keys.shape[1])\n print('value_vec_size is (key):', keys.shape[2])\n # Declare variable \n # Compute projected question hidden states\n W = tf.get_variable(\"W\", shape = (self.value_vec_size, self.value_vec_size), \\\n initializer = tf.contrib.layers.xavier_initializer())\n b = tf.get_variable(\"b\", shape = (values.shape[1], self.value_vec_size), initializer = tf.constant_initializer(0))\n Q = tf.tanh(tf.tensordot(values, W, 1) + tf.expand_dims(b, axis=0)) # (batch_size, num_values, value_vec_size)\n D = keys # (batch_size, num_keys, value_vec_size)\n Q_length = values.shape[1]\n D_length = keys.shape[1]\n if sentinel:\n Q = concat_sentinel('question_sentinel', Q, self.value_vec_size) # (batch_size, num_values, value_vec_size)\n D = concat_sentinel('document_sentinel', D, self.value_vec_size)\n Q_length += 1\n D_length += 1\n\n with vs.variable_scope(\"coattention_layer_1\"):\n S_D_1, S_Q_1, C_D_1 = coattention(\\\n Q, Q_length, D, D_length, values_mask, keys_mask, use_mask)\n\n with vs.variable_scope('encode_summaries_from_coattention_layer_1'):\n\n print('Q Length is: ', Q_length)\n print('D length is: ', D_length)\n\n size = int(self.value_vec_size)\n\n if self.device == 'gpu':\n bidirection_rnn = tf.contrib.cudnn_rnn.CudnnLSTM(1, size, 2*size, direction=cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION, dtype=tf.float32)\n S_Q_1 = tf.transpose(S_Q_1, perm=[1, 0, 2])\n print 'S_Q_1 shape', S_Q_1.shape\n input_h = tf.zeros([2, tf.shape(values)[0], size])\n input_c = tf.zeros([2, 
tf.shape(values)[0], size])\n params = tf.get_variable(\"RNN\", shape=(estimate_cudnn_parameter_size(self.value_vec_size, size, 2)),\n initializer=tf.contrib.layers.xavier_initializer(), dtype=tf.float32)\n E_Q_2 , _, _ = bidirection_rnn(S_Q_1, input_h, input_c, params)\n print 'E_Q_2 shape:', E_Q_2 .shape\n E_Q_2 = tf.transpose(E_Q_2 , perm=[1, 0, 2])\n E_Q_2 = tf.nn.dropout(E_Q_2, self.keep_prob)\n\n else:\n cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n cell = DropoutWrapper(cell, input_keep_prob=self.keep_prob)\n Q_fw_bw_encodings, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell,\n cell_bw = cell,\n dtype = tf.float32,\n inputs = S_Q_1,\n # sequence_length = Q_length\n )\n E_Q_2 = tf.concat(Q_fw_bw_encodings, 2)\n\n\t# add gpu lstm\n D_fw_bw_encodings, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell,\n cell_bw = cell,\n dtype = tf.float32,\n inputs = S_D_1,\n # sequence_length = D_length\n ) \n E_D_2 = tf.concat(D_fw_bw_encodings, 2)\n\n with vs.variable_scope('coattention_layer_2'):\n S_D_2, S_Q_2, C_D_2 = coattention(\\\n E_Q_2, Q_length, E_D_2, D_length, values_mask, keys_mask, use_mask)\n\n with vs.variable_scope('final_encoder'):\n document_representations = tf.concat(\\\n [D, E_D_2, S_D_1, S_D_2, C_D_1, C_D_2], 2)#(N, D, 2H)\n\n size = int(self.value_vec_size)\n\n\t# add gpu lstm\n cell = tf.nn.rnn_cell.BasicLSTMCell(size)\n outputs, _ = tf.nn.bidirectional_dynamic_rnn(\n cell_fw = cell,\n cell_bw = cell,\n dtype = tf.float32,\n inputs = document_representations,\n # sequence_length = D_length,\n )\n encoding = tf.concat(outputs, 2)\n\n encoding = encoding[:, :-1, :]\n return encoding, None,None", "def init_mask_head(self, mask_roi_extractor, mask_head):\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if self.recursive:\n for i in range(self.num_stages):\n self.mask_head[i] = self.mask_head[0]", "def build_graph(self, values, values_mask, keys, keys_mask):\n with vs.variable_scope(\"CrossAttn\"):\n\n # Calculate attention distribution\n values_t = tf.transpose(values, perm=[0, 2, 1]) # (batch_size, value_vec_size, num_values)\n attn_matrix = tf.matmul(keys, values_t) # shape (batch_size, num_keys, num_values)\n\n values_mask_matrix = tf.expand_dims(values_mask, 1) # shape (batch_size, 1, num_values)\n keys_mask_matrix = tf.expand_dims(keys_mask, 2) # shape (batch_size, num_keys, 1)\n\n _, attn_dist_values = masked_softmax(attn_matrix, values_mask_matrix, 2) # shape (batch_size, num_keys, num_values). take softmax over values\n _, attn_dist_keys = masked_softmax(attn_matrix, keys_mask_matrix, 1) # shape (batch_size, num_keys, num_values). 
take softmax over keys\n \n attn_dist_keys = tf.transpose(attn_dist_keys, perm=[0, 2, 1]) # shape (batch_size, num_values, num_keys)\n\n att_vec_for_keys = tf.matmul(attn_dist_values, values) # shape (batch_size, num_keys, value_vec_size)\n att_vec_for_values = tf.matmul(attn_dist_keys, keys) # shape (batch_size, num_values, value_vec_size)\n\n # Apply dropout\n att_vec_for_keys = tf.nn.dropout(att_vec_for_keys, self.keep_prob)\n att_vec_for_values = tf.nn.dropout(att_vec_for_values, self.keep_prob)\n\n return att_vec_for_keys, att_vec_for_values", "def build_graph(self):\n\n\n\n self.inputs.append( #uint8\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='input/lr')) \n\n self.label.append(\n tf.placeholder(tf.float32, shape=[None, None, None, self.channel],\n name='label/hr'))", "def build_graph(self):\n pass", "def _build_graph(self):\n pass", "def _build_graph_general(self): \n\n #Find a canonical coloring scheme\n #Each node has a color that is determined by the non-mapped aspects\n nodecolors=set()\n for nl in self.net.iter_node_layers():\n nodecolors.add(self._slice_node_layer_not_allowed(nl))\n nodecolors_sorted=sorted(list(nodecolors))\n del nodecolors\n self._assert_full_order(nodecolors_sorted)\n self.colormap=dict( ((color,colorid) for colorid,color in enumerate(nodecolors_sorted) ))\n\n #each aux node has a color that is determined by the aspect\n self.auxcolormap=dict( ((auxcolor, auxcolorid+len(self.colormap)) for auxcolorid,auxcolor in enumerate(sorted(self.asp)) ) )\n\n\n #Add the underlying network\n #node-layers:\n for nl in self.net.iter_node_layers():\n nlid=self._get_node_id(nl)\n color=self._slice_node_layer_not_allowed(nl)\n colorid=self.colormap[color]\n self.add_node(nlid,colorid)\n\n #edges between node-layers:\n for nl1 in self.net.iter_node_layers():\n for nl2 in self.net[nl1]:\n nl1id=self._get_node_id(nl1)\n nl2id=self._get_node_id(nl2)\n self.add_link(nl1id,nl2id)\n\n\n #Add the auxiliary nodes and edges\n #add the aux nodes\n for a in self.asp:\n for elayer in self.net.slices[a]:\n auxid=self._get_auxnode_id( (a,elayer) )\n auxcolorid=self.auxcolormap[a]\n self.add_node(auxid,auxcolorid)\n \n #add the aux edges\n for nl in self.net.iter_node_layers():\n for a in self.asp:\n nlid=self._get_node_id(nl)\n auxid=self._get_auxnode_id( (a,nl[a]) )\n self.add_link(nlid,auxid)", "def _build_computation_graph(self):\n raise NotImplementedError", "def build_graph(self):\n\t\tself.n_hidden = 100\n\t\tself.weights_hidden = tf.get_variable(\"weights_hidden\", [self.state_size, self.n_hidden], initializer = tf.random_normal_initializer())\n\t\tself.bias_hidden = tf.get_variable(\"bias_hidden\", [self.n_hidden], initializer = tf.constant_initializer(0.1))\n\n\t\tself.weights_out = tf.get_variable(\"weights_out\", [self.n_hidden, self.action_size], initializer = tf.random_normal_initializer())\n\t\tself.bias_out = tf.get_variable(\"bias_out\", [self.action_size], initializer = tf.constant_initializer(0.1))", "def build_graph(self):\n n_classes = self.n_classes\n\n (self.feed('data')\n .conv(3, 3, 64, 1, 1, name='conv1_1', trainable=False)\n .conv(3, 3, 64, 1, 1, name='conv1_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool1')\n .conv(3, 3, 128, 1, 1, name='conv2_1', trainable=False)\n .conv(3, 3, 128, 1, 1, name='conv2_2', trainable=False)\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool2')\n .conv(3, 3, 256, 1, 1, name='conv3_1')\n .conv(3, 3, 256, 1, 1, name='conv3_2')\n .conv(3, 3, 256, 1, 1, name='conv3_3')\n 
.max_pool(2, 2, 2, 2, padding='VALID', name='pool3')\n .conv(3, 3, 512, 1, 1, name='conv4_1')\n .conv(3, 3, 512, 1, 1, name='conv4_2')\n .conv(3, 3, 512, 1, 1, name='conv4_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool4')\n .conv(3, 3, 512, 1, 1, name='conv5_1')\n .conv(3, 3, 512, 1, 1, name='conv5_2')\n .conv(3, 3, 512, 1, 1, name='conv5_3'))\n\n self.compute_rDeRF() # dummy\n\n # Classification\n (self.feed('conv5_3')\n .max_pool(2, 2, 2, 2, padding='VALID', name='pool6')\n .reshape(shape=(-1, 7, 7, 512), name='pool6_reshape')\n .fc(4096, name='fc6')\n .dropout(0.5, name='drop6')\n .fc(4096, name='fc7')\n .dropout(0.5, name='drop7')\n # .make_time(name='drop7_reduced')\n .fc(n_classes, relu=False, name='cls_score')\n .softmax(name='cls_prob'))\n pass", "def _build_mask(self, xg, yg):\n\n # 1. create mask based on meshes\n points = np.vstack((xg.flatten(), yg.flatten())).T\n\n # 2. extract edge points using el_pos\n edge_points = self.node[np.arange(16)]\n path = Path(edge_points, closed=False)\n mask = path.contains_points(points)\n\n return mask", "def build_graph(self):\n raise NotImplementedError", "def init_mask_head(self, mask_roi_extractor, mask_head):\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = nn.ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(self.num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor", "def build_graph(self, values, values_mask, keys):\n \n \n\n with vs.variable_scope(\"CoAttn\"):\n \n #########################################################################\n # Introduce a non-linear projection layer on top of the question encoding\n # to allow for varation between question and document encoding space\n W_q = tf.get_variable(name=\"W_q\", \\\n shape=[2*self.key_vec_size,2*self.key_vec_size],\\\n initializer=tf.contrib.layers.xavier_initializer(),\\\n dtype=tf.float32)\n\n W_q = tf.expand_dims(tf.ones([tf.shape(values)[0],1]), 1) * W_q\n\n b_q = tf.Variable(tf.constant(0.0,\\\n shape=[2*self.key_vec_size,]),\\\n dtype=tf.float32,\\\n name='b_q')\n \n Q = tf.nn.tanh(tf.matmul(values,W_q)+b_q)\n #########################################################################\n\n #########################################################################\n # Coattention Encoder \n \n # L --> affinity matrix\n l = tf.matmul(keys,tf.transpose(Q,perm=[0,2,1]))\n \n # mask affinity matrix, to not care about padded values\n l_mask = tf.expand_dims(values_mask, 1)\n \n # a_q --> attention weights across document for each word in the question\n _, a_q = masked_softmax(l, l_mask, 2)\n \n # a_d --> attention weights across question for each word in the document\n a_d = tf.nn.softmax(tf.transpose(l,perm=[0, 2, 1]))\n \n # compute attention context of the document in light of each word of the question\n c_q = tf.matmul(tf.transpose(a_q,perm=[0,2,1]),keys)\n \n # parallel computation of q*a_d & c_q*a_q\n c_d = tf.matmul(tf.transpose(a_d,perm=[0,2,1]),tf.concat([Q,c_q],2))\n \n # d_c_d as 
concatinated d;c_d as inpit for the Bi-LSTM\n d_c_d = tf.concat([keys,c_d],axis=2)\n \n d_c_d_length = tf.cast(\\\n tf.reduce_sum(\\\n tf.sign(\\\n tf.reduce_max(\\\n tf.abs(d_c_d), axis=2)\\\n ),\\\n axis=1), \\\n tf.int32)\n \n with tf.variable_scope('coattentionencoder'):\n u_lstm_forward = tf.contrib.rnn.BasicLSTMCell(self.key_vec_size) \n u_lstm_backward = tf.contrib.rnn.BasicLSTMCell(self.key_vec_size)\n big_u,_ = tf.nn.bidirectional_dynamic_rnn(cell_bw=u_lstm_backward,cell_fw=u_lstm_forward,dtype=tf.float32,inputs=d_c_d,time_major=False,sequence_length=d_c_d_length)\n \n # Dropout on the concatinated big U containing forward and backward pass\n output = tf.nn.dropout(tf.concat(big_u,2), self.keep_prob)\n\n\n return output", "def mask_rcnn_head_generator(params):\n head_params = params.mrcnn_head\n return heads.MaskrcnnHead(\n params.architecture.num_classes,\n params.architecture.mask_target_size,\n head_params.num_convs,\n head_params.num_filters,\n head_params.use_separable_conv,\n params.batch_norm_activation.activation,\n head_params.use_batch_norm,\n batch_norm_activation=batch_norm_activation_generator(\n params.batch_norm_activation))", "def build_graph(self, values, values_mask, keys):\n with vs.variable_scope(\"BasicAttn\"):\n\n # Calculate attention distribution\n values_t = tf.transpose(values, perm=[0, 2, 1]) # (batch_size, value_vec_size, num_values)\n attn_logits = tf.matmul(keys, values_t) # shape (batch_size, num_keys, num_values)\n attn_logits_mask = tf.expand_dims(values_mask, 1) # shape (batch_size, 1, num_values)\n _, attn_dist = masked_softmax(attn_logits, attn_logits_mask, 2) # shape (batch_size, num_keys, num_values). take softmax over values\n\n # Use attention distribution to take weighted sum of values\n output = tf.matmul(attn_dist, values) # shape (batch_size, num_keys, value_vec_size)\n\n # Apply dropout\n output = tf.nn.dropout(output, self.keep_prob)\n\n return attn_dist, output", "def subgraph_mask(self, size):\n init_matrix = np.random.randn(size,size)\n Tcs = csgraph.minimum_spanning_tree(init_matrix)\n mask_matrix = Tcs.toarray()\n return mask_matrix", "def __create_graph(self):\n # create the nodes\n for h in range(self.height):\n row: List[JuncNode] = list()\n for w in range(self.width):\n jnodes: List[Node] = [self.add_node() for _ in range(4)]\n jn = JuncNode(jnodes, (h, w))\n row.append(jn)\n self.__juncs.append(row)\n # create all connections\n self.__create_connections()", "def build_graph(self, values, values_mask, keys):\n with vs.variable_scope(\"BasicAttn\"):\n # Calculate attention distribution\n values_t = tf.transpose(values, perm=[0, 2, 1]) # (batch_size, value_vec_size, num_values)\n attn_logits = tf.matmul(keys, values_t) # shape (batch_size, num_keys, num_values)\n attn_logits_mask = tf.expand_dims(values_mask, 1) # shape (batch_size, 1, num_values)\n _, attn_dist = masked_softmax(attn_logits, attn_logits_mask, 2) # shape (batch_size, num_keys, num_values). 
take softmax over values\n # Use attention distribution to take weighted sum of values\n output = tf.matmul(attn_dist, values) # shape (batch_size, num_keys, value_vec_size)\n # Apply dropout\n output = tf.nn.dropout(output, self.keep_prob)\n return attn_dist, output", "def buildGraph(self):\r\n\r\n print 'Building graph...'\r\n\r\n self.buildQ()\r\n self.buildP()\r\n self.buildReconstructionTerm()\r\n self.buildConditionalPriorTerm()\r\n self.buildWPriorTerm()\r\n self.buildZPriorTerm()\r\n\r\n self.buildObjective()\r\n self.buildGrad()", "def preprocess_graph(self):\n image = tf.placeholder(\n tf.float32,\n shape=[self.img_h, self.img_w, self.col_channels])\n patches = self.create_patches(image)\n return {'image': image,\n 'patches': patches}" ]
[ "0.70537764", "0.63258725", "0.63258725", "0.61573416", "0.61179805", "0.6080467", "0.6060265", "0.6056162", "0.60199755", "0.59952897", "0.5957809", "0.5887339", "0.58784956", "0.58744246", "0.5869617", "0.5815184", "0.5795823", "0.5788427", "0.5768547", "0.57574487", "0.5752745", "0.5720762", "0.5718269", "0.5698192", "0.56954277", "0.56824636", "0.5671801", "0.5663084", "0.56372225", "0.5626411" ]
0.64246434
1
Sets model layers as trainable if their names match the given regular expression.
def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=0): # Print message on the first call (but not on recursive calls) if verbose > 0 and keras_model is None: modellib.log("Selecting layers to train") keras_model = keras_model or self.keras_model # In multi-GPU training, we wrap the model. Get layers # of the inner model because they have the weights. layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\ else keras_model.layers for layer in layers: # Is the layer a model? if layer.__class__.__name__ == 'Model': if verbose > 0: print("In model: ", layer.name) self.set_trainable( layer_regex, keras_model=layer, indent=indent + 4) continue if not layer.weights: continue # Is it trainable? trainable = bool(re.fullmatch(layer_regex, layer.name)) # Update layer. If layer is a container, update inner layer. if layer.__class__.__name__ == 'TimeDistributed': layer.layer.trainable = trainable else: layer.trainable = trainable # Print trainble layer names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(\n layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainble layer names\n if trainable and verbose > 0:\n log(\"{}{:20} ({})\".format(\" \" * indent, layer.name,\n layer.__class__.__name__))", "def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. Get layers\n # of the inner model because they have the weights.\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n for layer in layers:\n # Is the layer a model?\n # print(\"layer.__class__.__name__\", layer.__class__.__name__)\n if layer.__class__.__name__ == 'Model':\n print(\"In model: \", layer.name)\n self.set_trainable(layer_regex, keras_model=layer, indent=indent + 4)\n continue\n\n if not layer.weights:\n continue\n # Is it trainable?\n trainable = bool(re.fullmatch(layer_regex, layer.name))\n # Update layer. 
If layer is a container, update inner layer.\n if layer.__class__.__name__ == 'TimeDistributed':\n layer.layer.trainable = trainable\n else:\n layer.trainable = trainable\n # Print trainable layer names\n if trainable and verbose > 0:\n log(f\"{' '*indent}{layer.name:25} ({layer.__class__.__name__})\")", "def set_trainable_layers(trainable, image_model):\n if ((not isinstance(trainable, bool) or not trainable)\n and getattr(image_model, 'layers', None) is not None):\n # enable portion of network depending on the depth\n if not trainable:\n for layer in image_model.layers:\n layer.trainable = False\n else:\n # Set all layers past a certain depth to trainable\n # using a fractional scale\n num_depths = len(image_model.layers_by_depth)\n num_untrainable_depths = np.round((1.0 - trainable) * num_depths)\n should_train = False\n for i, layers in enumerate(image_model.layers):\n if i > num_untrainable_depths:\n should_train = True\n for layer in layers:\n layer.trainable = should_train", "def set_trainable(model, train):\r\n model.trainable = train\r\n for l in model.layers:\r\n l.trainable = train", "def set_trainable(model, toset):\n for layer in model.layers:\n layer.trainable = toset\n model.trainable = toset", "def set_trainability_by_layer_traversal(self, trainable):\n for layer in self._flatten_layers(include_self=False):\n layer.trainable = trainable", "def _set_freeze_layers(self):\n for layer in self.encoder.layers[:self.freeze_layers]:\n layer.trainable = False", "def filter_layers(m, lst):\n for l in m.layers:\n l.active = l.name in lst", "def add_regularization(self, regularizer):\n for layer in self.prenet_layers:\n for weights in layer.trainable_variables:\n if 'bias' not in weights.name:\n # print(\"Added regularizer to {}\".format(weights.name))\n if weights.dtype.base_dtype == tf.float16:\n tf.add_to_collection(\n 'REGULARIZATION_FUNCTIONS', (weights, regularizer)\n )\n else:\n tf.add_to_collection(\n ops.GraphKeys.REGULARIZATION_LOSSES,\n regularizer(weights),\n )", "def set_config_layers_by_name(self, name, **items):\n for layer in self._layers:\n if layer.name.lower().startswith(name.lower()):\n self.set_config_layer(layer.name, **items)", "def reenable_layers(style, layers):\n layer_select = '|'.join([l.replace('\\\\', '\\\\\\\\').replace('|', '\\\\|')\n .replace('.', '\\\\.').replace('+', '\\\\+')\n .replace('*', '\\\\*') for l in layers])\n style = re.sub(\n r'(<Layer[^>]+name=[\"\\'](?:{})[\"\\'][^>]+)status=[\"\\']off[\"\\']'.format(layer_select),\n r'\\1', style, flags=re.DOTALL)\n style = re.sub(\n r'(<Layer[^>]+)status=[\"\\']off[\"\\']([^>]+name=[\"\\'](?:{})[\"\\'])'.format(layer_select),\n r'\\1\\2', style, flags=re.DOTALL)\n return style", "def setFilters(self, regex=None):\n if regex is not None:\n try:\n self.__regex = re.compile(regex)\n except Exception as e:\n return\n\n self.__all_filters = (self.__regex,)\n\n self.__customFilterEnabled = any(self.__all_filters)\n self.invalidateFilter()", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def print_layer_trainable(model_name):\n\n print('trainable : layer name')\n print('- '*30)\n for layer in model_name.layers:\n # if layer.trainable:\n print(\"{0}:\\t{1}\".format(layer.trainable, layer.name))\n \n return", "def _do_layer_adaptation(self, param_name):\n if self.exclude_from_layer_adaptation:\n for r in self.exclude_from_layer_adaptation:\n if re.search(r, param_name) is not None:\n return False\n return True", "def 
_match_layer(self, layer, pattern):\n\n if self.candidate_layers and \\\n layer['config']['name'] not in self.candidate_layers:\n return False\n\n if not self._match_pattern(layer['class_name'], pattern.class_name):\n return False\n\n layer_config = layer['config']\n for key, value in pattern.config.items():\n # Either the provided value should equal the config value, or\n # be a regex match to str(value).\n if not (self._match_pattern(str(layer_config.get(key)), str(value)) or \\\n layer_config.get(key) == value):\n return False\n\n return True", "def enable_fine_tunning(conv_base, config):\n\n conv_base.trainable = True\n\n set_trainable = False\n for layer in conv_base.layers:\n if FINE_TUNING_LAYERS[conv_base.name] in layer.name:\n set_trainable = True\n if set_trainable:\n layer.trainable = True\n print(f\"[INFO] Setting {layer.name} trainable\")\n else:\n layer.trainable = False", "def set_regularization(self, reg_type, reg_val, ffnet_target=0, layer_target=None):\n\n if layer_target is None:\n # set all layers\n for nn in range(self.num_networks):\n layer_target = range(self.networks[nn].num_layers)\n elif not isinstance(layer_target, list):\n layer_target = [layer_target]\n\n # set regularization at the layer level\n for layer in layer_target:\n self.networks[ffnet_target].layers[layer].set_regularization(\n reg_type, reg_val)", "def select_layers(m, enable, disable):\n for l in m.layers:\n if l.name in enable:\n l.active = True\n if l.name in disable:\n l.active = False", "def set_import_filter(self, regex: Union[str, re.Pattern], blacklist: bool = False):\n self._variables['IMPORT_FILTER'] = (\n re.compile(regex) if isinstance(regex, str) else regex,\n bool(blacklist)\n )", "def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):\n num_models = 4 if also_need_pretrained_weights else 8\n valid_models = ['efficientnet_b' + str(i) for i in range(num_models)]\n if model_name.replace('-', '_') not in valid_models:\n raise ValueError('model_name should be one of: ' + ', '.join(valid_models))", "def add_layers(self, layers):\n\n existing_layers = self.layers\n assert len(existing_layers) > 0\n for layer in layers:\n assert layer.get_mlp() is None\n layer.set_mlp(self)\n layer.set_input_space(existing_layers[-1].get_output_space())\n existing_layers.append(layer)\n assert layer.layer_name not in self.layer_names\n self.layer_names.add(layer.layer_name)", "def train_network(self, batch_size, epochs):\n\n if self.eq_train: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights_eq) \n else: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights)", "def set_train_mode(training, mnet, hnet, hhnet, dis):\n for net in [mnet, hnet, hhnet, dis]:\n if net is not None:\n if training:\n net.train()\n else:\n net.eval()", "def freeze_keras_model(model):\n model.trainable = True\n for layer in model.layers[::-1]:\n if \"input_calibration\" not in layer.name:\n layer.trainable = False # freeze this layer", "def add_layer(self, freeze = True, add = True):\n if add:\n self.num_layers += 1\n if self.conv_dim == 1:\n new_cnn = layers.Conv1D(self.n_filters,\n (self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0], self.n_filters),\n padding=\"same\",\n name='cnn_1d_{}'.format(self.num_layers-1),\n kernel_initializer = initializers.get(self.initializer),\n 
bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n elif self.conv_dim == 2:\n new_cnn = layers.Conv2D(self.n_filters,\n (self.n_kernels, self.n_kernels),\n activation='elu',\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n padding=\"same\",\n name='cnn_2d_{}'.format(self.num_layers-1),\n kernel_initializer=initializers.get(self.initializer),\n bias_initializer=initializers.get(\"zeros\"),\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer\n )\n self.list_cnn.append(new_cnn)\n\n if freeze:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = False\n else:\n for index in range(0,self.num_layers-1):\n self.list_cnn[index].trainable = True", "def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False", "def find_trainable_layer(self, layer):\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def unfreeze(self, exclude_range=None):\n # make all layers trainable\n for i, layer in enumerate(self.model.layers):\n layer.trainable = True\n if exclude_range:\n for i, layer in enumerate(self.model.layers[:exclude_range]):\n layer.trainable = False\n self._recompile()\n return" ]
[ "0.7040944", "0.7010938", "0.6323237", "0.63104945", "0.59898615", "0.59447974", "0.5602651", "0.5426624", "0.5401681", "0.538814", "0.5264683", "0.52443", "0.5213373", "0.51864445", "0.51501006", "0.50745326", "0.5071175", "0.50471604", "0.50219536", "0.5000034", "0.4999891", "0.4987712", "0.49632022", "0.49613637", "0.49513474", "0.4928147", "0.48774874", "0.4874955", "0.48672205", "0.48608145" ]
0.71956694
0
Sets the model log directory and epoch counter.
def set_log_dir(self, model_path=None): # Set date and epoch counter as if starting a new model self.epoch = 0 # now = datetime.datetime.now() # # # If we have a model path with date and epochs use them # if model_path: # # Continue from we left of. Get epoch and date from the file name # # A sample model path might look like: # # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 # regex = r".*/[\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})/mask\_rcnn\_[\w-]+(\d{4})\.h5" # m = re.match(regex, model_path) # if m: # now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)), # int(m.group(4)), int(m.group(5))) # # Epoch number in file is 1-based, and in Keras code it's 0-based. # # So, adjust for that then increment by one to start from the next epoch # self.epoch = int(m.group(6)) - 1 + 1 # print('Re-starting from epoch %d' % self.epoch) # Directory for training logs self.log_dir = os.path.join(self.model_dir, "siamese_{}_{}_{}".format(self.config.MODEL.lower(), self.config.NAME.lower(), self.config.EXPERIMENT.lower())) # Create log_dir if not exists if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) # Path to save after each epoch. Include placeholders that get filled by Keras. self.checkpoint_path = os.path.join(self.log_dir, "siamese_mrcnn_*epoch*.h5") self.checkpoint_path = self.checkpoint_path.replace("*epoch*", "{epoch:04d}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_log_dir(self, model_path=None):\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n regex = r\".*/\\w+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/mask\\_rcnn\\_\\w+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n self.epoch = int(m.group(6)) + 1\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"yolo_V3_{}_*epoch*.ckpt\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")", "def set_log_dir(self, model_path=None):\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5\n regex = r\".*/\\w+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})/FCN\\_DenseNet\\_\\w+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n self.epoch = int(m.group(6)) + 1\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, \"{}{:%Y%m%dT%H%M}\".format(\n self.config.NAME.lower(), now))\n\n # Path to save after each epoch. Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, \"FCN_DenseNet_{}_*epoch*.h5\".format(\n self.config.NAME.lower()))\n self.checkpoint_path = self.checkpoint_path.replace(\n \"*epoch*\", \"{epoch:04d}\")", "def set_log_dir(self, model_path=None):\n # Set date and epoch counter as if starting a new model\n self.epoch = 0\n now = datetime.datetime.now()\n\n # If we have a model path with date and epochs use them\n if model_path:\n # Continue from we left of. Get epoch and date from the file name\n # A sample model path might look like:\n\n regex = r\".*[/\\\\][\\w-]+(\\d{4})(\\d{2})(\\d{2})T(\\d{2})(\\d{2})[/\\\\]OOD\\_[\\w-]+(\\d{4})\\.h5\"\n m = re.match(regex, model_path)\n if m:\n now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),\n int(m.group(4)), int(m.group(5)))\n # Epoch number in file is 1-based, and in Keras code it's 0-based.\n # So, adjust for that then increment by one to start from the next epoch\n self.epoch = int(m.group(6)) - 1 + 1\n\n # Directory for training logs\n self.log_dir = os.path.join(self.model_dir, f\"{self.config.NAME.lower()}{now:%Y%m%dT%H%M}\")\n\n # Path to save after each epoch. 
Include placeholders that get filled by Keras.\n self.checkpoint_path = os.path.join(self.log_dir, f\"OOD_{self.config.NAME.lower()}_*epoch*.h5\")\n self.checkpoint_path = self.checkpoint_path.replace(\"*epoch*\", \"{epoch:04d}\")", "def set_model_for_train(self):\n if self.train_time is None:\n self.train_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n\n self.curr_folder = data_functions.create_path(\n self.save_path, self.train_time)\n logger.info(f\"training results will be stored in: {self.curr_folder}\")\n\n self.save_model_params()\n self.train_generator, self.val_generator = \\\n self.clarifruit_train_val_generators()\n keras_logs_path = self.set_model_checkpint()\n\n return keras_logs_path", "def _save_model(self, epoch, batch, logs):\n self.save(self._get_file_path(epoch, batch, logs))", "def before_epoch(self):\n if self.trainer._mode == 'train':\n with open(os.path.join(self.root_path, 'metrics.txt'), 'a+') as fout:\n if hasattr(self.trainer, '_metrics'):\n fout.write(\n str(self.trainer._epoch - 1) + '\\t' +\n str(self.trainer._metrics) + '\\n')", "def start_training(self, logdir: str, **info):\n pass", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def __init__(self, \n log_dir, \n checkpoint_dir):\n self.log_dir = log_dir\n self.checkpoint_dir = checkpoint_dir", "def on_epoch_begin(self, epoch, logs={}):\n self.current_progress = 0\n self.loss = 0\n self.accuracy = 0", "def __init__(self, run_config):\n print('Initializing logs...')\n log_root = run_config['log_root_path']\n self._save_iter = run_config['save_iter']\n self._best_epoch = False\n if run_config['resume_path']:\n # resume an old experiment\n self.log_dir = run_config['resume_path']\n if os.path.exists(os.path.join(log_root, self.log_dir)):\n self.log_path = os.path.join(log_root, self.log_dir)\n print(' Resuming experiment ' + self.log_dir)\n else:\n raise Exception('Experiment folder ' + self.log_dir + ' not found.')\n else:\n # start a new experiment\n if 'log_dir' not in run_config:\n self.log_dir = ''\n else:\n self.log_dir = run_config['log_dir']\n self.log_dir += strftime(\"%b_%d_%Y_%H_%M_%S\") + '/'\n self.log_path = os.path.join(log_root, self.log_dir)\n os.makedirs(self.log_path)\n os.system(\"rsync -au --include '*/' --include '*.py' --exclude '*' . 
\" + self.log_path + \"source\")\n os.makedirs(os.path.join(self.log_path, 'metrics'))\n os.makedirs(os.path.join(self.log_path, 'checkpoints'))\n self.epoch = 1\n print(' Starting experiment ' + self.log_dir)", "def __init__(self, trainer, logger_dir, experiment_name):\n if not os.path.isdir(logger_dir):\n raise IOError(\"{} is not a valid directory\".format(logger_dir))\n\n super().__init__(trainer)\n experiment_dir = os.path.join(logger_dir, experiment_name)\n os.mkdir(experiment_dir)\n self.train_writer = tensorboardX.SummaryWriter(os.path.join(experiment_dir,\n \"train_log\"))\n self.validation_writer = tensorboardX.SummaryWriter(os.path.join(experiment_dir,\n \"validation_log\"))\n\n self.iteration = 0", "def set_log_dir(dir):\r\n LogOptions._LOG_DIR = dir", "def __init__(self):\r\n self.file_object = './ExecutionLogs/PredictFromModel.log'\r\n\r\n \"\"\" Initialize logger class for log writing \"\"\"\r\n self.log_writer = logger.logger(self.file_object)", "def __init__(self, log_dir):\n self.writer = tf.summary.create_file_writer(log_dir)", "def test_01_train(self):\n today = date.today()\n log_file = os.path.join(LOG_DIR, \"{}-train-{}-{}.log\".format(LOG_PREFIX, today.year, today.month))\n if os.path.exists(log_file):\n os.remove(log_file)\n \n ## update the log\n country = 'india'\n date_range = ('2017-11-29', '2019-05-24')\n metric = {'rmse':0.5}\n runtime = \"00:00:01\"\n model_version = 0.1\n model_version_note = \"test model\"\n \n update_train_log(country, date_range, metric, runtime,\n model_version, model_version_note, test=True, prefix=LOG_PREFIX)\n\n self.assertTrue(os.path.exists(log_file))", "def __init__(self, log_dir):\n self.writer = tf.summary.FileWriter(log_dir)\n self.log_dict = {}", "def logdir(self, logdir) -> None:\n self._logdir = logdir\n self._update_logdir()\n for child_metric_real, child_metric_fake in self.children_real_fake:\n child_metric_real.logdir, child_metric_fake.logdir = logdir, logdir", "def _setup_dir(self):\n if not os.path.exists(self._save_dir):\n logger.info(\"save_dir {} does not exist, \"\n \"creating it\".format(self._save_dir))\n os.makedirs(self._save_dir)\n\n # Log the run parameters.\n logger.info(\"Writing logs to {}\".format(self._log_dir))\n\n if not os.path.exists(self._log_dir):\n logger.info(\"log path {} does not exist, \"\n \"creating it\".format(self._log_dir))\n os.makedirs(self._log_dir)", "async def set_log_dir(self, ctx, log_dir):\n self.log_dir = log_dir\n await ctx.send(f\"Successfully set log directory to {log_dir}\")", "def __init__(self, log_dir):\n self.writer = tf.summary.FileWriter(log_dir)", "def before_run(self, runner) -> None:\n if self.out_dir is not None:\n # The final `self.out_dir` is the concatenation of `self.out_dir`\n # and the last level directory of `runner.work_dir`\n basename = osp.basename(runner.work_dir.rstrip(osp.sep))\n self.out_dir = self.file_backend.join_path(self.out_dir, basename)\n runner.logger.info(\n f'Text logs will be saved to {self.out_dir} after the '\n 'training process.')\n\n self.json_log_path = f'{runner.timestamp}.json'", "def on_train_begin(self, logs=None):\n f = open(self.log_file_path, \"a\")\n f.write(f\"{'=' * 5}{self.model_name}({self.hp_log_title}){'=' * 5}\\n\")\n f.close()", "def __init__(self, log_path):\n # create a map for storing LogImg objects\n self.log_img_map = OrderedDict()\n\n # set the path to the log directory\n self.log_path = log_path\n\n # check if log directory already exists or create it\n if not os.path.exists(self.log_path):\n 
os.makedirs(self.log_path)\n\n # set current training step\n self.train_step = 0", "def on_train_end(self, logs=None):\n self.epoch_iter = 0", "def on_epoch_begin(self, epoch, logs={}):\n\n if epoch > 0:\n return\n\n architecture = self.log_dir + '/architecture.yml'\n model = self.get_model(self.model)\n yaml_string = model.to_yaml()\n with open(architecture, 'w') as fp:\n fp.write(yaml_string)", "def __init__(self, trainer, logger_dir=None):\n if logger_dir is not None and not os.path.isdir(logger_dir):\n raise IOError(\"{} is not a valid directory\".format(logger_dir))\n\n super().__init__(trainer)\n\n self.logger = logging.getLogger(\"Trainer\")\n self.logger.setLevel(logging.INFO)\n\n if logger_dir:\n file_handler = logging.FileHandler(os.path.join(logger_dir,\n \"training.log\"))\n self.logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n self.logger.addHandler(console_handler)\n\n self.iteration = 0", "def on_train_begin(self, logs):\n self.train_start = timeit.default_timer()\n self.metrics_names = self.model.metrics_names\n print('Training for {} steps ...'.format(self.params['nb_steps']))", "def set_train_epoch(self, epoch: int):\n if hasattr(self, 'cls_head'):\n self.cls_head.set_train_epoch(epoch)", "def on_train_begin(self, logs={}):\n if \"samples\" in self.params:\n self.sample_amount = self.params[\"samples\"]\n elif \"nb_sample\" in self.params:\n self.sample_amount = self.params[\"nb_sample\"]\n else:\n self.sample_amount = self.params[\"steps\"]\n self.mode = 1\n\n self.epochs = self.params[\"epochs\"]\n self.plyto.update_size(self.sample_amount)\n self.plyto.update_total_steps(self.epochs)" ]
[ "0.8340662", "0.82427096", "0.8216617", "0.69299877", "0.6209155", "0.61991924", "0.61920685", "0.61835784", "0.6142625", "0.6124378", "0.6124006", "0.61115265", "0.6072062", "0.6066884", "0.60577244", "0.60125613", "0.598763", "0.59855914", "0.59802425", "0.59746325", "0.5951319", "0.5950806", "0.59503555", "0.59399855", "0.5901747", "0.58780503", "0.5856116", "0.5840013", "0.58181983", "0.5802455" ]
0.8334618
1
Process the new or edit office forms
def office_edit_process_view(request):
    status = ''
    success = True
    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
    authority_required = {'verified_volunteer'}
    if not voter_has_authority(request, authority_required):
        return redirect_to_sign_in_page(request, authority_required)

    ballotpedia_office_id = request.POST.get('ballotpedia_office_id', False)  # Related to office_held
    ballotpedia_race_id = request.POST.get('ballotpedia_race_id', False)  # Related to contest_office
    ballotpedia_race_office_level = request.POST.get('ballotpedia_race_office_level', False)
    ballotpedia_office_name = request.POST.get('ballotpedia_office_name', False)
    ballotpedia_is_marquee = request.POST.get('ballotpedia_is_marquee', False)
    ctcl_uuid = request.POST.get('ctcl_uuid', False)
    district_id = request.POST.get('district_id', False)
    google_civic_office_name = request.POST.get('google_civic_office_name', False)
    google_civic_office_name2 = request.POST.get('google_civic_office_name2', False)
    google_civic_office_name3 = request.POST.get('google_civic_office_name3', False)
    google_civic_office_name4 = request.POST.get('google_civic_office_name4', False)
    google_civic_office_name5 = request.POST.get('google_civic_office_name5', False)
    google_civic_election_id = request.POST.get('google_civic_election_id', 0)
    ocd_division_id = request.POST.get('ocd_division_id', False)
    office_held_we_vote_id = request.POST.get('office_held_we_vote_id', False)
    office_id = convert_to_int(request.POST.get('office_id', 0))
    office_name = request.POST.get('office_name', False)
    primary_party = request.POST.get('primary_party', False)
    state_code = request.POST.get('state_code', False)
    vote_usa_office_id = request.POST.get('vote_usa_office_id', False)
    is_battleground_race = request.POST.get('is_battleground_race', False)
    remove_duplicate_process = request.POST.get('remove_duplicate_process', False)
    redirect_to_contest_office_list = convert_to_int(request.POST.get('redirect_to_contest_office_list', 0))

    election_state = ''
    if state_code is not False:
        election_state = state_code
    elif google_civic_election_id:
        election_manager = ElectionManager()
        results = election_manager.retrieve_election(google_civic_election_id)
        if results['election_found']:
            election = results['election']
            election_state = election.get_election_state()

    # Check to see if this office is already in the database
    office_on_stage_found = False
    office_on_stage = None
    try:
        office_query = ContestOffice.objects.filter(id=office_id)
        if len(office_query):
            office_on_stage = office_query[0]
            office_on_stage_found = True
    except Exception as e:
        handle_record_not_found_exception(e, logger=logger)
        success = False

    if success:
        try:
            if office_on_stage_found:
                office_on_stage_id = office_on_stage.id
                google_civic_election_id = office_on_stage.google_civic_election_id
            else:
                # Create new
                office_on_stage = ContestOffice(
                    office_name=office_name,
                    google_civic_election_id=google_civic_election_id,
                    state_code=election_state,
                )
                office_on_stage_id = office_on_stage.id
                google_civic_election_id = office_on_stage.google_civic_election_id
                office_on_stage_found = True
            if office_on_stage_found:
                # Update
                # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and
                office_on_stage.ballotpedia_is_marquee = positive_value_exists(ballotpedia_is_marquee)
                if ballotpedia_office_id is not False:
                    office_on_stage.ballotpedia_office_id = convert_to_int(ballotpedia_office_id)
                if ballotpedia_office_name is not False:
                    office_on_stage.ballotpedia_office_name = ballotpedia_office_name
                if ballotpedia_race_id is not False:
                    office_on_stage.ballotpedia_race_id = convert_to_int(ballotpedia_race_id)
                if ballotpedia_race_office_level is not False:
                    office_on_stage.ballotpedia_race_office_level = ballotpedia_race_office_level
                if ctcl_uuid is not False:
                    office_on_stage.ctcl_uuid = ctcl_uuid
                if district_id is not False:
                    office_on_stage.district_id = district_id
                if positive_value_exists(election_state):
                    office_on_stage.state_code = election_state
                if google_civic_office_name is not False:
                    office_on_stage.google_civic_office_name = google_civic_office_name
                if google_civic_office_name2 is not False:
                    office_on_stage.google_civic_office_name2 = google_civic_office_name2
                if google_civic_office_name3 is not False:
                    office_on_stage.google_civic_office_name3 = google_civic_office_name3
                if google_civic_office_name4 is not False:
                    office_on_stage.google_civic_office_name4 = google_civic_office_name4
                if google_civic_office_name5 is not False:
                    office_on_stage.google_civic_office_name5 = google_civic_office_name5
                # Save office is_battleground_race for this year, and then prepare to update all related objects
                office_on_stage.is_battleground_race = positive_value_exists(is_battleground_race)
                election_day_text = office_on_stage.get_election_day_text()
                year = 0
                years_false_list = []
                years_true_list = []
                if positive_value_exists(election_day_text):
                    date_as_integer = convert_we_vote_date_string_to_date_as_integer(election_day_text)
                    year = date_as_integer // 10000
                if positive_value_exists(year):
                    if positive_value_exists(is_battleground_race):
                        years_false_list = []
                        years_true_list = [year]
                    else:
                        years_false_list = [year]
                        years_true_list = []
                years_list = list(set(years_false_list + years_true_list))
                if ocd_division_id is not False:
                    office_on_stage.ocd_division_id = ocd_division_id
                if office_held_we_vote_id is not False:
                    office_on_stage.office_held_we_vote_id = office_held_we_vote_id
                    from office_held.models import OfficeHeldManager
                    office_held_manager = OfficeHeldManager()
                    office_held_results = office_held_manager.retrieve_office_held(
                        office_held_we_vote_id=office_held_we_vote_id, read_only=True)
                    if office_held_results['office_held_found']:
                        office_held = office_held_results['office_held']
                        office_on_stage.office_held_name = office_held.office_held_name
                if office_name is not False:
                    office_on_stage.office_name = office_name
                if primary_party is not False:
                    office_on_stage.primary_party = primary_party
                if vote_usa_office_id is not False:
                    office_on_stage.vote_usa_office_id = vote_usa_office_id
                office_on_stage.save()
                office_on_stage_id = office_on_stage.id
                office_on_stage_we_vote_id = office_on_stage.we_vote_id
                messages.add_message(request, messages.INFO, 'Office updated.')
                # ##################################
                # Update "is_battleground_race" for candidates under this office through the link CandidateToOfficeLink
                # We can't automatically update all of these candidates with the office's setting,
                # because we may be saving a primary election office which isn't a battleground race,
                # and the candidate may have made it through to the general election which
                # *is* a battleground.
                # from candidate.controllers import update_candidates_with_is_battleground_race
                # results = update_candidates_with_is_battleground_race(office_we_vote_id=office_on_stage.we_vote_id)
                if positive_value_exists(office_on_stage_we_vote_id) and len(years_list) > 0:
                    from politician.controllers import update_parallel_fields_with_years_in_related_objects
                    results = update_parallel_fields_with_years_in_related_objects(
                        field_key_root='is_battleground_race_',
                        master_we_vote_id_updated=office_on_stage_we_vote_id,
                        years_false_list=years_false_list,
                        years_true_list=years_true_list,
                    )
                    if not results['success']:
                        status += results['status']
                        status += "FAILED_TO_UPDATE_PARALLEL_FIELDS_FROM_OFFICE "
                        messages.add_message(request, messages.ERROR, status)

                return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +
                                            "?google_civic_election_id=" + str(google_civic_election_id) +
                                            "&state_code=" + str(state_code))
        except Exception as e:
            handle_record_not_saved_exception(e, logger=logger)
            messages.add_message(request, messages.ERROR, 'Could not save office (create new): ' + str(e))
    else:
        messages.add_message(request, messages.ERROR, 'Could not save office, success = False from above: ' + status)

    if redirect_to_contest_office_list:
        return HttpResponseRedirect(reverse('office:office_list', args=()) +
                                    '?google_civic_election_id=' + str(google_civic_election_id) +
                                    '&state_code=' + str(state_code))

    if remove_duplicate_process:
        return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
                                    "?google_civic_election_id=" + str(google_civic_election_id) +
                                    "&state_code=" + str(state_code))
    else:
        return HttpResponseRedirect(reverse('office:office_edit', args=(office_id,)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def office_edit_process_view(request):\n authority_required = {'verified_volunteer'} # admin, verified_volunteer\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n office_id = convert_to_int(request.POST.get('office_id', 0))\n office_name = request.POST.get('office_name', False)\n google_civic_office_name = request.POST.get('google_civic_office_name', False)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n primary_party = request.POST.get('primary_party', False)\n state_code = request.POST.get('state_code', False)\n\n election_state = ''\n if state_code is not False:\n election_state = state_code\n elif google_civic_election_id:\n election_manager = ElectionManager()\n results = election_manager.retrieve_election(google_civic_election_id)\n if results['election_found']:\n election = results['election']\n election_state = election.get_election_state()\n\n # Check to see if this office is already in the database\n office_on_stage_found = False\n try:\n office_query = ContestOffice.objects.filter(id=office_id)\n if len(office_query):\n office_on_stage = office_query[0]\n office_on_stage_found = True\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n\n try:\n if office_on_stage_found:\n # Update\n # Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if office_name is not False:\n office_on_stage.office_name = office_name\n if google_civic_office_name is not False:\n office_on_stage.google_civic_office_name = google_civic_office_name\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n if positive_value_exists(election_state):\n office_on_stage.state_code = election_state\n office_on_stage.save()\n office_on_stage_id = office_on_stage.id\n messages.add_message(request, messages.INFO, 'Office updated.')\n google_civic_election_id = office_on_stage.google_civic_election_id\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n else:\n # Create new\n office_on_stage = ContestOffice(\n office_name=office_name,\n google_civic_election_id=google_civic_election_id,\n state_code=election_state,\n )\n # Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and\n if primary_party is not False:\n office_on_stage.primary_party = primary_party\n office_on_stage.save()\n 
messages.add_message(request, messages.INFO, 'New office saved.')\n\n # Come back to the \"Create New Office\" page\n return HttpResponseRedirect(reverse('office:office_new', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id))\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, 'Could not save office.')\n\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n \"?google_civic_election_id=\" + google_civic_election_id)", "def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))", "def office_update(request, slug, id):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n office_reference = get_object_or_404(Office, id=id,company=company)\n office_form = OfficeForm(instance=office_reference)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents info, \n if request.method == 'GET':\n return render_to_response('office_form.html',{'form':office_form, 'info': office_reference},context_instance=RequestContext(request))\n else:\n office_form = OfficeForm(request.POST, instance=office_reference)\n #if is POST Validates the form is well filled and save it redirecting to the company page \n if office_form.is_valid():\n office_form.save(commit = False)\n\n return HttpResponseRedirect('/company/'+str(slug))\n #if not well filled redirect to the original update page and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'info': office_reference},\n context_instance=RequestContext(request))", "def show_employee_edit_form(self, staff_ob, number):\n\n print(self.LENGTH_STAR * \"*\")\n print(f\"EDIT {staff_ob.role.upper()}\\n\")\n\n if number == 1:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s address\\nThe current address is: {staff_ob.address}\")\n new_address = self.get_address()\n while new_address == False:\n new_address = self.get_address()\n self.check_action_edit_form(staff_ob, number, new_address)\n\n elif number == 2:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s mobile number\\nThe current mobile number is: {staff_ob.mobile_number}\")\n new_mobile_number = self.get_mobile_number()\n while new_mobile_number == False:\n new_mobile_number = self.get_mobile_number\n self.check_action_edit_form(staff_ob, number, new_mobile_number)\n \n elif number == 3:\n print(self.LENGTH_STAR * \"*\")\n print(f\"You are changing {staff_ob.name}´s email\\nThe current the email is: {staff_ob.email}\")\n new_email = self.get_email()\n while new_email == False:\n new_email = self.get_email()\n self.check_action_edit_form(staff_ob, number, new_email)\n \n print(f\"\\n{staff_ob.name}'s information successfully changed!\\n\")\n \n return", "def post_save(self, request, instance, instance_data, created): # NOQA: C901\n # import wdb; wdb.set_trace()\n if \"appendix_table\" in instance_data:\n for data in instance_data[\"appendix_table\"]:\n self._save_sub_form(\n Appendix, 
AppendixForm, data, visit_report_instance=instance\n )\n\n if \"faces\" in instance_data:\n for data in instance_data[\"faces\"]:\n self._save_sub_form(\n Face, FaceForm, data, visit_report_instance=instance\n )\n\n if \"steps\" in instance_data:\n for data in instance_data[\"steps\"]:\n self._save_sub_form(\n Step, StepForm, data, visit_report_instance=instance\n )\n\n if \"scenarios\" in instance_data:\n for data in instance_data[\"scenarios\"]:\n sub_instance = self._save_sub_form(\n Scenario, ScenarioForm, data, visit_report_instance=instance\n )\n\n if \"financial_aids\" in data:\n for sub_data in data[\"financial_aids\"]:\n self._save_sub_form(\n FinancialAid,\n FinancialAidForm,\n sub_data,\n scenario_instance=sub_instance,\n )\n\n if \"financings\" in data:\n for sub_data in data[\"financings\"]:\n self._save_sub_form(\n Financing,\n FinancingForm,\n sub_data,\n scenario_instance=sub_instance,\n )\n\n if \"scenario_summaries\" in data:\n for sub_data in data[\"scenario_summaries\"]:\n self._save_sub_form(\n ScenarioSummary,\n ScenarioSummaryForm,\n sub_data,\n scenario_instance=sub_instance,\n )\n\n if \"systems\" in instance_data:\n for data in instance_data[\"systems\"]:\n self._save_sub_form(\n System, SystemForm, data, visit_report_instance=instance\n )\n\n if \"work_recommendations\" in instance_data:\n for data in instance_data[\"work_recommendations\"]:\n self._save_sub_form(\n WorkRecommendation,\n WorkRecommendationForm,\n data,\n visit_report_instance=instance,\n )\n\n return", "def process_show_form(self, request, step, form):\n pass", "def save(self, *args, **kwargs):\n step_numeral, step_name = kwargs.pop('step', (None, None))\n\n if step_numeral == 1:\n \"\"\"\n Basic Form: Application & File Uploader\n \"\"\"\n return self.cleaned_data\n if step_numeral == 2:\n \"\"\"\n Basic Form + Mapping Fields\n \"\"\"\n return self.cleaned_data\n\n if step_numeral == 3:\n pass # end-user is previewing", "def edit_document():", "def check_action_edit_form(self, staff_ob, number, new_info):\n \n print(\"\\nS Save \\nB Back\\n\")\n action_str = self.choose_action([\"s\",\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"s\", \"b\"])\n\n if action_str == \"s\":\n if number == 1:\n if staff_ob.role == self.PILOT.capitalize():\n updated_staff_ob = PilotsModel(staff_ob.ssn, staff_ob.name, staff_ob.role, staff_ob.rank, staff_ob.license_type, new_info, staff_ob.mobile_number, staff_ob.email)\n else:\n updated_staff_ob = CabinCrewModel(staff_ob.ssn, staff_ob.name, staff_ob.role, staff_ob.rank, new_info, staff_ob.mobile_number, staff_ob.email)\n elif number == 2:\n if staff_ob.role == self.PILOT.capitalize():\n updated_staff_ob = PilotsModel(staff_ob.ssn, staff_ob.name, staff_ob.role, staff_ob.rank, staff_ob.license_type, staff_ob.address, new_info, staff_ob.email)\n else:\n updated_staff_ob = CabinCrewModel(staff_ob.ssn, staff_ob.name, staff_ob.role, staff_ob.rank, staff_ob.address, new_info, staff_ob.email)\n elif number == 3:\n if staff_ob.role == self.PILOT.capitalize():\n updated_staff_ob = PilotsModel(staff_ob.ssn, staff_ob.name, staff_ob.role, staff_ob.rank, staff_ob.license_type, staff_ob.address, staff_ob.mobile_number, new_info)\n else:\n updated_staff_ob = CabinCrewModel(staff_ob.ssn, staff_ob.name, staff_ob.role, staff_ob.rank, staff_ob.address, staff_ob.mobile_number, new_info)\n \n if updated_staff_ob.role == self.PILOT.capitalize():\n self.llapi.update_new_pilot_information(updated_staff_ob)\n else:\n 
self.llapi.update_new_crew_member_information(updated_staff_ob)\n\n elif action_str == \"b\":\n return", "def post(self, request, *args, **kwargs):\n self.object = OTML.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(self.request.POST, instance=self.object)\n ot_linea_form = OT_LineaFormSet(self.request.POST, instance=self.object)\n if (form.is_valid() and factura_form.is_valid()\n and ot_linea_form.is_valid()):\n return self.form_valid(form, factura_form, ot_linea_form)\n else:\n return self.form_invalid(form, factura_form, ot_linea_form)", "def office_merge_process_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n contest_office_manager = ContestOfficeManager()\n\n is_post = True if request.method == 'POST' else False\n\n if is_post:\n # merge = request.POST.get('merge', False)\n skip = request.POST.get('skip', False)\n # Contest office 1 is the one we keep, and Contest office 2 is the one we will merge into Contest office 1\n contest_office1_we_vote_id = request.POST.get('contest_office1_we_vote_id', 0)\n contest_office2_we_vote_id = request.POST.get('contest_office2_we_vote_id', 0)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n redirect_to_contest_office_list = positive_value_exists(request.POST.get('redirect_to_contest_office_list', False))\n remove_duplicate_process = positive_value_exists(request.POST.get('remove_duplicate_process', False))\n state_code = request.POST.get('state_code', '')\n else:\n # merge = request.GET.get('merge', False)\n skip = request.GET.get('skip', False)\n # Contest office 1 is the one we keep, and Contest office 2 is the one we will merge into Contest office 1\n contest_office1_we_vote_id = request.GET.get('contest_office1_we_vote_id', 0)\n contest_office2_we_vote_id = request.GET.get('contest_office2_we_vote_id', 0)\n google_civic_election_id = request.GET.get('google_civic_election_id', 0)\n redirect_to_contest_office_list = positive_value_exists(request.GET.get('redirect_to_contest_office_list', False))\n remove_duplicate_process = positive_value_exists(request.GET.get('remove_duplicate_process', False))\n state_code = request.GET.get('state_code', '')\n\n if positive_value_exists(skip):\n results = contest_office_manager.update_or_create_contest_offices_are_not_duplicates(\n contest_office1_we_vote_id, contest_office2_we_vote_id)\n if not results['new_contest_offices_are_not_duplicates_created']:\n messages.add_message(request, messages.ERROR, 'Could not save contest_offices_are_not_duplicates entry: ' +\n results['status'])\n messages.add_message(request, messages.INFO, 'Prior contest offices skipped, and not merged.')\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n contest_office1_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office1_we_vote_id)\n if contest_office1_results['contest_office_found']:\n contest_office1_on_stage = contest_office1_results['contest_office']\n contest_office1_id = contest_office1_on_stage.id\n else:\n messages.add_message(request, messages.ERROR, 'Could not retrieve 
office 1.')\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n contest_office2_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office2_we_vote_id)\n if contest_office2_results['contest_office_found']:\n contest_office2_on_stage = contest_office2_results['contest_office']\n contest_office2_id = contest_office2_on_stage.id\n else:\n messages.add_message(request, messages.ERROR, 'Could not retrieve contest office 2.')\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n # # TODO: Migrate bookmarks\n # bookmark_item_list_manager = BookmarkItemList()\n # bookmark_results = bookmark_item_list_manager.retrieve_bookmark_item_list_for_contest_office(\n # contest_office2_we_vote_id)\n # if bookmark_results['bookmark_item_list_found']:\n # messages.add_message(request, messages.ERROR, \"Bookmarks found for Contest Office 2 - \"\n # \"automatic merge not working yet.\")\n # return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n # \"?google_civic_election_id=\" + str(google_civic_election_id) +\n # \"&state_code=\" + str(state_code))\n\n # Merge attribute values\n conflict_values = figure_out_office_conflict_values(contest_office1_on_stage, contest_office2_on_stage)\n\n for attribute in CONTEST_OFFICE_UNIQUE_IDENTIFIERS:\n conflict_value = conflict_values.get(attribute, None)\n if conflict_value == \"CONFLICT\":\n if is_post:\n choice = request.POST.get(attribute + '_choice', '')\n else:\n choice = request.GET.get(attribute + '_choice', '')\n if contest_office2_we_vote_id == choice:\n setattr(contest_office1_on_stage, attribute, getattr(contest_office2_on_stage, attribute))\n elif conflict_value == \"CONTEST_OFFICE2\":\n setattr(contest_office1_on_stage, attribute, getattr(contest_office2_on_stage, attribute))\n else:\n pass\n\n # Preserve unique google_civic_office_name, _name2, _name3, _name4, and _name5\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name2):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name2)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name3):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name3)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name4):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name4)\n if positive_value_exists(contest_office2_on_stage.google_civic_office_name5):\n contest_office1_on_stage = add_contest_office_name_to_next_spot(\n contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name5)\n\n # TODO: Merge quick_info's office details in future\n\n # Merge ballot item's office details\n ballot_items_results = move_ballot_items_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id,\n 
contest_office1_on_stage)\n if not ballot_items_results['success']:\n messages.add_message(request, messages.ERROR, ballot_items_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # Merge public positions - DALE 2020-06-04 I think we will want to alter this soon\n public_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id,\n True)\n if not public_positions_results['success']:\n messages.add_message(request, messages.ERROR, public_positions_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # Merge friends-only positions - DALE 2020-06-04 I think we will want to alter this soon\n friends_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id,\n False)\n if not friends_positions_results['success']:\n messages.add_message(request, messages.ERROR, friends_positions_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # TODO: Migrate images?\n\n # Finally, move candidates last\n candidates_results = move_candidates_to_another_office(contest_office2_id, contest_office2_we_vote_id,\n contest_office1_id, contest_office1_we_vote_id)\n if not candidates_results['success']:\n messages.add_message(request, messages.ERROR, candidates_results['status'])\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n # Save contest_office2_on_stage to remove maplight_id, which much be unique,\n # before we try to save contest_office1_on_stage below\n if positive_value_exists(contest_office2_on_stage.maplight_id):\n contest_office2_on_stage.maplight_id = None\n contest_office2_on_stage.save()\n\n # Note: wait to wrap in try/except block\n contest_office1_on_stage.save()\n # There isn't any office data to refresh from other master tables\n\n # Remove contest office 2\n contest_office2_on_stage.delete()\n\n if redirect_to_contest_office_list:\n return HttpResponseRedirect(reverse('office:office_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n if remove_duplicate_process:\n return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n return HttpResponseRedirect(reverse('office:office_summary', args=(contest_office1_on_stage.id,)))", "def form_valid(self, form, contacto_linea_form, direccion_linea_form):\n self.object = form.save()\n contacto_linea_form.instance = self.object\n contacto_linea_form.save()\n direccion_linea_form.instance = self.object\n direccion_linea_form.save()\n\n if self.request.POST.get('_popup', 0):\n nombre = self.object.nombre.upper()\n id = self.object.id\n return HttpResponse(\n '<script type=\"text/javascript\">opener.dismissAddAnotherPopup( window, 
\\'%s\\', \\'%s\\' );</script>'\n % (id, nombre))\n else:\n return HttpResponseRedirect(self.get_success_url())", "def process_all():\n\tconfilepath = check_args()\n\tif confilepath != \"\": #check arguments and sets some global variables \n\t\tconfig = read_conf(confilepath) #read config-file\n\t\tinst = get_hgf_institute(config) #check which hgf-institute\n\t\tbuild_or_remove_fielddesc(config) #create/delete fielddescriptors (fields + marctags)\n\t\tinsert_repnr_fielddesc(inst) #report number as hidden input in submit \n\t\tbuild_or_remove_doctypes(config,inst) #create/delete doctypes\n\t\tbuild_or_remove_schema(config) #create/delete collections for submit form\n\t\tgenerate_css(fieldlabels,inst) #create css_file \n\telse: pass", "def show_create_form(self, staff_str):\n\n print(self.LENGTH_STAR * \"*\")\n print(f\"CREATE A NEW {staff_str.upper()}\\n\")\n \n print(\"B Back\\nC Continue\\n\")\n action_str = self.choose_action([\"b\", \"c\"])\n while action_str == False:\n action_str = self.choose_action([\"b\", \"c\"])\n if action_str == \"b\":\n return\n elif action_str == \"c\":\n print(f\"\\nCreating a new {staff_str}\\n\")\n name = self.get_name()\n while name == False:\n name = self.get_name()\n ssn = self.get_ssn()\n while ssn == False:\n ssn = self.get_ssn()\n address = self.get_address()\n while address == False:\n address = self.get_address()\n mobile_number = self.get_mobile_number()\n while mobile_number == False:\n mobile_number = self.get_mobile_number()\n email = self.get_email()\n while email == False:\n email = self.get_email()\n \n if staff_str == self.PILOT:\n rank = self.get_pilot_rank()\n while rank == False:\n rank = self.get_pilot_rank()\n license_type = self.get_license_type()\n while license_type == False:\n license_type = self.get_license_type()\n\n else:\n rank = self.get_cabin_crew_rank()\n while rank == False:\n rank = self.get_cabin_crew_rank()\n\n print(\"\\nS Save \\nB Back\\n\")\n\n action_str = self.choose_action([\"s\",\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"s\", \"b\"])\n\n if action_str == \"s\":\n if staff_str == self.PILOT:\n new_staff_object = PilotsModel(ssn, name, \"Pilot\", rank, license_type, address, mobile_number, email)\n self.llapi.create_new_pilot(new_staff_object)\n\n elif staff_str == self.CREW:\n new_staff_object = CabinCrewModel(ssn, name, \"Cabin crew\", rank, address, mobile_number, email)\n self.llapi.create_new_cabin_crew(new_staff_object)\n\n print(f\"{staff_str.capitalize()} {new_staff_object.name} successfully created!\\n\")\n return\n \n elif action_str == \"b\":\n return", "def make_form(self):", "def document_new():\n\n t = request.form['type']\n if t == 'book':\n doc = Book(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n edition=request.form['edition'],\n publisher=request.form['publisher'],\n publishment_year=request.form['publishment_year'],\n bestseller='bestseller' in request.form,\n reference='reference' in request.form\n )\n elif t == 'av':\n doc = AVMaterial(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors'])\n )\n elif t == 'article':\n doc = JournalArticle(\n title=request.form['title'],\n price=request.form['price'],\n keywords=comma_to_list(request.form['keywords']),\n authors=comma_to_list(request.form['authors']),\n 
issue_editor=request.form['issue_editor'],\n issue_publication_date=request.form['issue_publication_date'],\n journal=request.form['journal']\n )\n\n for i in range(int(request.form['copies'])):\n dc = DocumentCopy(document=doc)\n\n db.session.add(doc)\n db.session.commit()\n\n log(session['login'], 'created', 'document {}'.format(doc.id))\n\n # TODO\n return redirect('/admin/documents')", "def post(self, request, *args, **kwargs):\r\n\t\tself.object = None\r\n\t\tform_class = self.get_form_class()\r\n\t\tform_class.user = request.user\r\n\t\tform = self.get_form(form_class)\r\n\t\tcruiseday_form = CruiseDayFormSet(self.request.POST)\r\n\t\tparticipant_form = ParticipantFormSet(self.request.POST)\r\n\t\tdocument_form = DocumentFormSet(self.request.POST, self.request.FILES)\r\n\t\tequipment_form = EquipmentFormSet(self.request.POST)\r\n\t\tinvoice_form = InvoiceFormSet(self.request.POST)\r\n\r\n\t\tif not self.request.user.userdata.email_confirmed and self.request.user.userdata.role == \"\":\r\n\t\t\tmessages.add_message(self.request, messages.WARNING, mark_safe(\"You have not yet confirmed your email address. Your account will not be eligible for approval or submitting cruises before this is done. If you typed the wrong email address while signing up, correct it in your profile and we'll send you a new one. You may have to add [email protected] to your contact list if our messages go to spam.\"+\"<br><br><a class='btn btn-primary' href='\"+reverse('resend-activation-mail')+\"'>Resend activation email</a>\"))\r\n\t\telif self.request.user.userdata.email_confirmed and self.request.user.userdata.role == \"\":\r\n\t\t\tmessages.add_message(self.request, messages.WARNING, \"Your user account has not been approved by an administrator yet. You may save cruise drafts and edit them, but you may not submit cruises for approval before your account is approved.\")\r\n\r\n\t\t# check if all our forms are valid, handle outcome\r\n\t\tif (form.is_valid() and cruiseday_form.is_valid() and participant_form.is_valid() and document_form.is_valid() and equipment_form.is_valid() and invoice_form.is_valid()):\r\n\t\t\treturn self.form_valid(form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form)\r\n\t\telse:\r\n\t\t\treturn self.form_invalid(form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form)", "def render_form():", "def after_successful_edit(self):\n pass", "def change_forms(self, *args, **keywords):\n change_to = \"MAIN\"\n\n # Tell the VentApp object to change forms.\n self.parentApp.change_form(change_to)", "def forms(self):\n edit = EquipmentChownForm\n return {\n 'edit': edit,\n }", "def process_existing_election_information_from_webform(request, context):\n election_dict = transform_webform_to_json(parser.parse(request.POST.urlencode()))\n if not verify_that_all_relevant_election_webform_keys_exist(election_dict, new_election=False):\n error_message = f\"Did not find all of the following necessary keys in input: \" \\\n f\"{ELECTION_JSON_KEY__DATE}, {ELECTION_JSON_WEBFORM_KEY__TIME}, \" \\\n f\"{ELECTION_JSON_KEY__ELECTION_TYPE}, \" \\\n f\"{ELECTION_JSON_KEY__WEBSURVEY}, {ELECTION_ID}, {ELECTION_JSON_KEY__NOMINEES}\"\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()]\"\n f\" {error_message}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 
'elections/update_election/update_election__webform.html', context)\n\n if not validate_user_command(request, create_new_election=False):\n error_message = \"Unable to understand user command\"\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()] \"\n f\"{error_message, election_dict}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 'elections/update_election/update_election__webform.html', context)\n\n election = get_existing_election_by_id(election_dict[ELECTION_ID])\n if election is None:\n error_message = f\"The Selected election for date {election_dict[ELECTION_JSON_KEY__DATE]} \" \\\n f\"does not exist\"\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()]\"\n f\" {error_message}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 'elections/update_election/update_election__webform.html', context)\n\n success, error_message = validate_election_type(election_dict[ELECTION_JSON_KEY__ELECTION_TYPE])\n if not success:\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()]\"\n f\" {error_message}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 'elections/update_election/update_election__webform.html', context)\n\n success, error_message = validate_http_link(election_dict[ELECTION_JSON_KEY__WEBSURVEY], \"websurvey\")\n if not success:\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()] \"\n f\"{error_message}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 'elections/update_election/update_election__webform.html', context)\n\n success, error_message = validate_webform_election_date_and_time(\n election_dict[ELECTION_JSON_KEY__DATE], election_dict[ELECTION_JSON_WEBFORM_KEY__TIME]\n )\n if not success:\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()]\"\n f\" {error_message}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 'elections/update_election/update_election__webform.html', context)\n success, error_message = validate_nominees_for_existing_election_jformat(\n election.id, election_dict[ELECTION_JSON_KEY__NOMINEES]\n )\n if not success:\n logger.info(\n f\"[elections/process_existing_election_webform.py process_existing_election_information_from_webform()]\"\n f\" {error_message}\"\n )\n context.update(create_webform_election_context_from_user_inputted_election_dict(error_message, election_dict))\n return render(request, 'elections/update_election/update_election__webform.html', context)\n\n update_existing_election_obj_from_jformat(\n election, f\"{election_dict[ELECTION_JSON_KEY__DATE]} {election_dict[ELECTION_JSON_WEBFORM_KEY__TIME]}\",\n election_dict[ELECTION_JSON_KEY__ELECTION_TYPE], election_dict[ELECTION_JSON_KEY__WEBSURVEY]\n )\n save_new_or_update_existing_nominees_jformat(election, election_dict)\n if request.POST[UPDATE_EXISTING_ELECTION__NAME] == 
SAVE_ELECTION__VALUE:\n if ELECTION_ID in request.session:\n del request.session[ELECTION_ID]\n return HttpResponseRedirect(f'{settings.URL_ROOT}elections/{election.slug}/')\n else:\n request.session[ELECTION_ID] = election.id\n return HttpResponseRedirect(f'{settings.URL_ROOT}elections/{ENDPOINT_MODIFY_VIA_WEBFORM}')", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def _create_form(date, place, userid, invited, instance=None):\n print \"invited:{0}\".format(invited)\n matchdict = { 'place': place , 'date': date, 'creator': userid,\n 'invited': invited}\n return", "def post(self, request, *args, **kwargs):\r\n\t\tself.object = get_object_or_404(Cruise, pk=self.kwargs.get('pk'))\r\n\t\tif not self.object.is_editable_by(request.user):\r\n\t\t\traise PermissionDenied\r\n\t\tform_class = self.get_form_class()\r\n\t\tform_class.user = request.user\r\n\t\tform = self.get_form(form_class)\r\n\t\tcruiseday_form = CruiseDayFormSet(self.request.POST, instance=self.object)\r\n\t\tparticipant_form = ParticipantFormSet(self.request.POST, instance=self.object)\r\n\t\tdocument_form = DocumentFormSet(data=request.POST, files=request.FILES, instance=self.object)\r\n\t\tequipment_form = EquipmentFormSet(self.request.POST, instance=self.object)\r\n\t\tinvoice_form = InvoiceFormSet(self.request.POST, instance=self.object)\r\n\r\n\t\t# check if all our forms are valid, handle outcome\r\n\t\tif (form.is_valid() and cruiseday_form.is_valid() and participant_form.is_valid() and document_form.is_valid() and equipment_form.is_valid() and invoice_form.is_valid()):\r\n\t\t\treturn self.form_valid(form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form)\r\n\t\telse:\r\n\t\t\treturn self.form_invalid(form, cruiseday_form, participant_form, document_form, equipment_form, invoice_form)", "def post(self, request, *args, **kwargs):\n self.object = OT.objects.get(pk=kwargs['pk'])\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n factura_form = Factura_LineaFormSet(self.request.POST, instance=self.object)\n remito_form = Remito_LineaFormSet(self.request.POST, instance=self.object)\n ot_linea_form = OT_LineaFormSet(self.request.POST, instance=self.object)\n if (form.is_valid() and factura_form.is_valid()\n and ot_linea_form.is_valid() and remito_form.is_valid()):\n return self.form_valid(form, factura_form, remito_form, ot_linea_form)\n else:\n return self.form_invalid(form, factura_form, remito_form, 
ot_linea_form)", "def get_forms(id, model_class, form_class):\n current_investment_object = get_current_element(model_class, id)\n empty_form = form_class()\n modify_form = form_class(instance=current_investment_object)\n\n return modify_form, empty_form", "def fl_do_forms():\n _fl_do_forms = library.cfuncproto(\n library.load_so_libforms(), \"fl_do_forms\", \\\n cty.POINTER(xfdata.FL_OBJECT), [], \\\n \"\"\"FL_OBJECT * fl_do_forms() \"\"\")\n library.check_if_flinitialized()\n retval = _fl_do_forms()\n return retval", "def add_document():\n\n user = User(connection=connection, cursor=cursor)\n all_users = user.get_all_users()\n\n context = {\n 'all_users': all_users\n }\n\n if request.method == 'POST':\n creators_ids = request.form.getlist('choose_creators') # if there is no such name, returns empty list\n controllers_ids = request.form.getlist('choose_controllers')\n\n request_form = dict(request.form)\n request_form.pop('choose_creators') # there is no need in it now\n request_form.pop('choose_controllers')\n\n request_form['creators_ids'] = creators_ids\n request_form['controllers_ids'] = controllers_ids\n\n request_form['date_of_creation'] = datetime.strptime(request_form['date_of_creation'],\n '%Y-%m-%d')\n request_form['date_of_registration'] = datetime.strptime(request_form['date_of_registration'],\n '%Y-%m-%d')\n\n add_new_document_schema = AddNewDocument()\n errors = add_new_document_schema.validate(data=request_form)\n\n if errors:\n abort(400, str(errors))\n\n args = add_new_document_schema.dump(request_form)\n\n document = Document(connection=connection, cursor=cursor)\n document.add_document(\n document_name=args['document_name'],\n document_type=args['document_type'],\n date_of_creation=args['date_of_creation'],\n date_of_registration=args['date_of_registration'],\n controllers_ids=args['controllers_ids'],\n creators_ids=args['creators_ids'],\n )\n\n return redirect(url_for('documentation.show_documents'))\n\n return render_template('pages/inputs/add_document.html', **context)" ]
[ "0.65139395", "0.6487149", "0.6185686", "0.6144711", "0.60302484", "0.5803553", "0.5786804", "0.57757664", "0.57180357", "0.5691069", "0.55917436", "0.558072", "0.5578744", "0.55752146", "0.5531387", "0.55180806", "0.55093175", "0.549522", "0.5483268", "0.54766715", "0.54463726", "0.5440675", "0.5438425", "0.54280967", "0.5382643", "0.53680223", "0.5366507", "0.53552955", "0.5352893", "0.53389263" ]
0.6624268
0
Called on task startup to copy all static resources into the output path (and to make sure the output path exists as a directory).
def setup_output_path(self):
    self.logger.info('setting up output path')
    try:
        self.output_path.mkdir()
    except FileExistsError:
        pass
    try:
        (self.output_path / 'simple').mkdir()
    except FileExistsError:
        pass
    for filename in resource_listdir(__name__, 'static'):
        if filename == 'index.html':
            # Skip template
            continue
        with (self.output_path / filename).open('wb') as f:
            source = resource_stream(__name__, 'static/' + filename)
            f.write(source.read())
            source.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def copy_static(self, outdir):\n pass", "def copy_static_resources(self):\n if not hasattr(settings, 'STATIC_ROOT'):\n raise MissingStaticRoot()\n destination = os.path.join(STORAGE_PATH, 'static')\n if os.path.exists(destination):\n shutil.rmtree(destination)\n shutil.copytree(settings.STATIC_ROOT, destination)", "def collect_static():\n\n check_promt = (\n not env.prompt or\n console.confirm(\n \"Collect static files and copy them to collect_static?\",\n default=True,\n )\n )\n\n if check_promt:\n with cd(\"%s\" % env.work_path):\n with prefix(\"source %s/bin/activate\" % env.env_path):\n run(\n \"./manage.py collectstatic\"\n \" --noinput\"\n )", "def deploy_static(): \n from fabdeploy.django import collectstatic as django_collectstatic\n# run(\"rm -rf %(root_path)s%(project_name)s/static/*\" % env) # call again git_add_commit_pull\n django_collectstatic()", "def cp_static_files(self,inpath,outpath): \n if inpath==self.static_dir:\n dest=os.path.join(outpath,os.path.basename(inpath))\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(inpath,dest))\n copyfiles(inpath,dest) \n else:\n for folder in os.listdir(inpath):\n if folder == 'static':\n logger.info('found static folder, copy all...')\n dest=os.path.join(outpath,folder)\n src=os.path.join(inpath,folder)\n if os.path.exists(dest):\n logger.warning('Remove old static folder')\n shutil.rmtree(dest) #not efficient. Should do it incrementaly...\n logger.info('cp_static_files %s -> %s' %(src,dest))\n copyfiles(src,dest)\n return 0", "def collect_static_files():\n with env.cd(settings.PROJECT_PATH), prefix(COMMANDS['set_environment']), \\\n prefix(COMMANDS['activate_virtualenv']):\n env.run('python rnacentral/manage.py collectstatic --noinput')", "def copy_static(root_directory, dist_directory, sdk_directory):\n\n for static in configuration.STATICS:\n context = {\n \"root\": root_directory,\n \"sdk\": sdk_directory,\n \"dist\": dist_directory\n }\n\n source = templates.from_string(static[\"source\"], context)\n target = templates.from_string(static[\"target\"], context)\n target = os.path.join(dist_directory, target)\n\n # Perform the action.\n sys.stdout.write(\"Copying '%s'\\n\" % source)\n\n if static[\"type\"] == \"directory\":\n recursive_overwrite(source, target)\n else:\n shutil.copy(source, target)", "def collectstatic(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n run('bin/django collectstatic --noinput')", "def collectstatic():\n puts(yellow(\"Collect statics\"))\n django_manage('collectstatic', '-l', '--noinput')", "def collectstatic():\n sudo(env.activate)\n sudo('cd %s' % env.whole_path_symlinked + '/aurora; python manage.py collectstatic;')", "def generate_static_site(self, output_root=None, extra_context=None):\n self.app.config['BUILD_PATH'] = output_root\n\n # use this hook for registering URLs to freeze\n self.call_hook(\"generate\", self, output_root, extra_context)\n\n if output_root is not None:\n # realpath or this gets generated relative to the tarbell package\n self.app.config['FREEZER_DESTINATION'] = os.path.realpath(output_root)\n\n self.freezer.freeze()", "def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))", "def copy_web_resources(output_dir):\n mypath = os.path.dirname(os.path.realpath(__file__))\n web_path = 
os.path.join(mypath, 'web')\n\n for (dirpath, dirnames, filenames) in os.walk(web_path):\n relpath = os.path.relpath(dirpath, web_path)\n tgtpath = os.path.join(output_dir, relpath)\n if not os.path.exists(tgtpath):\n os.makedirs(tgtpath)\n\n for f in [os.path.join(dirpath, filename) for filename in filenames]:\n shutil.copy(f, tgtpath)", "def collectstatic():\n local(\"docker-compose exec web python3 manage.py {}\".format(\n 'collectstatic --noinput'))", "def pre_start(self):\n self.make_runpath_dirs()", "def make_build(self):\n for asset in self.assets.values():\n if asset.has_bundles():\n asset.collect_files()\n if not os.path.exists(self.config.output_dir):\n os.makedirs(self.config.output_dir)\n if self.config.copy_only_bundles:\n for asset in self.assets.values():\n if not asset.minify and asset.files:\n for f in asset.files:\n copy_file(f.abs_path, self._get_output_path(f.abs_path))\n else:\n copy_excludes = {}\n for asset in self.assets.values():\n if asset.minify and asset.files:\n for f in asset.files:\n copy_excludes[f.abs_path] = f\n for root, dirs, files in os.walk(self.config.input_dir):\n for fpath in files:\n current_file_path = os.path.join(root, fpath)\n if current_file_path not in copy_excludes:\n copy_file(current_file_path, self._get_output_path(current_file_path))\n self._minify()", "def add_dirs_to_static(static_webapp_name):\n static_dir = '$HOME/webapps/%s' % static_webapp_name\n with settings(warn_only=True):\n with cd(static_dir):\n run(\"mkdir static && mkdir media\")\n run(\"rm index.html\")\n run(\"touch index.html\")\n with cd(code_dir):\n run(\"mkdir %s/static\" % project_name)", "def build_finished(app, exception):\n if app.config.offline_skin_js_path is not None:\n copy_static_entry(path.join(app.builder.srcdir, app.config.offline_skin_js_path), path.join(app.builder.outdir, '_static'), app.builder)\n if app.config.offline_wavedrom_js_path is not None:\n copy_static_entry(path.join(app.builder.srcdir, app.config.offline_wavedrom_js_path), path.join(app.builder.outdir, '_static'), app.builder)", "def init():\n pass\n # destination_dir = os.getcwd() + '/deploy'\n # try:\n # os.makedirs(destination_dir)\n # except OSError as e:\n # if e.errno == errno.EEXIST:\n # print('''AWS \"deploy\" directory already exists in this folder\n # \\n''', destination_dir)\n # copy_tree(deploy_path_join('../deploy'), destination_dir)", "def gen_static(self, output_folder):\n files = []\n for l in self.file_listers:\n files += l()\n for f in files:\n _logger.info(\"generating %s\" % f)\n content = self.get(f)\n loc = os.path.join(output_folder, f)\n d = os.path.dirname(loc)\n if not os.path.exists(d):\n os.makedirs(d)\n with open(loc, \"wb\") as file_:\n file_.write(content)", "def update_static_files(self):\n\n params = self.chose_param_value(\"--static\")\n self._check_path_availability([\"get_static_dir\", \"get_static_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_static_dir(),\n self.analizer.get_static_dir_to(),\n params\n )\n return self.write_debug_message(\"Static files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about static files\")", "def assemble_resource_directories(project, base_dir):\n resource_path = os.path.join(base_dir, project.resources_path)\n os.makedirs(os.path.join(resource_path, 'images'))\n os.makedirs(os.path.join(resource_path, 'fonts'))\n os.makedirs(os.path.join(resource_path, 'data'))", "def ensure_static_exists():\n for entry in 
html_static_path:\n static_path = os.path.join(__repo_docs__, entry)\n if not os.path.isdir(static_path):\n os.makedirs(static_path)", "def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def make_static_assets(opts):\n\n css_filename = do_css(opts['css_source_dir'], opts['out_dir'])\n js_filename = do_js(opts['js_source_dir'], opts['out_dir'])\n return {\n 'primary_css': css_filename,\n 'js': js_filename\n }", "def collect_links(self, env=None):\n for asset in self.assets.values():\n if asset.has_bundles():\n asset.collect_files()\n if env is None:\n env = self.config.env\n if env == static_bundle.ENV_PRODUCTION:\n self._minify(emulate=True)\n self._add_url_prefix()", "def add_static_paths(app):\n app.env.book_theme_resources_changed = False\n\n output_static_folder = Path(app.outdir) / \"_static\"\n theme_static_files = resources.contents(theme_static)\n\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and output_static_folder.exists()\n ):\n # during development, the JS/CSS may change, if this is the case,\n # we want to remove the old files and ensure that the new files are loaded\n for path in output_static_folder.glob(\"sphinx-book-theme*\"):\n if path.name not in theme_static_files:\n app.env.book_theme_resources_changed = True\n path.unlink()\n # note sphinx treats theme css different to regular css\n # (it is specified in theme.conf), so we don't directly use app.add_css_file\n for fname in resources.contents(theme_static):\n if fname.endswith(\".css\"):\n if not (output_static_folder / fname).exists():\n (output_static_folder / fname).write_bytes(\n resources.read_binary(theme_static, fname)\n )\n app.env.book_theme_resources_changed = True\n\n # add javascript\n for fname in resources.contents(theme_static):\n if fname.endswith(\".js\"):\n app.add_js_file(fname)", "def build_in_dir(self, build_dir: str):\n target_resource_dir = os.path.join(build_dir, \"static\")\n\n # If any additional task files are required via a source_dir, copy those as well\n extra_dir_path = self.args.blueprint.get(\"extra_source_dir\", None)\n if extra_dir_path is not None:\n extra_dir_path = os.path.expanduser(extra_dir_path)\n copy_tree(extra_dir_path, target_resource_dir)\n\n # Copy the built core and the given task file to the target path\n use_bundle = os.path.expanduser(self.args.blueprint.task_source)\n target_path = os.path.join(target_resource_dir, \"bundle.js\")\n\n should_link_task_source = self.args.blueprint.get(\"link_task_source\", False)\n if should_link_task_source:\n os.symlink(use_bundle, target_path)\n else:\n shutil.copy2(use_bundle, target_path)\n\n # Write a built file confirmation\n with open(os.path.join(build_dir, self.BUILT_FILE), \"w+\") as built_file:\n built_file.write(self.BUILT_MESSAGE)", "def init_static_data(log_to_console=False):\n # These are annoyingly necessary to live in the DB, currently. 
\n # Really this should be app logic, I think.\n load_report_types()\n load_roles()\n loc_file = getattr(settings, \"STATIC_LOCATIONS\")\n if loc_file:\n load_locations(loc_file, log_to_console=log_to_console)\n product_file = getattr(settings, \"STATIC_PRODUCTS\")\n if product_file:\n load_products(product_file, log_to_console=log_to_console)" ]
[ "0.77394503", "0.7600356", "0.6926869", "0.6820733", "0.6785674", "0.6785608", "0.6746367", "0.65858966", "0.65577996", "0.6496821", "0.6481917", "0.6263674", "0.61868364", "0.61790466", "0.61160195", "0.6110984", "0.60765094", "0.6062903", "0.6055576", "0.6050808", "0.60248095", "0.5965623", "0.5954091", "0.5897081", "0.5896472", "0.5852622", "0.58121634", "0.5791904", "0.57722044", "0.5769956" ]
0.7670651
1
Handle incoming requests to (re)build index files. These will be in the form of "HOME", a request to write the homepage with some associated statistics, or "PKG", a request to write the index for the specified package.
def handle_index(self, queue): msg, *args = queue.recv_pyobj() if msg == 'PKG': package = args[0] if package not in self.package_cache: self.package_cache.add(package) self.write_root_index() self.write_package_index(package, self.db.get_package_files(package)) elif msg == 'HOME': status_info = args[0] self.write_homepage(status_info) elif msg == 'SEARCH': search_index = args[0] self.write_search_index(search_index) else: self.logger.error('invalid index_queue message: %s', msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_root_index(self):\n self.logger.info('writing package index')\n temp_dir = self.output_path / 'simple'\n with tempfile.NamedTemporaryFile(mode='w', dir=str(temp_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Pi Wheels Simple Index'),\n tag.meta(name='api-version', value=2),\n ),\n tag.body(\n (tag.a(package, href=package), tag.br())\n for package in self.package_cache\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name,\n str(self.output_path / 'simple' / 'index.html'))", "def from_pypi(request, fpkgs='/find-packages'):\n name = request.matchdict['name']\n version = request.matchdict['version']\n dists = PyPi.release_urls(name, version)\n flash = request.session.flash\n if not dists:\n flash(\"%s-%s not found\" %(name, version))\n return HTTPFound(fpkgs)\n\n candidates = [x for x in dists if request.index.EXTS.match(x['filename'])]\n\n if candidates[0]['md5_digest'] in request.index_data:\n logger.debug('Package %s-%s already in index' %(name, version))\n return HTTPFound('/index/%s' %name)\n\n details = candidates[0]\n url = details['url']\n filename = details['filename']\n newfile = None\n try:\n resp = requests.get(url)\n newfile = request.file_root / filename\n newfile.write_bytes(resp.content)\n except HTTPError, e:\n error = \"HTTP Error: %d %s - %s\" %(e.code, exc.status_map[e.code].title, url)\n logger.error(error)\n flash(error)\n except URLError, e:\n logger.error(\"URL Error: %s, %s\", e.reason , url)\n flash('Url error attempting to grab %s: %s' %(url, e.reason))\n\n if newfile is not None:\n try:\n added_event = event.PackageAdded(request.index, path=newfile)\n request.registry.notify(added_event)\n flash('%s-%s was installed into the index successfully.' % (name, version))\n return HTTPFound('/index/%s' %name)\n except Exception, e:\n flash('Issue with adding %s to index: See logs: %s' % (newfile.name, e))\n\n return HTTPFound(fpkgs)", "def write_package_index(self, package, files):\n self.logger.info('writing index for %s', package)\n pkg_dir = self.output_path / 'simple' / package\n mkdir_override_symlink(pkg_dir)\n with tempfile.NamedTemporaryFile(mode='w', dir=str(pkg_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Links for {}'.format(package))\n ),\n tag.body(\n tag.h1('Links for {}'.format(package)),\n ((tag.a(\n f.filename,\n href='{f.filename}#sha256={f.filehash}'.format(f=f), # noqa: E501\n rel='internal'), tag.br())\n for f in files)\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name, str(pkg_dir / 'index.html'))\n try:\n # Workaround for #20: after constructing the index for a\n # package attempt to symlink the \"canonicalized\" package\n # name to the actual package directory. The reasons for\n # doing things this way are rather complex...\n #\n # The older package name must exist for the benefit of\n # older versions of pip. If the symlink already exists *or\n # is a directory* we ignore it. Yes, it's possible to have\n # two packages which both have the same canonicalized name,\n # and for each to have different contents. I don't quite\n # know how PyPI handle this but their XML and JSON APIs\n # already include such situations (in a small number of\n # cases). 
This setup is designed to create canonicalized\n # links where possible but not to clobber \"real\" packages\n # if they exist.\n #\n # What about new packages that want to take the place of a\n # canonicalized symlink? We (and TransferState.commit)\n # handle that by removing the symlink and making a\n # directory in its place.\n canon_dir = pkg_dir.with_name(canonicalize_name(pkg_dir.name)) # noqa: E501\n canon_dir.symlink_to(pkg_dir.name)\n except FileExistsError:\n pass", "def process(self):\r\n\r\n index = cindex.Index.create()\r\n self.headers = {}\r\n\r\n for f in self.files:\r\n if f in self.processed:\r\n continue\r\n\r\n print \"Processing `%s'\" % (os.path.basename(f),)\r\n\r\n tu = index.parse(f, self.flags)\r\n\r\n if len(tu.diagnostics) != 0:\r\n fatal = False\r\n\r\n for d in tu.diagnostics:\r\n sys.stderr.write(d.format)\r\n sys.stderr.write(\"\\n\")\r\n\r\n if d.severity == cindex.Diagnostic.Fatal or \\\r\n d.severity == cindex.Diagnostic.Error:\r\n fatal = True\r\n\r\n if fatal:\r\n sys.stderr.write(\"\\nCould not generate documentation due to parser errors\\n\")\r\n sys.exit(1)\r\n\r\n if not tu:\r\n sys.stderr.write(\"Could not parse file %s...\\n\" % (f,))\r\n sys.exit(1)\r\n\r\n # Extract comments from files and included files that we are\r\n # supposed to inspect\r\n extractfiles = [f]\r\n\r\n for inc in tu.get_includes():\r\n filename = str(inc.include)\r\n self.headers[filename] = True\r\n\r\n if filename in self.processed or (not filename in self.files) or filename in extractfiles:\r\n continue\r\n\r\n extractfiles.append(filename)\r\n\r\n for e in extractfiles:\r\n db = comment.CommentsDatabase(e, tu)\r\n\r\n self.add_categories(db.category_names)\r\n self.commentsdbs[e] = db\r\n\r\n self.visit(tu.cursor.get_children())\r\n\r\n for f in self.processing:\r\n self.processed[f] = True\r\n\r\n self.processing = {}\r\n\r\n # Construct hierarchy of nodes.\r\n for node in self.all_nodes:\r\n q = node.qid\r\n\r\n if node.parent is None:\r\n par = self.find_parent(node)\r\n\r\n # Lookup categories for things in the root\r\n if (par is None or par == self.root) and (not node.cursor is None):\r\n location = node.cursor.extent.start\r\n db = self.commentsdbs[location.file.name]\r\n\r\n if db:\r\n par = self.category_to_node[db.lookup_category(location)]\r\n\r\n if par is None:\r\n par = self.root\r\n\r\n par.append(node)\r\n\r\n # Resolve comment\r\n cm = self.find_node_comment(node)\r\n\r\n if cm:\r\n node.merge_comment(cm)\r\n\r\n # Keep track of classes to resolve bases and subclasses\r\n classes = {}\r\n\r\n # Map final qid to node\r\n for node in self.all_nodes:\r\n q = node.qid\r\n self.qid_to_node[q] = node\r\n\r\n if isinstance(node, nodes.Class):\r\n classes[q] = node\r\n\r\n # Resolve bases and subclasses\r\n for qid in classes:\r\n classes[qid].resolve_bases(classes)\r\n\r\n self.markup_code(index)", "def dispatch(self):\n if self.index:\n if self.rebuild:\n answer = input(\"You sure you want to rebuild? [yes/No]? \")\n if answer.lower() == 'yes':\n # Drop and recreate tables\n db = IndexDb()\n db.clear()\n db.setup()\n print(f\"{ats()} Index cleared...\")\n else:\n print(f\"{ats()} Aborting...\")\n sys.exit(1)\n self.build_index()\n\n if self.download:\n self.download_filings()\n\n print(f\"{ats()} Finished. 
Good job!\")\n ok()", "def make_index(base_dir: str, _start: bool = True) -> None:\n if _start:\n logger.info(\"Generating package index\")\n\n index_path = os.path.join(base_dir, \"Packages\")\n index_gzip_path = os.path.join(base_dir, \"Packages.gz\")\n\n with open(index_path, \"w\") as index_file, gzip.open(\n index_gzip_path, \"wt\"\n ) as index_gzip_file:\n for entry in os.scandir(base_dir):\n if entry.name in (\"Packages\", \"Packages.gz\"):\n pass\n elif entry.is_dir():\n make_index(entry.path, _start=False)\n elif entry.is_file() and entry.name.endswith(\".ipk\"):\n with open(entry.path, \"rb\") as package:\n metadata = read_ipk_metadata(package)\n\n metadata += textwrap.dedent(\n f\"\"\"\\\n Filename: {entry.name}\n SHA256sum: {file_sha256(entry.path)}\n Size: {os.path.getsize(entry.path)}\n\n \"\"\"\n )\n\n index_file.write(metadata)\n index_gzip_file.write(metadata)", "def do_GET(self):\n try:\n \n # parse the requested page and see if it's valid\n parse_status, explanation_str = self.parse_header(self.path)\n \n # parse_status:\n # -1: error\n # 0: /log/* request\n # 1: /detailed/node/timestamp request\n print str(self.parse_header(self.path))\n \n explanation_str = str(explanation_str)\n \n # error\n if parse_status == -1:\n # invalid header, close the connection and die but notify user\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid request ('+explanation_str+')')\n print '-1'\n return\n \n # 1: /detailed/node/timestamp request\n elif parse_status == 1:\n print '1'\n # just need to respond with the file that's contained in explanation_str\n # and once we verify that it exists, we're golden\n \n # path to the \"detailed\" file\n file_path = explanation_str\n \n if os.path.isfile(file_path):\n try:\n # TODO: make HTML here to nav around previous node things\n detailed_file_handle = open(file_path, 'r')\n self.send_response(200)\n self.send_header('Content-type',\t'text/plain')\n self.end_headers() \n self.wfile.write(detailed_file_handle.read())\n detailed_file_handle.close()\n return\n except Exception, e:\n print 'Error while sending detailed log file'\n print e\n return\n else:\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid file request')\n return\n \n # 0: /log/* request\n elif parse_status == 0:\n print '0'\n # request was successfull, we just want the filename from index\n log_index = explanation_str\n \n success_status, log_filename = self.get_filename_from_index(log_index)\n \n if success_status == -1:\n # some kind of error of which the description is stored in log_filename\n #sockobj.send('The server encountered an error opening the file, please'+\\\n # ' try your request again')\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers() \n self.wfile.write('The server encountered an error opening the file, please'+\\\n ' try your request again')\n return\n \n # the file exists!\n # just dump the file at this point, and then...\n \n # send the HTML file\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.send_html_file(log_filename, log_index)\n return\n\n # invalid type\n else:\n self.send_response(200)\n self.send_header('Content-type',\t'text/html')\n self.end_headers()\n self.wfile.write('Invalid request type 2')\n return\n \n except IOError:\n self.send_error(404,'File Not Found: %s' % self.path)\n \n return", "def 
updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)", "def updateIndex(ix, pool_path):\n \n logger.debug('updating search index')\n writer = ix.writer()\n \n exercise_list = [f.name for f in os.scandir(pool_path) if f.is_dir()]\n for ex in exercise_list:\n if ex == '.search_index':\n continue\n task_file = os.path.abspath(os.path.join(pool_path, ex, 'task.tex'))\n if os.path.isfile(task_file):\n logger.info('parsing ' + task_file)\n metaData, task_texcode = parseTaskFile(task_file)\n else:\n logger.warning(ex + ' does not include a task.tex file. skipping entry')\n continue\n \n solution_file = os.path.abspath(os.path.join(pool_path, ex, 'solution.tex'))\n if os.path.isfile(solution_file):\n with open(solution_file, 'r') as f:\n solution_texcode = f.read()\n else:\n logger.warning(ex + ' does not include a solution.tex file')\n solution_texcode = ''\n \n if metaData['date'] == '':\n lastupdate = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n else:\n lastupdate = parse_date(metaData['date'])\n\n writer.add_document(\n folder_name=ex,\n task=task_texcode,\n solution=solution_texcode,\n language=metaData['language'],\n maintainer=metaData['author'],\n lastupdate=lastupdate,\n keywords=re.sub(r',\\s+', ',', metaData['keywords'])\n )\n\n writer.commit()", "async def index_handler(req: web.Request) -> web.Response:\n if req.app[\"client_path\"] is None:\n try:\n client_path = await virtool.utils.get_client_path()\n except FileNotFoundError:\n return await client_path_error()\n\n req.app[\"client_path\"] = client_path\n req.app.router.add_static(\"/static\", client_path)\n\n force_reset = req[\"client\"].force_reset\n\n if req[\"client\"].user_id and not force_reset:\n path = os.path.join(req.app[\"client_path\"], \"index.html\")\n\n html = mako.template.Template(filename=path).render()\n\n html = html.replace(\"VERSION\", req.app[\"version\"])\n\n html = html.replace('\"DEV\"', \"true\" if req.app[\"settings\"][\"dev\"] else \"false\")\n\n return web.Response(body=html, content_type=\"text/html\")\n\n path_base = \"login\"\n\n if force_reset:\n path_base = \"reset\"\n\n return_to = get_return_to_from_path(req)\n\n return web.Response(status=302, headers={\"Location\": f\"/{path_base}?return_to={return_to}\"})", "async def get_index(\n request: Request,\n) -> IndexResponse:\n metadata = get_metadata(\n package_name=\"ook\",\n application_name=config.name,\n )\n # Construct these URLs; this doesn't use request.url_for because the\n # endpoints are in other FastAPI \"apps\".\n doc_url = request.url.replace(path=f\"/{config.path_prefix}/redoc\")\n return IndexResponse(\n metadata=metadata,\n 
api_docs=AnyHttpUrl(str(doc_url), scheme=request.url.scheme),\n )", "def build_index(self):\n url = self.start_url\n\n # Search from last available date if not rebuilding and index is not empty\n if not self.rebuild > 0:\n recent_filings = self.get_most_recent_filings()\n pdt = recent_filings[0].date_filing\n # Reformat date to SEC format MM/DD/YYYY\n formatted_date = f\"{pdt:02}/{pdt:02}/{pdt.year}\"\n url = self.url_str.format(domain=self.domain_name, start=formatted_date, end=defaults['end_date'])\n\n page_counter = 0\n entries_counter = 0\n\n print(f\"{ats()} Starting index build...\" if self.rebuild else f\"{ats()} Starting index update...\")\n # Iterate through search results pages until no Next button found\n while True:\n page = self.load_page(url)\n # Scrape, parse and record into database current search results page\n entries_counter += self.scrape_page(page)\n page_counter += 1\n print(f\"{ats()} Scraped results page {page_counter}, {entries_counter} entries...\")\n # Get url of next search results page\n url = self.get_next(page)\n if url is None:\n # Exit loop if no more search results\n break\n if self.n_limit and entries_counter >= self.n_limit:\n # Exit if reached user-specified limit\n break\n\n # Do some reporting\n if self.rebuild:\n print(f'{ats()} Index built! Total {page_counter} search result pages scraped. '\n f'{entries_counter} index entries created.')\n else:\n print(f'{ats()} Index updated! Total {page_counter} search result page(s) scraped. '\n f'{entries_counter} index entries (re)added.')", "def main():\n\n # need parser twice because we first need to load ini file\n # bootstrap pyramid and then load plugins\n pre_parser = argparse.ArgumentParser(\n description=\"Reindex AppEnlight data\", add_help=False\n )\n pre_parser.add_argument(\n \"-c\", \"--config\", required=True, help=\"Configuration ini file of application\"\n )\n pre_parser.add_argument(\"-h\", \"--help\", help=\"Show help\", nargs=\"?\")\n pre_parser.add_argument(\n \"-t\", \"--types\", nargs=\"+\", help=\"Which parts of database should get reindexed\"\n )\n args = pre_parser.parse_args()\n\n config_uri = args.config\n setup_logging(config_uri)\n log.setLevel(logging.INFO)\n env = bootstrap(config_uri)\n parser = argparse.ArgumentParser(description=\"Reindex AppEnlight data\")\n choices = {\n \"reports\": \"appenlight.scripts.reindex_elasticsearch:reindex_reports\",\n \"logs\": \"appenlight.scripts.reindex_elasticsearch:reindex_logs\",\n \"metrics\": \"appenlight.scripts.reindex_elasticsearch:reindex_metrics\",\n \"slow_calls\": \"appenlight.scripts.reindex_elasticsearch:reindex_slow_calls\",\n \"template\": \"appenlight.scripts.reindex_elasticsearch:update_template\",\n }\n for k, v in env[\"registry\"].appenlight_plugins.items():\n if v.get(\"fulltext_indexer\"):\n choices[k] = v[\"fulltext_indexer\"]\n parser.add_argument(\n \"-t\",\n \"--types\",\n nargs=\"*\",\n choices=[\"all\"] + list(choices.keys()),\n default=[],\n help=\"Which parts of database should get reindexed\",\n )\n parser.add_argument(\n \"-c\", \"--config\", required=True, help=\"Configuration ini file of application\"\n )\n args = parser.parse_args()\n\n if \"all\" in args.types:\n args.types = list(choices.keys())\n\n print(\"Selected types to reindex: {}\".format(args.types))\n\n log.info(\"settings {}\".format(args.types))\n\n if \"template\" in args.types:\n get_callable(choices[\"template\"])()\n args.types.remove(\"template\")\n for selected in args.types:\n get_callable(choices[selected])()", "def index_all_files(self, 
root_dir):\n pass", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def on_index(self, handler):\n print \"Server sent index page to {0}.\".format(\n handler.client_address[0]\n )", "def build(req):\n\n errors = []\n v = {}\n data = {\"tab\": {}, \"expanded\": {}}\n tdir = \"/tmp/\" + f.get_tmp_file_name()\n\n index = get_html()\n\n if \"tab_zip\" not in req.files:\n return {\"errors\": [\"No tab file\"]}\n if \"expanded_zip\" not in req.files:\n return {\"errors\": [\"No expanded file\"]}\n\n ## tab\n data[\"tab\"][\"ext\"] = f.get_ext(req.files[\"tab_zip\"].filename)\n if data[\"tab\"][\"ext\"] == \"zip\":\n os.mkdir(tdir)\n if not f.extract_zip(req.files[\"tab_zip\"], tdir + \"/tab\"):\n return {\"errors\": [\"Wrong tab zip file\"]}\n\n file_name = \"index2.html\"\n try:\n os.rename(tdir + \"/tab/index.html\", tdir + \"/tab/\" + file_name)\n except os.FileNotFoundError:\n return {\"errors\": [\"No index.html in tab zip\"]}\n elif not data[\"tab\"][\"ext\"]:\n return {\"errors\": [\"No tab file\"]}\n else:\n f.save_file(req.files[\"tab_zip\"], tdir + \"/tab.\" + data[\"tab\"][\"ext\"])\n\n ## expanded\n data[\"expanded\"][\"ext\"] = f.get_ext(req.files[\"expanded_zip\"].filename)\n if data[\"expanded\"][\"ext\"] == \"zip\":\n if not f.extract_zip(req.files[\"expanded_zip\"], tdir + \"/expanded\"):\n return {\"errors\": [\"Wrong expanded zip file\"]}\n\n file_name = \"index2.html\"\n try:\n os.rename(tdir + \"/expanded/index.html\", tdir + \"/expanded/\" + file_name)\n except os.FileNotFoundError:\n return {\"errors\": [\"No index.html in expanded zip\"]}\n elif not data[\"expanded\"][\"ext\"]:\n return {\"errors\": [\"No expanded file\"]}\n else:\n f.save_file(req.files[\"expanded_zip\"], tdir + \"/expanded.\" + data[\"expanded\"][\"ext\"])\n\n v[\"expandMS\"] = str(f.get_int_param(\"expand_seconds\") * 1000)\n\n v[\"width\"] = f.strtoken(f.get_param(\"size\"), 1, \"x\")\n v[\"height\"] = f.strtoken(f.get_param(\"size\"), 2, \"x\")\n\n v[\"backgroundColor\"] = f.get_param(\"background_color\")\n\n v[\"clicktag_layer_select\"] = \"true\" if f.get_param(\"clicktag_layer\") else \"false\"\n\n v[\"tabURL\"] = \"\"\n v[\"tabImage\"] = \"\"\n\n if data[\"tab\"][\"ext\"] == \"zip\":\n v[\"tabUrl\"] = \"tab/index2.html\"\n else:\n v[\"tabImage\"] = \"tab.\"+data[\"tab\"][\"ext\"]\n\n v[\"expandedURL\"] = \"\"\n v[\"expandedImage\"] = \"\"\n if data[\"expanded\"][\"ext\"] == \"zip\":\n v[\"expandedURL\"] = \"expanded/index2.html\"\n else:\n v[\"expandedImage\"] = \"expanded.\"+data[\"expanded\"][\"ext\"]\n\n return {\"errors\": errors, \"dir\": tdir, \"index\": index, \"vars\": v}", "def process( self ):\n\t\t\n\t\tprint( self._query[\"header\"], file = self._file )\n\t\tself._file.flush()\n\n\t\tfor root, dirs, files in os.walk(self._directory):\n\t\t\tpath = root.split(os.sep)\n\n\t\t\tif( root.endswith(\"logFiles\") and ( root.find(\"template\") == -1 ) ):\n\t\t\t\tLogProcessor._process_dir(root, self._file_list, self._columns, self._file, self._meta)", "def on_new_site(self, files):\n init_index()", "def redir_index():\n return redirect(url_for(\"index\"), code=301)", "def sendIndex(self):\n self.updateIndex()\n outpkg = json.dumps(self.serverindex)\n self.send(outpkg)", "def index(filename):\n return send_from_directory(\"./build\", filename)", "def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n\n 
maxInt = sys.maxsize\n\n while True:\n # decrease the maxInt value by factor 10 \n # as long as the OverflowError occurs.\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt/10)\n\n #Dicitionary for saving the normalized weights for document vector\n lengths = dict()\n\n #Number of docs read from csv\n total_docs = 1\n max_docs = 1000\n\n #Data stored in csv read file line by line and save columns data\n with open(os.path.join(in_dir), 'r', encoding=\"utf8\") as data_csv:\n reader = csv.DictReader(data_csv)\n #each line corresponds to a document\n for doc in reader:\n\n #if(total_docs > max_docs):\n # break\n\n #If line is blank, just skip\n if doc is None:\n continue\n \n #save the different columns of the doc\n doc_id = int(doc[\"document_id\"])\n #Remove punctuation in title and content\n doc_title = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"title\"])\n doc_content = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"content\"])\n doc_date = doc[\"date_posted\"]\n doc_year = doc_date[0:4]\n doc_court = doc[\"court\"]\n\n #The dictionaryies are updated, postings lists are updated or new terms added\n update_terms_zones_dictionary(doc_id, doc_title, \".title\")\n update_terms_zones_dictionary(doc_id, doc_content, \".content\")\n update_date_field_dictionary(doc_id, doc_year)\n update_court_field_dictionary(doc_id, doc_court)\n\n total_docs += 1\n\n data_csv.close()\n\n #This section stores the Log TF using the word counts in the postings in the dictionary\n #It saves the Log TF in an auxiliary dictionary named lengths\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n\n for docID_termF in postings_list:\n #Get the vector for the doc, where the docId is docID_termF[0]\n #If there is no vector for this doc, then create a new dict\n #I am using dictionaries as the vector for the word only for the calculations\n doc_vector = lengths.get(docID_termF[0], dict())\n #I add the logarithmic term frequency to that document vector\n doc_vector[word] = 1 + math.log(docID_termF[1], 10)\n #Save that to its corresponding doc\n lengths[docID_termF[0]] = doc_vector\n\n #This section normalizes the Log TFs \n for doc_vector in lengths.values():\n #We store each of the values in a list and then use:\n #np.linalg.norm to do the normalization = sqrt(sum(values^2))\n weights = doc_vector.values()\n #We get the vectors magnitude\n magnitude = np.linalg.norm(np.array(list(weights)))\n for word in doc_vector.keys():\n #For every word entry in the vector \n #normalize by dividing the weight by the magnitude\n doc_vector[word] = doc_vector[word] / magnitude\n\n #This section replaces the word count in the tuple of the dictionary with the Normalized Log TF\n #It also sorts the postings list by doc ID\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n new_postings_list = list()\n for docID_termF in postings_list:\n docID_termF = ( docID_termF[0], lengths[docID_termF[0]][word] )\n new_postings_list.append(docID_termF)\n new_postings_list.sort()\n dictionary[word] = new_postings_list\n\n ''' \n with open('ugly_dictionary.txt', 'w') as fp:\n json.dump(dictionary, fp)\n '''\n #Determine the relevance of each doc by the court that it has in its court field\n #Save the relevant docs and their relevance\n relevant_courts_dict = { \"SG Court of Appeal\":2, \"SG Privy Council\":2, \"UK House of Lords\":2, \"UK Supreme Court\":2,\n \"High Court of 
Australia\":2, \"CA Supreme Court\":2, \"SG High Court\":1.5, \"Singapore International Commercial Court\":1.5,\n \"HK High Court\": 1.5, \"HK Court of First Instance\": 1.5, \"UK Crown Court\": 1.5, \"UK Court of Appeal\": 1.5, \"UK High Court\": 1.5, \n \"Federal Court of Australia\": 1.5, \"NSW Court of Appeal\": 1.5, \"NSW Court of Criminal Appeal\": 1.5, \"NSW Supreme Court\": 1.5}\n\n relevant_docs = dict()\n \n for court_name in relevant_courts_dict:\n court_postings_list = court_dictionary.get(court_name, -1)\n if(court_postings_list != -1):\n for docid in court_postings_list:\n #save a dictionary of docID and its relevance (2 or 1.5) according to its court\n relevant_docs[docid] = relevant_courts_dict[court_name]\n\n #This section traverse each word (key) in the dictionary, get its postings list and save it in a different file \n postings_list_file = open(out_postings, \"wb\") \n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n dictionary[word] = (document_frequency, postings_list_position)\n for date in date_dictionary:\n #Get postings list for the date\n postings_list = date_dictionary[date]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n date_dictionary[date] = (document_frequency, postings_list_position)\n for court in court_dictionary:\n #Get postings list for the date\n postings_list = court_dictionary[court]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n court_dictionary[court] = (document_frequency, postings_list_position)\n #Close the postings lists file\n postings_list_file.close() \n\n #Now open the dictionary file and save the three dictionaries\n with open(out_dict, 'wb') as dictionary_file:\n pickle.dump(total_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(date_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(court_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(relevant_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n \n '''\n The structure we have is:\n\n dictionary.txt: Has three dictionaries\n {word.zone : [doc_freq, pointer], word.zone: [doc_freq, pointer], ...}\n {date : [doc_freq, pointer], date: [doc_freq, pointer], ...}\n {court : [doc_freq, pointer], court: [doc_freq, pointer], ...}\n\n postings.txt: Has the postings for the three dictionaries\n For the dictionary postings:\n [[docID,termFrequency],[docID,termFrequency]]\n [[docID,termFrequency]] ...\n For the date_dictionary postings:\n [docId, docId, docId, docId]\n For the court_dictionary postings:\n [docId, docId, docId, docId]\n ...\n\n Both documents together would be:\n { word.zone: [doc_freq, 
[[docID,termFrequency], ... ]], \n word.zone: [doc_freq, [[docID,termFrequency].}, ...]] }\n { date: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n { court: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n\n lengths.txt\n [document: [word: weight, word: weight, ...], document: [word: weight, word: weight, ...]]\n Decided to make it like this to keep control of which weights correspond to which words\n Although for a document I will traverse all the weights to get the score\n If the word is not in the document vector [which in my case is a dictionary], then its weight is 0\n This way I am no using a sparse matrix\n\n '''", "def handle_request(self,host,path,data=b''):\n\t\tif data:\n\t\t\tself.response_code(4,\"Uploads are not accepted.\")\n\t\t\treturn\n\t\tif not hasattr(self,\"root\"):\n\t\t\tself.response_code(5,\"Server is unable to handle requests at this time due to misconfiguration.\")\n\t\t\treturn\n\t\tself.root = os.path.abspath(self.root)\n\t\tif not (prefix:=os.path.abspath(os.path.join(self.root,host))).startswith(self.root):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not (filepath:=os.path.abspath(os.path.join(prefix,unquote(path.lstrip(\"/\"))))).startswith(prefix):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not os.path.exists(filepath):\n\t\t\tself.response_code(4,\"Not Found\")\n\t\t\treturn\n\t\tif os.path.isdir(filepath):\n\t\t\tif os.path.exists(os.path.join(filepath,\"index.gmi\")):\n\t\t\t\tfilepath = os.path.join(filepath,\"index.gmi\")\n\t\t\telse:\n\t\t\t\tself.response_code(5,\"Cowardly refusing to generate folder listing.\")\n\t\t\t\treturn\n\t\text = os.path.splitext(filepath)[1]\n\t\tmimetype = mimetypes.guess_type(filepath,False)\n\t\tif ext in self.OVERRIDE_MIMETYPES:\n\t\t\tmimetype = self.OVERRIDE_MIMETYPES[ext]\n\t\tmimetype = mimetype or \"application/octet-stream\"\n\t\twith open(filepath,\"rb\") as f:\n\t\t\tself.response_code(2,mimetype)\n\t\t\tshutil.copyfileobj(f,self.wfile)", "def build_index():\n pass", "def do_GET(self):\n content = self._regenerate(self.path)\n if content:\n self._send_regenerated_head(content)\n self.wfile.write(content)\n else:\n SimpleHTTPRequestHandler.do_GET(self)", "def main(argv):\n parser = argparse.ArgumentParser(description='Build and serve HTML Sphinx docs')\n\n parser.add_argument(\n '--port',\n help='Serve on this port, default 8000',\n type=int,\n default=8000)\n\n parser.add_argument(\n '--source',\n help='Directory of source Sphinx (reStructuredText) docs',\n type=os.path.realpath,\n default='docs/source')\n\n parser.add_argument(\n '--destination',\n help='Where to build the HTML output',\n type=os.path.realpath,\n default='docs/build/html')\n\n parser.add_argument(\n '--doctrees',\n help='Where the doctrees are built',\n type=os.path.realpath,\n default='docs/build/doctrees')\n\n options = parser.parse_args(argv)\n\n bound_build_docs = partial(build_docs, options.source, options.destination, options.doctrees)\n\n # Do the initial build\n bound_build_docs()\n\n # Watch the source directory for changes, build docs again if detected\n observer = Observer()\n observer.schedule(\n BuildDocsHandler(bound_build_docs),\n path=options.source, recursive=True)\n observer.start()\n\n # Set the root for the request handler, overriding Python stdlib current\n # working directory.\n DocsHTTPRequestHandler._root = options.destination\n\n server = 
SocketServer.TCPServer(\n ('', options.port),\n DocsHTTPRequestHandler)\n\n try:\n logger.info('Serving on localhost:{}'.format(options.port))\n server.serve_forever()\n except KeyboardInterrupt:\n sys.stdout.write('\\n')\n logger.info('(stopping server)')\n observer.stop()\n finally:\n observer.join()\n\n logging.info('Server stopped, exiting')\n sys.exit(0)", "def write_data_index(self):\n content = \"\"\n self.remove(os.path.join(config[\"data_subdir\"], \"*\"))\n if self.data_index:\n print(\"- writing data index\")\n content += f\"# [{config['github_repo_name']}]({config['github_pages_url']})\\n\"\n content += \"\\n## Index of Data files in this Repository\\n\"\n for data, links in sorted(self.data_index.items(), key=lambda x: natsort(x[0])):\n if links:\n content += f\"\\n### {data}\\n\"\n content += f\"![{data}]({config['data_subdir']}/{data})\\n\"\n for link in links:\n content += f\"* {link}\\n\"\n data_src = os.path.join(self.src_dir, config[\"data_subdir\"], data)\n data_dst = os.path.join(self.dst_dir, config[\"data_subdir\"], data)\n print(f\"- copying {data_src} to {data_dst}\")\n shutil.copy(data_src, data_dst)\n self.write_md2html(\"data_index\", content)" ]
[ "0.6072376", "0.59161943", "0.5912372", "0.5844684", "0.58258736", "0.58107734", "0.5639663", "0.55557406", "0.54828745", "0.5466139", "0.5445653", "0.5366465", "0.5359489", "0.5355283", "0.5319221", "0.5296515", "0.52074337", "0.5156442", "0.5136858", "0.51300204", "0.51299584", "0.5126457", "0.51052916", "0.5103489", "0.50894153", "0.5048674", "0.5025741", "0.5008948", "0.49880537", "0.49744263" ]
0.7049984
0
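A hypothetical producer-side sketch for the index queue consumed by handle_index above. The handler only shows queue.recv_pyobj(), which matches pyzmq's socket API, so a pyzmq PUSH/PULL pair is assumed here; the ipc address, the example package name and the status dictionary are illustrative stand-ins, not values taken from the source project.

import zmq

# Assumed wiring: the index task would own the matching PULL socket; only the
# recv_pyobj() call on the consumer side is actually shown in the row above.
ctx = zmq.Context.instance()
index_queue = ctx.socket(zmq.PUSH)
index_queue.connect('ipc:///tmp/index-queue')  # hypothetical address

# Ask for the package index of one (illustrative) package to be rewritten...
index_queue.send_pyobj(('PKG', 'numpy'))
# ...or for the homepage to be refreshed with some status statistics.
index_queue.send_pyobj(('HOME', {'builds': 123, 'files': 456}))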
(Re)writes the index of all packages. This is implicitly called when a request to write a package index is received for a package not present in the task's cache.
def write_root_index(self): self.logger.info('writing package index') temp_dir = self.output_path / 'simple' with tempfile.NamedTemporaryFile(mode='w', dir=str(temp_dir), encoding='utf-8', delete=False) as index: try: index.file.write('<!DOCTYPE html>\n') index.file.write( tag.html( tag.head( tag.title('Pi Wheels Simple Index'), tag.meta(name='api-version', value=2), ), tag.body( (tag.a(package, href=package), tag.br()) for package in self.package_cache ) ) ) except BaseException: index.delete = True raise else: os.fchmod(index.file.fileno(), 0o644) os.replace(index.name, str(self.output_path / 'simple' / 'index.html'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)", "def updateIndex(ix, pool_path):\n \n logger.debug('updating search index')\n writer = ix.writer()\n \n exercise_list = [f.name for f in os.scandir(pool_path) if f.is_dir()]\n for ex in exercise_list:\n if ex == '.search_index':\n continue\n task_file = os.path.abspath(os.path.join(pool_path, ex, 'task.tex'))\n if os.path.isfile(task_file):\n logger.info('parsing ' + task_file)\n metaData, task_texcode = parseTaskFile(task_file)\n else:\n logger.warning(ex + ' does not include a task.tex file. skipping entry')\n continue\n \n solution_file = os.path.abspath(os.path.join(pool_path, ex, 'solution.tex'))\n if os.path.isfile(solution_file):\n with open(solution_file, 'r') as f:\n solution_texcode = f.read()\n else:\n logger.warning(ex + ' does not include a solution.tex file')\n solution_texcode = ''\n \n if metaData['date'] == '':\n lastupdate = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n else:\n lastupdate = parse_date(metaData['date'])\n\n writer.add_document(\n folder_name=ex,\n task=task_texcode,\n solution=solution_texcode,\n language=metaData['language'],\n maintainer=metaData['author'],\n lastupdate=lastupdate,\n keywords=re.sub(r',\\s+', ',', metaData['keywords'])\n )\n\n writer.commit()", "def handle_index(self, queue):\n msg, *args = queue.recv_pyobj()\n if msg == 'PKG':\n package = args[0]\n if package not in self.package_cache:\n self.package_cache.add(package)\n self.write_root_index()\n self.write_package_index(package,\n self.db.get_package_files(package))\n elif msg == 'HOME':\n status_info = args[0]\n self.write_homepage(status_info)\n elif msg == 'SEARCH':\n search_index = args[0]\n self.write_search_index(search_index)\n else:\n self.logger.error('invalid index_queue message: %s', msg)", "def write_package_index(self, package, files):\n self.logger.info('writing index for %s', package)\n pkg_dir = self.output_path / 'simple' / package\n mkdir_override_symlink(pkg_dir)\n with tempfile.NamedTemporaryFile(mode='w', dir=str(pkg_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Links for {}'.format(package))\n ),\n tag.body(\n tag.h1('Links for {}'.format(package)),\n ((tag.a(\n f.filename,\n href='{f.filename}#sha256={f.filehash}'.format(f=f), # noqa: E501\n rel='internal'), tag.br())\n for f in files)\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name, str(pkg_dir / 'index.html'))\n try:\n # Workaround for #20: after constructing the index for a\n # package attempt to symlink the \"canonicalized\" package\n # name to the actual package directory. The reasons for\n # doing things this way are rather complex...\n #\n # The older package name must exist for the benefit of\n # older versions of pip. If the symlink already exists *or\n # is a directory* we ignore it. Yes, it's possible to have\n # two packages which both have the same canonicalized name,\n # and for each to have different contents. I don't quite\n # know how PyPI handle this but their XML and JSON APIs\n # already include such situations (in a small number of\n # cases). 
This setup is designed to create canonicalized\n # links where possible but not to clobber \"real\" packages\n # if they exist.\n #\n # What about new packages that want to take the place of a\n # canonicalized symlink? We (and TransferState.commit)\n # handle that by removing the symlink and making a\n # directory in its place.\n canon_dir = pkg_dir.with_name(canonicalize_name(pkg_dir.name)) # noqa: E501\n canon_dir.symlink_to(pkg_dir.name)\n except FileExistsError:\n pass", "def rebuild_all_indexes():\n response = _get_lambda_client().invoke(\n FunctionName=indexer_function_name,\n InvocationType=\"Event\",\n )", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def sendIndex(self):\n self.updateIndex()\n outpkg = json.dumps(self.serverindex)\n self.send(outpkg)", "def do_api_calls_update_cache(self):\n self.get_nodes()\n self.write_to_cache(self.inventory, self.cache_path_cache)\n self.write_to_cache(self.index, self.cache_path_index)", "def make_index(base_dir: str, _start: bool = True) -> None:\n if _start:\n logger.info(\"Generating package index\")\n\n index_path = os.path.join(base_dir, \"Packages\")\n index_gzip_path = os.path.join(base_dir, \"Packages.gz\")\n\n with open(index_path, \"w\") as index_file, gzip.open(\n index_gzip_path, \"wt\"\n ) as index_gzip_file:\n for entry in os.scandir(base_dir):\n if entry.name in (\"Packages\", \"Packages.gz\"):\n pass\n elif entry.is_dir():\n make_index(entry.path, _start=False)\n elif entry.is_file() and entry.name.endswith(\".ipk\"):\n with open(entry.path, \"rb\") as package:\n metadata = read_ipk_metadata(package)\n\n metadata += textwrap.dedent(\n f\"\"\"\\\n Filename: {entry.name}\n SHA256sum: {file_sha256(entry.path)}\n Size: {os.path.getsize(entry.path)}\n\n \"\"\"\n )\n\n index_file.write(metadata)\n index_gzip_file.write(metadata)", "def reindex(self):\n raise NotImplementedError()", "def updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))", "def reindex(self):", "def reindex(self):", "def index_later(self):\n return", "def write_search_index(self, search_index):\n self.logger.info('writing search index')\n with tempfile.NamedTemporaryFile(mode='w', dir=str(self.output_path),\n encoding='utf-8',\n delete=False) as index:\n try:\n json.dump(search_index, index,\n check_circular=False, separators=(',', ':'))\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o664)\n os.replace(index.name, str(self.output_path / 'packages.json'))", "def install_packages_from_index(self, env=None):\n # extract non-source packages from package list\n index_packages = [p for p in self.pkg_arguments if not\n utils.assert_package_is_source(p)]\n # skip this step if there are no packages to be installed\n if not index_packages:\n print(\"No index packages set for installation. 
Skipping ...\")\n return\n # build command for installing packages from index\n cmd_args = {\n 'exe': self.pkg_executable,\n 'cmds': \" \".join(self.pkg_commands),\n 'flags': \" \".join(self.pkg_flags),\n 'pkgs': \" \".join(index_packages),\n }\n cmd_install_index = self.cmd_install.format(**cmd_args)\n print(\"Installing index packages to environment ...\")\n with click_spinner.spinner():\n errno, stdout, stderr = utils.run_command(cmd_install_index,\n env=env, shell=True)\n if errno:\n raise Exception(\"Installation of packages failed (STDERR: {}\"\n .format(stderr))", "def replace(self):\n with zero_downtime_index(self.alias_name, self.index_config()) as target_index:\n self.index_all(target_index)", "def rebuild_index(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n with get_db_connection() as db:\n c = db.cursor()\n execute_with_retry(db, c, self._rebuild_index)", "def _write_index(self):\n # Make sure to only write non-default objects to the index.\n self.store.write_object(\n object=[obj.to_dict() for ns in self.index.values() for obj in ns.values() if not obj.is_default],\n object_id=self.identifier\n )", "def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)", "def build_index(self):\n self.rebuild_index()", "def update_from_index(self, data, **kw):\n packages = _iter_paragraphs_path(data, **kw)\n self.add_packages([{'deb822': p} for p in packages])", "def _update_index(self):\n start_time = datetime.datetime.now()\n sys.stdout.write(\"Updating index. Depending on the size of your music \"\n \"collection this may take some time, so please be patient. \"\n \"(Update started at %s)\\n\" % start_time)\n new_index_file = \"%s/music_index_%s.txt\" % (self.index_dir,\n start_time.strftime(\"%Y%m%d_%H%M%S\"))\n files = (os.path.join(tup[0], f) for d in self.music_dirs \n for tup in os.walk(d) \n for f in tup[2] )\n \n with open(new_index_file, \"w\") as fh:\n for filename in files:\n fh.write(\"%s\\n\" % filename)\n \n end_time = datetime.datetime.now()\n sys.stdout.write(\"Music index updated (created index file '%s')\\n\" \n \"Update duration:%s\\n\" % \n (new_index_file, end_time - start_time))", "def save_index(self):\n vsn_objs = [dict(Id = v['id'], Name = v['name']) for v in self.versions]\n self.backend.write_json(dict(\n Versions = vsn_objs,\n Channels = [], # This is unused.\n ApiVersion = 0,\n ), self.index_path())", "def update_index(signum):\n cdx = redis_cli.zrange('ipfs:cdxj', 0, -1)\n cdx = ''.join(cdx)\n buff = BytesIO(cdx)\n\n # Add New Index\n res = ipfs_api.add(CustomNameStream(buff, 'index.cdxj'))\n print('Updating Index: ' + str(res))\n\n # Register with IPNS\n res = ipfs_api.name_publish(res['Hash'])\n print res", "def solr_reindex(where=None):\n config = get_config(where)\n with settings(host_string=config['host_string']), cd(config['installation_dir']):\n cmd = 'bin/django update_index dasa --batch-size=5000 --remove --verbosity=2'\n run(cmd)", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def _Dynamic_UpdateIndex(self, index, void, request_id=None):\n self._RemoteSend(index, void, \"UpdateIndex\", request_id)\n return", "def 
reindex(self):\n result = self.database.command('reIndex', self.name)\n del result['serverUsed']\n return result", "def reset_file_index_cache() -> None:\n fileindex_cache_five_minutes.invalidate()" ]
[ "0.6794511", "0.6569954", "0.65582806", "0.65071815", "0.6454542", "0.642781", "0.62335306", "0.6226423", "0.6206927", "0.61409336", "0.6128224", "0.6067755", "0.6067755", "0.6058486", "0.6013691", "0.60030484", "0.59971786", "0.5969887", "0.59236366", "0.5873569", "0.5845069", "0.58189785", "0.5770584", "0.57364976", "0.5725883", "0.57140076", "0.57017857", "0.56936353", "0.56662285", "0.5630509" ]
0.7205941
0
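Both index writers in these rows publish files the same way: render into a NamedTemporaryFile created inside the destination directory, fix its permissions, then os.replace() it over index.html so readers only ever see the old page or the complete new one. A minimal standalone sketch of that pattern, assuming a POSIX filesystem (for os.fchmod) and using an explicit unlink on failure rather than the index.delete flag the quoted code relies on:

import os
import tempfile
from pathlib import Path

def write_index_atomically(target_dir: Path, html: str) -> None:
    target_dir.mkdir(parents=True, exist_ok=True)
    # The temporary file lives in the target directory so that os.replace()
    # stays on one filesystem and is therefore atomic.
    tmp = tempfile.NamedTemporaryFile(mode='w', dir=str(target_dir),
                                      encoding='utf-8', delete=False)
    try:
        tmp.write(html)
        tmp.flush()
        os.fchmod(tmp.fileno(), 0o644)  # POSIX-only, as in the quoted code
    except BaseException:
        tmp.close()
        os.unlink(tmp.name)  # drop the partial file on any failure
        raise
    else:
        tmp.close()
        os.replace(tmp.name, str(target_dir / 'index.html'))

# Illustrative call with a placeholder page:
write_index_atomically(Path('/tmp/simple/example'),
                       '<!DOCTYPE html>\n<html><body>stub</body></html>\n')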
(Re)writes the index of the specified package. The file metadata (including the hash) is retrieved from the database, never from the filesystem.
def write_package_index(self, package, files): self.logger.info('writing index for %s', package) pkg_dir = self.output_path / 'simple' / package mkdir_override_symlink(pkg_dir) with tempfile.NamedTemporaryFile(mode='w', dir=str(pkg_dir), encoding='utf-8', delete=False) as index: try: index.file.write('<!DOCTYPE html>\n') index.file.write( tag.html( tag.head( tag.title('Links for {}'.format(package)) ), tag.body( tag.h1('Links for {}'.format(package)), ((tag.a( f.filename, href='{f.filename}#sha256={f.filehash}'.format(f=f), # noqa: E501 rel='internal'), tag.br()) for f in files) ) ) ) except BaseException: index.delete = True raise else: os.fchmod(index.file.fileno(), 0o644) os.replace(index.name, str(pkg_dir / 'index.html')) try: # Workaround for #20: after constructing the index for a # package attempt to symlink the "canonicalized" package # name to the actual package directory. The reasons for # doing things this way are rather complex... # # The older package name must exist for the benefit of # older versions of pip. If the symlink already exists *or # is a directory* we ignore it. Yes, it's possible to have # two packages which both have the same canonicalized name, # and for each to have different contents. I don't quite # know how PyPI handle this but their XML and JSON APIs # already include such situations (in a small number of # cases). This setup is designed to create canonicalized # links where possible but not to clobber "real" packages # if they exist. # # What about new packages that want to take the place of a # canonicalized symlink? We (and TransferState.commit) # handle that by removing the symlink and making a # directory in its place. canon_dir = pkg_dir.with_name(canonicalize_name(pkg_dir.name)) # noqa: E501 canon_dir.symlink_to(pkg_dir.name) except FileExistsError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_root_index(self):\n self.logger.info('writing package index')\n temp_dir = self.output_path / 'simple'\n with tempfile.NamedTemporaryFile(mode='w', dir=str(temp_dir),\n encoding='utf-8',\n delete=False) as index:\n try:\n index.file.write('<!DOCTYPE html>\\n')\n index.file.write(\n tag.html(\n tag.head(\n tag.title('Pi Wheels Simple Index'),\n tag.meta(name='api-version', value=2),\n ),\n tag.body(\n (tag.a(package, href=package), tag.br())\n for package in self.package_cache\n )\n )\n )\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o644)\n os.replace(index.name,\n str(self.output_path / 'simple' / 'index.html'))", "def make_index(base_dir: str, _start: bool = True) -> None:\n if _start:\n logger.info(\"Generating package index\")\n\n index_path = os.path.join(base_dir, \"Packages\")\n index_gzip_path = os.path.join(base_dir, \"Packages.gz\")\n\n with open(index_path, \"w\") as index_file, gzip.open(\n index_gzip_path, \"wt\"\n ) as index_gzip_file:\n for entry in os.scandir(base_dir):\n if entry.name in (\"Packages\", \"Packages.gz\"):\n pass\n elif entry.is_dir():\n make_index(entry.path, _start=False)\n elif entry.is_file() and entry.name.endswith(\".ipk\"):\n with open(entry.path, \"rb\") as package:\n metadata = read_ipk_metadata(package)\n\n metadata += textwrap.dedent(\n f\"\"\"\\\n Filename: {entry.name}\n SHA256sum: {file_sha256(entry.path)}\n Size: {os.path.getsize(entry.path)}\n\n \"\"\"\n )\n\n index_file.write(metadata)\n index_gzip_file.write(metadata)", "def updateIndex(ix, pool_path):\n \n logger.debug('updating search index')\n writer = ix.writer()\n \n exercise_list = [f.name for f in os.scandir(pool_path) if f.is_dir()]\n for ex in exercise_list:\n if ex == '.search_index':\n continue\n task_file = os.path.abspath(os.path.join(pool_path, ex, 'task.tex'))\n if os.path.isfile(task_file):\n logger.info('parsing ' + task_file)\n metaData, task_texcode = parseTaskFile(task_file)\n else:\n logger.warning(ex + ' does not include a task.tex file. 
skipping entry')\n continue\n \n solution_file = os.path.abspath(os.path.join(pool_path, ex, 'solution.tex'))\n if os.path.isfile(solution_file):\n with open(solution_file, 'r') as f:\n solution_texcode = f.read()\n else:\n logger.warning(ex + ' does not include a solution.tex file')\n solution_texcode = ''\n \n if metaData['date'] == '':\n lastupdate = datetime.datetime(1970, 1, 1, 0, 0, 0, 0)\n else:\n lastupdate = parse_date(metaData['date'])\n\n writer.add_document(\n folder_name=ex,\n task=task_texcode,\n solution=solution_texcode,\n language=metaData['language'],\n maintainer=metaData['author'],\n lastupdate=lastupdate,\n keywords=re.sub(r',\\s+', ',', metaData['keywords'])\n )\n\n writer.commit()", "def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))", "def write_data_index(self):\n content = \"\"\n self.remove(os.path.join(config[\"data_subdir\"], \"*\"))\n if self.data_index:\n print(\"- writing data index\")\n content += f\"# [{config['github_repo_name']}]({config['github_pages_url']})\\n\"\n content += \"\\n## Index of Data files in this Repository\\n\"\n for data, links in sorted(self.data_index.items(), key=lambda x: natsort(x[0])):\n if links:\n content += f\"\\n### {data}\\n\"\n content += f\"![{data}]({config['data_subdir']}/{data})\\n\"\n for link in links:\n content += f\"* {link}\\n\"\n data_src = os.path.join(self.src_dir, config[\"data_subdir\"], data)\n data_dst = os.path.join(self.dst_dir, config[\"data_subdir\"], data)\n print(f\"- copying {data_src} to {data_dst}\")\n shutil.copy(data_src, data_dst)\n self.write_md2html(\"data_index\", content)", "def handle_index(self, queue):\n msg, *args = queue.recv_pyobj()\n if msg == 'PKG':\n package = args[0]\n if package not in self.package_cache:\n self.package_cache.add(package)\n self.write_root_index()\n self.write_package_index(package,\n self.db.get_package_files(package))\n elif msg == 'HOME':\n status_info = args[0]\n self.write_homepage(status_info)\n elif msg == 'SEARCH':\n search_index = args[0]\n self.write_search_index(search_index)\n else:\n self.logger.error('invalid index_queue message: %s', msg)", "def _rebuild_index(self):\n from django.core.management import call_command\n call_command('rebuild_index', interactive=False, verbosity=0)", "def reindex(self):", "def reindex(self):", "def update_index(signum):\n cdx = redis_cli.zrange('ipfs:cdxj', 0, -1)\n cdx = ''.join(cdx)\n buff = BytesIO(cdx)\n\n # Add New Index\n res = ipfs_api.add(CustomNameStream(buff, 'index.cdxj'))\n print('Updating Index: ' + str(res))\n\n # Register with IPNS\n res = ipfs_api.name_publish(res['Hash'])\n print res", "def write_main_index(self):\n\n for miEntry in self.mainIndex:\n self.db_file.write(miEntry.get_representation())", "def write_search_index(self, search_index):\n self.logger.info('writing search index')\n with tempfile.NamedTemporaryFile(mode='w', dir=str(self.output_path),\n encoding='utf-8',\n delete=False) as index:\n try:\n json.dump(search_index, index,\n check_circular=False, separators=(',', ':'))\n except BaseException:\n index.delete = True\n raise\n else:\n os.fchmod(index.file.fileno(), 0o664)\n os.replace(index.name, str(self.output_path / 'packages.json'))", "def _update_index(self):\n start_time = datetime.datetime.now()\n sys.stdout.write(\"Updating index. 
Depending on the size of your music \"\n \"collection this may take some time, so please be patient. \"\n \"(Update started at %s)\\n\" % start_time)\n new_index_file = \"%s/music_index_%s.txt\" % (self.index_dir,\n start_time.strftime(\"%Y%m%d_%H%M%S\"))\n files = (os.path.join(tup[0], f) for d in self.music_dirs \n for tup in os.walk(d) \n for f in tup[2] )\n \n with open(new_index_file, \"w\") as fh:\n for filename in files:\n fh.write(\"%s\\n\" % filename)\n \n end_time = datetime.datetime.now()\n sys.stdout.write(\"Music index updated (created index file '%s')\\n\" \n \"Update duration:%s\\n\" % \n (new_index_file, end_time - start_time))", "def _add_to_index( env, meta_dict, file_str, logger ):\n global adapter_glob\n if adapter_glob is not None:\n adapter = adapter_glob\n else:\n logger.warning( u\"Connecting to index...\" )\n adapter = adapter_file.adapter(env)\n adapter_glob = adapter\n doc = document(\n env[\"metadata\"][\"known_keys\"].keys(),\n meta_dict,\n env,\n )\n return adapter.add(doc, boosts=env[\"metadata\"][\"boosts\"])\n #logger.info(u\"Added to index [%s]\", file_str)", "def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')", "def from_pypi(request, fpkgs='/find-packages'):\n name = request.matchdict['name']\n version = request.matchdict['version']\n dists = PyPi.release_urls(name, version)\n flash = request.session.flash\n if not dists:\n flash(\"%s-%s not found\" %(name, version))\n return HTTPFound(fpkgs)\n\n candidates = [x for x in dists if request.index.EXTS.match(x['filename'])]\n\n if candidates[0]['md5_digest'] in request.index_data:\n logger.debug('Package %s-%s already in index' %(name, version))\n return HTTPFound('/index/%s' %name)\n\n details = candidates[0]\n url = details['url']\n filename = details['filename']\n newfile = None\n try:\n resp = requests.get(url)\n newfile = request.file_root / filename\n newfile.write_bytes(resp.content)\n except HTTPError, e:\n error = \"HTTP Error: %d %s - %s\" %(e.code, exc.status_map[e.code].title, url)\n logger.error(error)\n flash(error)\n except URLError, e:\n logger.error(\"URL Error: %s, %s\", e.reason , url)\n flash('Url error attempting to grab %s: %s' %(url, e.reason))\n\n if newfile is not None:\n try:\n added_event = event.PackageAdded(request.index, path=newfile)\n request.registry.notify(added_event)\n flash('%s-%s was installed into the index successfully.' 
% (name, version))\n return HTTPFound('/index/%s' %name)\n except Exception, e:\n flash('Issue with adding %s to index: See logs: %s' % (newfile.name, e))\n\n return HTTPFound(fpkgs)", "def write_index(self):\n self.Lock = True\n self.file_out.seek(self.index_offset)\n for identifier, offset in self.index.items():\n self._write_identifier(identifier)\n self._write_offset(offset)", "def reindex(self):\n raise NotImplementedError()", "def reindex(self):\n self.index.drop_db()\n objectpath = os.path.join(self.rootpath, self.OBJECTPATH)\n for root, dirs, files in os.walk(objectpath, topdown=False):\n for name in files:\n blob_uuid = name\n self.index.update_from_metadata(self.load_blob_metadata(blob_uuid))", "def build_index(self):\n self.rebuild_index()", "def update_from_index(self, data, **kw):\n packages = _iter_paragraphs_path(data, **kw)\n self.add_packages([{'deb822': p} for p in packages])", "def build_index():\n pass", "def updateIndex(self):\n for root, dirs, files in os.walk(self.serverdir):\n for d in dirs:\n if not d.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, d), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,d)), os.path.getmtime(os.path.join(root, d)))\n for f in files:\n if not f.startswith('.'):\n relpath = os.path.relpath(os.path.join(root, f), self.serverdir)\n self.serverindex[relpath] = (self.getNametype(os.path.join(root,f)), os.path.getmtime(os.path.join(root, f)))", "def update_image_index(release_label, apt_repo, common_config, image_name):\n s3 = boto3.client('s3')\n\n # Helper methods\n json.load_s3 = lambda f: json.load(s3.get_object(Bucket=apt_repo, Key=f)['Body'])\n json.dump_s3 = lambda obj, f: s3.put_object(Bucket=apt_repo,\n Key=f,\n Body=json.dumps(obj, indent=2))\n\n index_key = release_label + '/images/index'\n\n _, flavour, distribution, release_label, timestamp = image_name.split('_')\n\n # Read checksum from generated file\n with open(f'/tmp/{image_name}', 'r') as checksum_file:\n checksum = checksum_file.read().replace('\\n', '').split(' ')[0]\n os.remove(f'/tmp/{image_name}')\n\n image_data = {\n 'raw': {\n flavour: {\n distribution: {\n 'file': image_name,\n 'checksum': checksum\n }\n }\n }\n }\n\n data = {}\n try:\n # Wait for file to be ready to write\n wait_for_index(s3, apt_repo, index_key)\n data = json.load_s3(index_key)\n except botocore.exceptions.ClientError as error:\n # If file doesn't exists, we'll create a new one\n if error.response['Error']['Code'] == 'NoSuchKey':\n click.echo('Index file doesn\\'t exist, creating a new one')\n\n try:\n data[timestamp] = merge_dicts(data[timestamp], image_data)\n except KeyError:\n data[timestamp] = image_data\n\n # Write data to index file\n json.dump_s3(data, index_key)\n tag_file(s3, apt_repo, index_key, 'Lock', 'False')\n\n # Invalidate image index cache\n if 'cloudfront_distribution_id' in common_config:\n invalidate_file_cloudfront(common_config['cloudfront_distribution_id'], index_key)", "def write_index(self, outdir, froot='gen', relative_to=None):\r\n if self.written_modules is None:\r\n raise ValueError('No modules written')\r\n # Get full filename path\r\n path = os.path.join(outdir, froot+self.rst_extension)\r\n # Path written into index is relative to rootpath\r\n if relative_to is not None:\r\n relpath = outdir.replace(relative_to + os.path.sep, '')\r\n else:\r\n relpath = outdir\r\n idx = open(path,'wt')\r\n w = idx.write\r\n w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\\n\\n')\r\n w('.. 
toctree::\\n\\n')\r\n for f in self.written_modules:\r\n w(' %s\\n' % os.path.join(relpath,f))\r\n idx.close()", "def rewrite(self):\n for f in self.files:\n metadata = dict()\n metadata[\"description\"] = f.metadata.get(\"desc\", \"Unknown\")\n metadata[\"script\"] = os.path.basename(f.filename)\n metadata[\"requires\"] = []\n for package, component in f.requires:\n if package == self.key:\n metadata[\"requires\"].append(\"/\" + component)\n else:\n metadata[\"requires\"].append(package + \"/\" + component)\n metadata[\"provides\"] = [ p[1] for p in f.provides ]\n # Resolve symlinks\n real_filename = os.path.realpath(f.filename)\n LOG.info(\"Editing: \" + real_filename)\n new_filename = f.filename + \".new\"\n new = file(new_filename, \"w\")\n new.write(\"/*\\n---\\n\")\n new.write(yaml.dump(metadata))\n new.write(\"\\n...\\n*/\\n\")\n new.write(file(f.filename).read())\n new.close()\n os.rename(new_filename, real_filename)\n\n package_data = dict()\n package_data[\"name\"] = self.key\n package_data[\"sources\"] = []\n package_data[\"version\"] = \"Unknown\"\n package_data[\"copyright\"] = \"Unknown\"\n package_data[\"description\"] = \"Unknown\"\n target_dir = os.path.dirname(self.scripts_json_filename)\n # package.yml is typically in the parent of the scripts.json dir\n if os.path.basename(target_dir) == \"Source\":\n target_dir = os.path.dirname(target_dir)\n target_filename = os.path.join(target_dir, \"package.yml\")\n for f in self.files:\n common = os.path.commonprefix([target_filename, f.filename])\n source_file = f.filename[len(common):]\n package_data[\"sources\"].append(source_file)\n LOG.info(\"Writing: \" + target_filename)\n out = file(target_filename, \"w\")\n out.write(yaml.dump(package_data))\n out.close()", "def create_new_index(self, path: str):\n if path.endswith(\"/\"):\n path = path[:-1]\n self.file_index = ([(root, files)\n for root, dirs, files in os.walk(path)\n if files])\n self.modified_time = os.path.getmtime(path)\n \n with open(os.path.join(\n INDEX_DIR, path.replace(\"/\", \"_\") + \".pkl\"\n ), \"wb\") as f:\n pickle.dump((self.file_index, self.modified_time), f)", "def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())", "def update_info_when_add(descriptor, rel_path_from_repository,\n mtime, file_sha1_hash, index_dict):\n # If the file is already tracked, update it\n if rel_path_from_repository in index_dict.keys():\n # If the file is already up to date, no need to rewrite.\n if (mtime == index_dict[rel_path_from_repository][0]\n and\n file_sha1_hash == index_dict[rel_path_from_repository][2]):\n return\n # Move the file descriptor to the correct position\n lseek(descriptor, index_dict[rel_path_from_repository][5], 0)\n # Update the timestamp. current sha1 hash, add sha1 hash\n update_file_index(descriptor, \" \".join([mtime,\n file_sha1_hash,\n file_sha1_hash]), 0)\n # Else add a new index line.\n else:\n lseek(descriptor, 0, 2)\n add_new_index(descriptor, mtime, file_sha1_hash,\n rel_path_from_repository)", "def index_xml(directory, db):\n xml.index_directory(directory, db)" ]
[ "0.6997369", "0.6264878", "0.6230651", "0.60717297", "0.6042983", "0.6042155", "0.60356575", "0.59532493", "0.59532493", "0.5952769", "0.5850773", "0.5839971", "0.5826849", "0.5805045", "0.5797951", "0.57742566", "0.5764337", "0.5759863", "0.5757269", "0.57194316", "0.5701246", "0.5694564", "0.56638885", "0.56479025", "0.5625071", "0.55880183", "0.55796987", "0.5506631", "0.54794854", "0.54793924" ]
0.7133477
0
A simple worker loop tutorial, where a person goes to an assigned office during work time and goes back home after work.
def simple_worker_loop() -> None:
    print('\nSimple worker loop tutorial', flush=True)

    # the first thing to do at the start of any experiment is to initialize a few global parameters
    # these parameters are shared across the entire repo
    ps.init_globals(
        seed=0,  # if None, the experiment is not seeded and would initialized differently each time
        registry=None,  # if None, a registry is created and used
        # a registry does bookkeeping of all people and locations used in the experiment
    )

    # init locations
    home = ps.env.Home()
    work = ps.env.Office()  # any subclass of BusinessLocation can be a workplace, e.g. Bar, Restaurant, Hospital, etc.

    # init a worker
    person = ps.env.Worker(
        person_id=ps.env.PersonID('worker', age=35),  # person_id is a unique id for this person
        home=home.id,  # specify the home_id that person is assigned to
        work=work.id,  # specify the id of the person's workplace
    )

    # Init simulator
    sim = ps.env.PandemicSim(
        locations=[work, home],  # a list of all locations
        persons=[person]  # a list of all persons
    )
    # PandemicSim by default creates and uses randomized testing and an SEIR infection model

    # Iterate through steps in the simulator, where each step advances an hour
    for _ in trange(24, desc='Simulating hour'):
        sim.step()

    # Or iterate by advancing in days by calling step_day in the simulator
    for _ in trange(10, desc='Simulating day'):
        sim.step_day()

    # The above loop iterates the simulator with no movement restrictions
    # To impose restrictions, for example, Stage-2 of austin_regulations
    sim.impose_regulation(ps.sh.austin_regulations[2])

    # Calling step_day now will run the simulator under Stage-2 regulation
    for _ in trange(10, desc='Simulating day (Under Stage-2)'):
        sim.step_day()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main() -> None:\n worker = Worker()\n worker.do_work()", "def run_worker(self):\n\n # exec(open('restarter.py').read())\n # sys.exit()\n self.update_session_state()\n currentTime = QTime().currentTime()\n fromTime = QTime(int(self.settings.TECHFROMHOUR), int(self.settings.TECHFROMMIN))\n toTime = QTime(int(self.settings.TECHTOHOUR), int(self.settings.TECHTOMIN))\n sessionState = self.lblMarket.text()\n\n if fromTime < currentTime < toTime:\n print(\"Worker skept-Technical break : \", fromTime.toString(\"hh:mm\"), \" to \", toTime.toString(\"hh:mm\"))\n self.update_console(\"Technical break untill \" + toTime.toString(\"hh:mm\"))\n\n else:\n self.update_console(\"Starting Worker- UI Paused\")\n self.uiTimer.stop() # to not cause an errors when lists will be resetted\n worker = Worker(\n self.ibkrworker.process_positions_candidates) # Any other args, kwargs are passed to the run function\n worker.signals.result.connect(self.update_ui)\n worker.signals.status.connect(self.update_status)\n worker.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(worker)", "def startworking():\r\n #In the future have the manager program or from the website implement this arguments to a route\r\n #the program will download the file from the website\r\n global exe_name\r\n global Task_Conditional\r\n task_data = None\r\n while task_data is None:\r\n task_data = recieve_data_from_server(\"get_task\")\r\n if task_data is None:\r\n time.sleep(5)\r\n else:\r\n exe_name = task_data[\"exe_name\"]\r\n print('Working on the task \"{}\"'.format(exe_name))\r\n get_file(exe_name)\r\n Task_Conditional = task_data[\"Task_conditional\"]\r\n print(\"loading\")\r\n t1 = time.time()\r\n task_divider(task_data[\"first_num\"], task_data[\"last_num\"])\r\n t2 = time.time()\r\n print(\"ready {}\".format(t2-t1))", "def main():\n global repeat\n regime = collect()\n start = int(raw_input(\"Which line of the exercise script would you like to begin with? 
\")) - 1\n regime = regime[start:]\n say(\"Ready?\")\n time.sleep(1)\n for exercise in regime:\n coach(exercise[:-1])\n while repeat:\n repeat = False\n coach(exercise[:-1])\n say(\"Session complete.\")", "def doctest_BackgroundWorkerThread_scheduleNextWork():", "def doctest_BackgroundWorkerThread_forSite():", "def do_work(self):", "async def home(network):\n for w in get_all_wallets():\n WALLETS_GLOBAL.append(w)\n while True:\n msg = \"Proof Wallet: Home\\n\\n\"\n msg += \"1) Create wallet\\n\"\n msg += \"2) Load wallet\\n\"\n msg += \"3) Restore wallet\\n\"\n msg += \"4) Exit\\n\"\n ch = await ux_show_story(msg, ['1', '2', '3', '4'])\n if ch == '1':\n await create_wallet(network)\n elif ch == '2':\n await load_wallet(network)\n elif ch == '3':\n await restore_wallet(network)\n else:\n sys.exit(0)", "def run(self):\n self.workhorse_.run()\n try:\n while(True):\n self.workhorse_.heartbeat()\n self.periodic_snapshot()\n except workflow.NoMoreWork:\n print \"Fini.\"\n exit(0)\n exit(-1)", "def show_flight_schedule_of_employee(self, staff_ob):\n\n print(\"Continue to pick dates\")\n print(\"\\nB Back\\nC Continue\\n\")\n\n action_str = self.choose_action([\"b\", \"c\"])\n while action_str == False:\n action_str = self.choose_action([\"b\", \"c\"])\n\n if action_str == \"b\":\n return\n\n elif action_str == \"c\":\n \n valid_interval = False\n while valid_interval != True:\n date_from = self.get_date_from()\n while date_from == False:\n date_from = self.get_date_from()\n date_to = self.get_date_to()\n while date_to == False:\n date_to = self.get_date_to()\n valid_interval = self.get_valid_interval(date_from, date_to)\n\n flights_on_asked_time = self.llapi.get_employee_schedule_by_date(staff_ob, date_from, date_to)\n \n counter = 1\n if len(flights_on_asked_time) == 0:\n print(f\"\\n{staff_ob.name} has no flights on selected period\")\n\n else:\n print(self.LENGTH_STAR * \"*\")\n print(f\"{staff_ob.name.upper()}'S FLIGHT SCHEDULE\")\n \n for flight_ob in flights_on_asked_time:\n print(flight_ob.print_schedule(counter))\n counter += 1\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return", "def test_workon(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5)\n assert len(experiment.fetch_trials()) == 5\n assert experiment.name == \"loop\"\n assert isinstance(experiment._experiment._storage, Legacy)\n assert isinstance(experiment._experiment._storage._db, EphemeralDB)", "def worker(self):\n\t\[email protected](\"MISSION_PROPERTY_CHANGED\")\n\t\[email protected](\"DOWNLOAD_EP_COMPLETE\")\n\t\tdef dummy():\n\t\t\t\"\"\"Set the edit flag after mission changed.\"\"\"\n\t\t\tself.edit = True\n\n\t\[email protected](\"WORKER_DONE\")\n\t\tdef dummy():\n\t\t\t\"\"\"Save missions after the thread terminate.\"\"\"\n\t\t\tself.save()\n\n\t\tself.load()\n\t\twhile True:\n\t\t\tself.wait(setting.getint(\"autosave\", 5) * 60)\n\t\t\tself.save()", "def run(self):\n while True:\n self.menu()\n if self.menu_choice == '1':\n self.show_today_tasks()\n elif self.menu_choice == '2':\n self.show_weeks_tasks()\n elif self.menu_choice == '3':\n self.show_all_tasks()\n elif self.menu_choice == '4':\n self.show_missed_tasks()\n elif self.menu_choice == '5':\n self.add_task()\n elif self.menu_choice == '6':\n self.delete_task()\n else:\n print('Bye!')\n break", "def main():\n worm = 
Worm()\n while True:\n print(worm.step())", "def hr_main():\n while True:\n global pend_app_ind\n global appr_ind\n global rej_ind\n global skipped_apps\n global num_pending\n user_choice = show_hr_menu()\n if user_choice == '1':\n skipped_apps = 0\n pend_app_ind = 1\n num_pending = len(pending)-1\n check_worksheet('pending')\n elif user_choice == '2':\n appr_ind = 1\n check_worksheet('approved')\n elif user_choice == '3':\n rej_ind = 1\n check_worksheet('rejected')\n next_action()", "def main(self ):\n print(\"Good morning, I hope you are well today.\")\n print(\"What can I do for you?\")\n while True:\n sentence = input(\"\\n>> \")\n if sentence.upper() == \"QUIT\":\n print(\"Have a nice day!\")\n break\n print(Doctor().reply(sentence))", "def main():\r\n future_student = \"Future begins\"\r\n print_message(future_student)\r\n print_message(\"Dreams begin\")\r\n print_message(\"Aspirations begin\")", "def main():\n # attendance_leaderboard(CYCLE)\n # metcon_leaderboards(CYCLE)\n weightsheets(CYCLE, TESTING_START, TESTING_END)", "def work(self, job):\n pass", "def main(self):\n\n self.setRecordAudio(True)\n self.general()\n sleep(1)\n self.introduction()\n sleep(1)\n self.get_name()\n sleep(1)\n self.get_age()\n sleep(1)\n self.get_origin()\n sleep(1)\n self.get_company()\n sleep(1)\n self.get_travel_route()\n sleep(1)\n self.get_entrance()\n sleep(1)\n self.get_documentation()\n sleep(1)\n self.sayAnimated(\n 'OK ' + self.name + '. We would like to know why you came to The Netherlands. Can you please answer the following '\n 'questions with yes, or, no?')\n self.speechLock.acquire()\n self.stopListening()\n self.get_exclusion()\n sleep(1)\n self.get_conflict()\n sleep(1)\n self.get_inhumanity()\n sleep(1)\n self.get_family()\n sleep(1)\n\n # end interview and save answers\n self.wrapup()\n self.store_story()", "def main():\n joke_util = JokeUtility()\n for i in xrange(7):\n print \"{}. \".format(i + 1) + joke_util.get_joke()", "def work(self):\n time.sleep(random.randint(0, 200) / 100)\n pass", "def main():\n number = sys.argv[1]\n url_user = \"https://jsonplaceholder.typicode.com/users/{}\".format(number)\n url_tasks = (\"https://jsonplaceholder.typicode.com/users/{}/todos\".\n format(number))\n response = requests.get(url_tasks)\n tasks = response.json()\n user_info = requests.get(url_user).json()\n employee_name = user_info[\"name\"]\n list_of_done_tasks = [x for x in tasks if x['completed']]\n number_of_done_tasks = len(list_of_done_tasks)\n total_task_number = len(tasks)\n print(\"Employee {} is done with tasks({}/{}):\".format(employee_name,\n number_of_done_tasks,\n total_task_number))\n for task in list_of_done_tasks:\n print(\"\\t {}\".format(task[\"title\"]))", "def displayWorkout():\n\n return render_template(\"workout.html\")", "def worker(ctx_obj):\n execute(start_worker_command(settings=ctx_obj['settings']))", "def task():\n\n\tprint('Example task executed.')", "def main():\n game = \"\"\n while (game.lower() != 'q'):\n game = determineGame() #returns \"overwatch\" or \"rocket league\"\n gameInfo = getStatistics(game) #gets information about the game and saves it in a dictionary\n if gameInfo['name'] == \"Overwatch\":\n owWorkout(gameInfo) #pass in stat\n elif gameInfo['name'] == \"Rocket League\":\n rlWorkout(gameInfo)\n else:\n print(\"you screwed up somewhere\")\n printStats(gameInfo)\n printAccumulatedWorkout(workoutInfo)\n saveGameData(gameInfo)\n saveWorkoutData(workoutInfo)\n game = input(\"Another game? 
(q to quit)\")", "def home():\n return \"Alive!\"", "def main():\n while True:\n employee_id = get_employee_input_int('TEST DATA: Enter employee ID to look up for the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} has a grade = {}, Hence gets {} per hours\\n'\n .format(employee.full_name, employee.grade, payscale.salary))\n HR_Options(employee, payscale)\n break", "def main():\n\tprint(\"Welcome to TicTacToe\")\n\tboard = Board()\n\twhile (not board.isOver()):\n\t\tprint(\"It is {0}'s turn\".format(board.current) + board.__str__())\n\t\tmove = input('Where would you like to go? : ').strip()\n\t\tif (move == 'q'):\n\t\t\tbreak\n\t\telif (board.makeMove(move) == 1):\n\t\t\tboard.switchPlayer()\n\t\telse:\n\t\t\tprint(\"I didn't understand your input, these are the valid inputs:\\nentering 'q' will quit out of the game.\\n\")\n\t\t\tprint(\"entering a number will place the peice in that box, the numbers are as follows:\\n \\n1|2|3\\n-----\\n4|5|6\\n-----\\n7|8|9\\n\")\n\tprint(board.__str__() + \"\\nGame Over\")\n\tif (board.isOver() is Piece.EX or board.isOver() is Piece.OH):\n\t\tprint(\"Player {0} wins!\".format(board.isOver())) \n\telse:\n\t\tprint(\"It was a draw\")" ]
[ "0.6303065", "0.61887157", "0.6119434", "0.609201", "0.5847591", "0.58374554", "0.58043665", "0.5719119", "0.5713537", "0.5688086", "0.56838995", "0.56798404", "0.5606069", "0.560276", "0.55847", "0.5574085", "0.55657315", "0.5554054", "0.55216855", "0.55083454", "0.5506641", "0.5462876", "0.5453438", "0.5422824", "0.5381506", "0.5372375", "0.5365868", "0.53534395", "0.5339513", "0.5330003" ]
0.70773005
0
Returns a PREMIS valid hash function name, if possible.
def convert_to_premis_hash_function(hash_type):
    if hash_type.lower().startswith("sha") and "-" not in hash_type:
        hash_type = "SHA-" + hash_type.upper()[3:]
    elif hash_type.lower() == "md5":
        return "MD5"

    return hash_type
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_function_key(fn):\n return hashlib.md5(fn.func_code.co_code).hexdigest()", "def _function_sig_key(name: str, *args: Any, **kwargs: Any) -> int:\n function_sig = name\n for arg in args:\n function_sig += str(arg)\n for _, value in kwargs.items():\n function_sig += str(value)\n\n return hash(function_sig)", "def getHashLfn(lfn):\n return hashlib.sha224(lfn).hexdigest()", "def default_hash():\n return \"!\"", "def func_hash(self) -> str:\n\n return self.call_data[:10]", "def _hash_func(self):\r\n func_code_h = hash(getattr(self.func, '__code__', None))\r\n return id(self.func), hash(self.func), func_code_h", "def name_hash(namepart):\n return sha256(os.fsencode(namepart)).hexdigest()", "def hashname(self):\n return hashlib.md5(self.name.encode('utf-8')).hexdigest()", "def get_hashcmd(hashalgo):\n import hashlib\n\n if hashalgo == \"MD5\":\n return lambda filename: hashlib.md5(open(filename, \"rb\").read()).hexdigest()\n elif hashalgo == \"SHA224\":\n return lambda filename: hashlib.sha224(open(filename, \"rb\").read()).hexdigest()\n if hashalgo == \"SHA256\":\n return lambda filename: hashlib.sha256(open(filename, \"rb\").read()).hexdigest()\n if hashalgo == \"SHA384\":\n return lambda filename: hashlib.sha384(open(filename, \"rb\").read()).hexdigest()\n if hashalgo == \"SHA512\":\n return lambda filename: hashlib.sha512(open(filename, \"rb\").read()).hexdigest()\n else:\n return None", "def _curve_name_to_hash_name(curve_name):\n if curve_name == \"NIST256p\":\n return \"sha256\"\n if curve_name == \"NIST384p\":\n return \"sha384\"\n if curve_name == \"NIST521p\":\n return \"sha512\"\n raise TLSIllegalParameterException(\n \"Curve {0} is not supported in TLS 1.3\".format(curve_name))", "def _fingerprint(self):\n hasher = hashlib.md5()\n source = inspect.getsource(self._func)\n hasher.update(source.encode('utf-8'))\n\n return hasher.hexdigest()", "def function_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"function_name\")", "def hash_cli_name(name):\n from hashlib import blake2b\n return blake2b(name.encode(), digest_size=32).hexdigest()", "def _generate_function_specific_name(a, vertices):\n coeff_hash = hash(str(a))\n if coeff_hash < 0:\n # Cannot have minus sign in name\n coeff_hash *= -1\n vertices_hash = hash(str(vertices))\n if vertices_hash < 0:\n # Cannot have minus sign in name\n vertices_hash *= -1\n return str(coeff_hash) + \"_\" + str(vertices_hash)", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def function_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"function_name\")", "def _hash_name(self, name, length=None):\n if not length:\n length = self.header_size\n hashed = name[:min(length, len(name))]\n for x in range(length, len(name), length):\n rem = min(x+length,len(name))-x\n for i in range(rem):\n hashed = hashed[:i] + chr(ord(name[x + i]) ^ ord(hashed[i])) + hashed[i+1:]\n if len(hashed) < length:\n hashed += '\\x00' * (length-len(hashed))\n return hashed", "def compute_feature_hash(feature: dict) ->str:\n preproc = feature.get(PREPROCESSING, {})\n if isinstance(preproc, dict):\n preproc_hash = hash_dict(preproc)\n else:\n preproc_hash = hash_dict(preproc.to_dict())\n return sanitize(feature[NAME]) + '_' + preproc_hash.decode('ascii')", "def _s_hash(fn, data: str):\n\n return fn(_b(data)).hexdigest()", "def _uniquify_name(self, name, callable):\n while True:\n try:\n callable(name)\n name += u'_'\n 
except:\n break\n return name", "def pre_transform_hash(self):\n if self.custom_hash is not None:\n return self.custom_hash\n if self.pre_transform is None:\n return 'no_pre_transform'\n return hashlib.md5(_repr(self.pre_transform).encode()).hexdigest()", "def _resolve_hasher(algorithm, file_hash=None):\n if algorithm == 'sha256':\n return hashlib.sha256()\n\n if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:\n return hashlib.sha256()\n\n # This is used only for legacy purposes.\n return hashlib.md5()", "def _hash_function(self, x):\n return hashlib.sha1(x).hexdigest()", "def get_hash_algorithm(hash_algorithm):\n\n available_methods = {\n 'SHA256': hash.sha256_crypt,\n 'SHA512': hash.sha512_crypt,\n }\n\n if not isinstance(hash_algorithm, str):\n raise ValueError('The parameter \"hash_algorithm\" should be a string.')\n\n if hash_algorithm.upper() not in available_methods:\n raise ValueError('Invalid hash method.')\n\n return available_methods[hash_algorithm]", "def FNV1Hash(filename):\n \n FNV1_32_INIT = 0x811c9dc5\n FNV1_PRIME_32 = 16777619\n\n lowerName = filename.lower()\n \n _hash = FNV1_32_INIT\n uint32_max = 2 ** 32\n \n for c in lowerName:\n _hash = (_hash * FNV1_PRIME_32) % uint32_max\n _hash = _hash ^ ord(c)\n return format(_hash, 'x')", "def hash_type(self, hash):\n if hash.isalnum() is False or len(hash) < 8 or len(hash) > 128:\n print(\"wrong type of hash was given\")\n hash_type = None\n\n if len(hash) == 8:\n # it is a CRC hash\n hash_type = \"crc\"\n elif len(hash) == 32:\n # it is a md5 hash\n hash_type = \"md5\"\n elif len(hash) == 40:\n # it is a sha1 hash\n hash_type = \"sha1\"\n elif len(hash) == 64:\n # it is a sha256 hash\n hash_type = \"sha256\"\n elif len(hash) == 128:\n # it is a sha512 hash\n hash_type = \"sha512\"\n\n return hash_type", "def hash_f(x: Text) -> Tuple[Text, Text]:\n return \"hash\", x.lower()", "def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }", "def get_function_raw_name_at(self, address):\n pass" ]
[ "0.66541505", "0.66076416", "0.65569323", "0.6499355", "0.6462295", "0.64203614", "0.638321", "0.6263362", "0.6210296", "0.6168451", "0.6134635", "0.6111885", "0.6081206", "0.6046545", "0.6041833", "0.6017602", "0.6017602", "0.5976814", "0.59592223", "0.59416676", "0.59413165", "0.5918784", "0.591734", "0.5914923", "0.5888426", "0.58574116", "0.5851177", "0.58401847", "0.5835406", "0.58193207" ]
0.7343964
0
get a list of the formatted events related to the file
def list_file_events(file_obj_events):
    ret = []
    for event in file_obj_events:
        ret.append(format_file_event(event))
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_events(file_obj):\n file_events = file_obj.event_set.all()\n return file_events", "def events(self):\n return get_tsv(self.path, self.values, 'events.tsv')", "def file_events(self):\n return self._file_events", "def read(cls, event_file, regex=regex):\n with open(event_file, 'r') as f:\n filedata = f.read()\n event_matches = re.finditer(regex, filedata, re.VERBOSE + re.MULTILINE)\n list_ = [i.groupdict() for i in event_matches]\n #util.ipshell()\n for event in list_: # convert numbers to float and int types\n for key, item in event.iteritems():\n if util.isint(item):\n event[key] = int(item)\n elif util.isfloat(item):\n event[key] = float(item)\n else:\n event[key] = item.strip()\n #if event[key] == '':\n # event[key] = None\n #if key == 'depth' and regex == cls.regex:\n # event[key] *= 1\n #util.ipshell()\n log.info('Read event information of %d events from events event_file %s' % (len(list_), event_file))\n return cls(list_)", "def get_file_format_event(file_events):\n file_format_event = file_events.filter(event_type=\"format identification\").first()\n if file_format_event:\n return file_format_event", "def get_file(file_to_edit):\n events = []\n file_path = lrs_path + file_to_edit\n with open(file_path, \"r\") as the_file:\n filereader = csv.reader(the_file)\n for row in filereader:\n events.append(row)\n the_file.close()\n return events", "def create_event_list(event_file: TextIO) -> List[Event]:\n file = [event.strip('\\n').split() for event in event_file]\n events = []\n for line in file:\n if line[1] == 'Arrive':\n time = int(line[0])\n customer_name = line[2]\n items = []\n i = 3\n while i < len(line):\n items.append(Item(line[i], int(line[i+1])))\n i += 2\n events.append(CustomerArrival(time, Customer(customer_name, items)))\n else:\n time = int(line[0])\n line_index = int(line[2])\n events.append(CloseLine(time, line_index))\n return events", "def get_event_list(self):\n pass", "def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n 
progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time = os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. 
Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)", "def get_events(self):\n\n events = []\n\n for watched_file in self._watched_files:\n for line in watched_file:\n self._do_rule_processing(line, events)\n\n return events", "def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump", "def _readin_evtx(file):\n\tcontent = []\n\tunparsed_entries = 0\n\twith evtx.Evtx(file) as log:\n\t\tc = 0\n\t\tsources = []\n\t\tfor record in log.records():\n\t\t\tc += 1\n\t\t\t_print_progress(c)\n\t\t\ttry:\n\t\t\t\tobj = untangle.parse(record.xml())#untangle can produce an OSError on Windows, since Windows uses a different format for timestamps\n\t\t\texcept OSError:\n\t\t\t\tc -= 1\n\t\t\t\tunparsed_entries += 1\n\t\t\t\tcontinue\n\t\t\tcurr_obj = obj.Event.System\n\t\t\tdate = curr_obj.TimeCreated['SystemTime']\n\t\t\tif '.' in date:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S.%f\")\n\t\t\telse:\n\t\t\t\tdate = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S\")\n\t\t\tfull_line = record.xml()\n\t\t\tif hasattr(curr_obj,'Provider'):\n\t\t\t\tsource = curr_obj.Provider['Name']\n\t\t\telse:\n\t\t\t\tsource = ''\n\t\t\tif ( (not source in sources) and (not sources == '')):\n\t\t\t\tsources.append(source)\n\t\t\tline_nr = curr_obj.EventRecordID.cdata\n\t\t\tcontent.append(logfile_entry(int(line_nr), file, curr_obj.EventID.cdata, full_line, date, curr_obj.Computer.cdata, source))\n\t\t_delete_print()\n\tif unparsed_entries > 0:\n\t\tprint('Unfortunately, {} entries could not be parsed. 
Please see the documentation'.format(unparsed_entries))\n\t\tprint()\n\treturn logfile(file, len(content), 'evtx', content, sources)", "def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()", "def create_event_list(filename):\n events = []\n\n with open(filename, \"r\") as file:\n for line in file:\n line = line.strip()\n\n if not line or line.startswith(\"#\"):\n # Skip lines that are blank or start with #.\n continue\n\n # Create a list of words in the line, e.g.\n # ['10', 'RiderRequest', 'Cerise', '4,2', '1,5', '15'].\n # Note that these are strings, and you'll need to convert some\n # of them to a different type.\n tokens = line.split()\n timestamp = int(tokens[0])\n event_type = tokens[1]\n identifier = tokens[2]\n location = deserialize_location(tokens[3])\n # HINT: Use Location.deserialize to convert the location string to\n # a location.\n event = None\n\n if event_type == \"DriverRequest\":\n speed = int(tokens[4])\n # Create a DriverRequest event.\n driver = Driver(identifier, location, speed)\n\n event = DriverRequest(timestamp, driver)\n\n elif event_type == \"RiderRequest\":\n destination = deserialize_location(tokens[4])\n patience = int(tokens[-1])\n # Create a RiderRequest event.\n rider = Rider(identifier, location, destination, patience)\n\n event = RiderRequest(timestamp, rider)\n\n events.append(event)\n\n return events", "def format_events(self):\n\n log.debug('formatting events ({}) for \"{}\"'.format(len(self.unformatted_events), self.school_year))\n for event in self.unformatted_events:\n name = event.module.get_text().replace('\\n', '') if event.module else 'Matière non définie'\n category = event.category.get_text() if event.category else 'Catégorie de cours non définie'\n starttime = event.starttime.get_text() if event.starttime else 'Heure de début de cours non définie'\n endtime = event.endtime.get_text() if event.endtime else 'Heure de début de cours non définie'\n room = event.room.item.get_text() if event.room and event.room.item else 'Aucune salle définie'\n group = event.group.item.get_text() if event.group and event.group.item else 'Classe entière'\n nday = int(event.day.get_text()) if event.day else None\n date_first_weekday = self.week_dates_mapping[event.rawweeks.get_text()] if event.rawweeks else None\n start = '{}-{}'.format(date_first_weekday, starttime)\n end = '{}-{}'.format(date_first_weekday, endtime)\n dtstart = datetime.datetime.strptime(start, '%d/%m/%Y-%H:%M') + datetime.timedelta(days=nday, hours=-2)\n dtend = datetime.datetime.strptime(end, '%d/%m/%Y-%H:%M') + datetime.timedelta(days=nday, hours=-2)\n\n start_date = dtstart.isoformat() + 'Z'\n end_date = dtend.isoformat() + 'Z'\n\n calendar_event = GoogleCalendarEvent(\n location=room,\n summary='({}) - {} - {}'.format(category, name, group),\n description=group,\n dtstart=start_date,\n dtend=end_date\n )\n self.formatted_events.append(calendar_event.json)", "def _discover_event_files(path: str) -> List[Path]:\n event_files = []\n\n path = Path(path)\n if path.is_file():\n if not is_summary_events_file(str(path)):\n raise ValueError(f\"Path {path} is not a summary events file\")\n event_files.append(path)\n elif path.is_dir():\n for subpath in path.glob(\"*\"):\n if is_summary_events_file(str(subpath)):\n event_files.append(subpath)\n if not event_files:\n raise ValueError(\n f\"No summary events files found in directory {path}\"\n )\n else:\n raise ValueError(f\"Path {path} is neither a file nor a directory\")\n\n return 
sorted(event_files)", "def get_event_list(self):\n event_list = []\n eventLocation = -1\n for element in self:\n eventLocation += 1\n if element.isChunk():\n event = element.embedded_event()\n if event:\n event_list.append((eventLocation, event.eid))\n return event_list", "def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error 
while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))", "def __parse(self):\n lines = self.file.readlines()\n name_idx = 2\n name_idx_found = False\n pathre = re.compile(r\"^[A-Z]:[\\\\/]\\w+\")\n for i in range(0, len(lines)):\n line = lines[i]\n if line.strip() != \"\": # check if line isn't empty\n if pathre.match(line):\n self.path = line.strip()\n continue\n tokens = line.split()\n time_str = tokens[0] + \" \" + tokens[1]\n try:\n time = datetime.strptime(time_str, \"%m/%d/%y %H:%M:%S\")\n except ValueError:\n raise LogParseError('Invalid log format. 
Date must be first \\\n token for each log event.') \n if not name_idx_found:\n name_idx = tokens.index('Monitoring')\n name_idx_found = True\n name = \"\"\n if tokens[name_idx].strip() == 'Monitoring':\n name = tokens[name_idx].lower() + \" \" + tokens[name_idx + 1].lower()\n duration = 0.0\n else:\n name = tokens[name_idx].lower()\n duration = tokens[name_idx + 1]\n self.events[name] = Event(time, name, duration)\n self.start = self.events['monitoring started']\n self.end = self.events['monitoring stopped']", "def _extract_template_events(self):\n\t\ttry:\n\t\t\ttable = self.hdf5file[fastq_paths[self.version]['template'] % self.group]\n\t\t\tself.template_events = [Event(x) for x in table['Events'][()]]\n\t\texcept Exception, e:\n\t\t\tself.template_events = []", "def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]", "def event_list(self):\n return self._event_list", "def get_flarelist(goes_class_filter, filename): \n t_start = \"2012-08-22 00:00\"\n t_end = \"2018-04-20 00:00\"\n get_goes_event_list(t_start, t_end, filename=Path.cwd().joinpath(filename), goes_class_filter=goes_class_filter)", "def build_events(self) -> list:\n raise NotImplementedError()", "def get_game_events(self):\n\t\tcontents = self.archive.read_file('replay.game.events')\n\t\treturn self.protocol.decode_replay_game_events(contents)", "def find_logs(self, log_format):\n # print(self.path)\n r, d, files = next(os.walk(self.path))\n # TODO use regex to find logs\n files = list(filter(lambda x: log_format in x, files))\n files = [os.path.join(r, f) for f in files]\n ctimes = [os.path.getctime(os.path.join(self.path, f)) for f in files]\n # print(self.path, files)\n return list(zip(ctimes, files))", "def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print \"done.\"", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "def eventList(self):\n return self._eventList", "def get_messages(self):\n\t\tcontents = self.archive.read_file('replay.message.events')\n\t\treturn self.protocol.decode_replay_message_events(contents)" ]
[ "0.6945467", "0.68493074", "0.6757438", "0.65512663", "0.647044", "0.6456969", "0.6431299", "0.6402945", "0.6382251", "0.63623226", "0.6338081", "0.6285148", "0.6093697", "0.6050992", "0.60133076", "0.5990272", "0.59856236", "0.59660107", "0.59591275", "0.5939103", "0.59086823", "0.583064", "0.5821836", "0.5819859", "0.5811876", "0.58018553", "0.5786157", "0.5756314", "0.57434565", "0.57257396" ]
0.76860726
0
gets all events linked to a file_object
def get_file_events(file_obj):
    file_events = file_obj.event_set.all()
    return file_events
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_file_events(file_obj_events):\n\n ret = []\n for event in file_obj_events:\n ret.append(format_file_event(event))\n return ret", "def file_events(self):\n return self._file_events", "def get_events(self):\n\n print \"\\ngetting new Events\"\n path = os.path.join(self.path, 'no_consent')\n for d_cnt, date in sorted(enumerate(os.listdir(path))):\n\n if os.path.isdir(os.path.join(self.events_path, date)):\n print \"%s already processed\" % date\n continue\n\n directory = os.path.join(path, date)\n for recording in os.listdir(directory):\n if os.path.isdir(os.path.join(directory, recording)):\n\n # Can we reduce this list of objects using ROI information?\n try:\n use_objects = {}\n for region, objects in self.soma_objects.items():\n for ob, position in objects.items():\n use_objects[ob] = position\n\n ce.get_event(recording, directory, use_objects, self.config['events'])\n except:\n print \"recording: %s in: %s is broken.\" %(recording, directory)\n else:\n print \"already processed: %s\" % recording\n print \"done.\"", "def get_events(self):\n\n events = []\n\n for watched_file in self._watched_files:\n for line in watched_file:\n self._do_rule_processing(line, events)\n\n return events", "def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def get_event_list(self):\n pass", "def events(self):\n for line_num, line in enumerate(self.file_handler):\n if not line:\n break\n # process line input to dictionary\n data = json.loads(line)\n # add id information\n data['id'] = line_num\n # update timestamp history\n timestamp = self._get_timestamp(data)\n self.last_two_timestamps = [self.last_two_timestamps[-1], timestamp]\n self.event_timestamps[line_num] = timestamp\n\n self.alarms.append(0) # add field for alarms\n self.users.append(data['user']) # add field for user\n self.anomalies.append(data.get('is_anomaly', 0)) # add field for anomalies\n if 'is_anomaly' in data:\n del data['is_anomaly'] # remove anomaly information from data for contestants\n\n # return line id and serialized JSON as string representing one event\n str_dump = json.dumps(data)\n logger.info(self._get_inner_time() + ' > ' + str_dump)\n yield line_num, str_dump", "def _ReadFileEntries(self, file_object):\n self._file_entries = {}\n\n file_offset = 0\n while file_offset < self._file_size or self._file_size == 0:\n file_entry = self._ReadFileEntry(file_object, file_offset)\n file_offset += file_entry.size\n if file_entry.path == 'TRAILER!!!':\n break\n\n if file_entry.path in self._file_entries:\n # TODO: alert on file entries with duplicate paths?\n continue\n\n self._file_entries[file_entry.path] = file_entry\n\n self.size = file_offset", "def _get_events(self):\n self.cache = []\n\n # Test if we have event table\n with datascope.closing(datascope.dbopen(self.db, 'r')) as db:\n dbtable = db.lookup(table='event')\n if dbtable.query(datascope.dbTABLE_PRESENT):\n steps = ['dbopen event']\n steps.extend(['dbjoin origin'])\n steps.extend(['dbsubset origin.orid != NULL'])\n steps.extend(['dbsubset origin.orid == prefor'])\n fields = ['evid']\n else:\n steps = ['dbopen origin']\n steps.extend(['dbsubset orid != NULL'])\n fields = []\n\n fields.extend(['orid','time','lat','lon','depth','auth','nass',\n 'ndef','review'])\n\n for v in extract_from_db(self.db, steps, fields, self.db_subset):\n if not 'evid' in v:\n 
v['evid'] = v['orid']\n\n self.logging.debug( \"Events(): new event #%s\" % v['evid'] )\n\n v['allmags'] = []\n v['magnitude'] = '-'\n v['maglddate'] = 0\n v['srname'] = '-'\n v['grname'] = '-'\n v['time'] = parse_sta_time(v['time'])\n v['strtime'] = readable_time(v['time'], self.timeformat, self.timezone)\n\n try:\n v['srname'] = stock.srname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with srname for orid %s: %s' % (v['orid'],\n v['lat'],v['lon'],e) )\n\n try:\n v['grname'] = stock.grname(v['lat'],v['lon'])\n except Exception,e:\n warninig('Problems with grname for orid %s: %s' % (v['orid'],\n v['lat'], v['lon'],e) )\n\n orid = v['orid']\n if orid in self.mags:\n for o in self.mags[orid]:\n v['allmags'].append(self.mags[orid][o])\n if self.mags[orid][o]['lddate'] > v['maglddate']:\n v['magnitude'] = self.mags[orid][o]['strmag']\n v['maglddate'] = self.mags[orid][o]['lddate']\n\n\n self.cache.append( v )", "async def events(self) -> Iterable[Event]:", "def create_event_list(event_file: TextIO) -> List[Event]:\n file = [event.strip('\\n').split() for event in event_file]\n events = []\n for line in file:\n if line[1] == 'Arrive':\n time = int(line[0])\n customer_name = line[2]\n items = []\n i = 3\n while i < len(line):\n items.append(Item(line[i], int(line[i+1])))\n i += 2\n events.append(CustomerArrival(time, Customer(customer_name, items)))\n else:\n time = int(line[0])\n line_index = int(line[2])\n events.append(CloseLine(time, line_index))\n return events", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "def slurp_events(self):\n while self.has_event():\n self.get_event()", "def getObjectFiles(self, obj):\n filelist = list()\n\n fileurl = 'objects/{0}/files'.format(obj.id)\n\n fl = self.iterateAllPaginated(fileurl)\n\n for f in fl:\n res = self.getFile(f['selfUrl'])\n filelist.append(res)\n return filelist", "def event_list(self):\n return self._event_list", "def read(cls, event_file, regex=regex):\n with open(event_file, 'r') as f:\n filedata = f.read()\n event_matches = re.finditer(regex, filedata, re.VERBOSE + re.MULTILINE)\n list_ = [i.groupdict() for i in event_matches]\n #util.ipshell()\n for event in list_: # convert numbers to float and int types\n for key, item in event.iteritems():\n if util.isint(item):\n event[key] = int(item)\n elif util.isfloat(item):\n event[key] = float(item)\n else:\n event[key] = item.strip()\n #if event[key] == '':\n # event[key] = None\n #if key == 'depth' and regex == cls.regex:\n # event[key] *= 1\n #util.ipshell()\n log.info('Read event information of %d events from events event_file %s' % (len(list_), event_file))\n return cls(list_)", "def get_files(self):\r\n return self._filelist", "def get_file(file_to_edit):\n events = []\n file_path = lrs_path + file_to_edit\n with open(file_path, \"r\") as the_file:\n filereader = csv.reader(the_file)\n for row in filereader:\n events.append(row)\n the_file.close()\n return events", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_event_list(self):\n event_list = []\n eventLocation = -1\n for element in self:\n 
eventLocation += 1\n if element.isChunk():\n event = element.embedded_event()\n if event:\n event_list.append((eventLocation, event.eid))\n return event_list", "def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time = os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for 
DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)", "def eventList(self):\n return self._eventList", "def events(self):\n return get_tsv(self.path, self.values, 'events.tsv')", "def get_events(self):\n return self.events", "def update(self):\n while True:\n result = win32event.WaitForSingleObject(self._overlapped.hEvent, 0)\n if result == win32con.WAIT_OBJECT_0:\n self._num_bytes_returned = win32file.GetOverlappedResult(\n self._directory,\n self._overlapped,\n True\n )\n timestamp = datetime.datetime.fromtimestamp(\n datetime.datetime.utcnow().timestamp()\n )\n self._event_properties['Path'] = self._get_path()\n self._event_properties['FileName'] = self._get_file_name()\n self._event_properties['Timestamp'] = timestamp\n self._event_properties['EventType'] = self._get_event_type()\n self._set_watcher()\n break\n if result == win32con.WAIT_FAILED:\n self.close()\n raise FileMonitorError()", "def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]", "def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 
1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))", "def events(self) -> object:\n return self._events" ]
[ "0.76239884", "0.72182584", "0.6120225", "0.6075563", "0.59999406", "0.59407324", "0.5918491", "0.5851653", "0.58487296", "0.5846607", "0.584488", "0.5776906", "0.5775421", "0.57627344", "0.5746324", "0.56975585", "0.56643677", "0.56583166", "0.56568396", "0.5634918", "0.5634918", "0.5595006", "0.5581248", "0.55707586", "0.55646324", "0.5546943", "0.55231225", "0.5521512", "0.55203825", "0.5514886" ]
0.8561063
0
maps the normalization event info, and therefore splits the event_detail to display the tool name and the tool version separately
def map_file_normalization_info(file_normalization_event):
    event_info = {}
    if not file_normalization_event:
        return
    try:
        event_info.update(
            {
                "premis:outcome": file_normalization_event.event_outcome_detail,
            }
        )
        if file_normalization_event.event_detail:
            event_info.update(
                {
                    "prov:softwareAgent": file_normalization_event.event_detail.split(
                        ";"
                    )[0],
                    "premis:version": file_normalization_event.event_detail.split(";")[
                        1
                    ],
                }
            )
    except IndexError:
        logger.info(
            "name and version of the file normalization tool %s could not be"
            "determined. Check if it is well formed",
            file_normalization_event.event_detail,
        )
    return event_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_file_validation_info(file_validation_event):\n event_info = {}\n if not file_validation_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_validation_event.event_outcome_detail,\n \"prov:softwareAgent\": file_validation_event.event_detail.split(\";\")[0],\n \"premis:version\": file_validation_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file validation tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_validation_event.event_detail,\n )\n return event_info", "def map_file_format_info(file_format_event, file_validation_event):\n event_info = {}\n if not file_format_event:\n return\n try:\n event_info.update(\n {\n \"dct:FileFormat\": file_format_event.event_outcome_detail,\n \"prov:softwareAgent\": file_format_event.event_detail.split(\";\")[0],\n \"premis:version\": file_format_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_format_event.event_detail,\n )\n if file_validation_event:\n event_info.update(\n {\n \"dct:FileFormat\": file_validation_event.event_outcome_detail,\n }\n )\n return event_info", "def format_hook_info_lines ( self,\n info, sort_info=True, append_newline=False\n ):\n max_name_len = min ( 30, max ( len(x[0]) for x in info ) )\n\n event_names = set()\n for name, ev_prio in info:\n event_names.update ( item[0] for item in ev_prio )\n\n # len(...) + 4 == len(...) + len(\"(__)\")\n event_words = [\n ( ev, (4+len(ev)) * ' ' ) for ev in sorted ( event_names )\n ]\n\n if sort_info:\n my_info = sorted ( info, key=lambda k: ( not k[1], k[0] ) )\n else:\n my_info = info\n\n for name, event_prio_list in my_info:\n events = dict ( event_prio_list )\n get_prio = lambda p: ( \"UU\" if p is None else p )\n\n yield \"{name:>{nlen}} | {ev}\".format (\n name=name, nlen=max_name_len,\n ev=' '.join (\n (\n \"{name}({prio:0>2})\".format (\n name=ev, prio=get_prio ( events[ev] )\n ) if ev in events else replacement\n for ev, replacement in event_words\n )\n )\n ).rstrip()\n # -- end for\n\n if append_newline:\n yield \"\"", "def add_event_from_info(db, event_info, event_id, tag):\n\n if 'description' not in event_info.keys():\n return False\n\n if len(event_info['description']) < MIN_CHARS_DESC:\n if VERBOSE:\n print('Failure: event description too short \\\n (>={} chars needed)'.format(MIN_CHARS_DESC))\n return False\n\n if 'name' in event_info.keys():\n ename = event_info['name']\n else:\n ename = None\n\n if 'venue' in event_info.keys():\n if 'name' in event_info['venue'].keys() and event_info['venue']['name']:\n lname = event_info['venue']['name']\n else:\n lname = None\n\n if 'lon' in event_info['venue'].keys() and event_info['venue']['lon']:\n lon = event_info['venue']['lon']\n else:\n lon = None\n\n if 'lat' in event_info['venue'].keys() and event_info['venue']['lat']:\n lat = event_info['venue']['lat']\n else:\n lat = None\n\n if 'address_1' in event_info['venue'].keys() \\\n and event_info['venue']['address_1']:\n address_1 = event_info['venue']['address_1']\n else:\n address_1 = None\n\n if 'zip' in event_info['venue'].keys() and event_info['venue']['zip']:\n zipcode = event_info['venue']['zip']\n else:\n zipcode = None\n\n if 'city' in event_info['venue'].keys() and event_info['venue']['city']:\n city = event_info['venue']['city']\n else:\n city = None\n\n if 'state' in event_info['venue'].keys() \\\n and 
event_info['venue']['state']:\n state = event_info['venue']['state']\n else:\n state = None\n else:\n lname = lon = lat = address_1 = zipcode = city = state = None\n\n if 'time' in event_info.keys() and event_info['time']:\n start_time = event_info['time']\n else:\n start_time = None\n\n if 'duration' in event_info.keys() and event_info['duration']:\n duration = event_info['duration']\n else:\n duration = None\n\n if 'description' in event_info.keys() and event_info['description']:\n description = event_info['description']\n else:\n description = None\n\n # taglist = []\n # for t in TAGS:\n # if t in description.lower() or t in ename.lower():\n # taglist.append(t)\n #\n # if len(taglist) > 0:\n # print(ename, taglist)\n # else:\n # return\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE mid = %s\n \"\"\",\n (event_id, ))\n\n result = cursor.fetchone()\n\n if result:\n print('Event already in database.')\n return\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE ename = %s\n \"\"\",\n (ename, ))\n if result:\n print('Event already in database.')\n return\n\n loc_query = \\\n \"\"\"\n INSERT\n INTO Locations(lname, lat, lon, address_1, zip, city, state)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n cursor.execute(loc_query, (\n lname,\n lon,\n lat,\n address_1,\n zipcode,\n city,\n state,\n ))\n\n db.commit()\n\n print('Inserted into Locations.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n lid = cursor.fetchone()\n\n start_date = str(datetime.fromtimestamp(start_time / 1000))\n\n if start_date and duration:\n end_date = str(datetime.fromtimestamp((start_time + duration) / 1000))\n else:\n end_date = None\n\n ev_query = \\\n \"\"\"\n INSERT\n INTO Events(ename, start_date, end_date,\n num_attending, lid, description, mid)\n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n\n cursor.execute(ev_query, (\n ename.encode('ascii', 'ignore'),\n start_date,\n end_date,\n 0,\n lid,\n description.encode('ascii', 'ignore'),\n event_id,\n ))\n\n db.commit()\n\n print('Inserted into Events.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n eid = cursor.fetchone()\n\n # for tag in taglist:\n # category = None\n # for c in CATEGORIES:\n # if tag in CATEGORIES[c]:\n # category = c\n\n et_query = \\\n \"\"\"\n INSERT\n INTO EventTags(eid, tag, category)\n VALUES (%s, %s, %s)\n \"\"\"\n\n cursor.execute(et_query, (eid, tag, tag))\n\n db.commit()\n\n print('Inserted into EventTags.')\n\n if VERBOSE:\n print('Finished.')\n return True", "def OnAbout(self, event):\n messtr = u\"单细胞簇识别系统\\n\"\n messtr =messtr +u\"这是个识别单细胞簇的层次聚类系统,使用方法如下:\\n\" \n messtr =messtr +u\"1.将你的数据处理成三个文件expr.h5,features.txt,labels.txt,分别是用h5py.create_dataset创建的N细胞*M基因的细胞表达式矩阵和用np.savetxt函数保存的基因文件和标签文件(注意一行一个不能有空格)放在一个文件夹即可,可以参考prosess_sys.py\\n\"\n messtr =messtr +u\"2.点击选择文件按钮选择文件夹,此时右边会提示成功与否\\n\"\n messtr =messtr +u\"3.thresh表示过滤掉某个基因表达的细胞数少于百分比细胞数的,范围为0-1,为零时不过滤低表达基因\\n\" \n messtr =messtr +u\"z_cutoff是离散值,bins是分为几份,是整数,将基因按照在所有细胞表达均值分成bins份,然后去掉每一份zscore小于z_cutoff的基因\\n\"\n messtr =messtr +u\"4.可以选择不同的降维算法进行降维\\n\"\n messtr =messtr +u\"5.split_score和merge_score是聚类的两个超参数,一般后者是前者的一半,基于韦尔奇t检查的两个集群之间的距离度量如果大于这个split_score就分裂,小于merge_score就合并(采用的聚类方法是先分裂再合并的)\\n\"\n messtr =messtr +u\"6.ys是层次聚类分裂的结果,ym是分裂再凝聚后的结果,ySC3是SC3算法的结果,ySafe是SAFE算法的结果,yclf是一致性聚类的结果,yKmean是kmeans算法的结果\"\n wx.MessageBox(messtr,\n \"About System\",\n wx.OK|wx.ICON_INFORMATION)", "def buildEvent(data):", "def reformat_events(self, data, features):\n # Update alias\n if isinstance(features, list):\n 
[x.update(dict(alias=self.alias[(x[features[0]],\n x[features[1]])])) for x in data]\n else:\n [x.update(dict(alias=self.alias[x[features]])) for x in data]\n temp_data = list()\n # define ordering keys and columns\n if self.one_timestamp:\n columns = ['alias', 'duration', 'dur_act_norm']\n sort_key = 'end_timestamp'\n else:\n sort_key = 'start_timestamp'\n columns = ['alias', 'processing_time',\n 'proc_act_norm', 'waiting_time', 'wait_act_norm']\n data = sorted(data, key=lambda x: (x['caseid'], x[sort_key]))\n for key, group in itertools.groupby(data, key=lambda x: x['caseid']):\n trace = list(group)\n temp_dict = dict()\n for col in columns:\n serie = [y[col] for y in trace]\n if col == 'alias':\n temp_dict = {**{'profile': serie}, **temp_dict}\n else:\n serie = [y[col] for y in trace]\n temp_dict = {**{col: serie}, **temp_dict}\n temp_dict = {**{'caseid': key, 'start_time': trace[0][sort_key],\n 'end_time': trace[-1][sort_key]},\n **temp_dict}\n temp_data.append(temp_dict)\n return sorted(temp_data, key=itemgetter('start_time'))", "def prepare_hr_for_events(events_info) -> str:\n hr_list = []\n for record in events_info:\n hr_record = {\n 'Event ID': record.get('eventId', None),\n TIME_UTC: record.get('occurred', ''),\n VICTIM_IP: record.get('srcIp', ''),\n 'Attacker IP': record.get('dstIp', ''),\n 'CVE ID': record.get('cveId', ''),\n 'Severity': record.get('severity', None),\n 'Rule': record.get('ruleName', ''),\n 'Protocol': record.get('protocol', None),\n }\n hr_list.append(hr_record)\n\n return tableToMarkdown(\n 'IPS Events',\n hr_list,\n [\n 'Event ID',\n TIME_UTC,\n VICTIM_IP,\n 'Attacker IP',\n 'CVE ID',\n 'Severity',\n 'Rule',\n 'Protocol',\n ],\n removeNull=True,\n )", "def _format_for_enaml(self, event_descriptors):\n data_keys = []\n for evd in event_descriptors:\n dk = evd.data_keys\n for data_key, data_key_dict in six.iteritems(dk):\n while data_key in data_keys:\n data_key += '_1'\n print(data_key, data_key_dict)\n name = data_key\n src = data_key_dict['source']\n loc = data_key_dict['external']\n if loc is None:\n loc = 'metadatastore'\n data_keys.append([name, loc, src])\n return data_keys", "def convert_event_for_output(event):\n\n\tconverted_event = {}\t\n\tfor fieldname in FIELDNAMES:\n\t\tif event.has_key(fieldname):\n\t\t\tconverted_event[fieldname] = event[fieldname]\n\n\treturn converted_event", "def format_file_event(event):\n event_dict = {\n \"premis:eventIdentifier\": event.event_id,\n \"event_name\": event.event_type,\n \"prov:softwareAgent\": event.event_detail,\n \"premis:outcome\": event.event_outcome,\n \"event_outcome_detail\": event.event_outcome_detail,\n }\n return event_dict", "def new_desc_event(self, event):\r\n pass", "def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n\n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n # data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n # data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n #data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ZEN_PNT'] = np.float32(90. 
- table['ALT'].mean())\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. / 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data", "def summary_info_events(filename):\n # filename = self.out_filename('events')\n print('Reading {}'.format(filename))\n table = Table.read(str(filename), hdu='EVENTS')\n data = dict()\n \n # Copy over header info to the summary table\n data['RA_PNT'] = np.float32(table.meta['RA_PNT'])\n data['DEC_PNT'] = np.float32(table.meta['DEC_PNT'])\n #data['GLON_PNT'] = np.float32(table.meta['GLON_PNT'])\n #data['GLAT_PNT'] = np.float32(table.meta['GLAT_PNT'])\n data['ALT_PNT'] = np.float32(table.meta['ALT_PNT'])\n data['AZ_PNT'] = np.float32(table.meta['AZ_PNT'])\n data['ZEN_PNT'] = np.float32(90. - table.meta['ALT_PNT'])\n data['ONTIME'] = np.float32(table.meta['ONTIME'])\n data['LIVETIME'] = np.float32(table.meta['LIVETIME'])\n data['DEADC'] = np.float32(table.meta['DEADC'])\n\n MJDREFI = table.meta['MJDREFI']\n MJDREFF = table.meta['MJDREFF']\n MJDREF = MJDREFI + MJDREFF\n\n TSTART_MET = table.meta['TSTART'] / 3600. / 24.\n TSTOP_MET = table.meta['TSTOP'] / 3600. 
/ 24.\n\n start_time = Time(MJDREF + TSTART_MET, scale='tt', format='mjd')\n stop_time = Time(MJDREF + TSTOP_MET, scale='tt', format='mjd')\n\n data['TSTART'] = np.float32(start_time.utc.mjd)\n data['TSTOP'] = np.float32(stop_time.utc.mjd)\n data['TSTART_STR'] = str(start_time.utc.iso[:-4])\n data['TSTOP_STR'] = str(stop_time.utc.iso[:-4])\n\n data['N_TELS'] = table.meta['N_TELS']\n data['TELLIST'] = table.meta['TELLIST']\n try:\n data['OBJECT'] = table.meta['OBJECT']\n except KeyError:\n data['OBJECT'] = \"\"\n data['RA_OBJ'] = np.float32(table.meta['RA_OBJ'])\n data['DEC_OBJ'] = np.float32(table.meta['DEC_OBJ'])\n\n # data['OBS_MODE'] = table.meta['OBS_MODE']\n\n try:\n data['MUONEFF'] = np.float32(table.meta['MUONEFF'])\n except KeyError:\n data['MUONEFF'] = np.float32(-1)\n\n # Calculate some summary statistics for important event columns\n data['EVENT_COUNT'] = len(table)\n data['EVENT_TIME_MIN'] = table['TIME'].min()\n data['EVENT_TIME_MAX'] = table['TIME'].max()\n data['EVENT_ENERGY_MEDIAN'] = np.float32(np.median(table['ENERGY']))\n data['EVENT_RA_MEDIAN'] = np.float32(np.median(table['RA']))\n data['EVENT_DEC_MEDIAN'] = np.float32(np.median(table['DEC']))\n\n return data", "def audit_process():\n st_types, pc_types = audit(OSMFILE)\n #pprint.pprint(dict(st_types))\n #pprint.pprint(dict(pc_types))\n\n correct_name = {}\n for st_type, ways in st_types.iteritems():\n for name in ways:\n better_name = update_name(name, mapping)\n correct_name[name] = better_name\n #print name, \"=>\", better_name\n \n correct_code = {}\n for _, pc_type in pc_types.iteritems():\n for code in pc_type:\n better_code = update_postalcode(code)\n correct_code[code] = better_code\n #print code, \"=>\", better_code\n \n return correct_name, correct_code", "def process_app_info(self):\n pass", "def extract_all_strings_from_event_trace(events):\n result = \"\"\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n\n result = result + (\"-\" * 70) + \"\\n=> @ \" + \\\n time.strftime('%F %T %z', time.localtime(evt.timestamp)) + \": \"\n\n if evt.type == IpuTraceEvent.COMPILE_BEGIN:\n evt_str = \"Compile begin: \" + \\\n evt.compile_begin.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.COMPILE_END:\n evt_str = \"Compile end: \" + \\\n evt.compile_end.module_name.decode('utf-8') + \"\\n\" + \\\n \"Duration: \" + str(evt.compile_end.duration) + \" us\\n\" + \\\n evt.compile_end.compilation_report.decode('utf-8')\n elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER:\n evt_str = \"Host->Device\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER:\n evt_str = \"Device->Host\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.LOAD_ENGINE:\n evt_str = \"Load engine: \" + \\\n evt.load_engine.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.EXECUTE:\n evt_str = \"Execute: \" + \\\n evt.execute.module_name.decode('utf-8') + \"\\n\" + \\\n evt.execute.execution_report.decode('utf-8')\n else:\n evt_str = \"Unknown event\"\n\n result = result + evt_str + '\\n'\n\n return result", "def cal_name(self):\n return self.event.event_name + ' ' + (self.service.shortname if self.service else self.category.name) + ' Setup'", "def api_from_event(self, request, event_id):\n event = get_event(event_id)\n if not event:\n self.response_not_found()\n event_name = \" | \".join(event.managed_object.profile.name.split(\".\")) + \" | <name> \"\n if event.source == 
\"syslog\":\n event_name += \"(SYSLOG)\"\n elif event.source == \"SNMP Trap\":\n event_name += \"(SNMP)\"\n data = {\"name\": event_name, \"preference\": 1000}\n if event.source == \"syslog\":\n data[\"description\"] = event.raw_vars[\"message\"]\n elif event.source == \"SNMP Trap\" and \"SNMPv2-MIB::snmpTrapOID.0\" in event.resolved_vars:\n data[\"description\"] = event.resolved_vars[\"SNMPv2-MIB::snmpTrapOID.0\"]\n patterns = {\"source\": event.source}\n for k in event.raw_vars:\n if k not in (\"collector\", \"facility\", \"severity\"):\n patterns[k] = event.raw_vars[k]\n if hasattr(event, \"resolved_vars\"):\n for k in event.resolved_vars:\n if k not in self.IGNORED_OIDS and not is_oid(k):\n patterns[k] = event.resolved_vars[k]\n data[\"patterns\"] = [\n {\"key_re\": \"^%s$\" % k, \"value_re\": \"^%s$\" % patterns[k].strip()} for k in patterns\n ]\n return data", "def get_interesting_event_description(self):\n pass", "def customEvent(self, e):\n data = e.data()\n \n ## HOST INFO\n \n if data.find('host up') == 0:\n self.emit(PYSIGNAL('host_up'), (data.split(' ')[2],))\n\n elif data.find('host down') == 0:\n self.emit(PYSIGNAL('host_down'), (data.split(' ')[2],))\n\n elif data.find('add group') == 0:\n self.emit(PYSIGNAL('add_group'), (int(data.split(' ')[2]),))\n\n elif data.find('remove group') == 0:\n self.emit(PYSIGNAL('remove_group'), (int(data.split(' ')[2]),))\n\n elif data.find('group beat') == 0:\n self.emit(PYSIGNAL('group_beat'), (data[11:],))\n \n ## PKAUDIOD\n \n elif data.find('midi') == 0:\n l = data.split(' ')[1:]\n data = [int(l[0]),int(l[1]),int(l[2]),float(l[3])]\n self.emit(PYSIGNAL('midi'), (data,))\n \n elif data.find('sample:starting') == 0:\n l = data.split(' ')\n self.emit(PYSIGNAL('local_sample_starting'), (int(l[1]),))", "def address_mapped_event(self, event):\r\n output = [event.event_name, event.from_addr, event.to_addr, \r\n time.asctime(event.when)]\r\n plog(\"DEBUG\", \" \".join(output))", "def add_event_to_trigger_dict(event):\n\n\tconverted_event = convert_event_for_output(event)\n\n\t# Use OGLE name for key pointing to event as value if availble.\n\tif converted_event.has_key(\"name_OGLE\") and converted_event[\"name_OGLE\"] != \"\":\n\t\tlogger.info(\"Event has OGLE name\")\n\t\tname_key = \"name_OGLE\"\n\n\t# Otherwise, use the MOA name.\n\telif converted_event.has_key(\"name_MOA\") and converted_event[\"name_MOA\"] != \"\":\n\t\tlogger.info(\"Event has MOA name and no OGLE name\")\n\t\tname_key = \"name_MOA\"\n\n\t# If there is a neither a MOA nor OGLE name, something has gone wrong, and we abort storing the event.\n\telse:\n\t\tlogger.warning(\"Event has neither OGLE nor MOA name item. 
Event:\\n\" + str(converted_event))\n\t\tlogger.warning(\"Aborting added event to event trigger dictionary...\")\n\t\treturn\n\n\tevent_name = converted_event[name_key]\n\tglobal event_trigger_dict\n\tevent_trigger_dict[event_name] = converted_event\n\tlogger.debug(\"Added following event to event trigger dictionary: %s\" % converted_event)", "def convert_eventstorylines_v2(self, version=\"1.5\"):\n\n splits = {'full_corpus/v{}/event_mentions_extended'.format(version): 0,\n 'test_corpus/v{}/event_mentions_extended'.format(version): 2}\n\n annotations = pd.DataFrame(columns=['file', 'source', 'target', 'label', 'split'])\n\n # creating a dictionary of all documents\n data = pd.DataFrame(columns=self.scheme_columns)\n\n # ----------------------------------\n # reading all the annotations\n for key, value in splits.items():\n docs_path = self.dir_path + \"EventStoryLine/evaluation_format/{}\".format(key)\n\n for folder in os.listdir(docs_path):\n if not any(sub in folder for sub in [\".txt\", \".pdf\", \".DS_Store\"]):\n for doc in os.listdir('{}/{}'.format(docs_path, folder)):\n if \".tab\" in doc:\n with open('{}/{}/{}'.format(docs_path, folder, doc), 'r') as file:\n lines = file.readlines()\n for line in lines:\n line = line.split('\\t')\n annotations = annotations.append(\n {'file': '{}.{}'.format(doc.split('.')[0], 'xml'), 'source': line[0],\n 'target': line[1],\n 'label': line[2].replace('\\n', ''), 'split': value}, ignore_index=True)\n\n # ----------------------------------\n mismatch = 0\n docs_path = self.dir_path + \"EventStoryLine/ECB+_LREC2014/ECB+\"\n\n # creating a dictionary of all documents\n data = pd.DataFrame(columns=self.scheme_columns)\n\n for index, row in annotations.iterrows():\n # parse the doc to retrieve info of sentences\n folder = str(row['file']).split('_')[0]\n tree = ET.parse(docs_path + \"/\" + folder + \"/\" + row['file'])\n root = tree.getroot()\n\n tokens = []\n\n # saving tokens info\n for token in root.findall('token'):\n tokens.append([int(token.attrib['t_id']), token.text, int(token.attrib['sentence'])])\n\n label = -1\n direction = -1\n if str(row['label']) == \"PRECONDITION\":\n label = 1\n direction = 0\n elif str(row['label']) == \"FALLING_ACTION\":\n label = 1\n direction = 1\n\n source_t_ids = []\n target_t_ids = []\n for item in row['source'].split('_'):\n source_t_ids.append(int(item))\n for item in row['target'].split('_'):\n target_t_ids.append(int(item))\n\n context = \"\"\n span1 = \"\"\n span2 = \"\"\n token_idx = 0\n\n # finding start and end sentences indexes\n for i in range(len(tokens)):\n if tokens[i][0] == source_t_ids[0]:\n s_sen_id = int(tokens[i][2])\n if tokens[i][0] == target_t_ids[-1]:\n t_sen_id = int(tokens[i][2])\n\n # building the context and finding spans\n i = 0\n\n if t_sen_id < s_sen_id:\n s_sen_id, t_sen_id = t_sen_id, s_sen_id\n\n while i < len(tokens):\n t_id = tokens[i][0]\n token_text = tokens[i][1]\n token_sen_id = int(tokens[i][2])\n if s_sen_id <= int(token_sen_id) <= t_sen_id:\n # span1\n if t_id == source_t_ids[0]:\n for l in range(len(source_t_ids)):\n span1 += tokens[i + l][1] + \" \"\n # setting span1 start and end indexes\n span1_start = copy.deepcopy(token_idx)\n span1_end = span1_start + len(span1) - 1\n context += span1\n token_idx += len(span1)\n i += l\n # span2\n elif t_id == target_t_ids[0]:\n for l in range(len(target_t_ids)):\n span2 += tokens[i + l][1] + \" \"\n # setting span2 start and end indexes\n span2_start = copy.deepcopy(token_idx)\n span2_end = span2_start + len(span2) - 1\n context += 
span2\n token_idx += len(span2)\n i += l\n else:\n context += token_text + \" \"\n token_idx += len(token_text) + 1\n i += 1\n\n # storing causal and non-causal info\n try:\n idx_val = {\"span1\": [[span1_start, span1_end]], \"span2\": [[span2_start, span2_end]],\n \"signal\": []}\n\n new_row = {\n \"original_id\": '{}'.format(doc),\n \"span1\": [span1.strip()],\n \"span2\": [span2.strip()],\n \"signal\": [],\n \"context\": context.strip('\\n'), \"idx\": idx_val, \"label\": label,\n \"direction\": direction,\n \"source\": self.namexid[\"eventstorylines\"],\n \"ann_file\": doc, \"split\": int(row['split'])}\n\n if self.check_span_indexes(new_row) and label in [0, 1]:\n data = data.append(new_row, ignore_index=True)\n else:\n mismatch += 1\n\n except Exception as e:\n print(\"[crest-log] EventStoryLine. Detail: {}\".format(e))\n\n logging.info(\"[crest] eventstorylines is converted.\")\n\n # adding global id to the data frame\n global_ids = [i for i in range(1, len(data) + 1)]\n data.insert(0, 'global_id', global_ids)\n data.reset_index()\n\n return data, mismatch", "def __print_events_info(self, occurrence_event):\n print(\" Name: \", occurrence_event)\n print(\" Type: Event\")\n print(\" Description:\",\n self.e_reader.get_event_description(occurrence_event))\n return 0", "def _parse_event(self, response):\n item = {'location': self._parse_location(response)}\n # Merge event details with item data from request meta\n item.update(response.meta.get('item', {}))\n return item", "def event_dicts(self):\n events = []\n # We're assuming that the table has alternating rows that\n # containg (date, event title) possibly followed by (<empty>,\n # event details).\n selector = '#ae-billing-logs-table > tbody > tr'\n for (date_elt, event_elt) in self.doc.cssselect(selector):\n if date_elt.text is not None:\n events.append({\n # <td>EVENT DATE</td>\n 'date': date_elt.text.strip(),\n # <td><span id=\"...\">EVENT TITLE</span></td>\n 'title': event_elt.findtext('span').strip()\n })\n else:\n # An empty first column indicates details for the\n # preceeding event.\n assert len(events) > 0, len(events)\n last_event = events[-1]\n if last_event['title'].startswith('Usage Report '):\n last_event['details'] = self._usage_report_dict(event_elt)\n return events", "def ProduceEventWithEventData(self, event, event_data):\n # type: (dict, dict) -> None\n print(\"event produced\")\n print(\"Event:\")\n print(pprint(vars(event)))\n print(\"Event data:\")\n print(pprint(vars(event_data)))", "def test_event_message_title(self):\n\n events = [\n {\n 'type': 'CURRENT HOST STATE',\n 'parts': ['domU-12-31-38-00-78-98', 'UP', 'HARD', '1', 'PING OK - Packet loss = 0%, RTA = 1.03 ms'],\n 'expected_msg_title': 'CURRENT HOST STATE'\n },\n {\n 'type': 'CURRENT SERVICE STATE',\n 'parts': ['domU-12-31-38-00-78-98', 'Current Load', 'OK', 'HARD', '1', 'OK - load average: 0.04, 0.03'],\n 'expected_msg_title': 'Current Load'\n },\n {\n 'type': 'SERVICE ALERT',\n 'parts': ['domU-12-31-39-02-ED-B2', 'cassandra JVM Heap', 'WARNING', 'SOFT', '1', ''],\n 'expected_msg_title': 'cassandra JVM Heap'\n },\n {\n 'type': 'HOST ALERT',\n 'parts': ['domU-12-31-39-02-ED-B2', 'DOWN', 'SOFT', '1', 'PING CRITICAL - Packet loss = 100%'],\n 'expected_msg_title': 'HOST ALERT'\n },\n {\n 'type': 'SERVICE NOTIFICATION',\n 'parts': ['pagerduty', 'ip-10-114-245-230', 'RAID EBS', 'OK', 'notify-service-by-email', ''],\n 'expected_msg_title': 'RAID EBS'\n },\n {\n 'type': 'SERVICE FLAPPING ALERT',\n 'parts': ['domU-12-31-39-16-52-37', 'cassandra JVM Heap', 
'STARTED', 'Service started flapping'],\n 'expected_msg_title': 'cassandra JVM Heap'\n },\n {\n 'type': 'ACKNOWLEDGE_SVC_PROBLEM',\n 'parts': ['domU-12-31-39-16-52-37', 'NTP', '2', '1', '0', 'nagiosadmin', 'alq'],\n 'expected_msg_title': 'NTP'\n },\n {\n 'type': 'HOST DOWNTIME ALERT',\n 'parts': ['ip-10-114-89-59', 'STARTED', 'Host has entered a period of scheduled downtime'],\n 'expected_msg_title': 'HOST DOWNTIME ALERT'\n },\n {\n 'type': 'SERVICE DOWNTIME ALERT',\n 'parts': ['ip-10-114-237-165', 'intake', 'STARTED',\n 'Service has entered a period of scheduled downtime'],\n 'expected_msg_title': 'intake'\n },\n {\n 'type': 'ACKNOWLEDGE_HOST_PROBLEM',\n 'parts': ['domU-12-31-39-16-52-37', '2', '1', '0', 'nagiosadmin', 'alq'],\n 'expected_msg_title': 'ACKNOWLEDGE_HOST_PROBLEM'\n },\n {\n 'type': 'PASSIVE SERVICE CHECK',\n 'parts': ['ip-10-114-237-165', 'some_service', 'OK', 'Service works!'],\n 'expected_msg_title': 'some_service'\n }\n\n ]\n\n for event in events:\n self._assert_event_msg_title(\n event_type=event['type'], parts=event['parts'], expected_msg_title=event['expected_msg_title']\n )", "def __init__(self, event_list):\n event_list = [AttribDict(event) for event in event_list]\n for event in event_list:\n event.datetime = UTC(event.datetime)\n if feregion is not None:\n event.flynn_region = feregion(event.latitude, event.longitude)\n for item in ['datetime_quality', 'depth_quality', 'magnitude_type', 'author', 'quality', 'information', 'origin_id', 'flynn_region']:\n if event.get(item) == None:\n event[item] = ''\n #if not event.get('magnitude_type'):\n # event['magnitude_type'] = 'xx'\n if event.get('id') == None:\n event['id'] = (str(event['datetime']))[:-4].replace('-', '').replace(':', '').replace('.', '')\n super(Events, self).__init__(event_list)" ]
[ "0.65614873", "0.6268047", "0.5862656", "0.5489602", "0.5350454", "0.52793896", "0.5253923", "0.5160849", "0.50771683", "0.5073524", "0.50472724", "0.50194335", "0.4990838", "0.49535462", "0.49394515", "0.48982656", "0.4864565", "0.486448", "0.48361394", "0.48343584", "0.4830969", "0.48206288", "0.4797807", "0.4782572", "0.47732657", "0.47682875", "0.47587463", "0.4753226", "0.4752355", "0.47507286" ]
0.7267318
0
gets all events from the type virus_check
def get_virusscan_events(file_events):
    virusscan_events = file_events.filter(event_type="virus check")
    if virusscan_events:
        return virusscan_events
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())", "def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def extract_all_events(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt]\n return result", "def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)", "def eventList(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tevents = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]\n\treturn events", "def get_passed_virus_checks(file_events):\n virus_check_events = get_virusscan_events(file_events)\n if not virus_check_events:\n return\n for event in virus_check_events:\n if event.event_outcome == \"Pass\":\n try:\n passed_event = {\n \"premis:outcome\": event.event_outcome,\n \"prov:softwareAgent\": event.event_detail.split(\";\")[0],\n \"premis:version\": event.event_detail.split(\";\")[1],\n }\n except IndexError:\n logger.info(\n \"name and version of the virus check tool %s could not be\"\n \"determined. Check if it is well formed\",\n event.event_outcome_detail,\n )\n continue\n if passed_event:\n return passed_event", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "async def events(self) -> Iterable[Event]:", "def events(self):\n return self.search(comp_class=Event)", "def get_events(self, type_filter=None):\n\n if type_filter:\n filtered_events = self.__events.get(type_filter, [])\n else:\n filtered_events = [ev for ev_type_list in self.__events.values() for ev in ev_type_list]\n\n return filtered_events", "def extract_all_types_from_event_trace(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt.type]\n return result", "def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n southern_wind,\n northern_wind,\n fog,\n sun\n ]", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n\n events = []\n\n for watched_file in self._watched_files:\n for line in watched_file:\n self._do_rule_processing(line, events)\n\n return events", "def build_events(self) -> list:\n raise NotImplementedError()", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def events(self):\r\n return ev.Events(self)", "def events(self):\r\n return ev.Events(self)", "def get_event_list(self):\n pass", "async def get_events(self) -> list[Event]:\n log.debug(\"Discovering events in branding repository.\")\n\n try:\n event_directories = await self.fetch_directory(\"events\", types=(\"dir\",)) # Skip files.\n except Exception:\n log.exception(\"Failed to fetch 'events' directory.\")\n return []\n\n instances: list[Event] = []\n\n for event_directory in 
event_directories.values():\n log.trace(f\"Attempting to construct event from directory: '{event_directory.path}'.\")\n try:\n instance = await self.construct_event(event_directory)\n except Exception as exc:\n log.warning(f\"Could not construct event '{event_directory.path}'.\", exc_info=exc)\n else:\n instances.append(instance)\n\n return instances", "def events(self):", "def test_get_events(self):\n events = gracedb.events()\n for event in events:\n self.assertTrue('graceid' in event)\n break", "def all_events(cls) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allEvents\", [])", "def events(self):\r\n return e.Events(self)", "def collect_events(helper, ew): # pylint: disable=no-self-argument,invalid-name,too-many-statements,too-many-branches\n\n def clear_checkbox(session_key, stanza):\n \"\"\" Sets the 'reindex_data' value in the REST API to 0 to clear it. Splunk then automatically restarts the input.\"\"\"\n url = f'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/data/inputs/strava_api/{stanza}'\n headers = {'Authorization': f'Splunk {session_key}'}\n payload = 'reindex_data=0'\n helper.send_http_request(url, \"POST\", headers=headers, payload=payload, verify=False, use_proxy=False)\n\n def get_activities(ts_activity, access_token):\n \"\"\"Gets all activities, 30 per page as per Strava's default.\"\"\"\n params = {'after': ts_activity, 'access_token': access_token}\n url = \"https://www.strava.com/api/v3/activities\"\n response = return_json(url, \"GET\", parameters=params)\n return response\n\n def get_activity(activity, token):\n \"\"\"Gets specific activity.\"\"\"\n url = f'https://www.strava.com/api/v3/activities/{activity}?include_all_efforts=true'\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_activity_stream(token, activity, types, series_type='time', resolution='high'):\n \"\"\"Gets the activity stream for given activity id.\"\"\"\n types = ','.join(types)\n params = {'access_token': token}\n url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_athlete(token):\n \"\"\"Gets details on currently logged in athlete.\"\"\"\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response\n\n def get_epoch(timestamp):\n \"\"\"Converts Strava datetime to epoch timestamp\"\"\"\n timestamp_dt = datetime.datetime.strptime(timestamp, \"%Y-%m-%dT%H:%M:%SZ\")\n epoch = calendar.timegm(timestamp_dt.timetuple())\n return epoch\n\n def get_token(client_id, client_secret, token, renewal):\n \"\"\"Get or refresh access token from Strava API.\"\"\"\n url = \"https://www.strava.com/api/v3/oauth/token\"\n\n if renewal:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': token,\n 'grant_type': 'refresh_token'}\n message = \"Successfully refreshed Strava token.\"\n else:\n payload = {\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'code': token,\n 'grant_type': 'authorization_code'}\n message = \"Successfully authenticated with Strava using access code.\"\n\n response = return_json(url, \"POST\", payload=payload)\n helper.log_info(message)\n return response\n\n def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp): # 
pylint: disable=too-many-arguments\n \"\"\"Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.\"\"\"\n url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'\n headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'}\n payload = [{\"_key\": athlete_id, \"id\": athlete_id, \"firstname\": firstname, \"lastname\": lastname, \"fullname\": firstname + \" \" + lastname, \"weight\": weight, \"ftp\": ftp}]\n helper.send_http_request(url, \"POST\", headers=headers, payload=payload, verify=False, use_proxy=False)\n\n def parse_data(data, activity_id, activity_start_date):\n \"\"\"Gets raw JSON data, parses it into events and writes those to Splunk.\"\"\"\n data_dict = {}\n final_dict = {}\n for i in data:\n data_dict[i['type']] = i['data']\n\n counter = 1\n nrange = len(data_dict['time'])\n for item in range(1, nrange + 1):\n final_dict[item] = {}\n\n for key, value in data_dict.items():\n counter = 1\n for i in value:\n final_dict[counter][key] = i\n final_dict[counter]['activity_id'] = activity_id\n\n if 'time' in key:\n final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date\n final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time']))\n\n if 'latlng' in key:\n final_dict[counter]['lat'] = final_dict[counter]['latlng'][0]\n final_dict[counter]['lon'] = final_dict[counter]['latlng'][1]\n final_dict[counter].pop('latlng')\n counter += 1\n\n result_list = [value for key, value in final_dict.items()]\n\n for event in result_list:\n write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event))\n\n helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.')\n return True\n\n def return_json(url, method, **kwargs):\n \"\"\"Gets JSON from URL and parses it for potential error messages.\"\"\"\n response = helper.send_http_request(url, method, use_proxy=False, **kwargs)\n\n try:\n response.raise_for_status()\n except requests.HTTPError as ex:\n # status code 429 means we hit Strava's API limit, wait till next 15 minute mark (+5 seconds) and try again\n if ex.response.status_code == 429:\n # Get the 15m/24h API limits for this user\n api_usage_15m = response.headers['X-RateLimit-Usage'].split(\",\")[0]\n api_usage_24h = response.headers['X-RateLimit-Usage'].split(\",\")[1]\n api_limit_15m = response.headers['X-RateLimit-Limit'].split(\",\")[0]\n api_limit_24h = response.headers['X-RateLimit-Limit'].split(\",\")[1]\n\n timestamp_now = int(time.time())\n modulus_time = timestamp_now % 900\n sleepy_time = 0 if modulus_time == 0 else (900 - modulus_time + 5)\n helper.log_warning(f'Strava API rate limit hit. Used {api_usage_15m}/15min (limit {api_limit_15m}), {api_usage_24h}/24h (limit {api_limit_24h}). Sleeping for {sleepy_time} seconds.')\n time.sleep(sleepy_time)\n response = return_json(url, method, **kwargs)\n helper.log_debug(f'429 detail: {response}')\n return response\n if ex.response.status_code in (400, 401):\n helper.log_error(f'{ex.response.status_code} Error: Strava API credentials invalid or session expired. 
Make sure Client ID & Client Secret have been added to the Configuration -> Add-On Parameters tab and your access code is valid.')\n sys.exit(1)\n if ex.response.status_code == 404:\n helper.log_warning(f'404 Error: no stream data for url {url}, can happen for manually added activities.')\n return False\n if ex.response.status_code == 500:\n helper.log_warning(f'500 Error: no data received from Strava API for url {url}, it might be corrupt or invalid. Skipping activity.')\n return False\n # In case there's any other error than the ones described above, log the error and exit.\n helper.log_error(f'Error: {ex}')\n sys.exit(1)\n\n # Must have been a 200 status code\n return response.json()\n\n def set_athlete(response):\n \"\"\"Creates dict with athlete details, including token expiry.\"\"\"\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_token': response['refresh_token'],\n 'expires_at': response['expires_at'],\n 'ts_activity': 0}\n return athlete\n\n def write_to_splunk(**kwargs):\n \"\"\"Writes activity to Splunk index.\"\"\"\n event = helper.new_event(**kwargs)\n ew.write_event(event)\n\n # get configuration arguments\n client_id = helper.get_global_setting('client_id')\n client_secret = helper.get_global_setting('client_secret')\n access_code = helper.get_arg('access_code')\n start_time = helper.get_arg('start_time') or 0\n types = ['time', 'distance', 'latlng', 'altitude', 'velocity_smooth', 'heartrate', 'cadence', 'watts', 'temp', 'moving', 'grade_smooth']\n\n # stanza is the name of the input. This is a unique name and will be used as a checkpoint key to save/retrieve details about an athlete\n stanza = list(helper.get_input_stanza())[0]\n athlete = helper.get_check_point(stanza)\n helper.log_debug(f'Athlete: {athlete}')\n\n # if reindex_data checkbox is set, update the start_time to be the one specified and clear the checkbox.\n if helper.get_arg('reindex_data'):\n if int(helper.get_arg('reindex_data')) == 1:\n athlete.update({'ts_activity': start_time})\n helper.save_check_point(stanza, athlete)\n # the clear_checkbox function will restart this input as soon as the change is made, so no further code required.\n clear_checkbox(helper.context_meta['session_key'], stanza)\n\n # if athlete is set, get details & tokens - otherwise fetch tokens with get_token()\n if athlete:\n athlete_id = athlete['id']\n athlete_name = athlete['name']\n expires_at = athlete['expires_at']\n refresh_token = athlete['refresh_token']\n else:\n expires_at = False\n refresh_token = False\n\n # Check if expires_at token is set and renew token if token expired. 
Otherwise fetch token with initial access code.\n if expires_at:\n if time.time() >= expires_at:\n response = get_token(client_id, client_secret, refresh_token, renewal=True)\n helper.log_debug(f\"Access token: {response['access_token']}, refresh token: {response['refresh_token']}\")\n athlete.update({'access_token': response['access_token'], 'refresh_token': response['refresh_token'], 'expires_at': response['expires_at']})\n else:\n response = get_token(client_id, client_secret, access_code, renewal=False)\n athlete = set_athlete(response)\n athlete_id = athlete['id']\n athlete_name = athlete['name']\n\n helper.save_check_point(stanza, athlete)\n\n access_token = athlete['access_token']\n athlete_detail = get_athlete(access_token)\n athlete_firstname = athlete_detail['firstname']\n athlete_lastname = athlete_detail['lastname']\n athlete_weight = ''\n athlete_ftp = ''\n if athlete_detail['resource_state'] == 3:\n athlete_weight = athlete_detail['weight']\n athlete_ftp = athlete_detail['ftp']\n\n helper.log_debug(\"Saving athlete's details to KV Store.\")\n kvstore_save_athlete(helper.context_meta['session_key'], str(athlete_id), athlete_firstname, athlete_lastname, str(athlete_weight), str(athlete_ftp))\n\n # For backwards compatibility with upgrades from pre-2.5.0, which uses athlete['ts_newest_activity']. If there, clean them up.\n if 'ts_newest_activity' in athlete:\n helper.log_info(f\"Found existing timestamp {athlete['ts_newest_activity']}! Will remove it now.\")\n ts_activity = athlete['ts_newest_activity']\n athlete.update({'ts_activity': ts_activity})\n athlete.pop('ts_newest_activity')\n athlete.pop('get_old_activities')\n athlete.pop('ts_oldest_activity')\n helper.save_check_point(stanza, athlete)\n else:\n ts_activity = athlete['ts_activity'] or start_time\n\n # webhook_updates contains updated activities that came in via webhook.\n webhook_updates = helper.get_check_point('webhook_updates') or {}\n\n if str(athlete_id) in webhook_updates:\n for activity in webhook_updates[str(athlete_id)][:]:\n helper.log_info(f'Received update via webhook for activity {activity} from athlete {athlete_id}')\n response = get_activity(activity, access_token)\n ts_activity = get_epoch(response['start_date'])\n\n # Store the event in Splunk\n write_to_splunk(index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=json.dumps(response))\n\n # Get stream data for this activity and write to Splunk\n stream_data = get_activity_stream(access_token, activity, types)\n if stream_data:\n parse_data(stream_data, activity, ts_activity)\n\n # Remove from dict and save dict\n webhook_updates[str(athlete_id)].remove(activity)\n helper.save_check_point('webhook_updates', webhook_updates)\n helper.log_info(f'Got all webhook events for athlete {athlete_id}')\n\n helper.log_info(f'Checking if there are new activities for {athlete_name} ({athlete_id})')\n\n while True:\n\n response_activities = get_activities(ts_activity, access_token)\n\n # if all activities retrieved, set get_old_activities, save checkpoint and end loop to finish\n if len(response_activities) == 0: # pylint: disable=no-else-break\n helper.log_info(f'All done, got all activities for {athlete_name} ({athlete_id})')\n break\n else:\n # Get more details from each activity\n for event in response_activities:\n activity_id = event['id']\n response = get_activity(activity_id, access_token)\n\n # response = False for a 500 Error, which is likely an invalid Strava API file. 
In that case skip the activity and continue.\n if response:\n data = json.dumps(response)\n\n # Get start_date (UTC) and convert to UTC timestamp\n ts_activity = get_epoch(event['start_date'])\n\n # Store the event in Splunk\n write_to_splunk(index=helper.get_output_index(), sourcetype=helper.get_sourcetype(), data=data)\n helper.log_info(f'Added activity {activity_id} for {athlete_id}.')\n\n # Get stream data for this activity\n stream_data = get_activity_stream(access_token, activity_id, types)\n if stream_data:\n parse_data(stream_data, activity_id, ts_activity)\n\n # Save the timestamp of the last event to a checkpoint\n athlete.update({'ts_activity': ts_activity})\n helper.save_check_point(stanza, athlete)", "def get_events_need_escalation(self, source_type: str) -> List[EventRecord]:\n with self.session.begin() as session:\n events_to_escalate = (\n session.query(EventRecord)\n .filter(\n (EventRecord.sent_at.isnot(None))\n & (EventRecord.escalated_at.is_(None))\n & (EventRecord.source_type == source_type)\n )\n .outerjoin(IgnoreFingerprintRecord, EventRecord.fingerprint == IgnoreFingerprintRecord.fingerprint)\n .filter(IgnoreFingerprintRecord.ignore_type == IgnoreFingerprintRecord.ESCALATE_MANUALLY)\n .all()\n )\n return events_to_escalate", "def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:\n return self._env.events()" ]
[ "0.63607806", "0.58471835", "0.5819439", "0.5816538", "0.58113927", "0.58003557", "0.5785132", "0.5784336", "0.578308", "0.57225883", "0.57221717", "0.5695665", "0.56645083", "0.55595946", "0.55595946", "0.55592936", "0.5546411", "0.5522073", "0.5522073", "0.54979116", "0.54979116", "0.5489995", "0.5470658", "0.5456585", "0.54331005", "0.540026", "0.537825", "0.5376936", "0.5371746", "0.5346927" ]
0.72586447
0
maps the validation event info, and therefore splits the event_detail to display the tool name and the tool version separately
def map_file_validation_info(file_validation_event):
    event_info = {}
    if not file_validation_event:
        return
    try:
        event_info.update(
            {
                "premis:outcome": file_validation_event.event_outcome_detail,
                "prov:softwareAgent": file_validation_event.event_detail.split(";")[0],
                "premis:version": file_validation_event.event_detail.split(";")[1],
            }
        )
    except IndexError:
        logger.info(
            "name and version of the file validation tool %s could not be"
            "determined. Check if it is well formed",
            file_validation_event.event_detail,
        )
    return event_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_file_format_info(file_format_event, file_validation_event):\n event_info = {}\n if not file_format_event:\n return\n try:\n event_info.update(\n {\n \"dct:FileFormat\": file_format_event.event_outcome_detail,\n \"prov:softwareAgent\": file_format_event.event_detail.split(\";\")[0],\n \"premis:version\": file_format_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_format_event.event_detail,\n )\n if file_validation_event:\n event_info.update(\n {\n \"dct:FileFormat\": file_validation_event.event_outcome_detail,\n }\n )\n return event_info", "def map_file_normalization_info(file_normalization_event):\n event_info = {}\n if not file_normalization_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_normalization_event.event_outcome_detail,\n }\n )\n if file_normalization_event.event_detail:\n event_info.update(\n {\n \"prov:softwareAgent\": file_normalization_event.event_detail.split(\n \";\"\n )[0],\n \"premis:version\": file_normalization_event.event_detail.split(\";\")[\n 1\n ],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file normalization tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_normalization_event.event_detail,\n )\n return event_info", "def format_hook_info_lines ( self,\n info, sort_info=True, append_newline=False\n ):\n max_name_len = min ( 30, max ( len(x[0]) for x in info ) )\n\n event_names = set()\n for name, ev_prio in info:\n event_names.update ( item[0] for item in ev_prio )\n\n # len(...) + 4 == len(...) + len(\"(__)\")\n event_words = [\n ( ev, (4+len(ev)) * ' ' ) for ev in sorted ( event_names )\n ]\n\n if sort_info:\n my_info = sorted ( info, key=lambda k: ( not k[1], k[0] ) )\n else:\n my_info = info\n\n for name, event_prio_list in my_info:\n events = dict ( event_prio_list )\n get_prio = lambda p: ( \"UU\" if p is None else p )\n\n yield \"{name:>{nlen}} | {ev}\".format (\n name=name, nlen=max_name_len,\n ev=' '.join (\n (\n \"{name}({prio:0>2})\".format (\n name=ev, prio=get_prio ( events[ev] )\n ) if ev in events else replacement\n for ev, replacement in event_words\n )\n )\n ).rstrip()\n # -- end for\n\n if append_newline:\n yield \"\"", "def buildEvent(data):", "def validation_event(self, message):", "def map_to_app_build_infos(self, app):\n self.build_infos.form.map_to_app(app)", "def construct_event(cls, validation_data: dict, **kwargs):\n event_data = dict()\n event_data['result'] = cls._known_results[validation_data['access']]\n event_data['client_id'] = kwargs['client_id']\n event_data['action'] = cls._known_actions[kwargs['action']]\n event_data['policy'] = kwargs['policy'] if kwargs.get('policy', None) else \"Unknown\"\n event_data['application'] = kwargs['application']\n event_data['resource_id'] = kwargs['resource_id']\n event_data['severity'] = kwargs['severity'] if kwargs.get('severity', None) else 50\n event_data['reason'] = validation_data['cause'] \\\n if validation_data['access'] == 'denied' and validation_data.get('cause', None) else None\n return event_data", "def add_event_from_info(db, event_info, event_id, tag):\n\n if 'description' not in event_info.keys():\n return False\n\n if len(event_info['description']) < MIN_CHARS_DESC:\n if VERBOSE:\n print('Failure: event description too short \\\n (>={} chars needed)'.format(MIN_CHARS_DESC))\n return False\n\n if 'name' in event_info.keys():\n ename = 
event_info['name']\n else:\n ename = None\n\n if 'venue' in event_info.keys():\n if 'name' in event_info['venue'].keys() and event_info['venue']['name']:\n lname = event_info['venue']['name']\n else:\n lname = None\n\n if 'lon' in event_info['venue'].keys() and event_info['venue']['lon']:\n lon = event_info['venue']['lon']\n else:\n lon = None\n\n if 'lat' in event_info['venue'].keys() and event_info['venue']['lat']:\n lat = event_info['venue']['lat']\n else:\n lat = None\n\n if 'address_1' in event_info['venue'].keys() \\\n and event_info['venue']['address_1']:\n address_1 = event_info['venue']['address_1']\n else:\n address_1 = None\n\n if 'zip' in event_info['venue'].keys() and event_info['venue']['zip']:\n zipcode = event_info['venue']['zip']\n else:\n zipcode = None\n\n if 'city' in event_info['venue'].keys() and event_info['venue']['city']:\n city = event_info['venue']['city']\n else:\n city = None\n\n if 'state' in event_info['venue'].keys() \\\n and event_info['venue']['state']:\n state = event_info['venue']['state']\n else:\n state = None\n else:\n lname = lon = lat = address_1 = zipcode = city = state = None\n\n if 'time' in event_info.keys() and event_info['time']:\n start_time = event_info['time']\n else:\n start_time = None\n\n if 'duration' in event_info.keys() and event_info['duration']:\n duration = event_info['duration']\n else:\n duration = None\n\n if 'description' in event_info.keys() and event_info['description']:\n description = event_info['description']\n else:\n description = None\n\n # taglist = []\n # for t in TAGS:\n # if t in description.lower() or t in ename.lower():\n # taglist.append(t)\n #\n # if len(taglist) > 0:\n # print(ename, taglist)\n # else:\n # return\n\n cursor = db.cursor()\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE mid = %s\n \"\"\",\n (event_id, ))\n\n result = cursor.fetchone()\n\n if result:\n print('Event already in database.')\n return\n\n cursor.execute(\"\"\"SELECT eid\n FROM Events\n WHERE ename = %s\n \"\"\",\n (ename, ))\n if result:\n print('Event already in database.')\n return\n\n loc_query = \\\n \"\"\"\n INSERT\n INTO Locations(lname, lat, lon, address_1, zip, city, state)\n VALUES (%s, %s, %s, %s, %s, %s, %s)\n \"\"\"\n\n cursor.execute(loc_query, (\n lname,\n lon,\n lat,\n address_1,\n zipcode,\n city,\n state,\n ))\n\n db.commit()\n\n print('Inserted into Locations.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n lid = cursor.fetchone()\n\n start_date = str(datetime.fromtimestamp(start_time / 1000))\n\n if start_date and duration:\n end_date = str(datetime.fromtimestamp((start_time + duration) / 1000))\n else:\n end_date = None\n\n ev_query = \\\n \"\"\"\n INSERT\n INTO Events(ename, start_date, end_date,\n num_attending, lid, description, mid)\n VALUES (%s, %s, %s, %s, %s, %s, %s);\n \"\"\"\n\n cursor.execute(ev_query, (\n ename.encode('ascii', 'ignore'),\n start_date,\n end_date,\n 0,\n lid,\n description.encode('ascii', 'ignore'),\n event_id,\n ))\n\n db.commit()\n\n print('Inserted into Events.')\n\n cursor.execute('SELECT LAST_INSERT_ID()')\n\n eid = cursor.fetchone()\n\n # for tag in taglist:\n # category = None\n # for c in CATEGORIES:\n # if tag in CATEGORIES[c]:\n # category = c\n\n et_query = \\\n \"\"\"\n INSERT\n INTO EventTags(eid, tag, category)\n VALUES (%s, %s, %s)\n \"\"\"\n\n cursor.execute(et_query, (eid, tag, tag))\n\n db.commit()\n\n print('Inserted into EventTags.')\n\n if VERBOSE:\n print('Finished.')\n return True", "def _exc_info_to_string(self, err, test):\n\n info = 
super(CustomTextTestResult, self)._exc_info_to_string(err, test)\n\n if self.showAll:\n info = 'Test number: {index}\\n{info}'.format(\n index=test.progress_index,\n info=re.sub(\"AssertionError:(.*?):\", \"\\nERROR WAS:\\n\", info)\n )\n\n return info", "def state_print_validate(cfg, app, win, events):", "def new_desc_event(self, event):\r\n pass", "def test_describe_event(self):\n pass", "def validate(tool):\n if SHED not in tool:\n tool[SHED] = DEFAULT_TOOLSHED\n if REVISIONS not in tool:\n tool[REVISIONS] = []", "def get_interesting_event_description(self):\n pass", "def evaluate_event_data(event, sources=[\"OGLE\"]):\n\n\ttrigger_this_event = False\n\ttE_test = \"untested\"\n\tmicrolensing_assessment_MOA_test = \"untested\"\n\tK2_microlensing_superstamp_region_test = \"untested\" # Checking exoFPO master event list for K2 superstamp\n\tK2_microlensing_superstamp_region_alternate_test = \"untested\" # Testing for K2 superstamp ourselves, with K2fov module\n\tmag_test = \"untested\"\n\t\n\tassessment_MOA = \"\"\n\tfor source in sources:\n\t\t# Run Einstein time test (stricter, tE only check for now; pass in tE and tE_err if you want to include error check too)\n\t\ttE_key = \"tE_\" + source\n\t\ttE_err_key = \"tE_err_\" + source\n\t\ttE = event[tE_key]\n\t\ttE_err = event[tE_err_key]\n\n\t\tlogger.info(\"For fit from source %s:\" % (source))\n\t\tlogger.info(\"Einstein time: %s +/- %s days\" % (tE, tE_err))\n\t\t#einstein_time_check = check_einstein_time(tE, tE_err)\n\t\teinstein_time_check = check_einstein_time(tE)\n\t\tif einstein_time_check:\n\t\t\tlogger.info(\"%s Einstein time passed!\" % source)\n\t\t\ttE_test = \"passed\"\n\t\t\tif event.has_key(\"passing_tE_sources\"):\n\t\t\t\tevent[\"passing_tE_sources\"].append(source)\n\t\t\telse:\n\t\t\t\tevent[\"passing_tE_sources\"] = [source]\n\t\telse:\n\t\t\tlogger.info(\"%s Einstein time failed: lower bound must be equal to or less than %s days.\" % (source, str(MAX_EINSTEIN_TIME)))\n\t\t\tif tE_test == \"untested\":\t\t\t\n\t\t\t\ttE_test = \"failed\"\t\t\n\n\t\t# Run tests which only apply to MOA events\n\t\tif source == \"MOA\":\n\t\t\t# Run MOA microlensing assessment test\n\t\t\tassessment_MOA = event[\"assessment_MOA\"]\n\n\t\t\t# Run MOA most-recent-magnitude-without-too-large-of-an-error test\n\t\t\tmag_values = [event[\"mag_MOA\"], event[\"mag_err_MOA\"]]\n\t\t\tif check_mag(mag_values):\n\t\t\t\tmag_test = \"passed\"\n\t\t\telse:\n\t\t\t\tmag_test = \"failed\"\n\n\t\t\t#DEBUG: Run alternate K2 microlensing superstamp testing\n\t\t\t# For testing agreement with two methods of testing K2 superstamp\n\t\t\tif DEBUGGING_MODE:\n\t\t\t\tRA_degrees_MOA = event[\"RA_degrees_MOA\"]\n\t\t\t\tDec_degrees_MOA = event[\"Dec_degrees_MOA\"]\n\t\t\t\tif check_microlens_region(RA_degrees_MOA, Dec_degrees_MOA):\n\t\t\t\t\tK2_microlensing_superstamp_region_alternate_test = \"passed\"\n\t\t\t\telse:\n\t\t\t\t\tK2_microlensing_superstamp_region_alternate_test = \"failed\"\n\n\tif event.has_key(\"passing_tE_sources\"):\n\t\tevent[\"passing_tE_sources\"].sort()\n\t\tpassing_tE_sources = event[\"passing_tE_sources\"]\n\t\tpassing_tE_sources_output = \"Sources of passing tE values: \"\n\t\tfor i in xrange(len(passing_tE_sources)):\n\t\t\tpassing_tE_sources_output += passing_tE_sources[i]\n\t\t\tif i < len(passing_tE_sources) - 1:\n\t\t\t\tpassing_tE_sources_output += \", \"\n\t\tlogger.debug(passing_tE_sources_output)\n\n\tif assessment_MOA != \"\":\n\t\tif is_microlensing(assessment_MOA):\n\t\t\tmicrolensing_assessment_MOA_test = 
\"passed\"\n\t\telse:\n\t\t\tmicrolensing_assessment_MOA_test = \"failed\"\n\n\tif event.has_key(\"in_K2_superstamp\"):\n\t\tif event[\"in_K2_superstamp\"]:\n\t\t\tK2_microlensing_superstamp_region_test = \"passed\"\n\t\telse:\n\t\t\tK2_microlensing_superstamp_region_test = \"failed\"\n\telse:\n\t\tlogger.warning(\"Event has no key in_K2_superstamp even though it should\")\n\t\tlogger.warning(\"Event:\\n%s\" % event)\n\t\n\t#DEBUG: Testing agreement with using K2 superstamp test ourselves, instead of relying on master list\n\tif DEBUGGING_MODE:\n\t\tK2_microlensing_superstamp_region_disagreement = False\n\t\t\n\t\t\"\"\"There is a disagreement between the two microlensiong region tests if\n\t\tthey have different results and at least one of them has passed (ruling out the case where one\n\t\tis untested and the other has failed, which should not count as a disagreement)\"\"\"\n\t\tif K2_microlensing_superstamp_region_test != K2_microlensing_superstamp_region_alternate_test:\n\t\t\tif K2_microlensing_superstamp_region_test == \"passed\" or K2_microlensing_superstamp_region_alternate_test == \"passed\":\n\t\t\t\tmicrolensing_disagreement = True\n\t\t\n\t\t# If there is a disagreement, log information about it.\n\t\tif K2_microlensing_superstamp_region_disagreement:\n\t\t\tlogger.warning(\"There is disagreement about the test for whether the event is in the K2 superstamp.\")\n\t\t\tlogger.warning(\"The test which uses the K2fov module, evaluating the RA and Dec from MOA, says that the event:\")\n\t\t\tif K2_microlensing_superstamp_region_alternate_test == \"passed\":\n\t\t\t\tlogger.warning(\"passes.\")\n\t\t\telif K2_microlensing_superstamp_region_alternate_test == \"failed\":\n\t\t\t\tlogger.warning(\"does NOT pass.\")\n\t\t\telif K2_microlensing_superstamp_region_alternate_test == \"untested\":\n\t\t\t\tlogger.warning(\"was not tested.\")\n\t\t\tlogger.warning(\"The exoFOP master event list test says that the event:\")\n\t\t\tif K2_microlensing_superstamp_region_test == \"passed\":\n\t\t\t\tlogger.warning(\"passes.\")\n\t\t\telif K2_microlensing_superstamp_region_test == \"failed\":\n\t\t\t\tlogger.warning(\"does NOT pass.\")\t\n\t\t\t\tlogger.warning(\"This means the superstamp entry in the master list is either False or Unknown.\")\n\t\t\telif K2_microlensing_superstamp_region_test == \"untested\":\n\t\t\t\tlogger.warning(\"was not tested.\")\n\n\t# Add test results to event dictionary\n\tevent[\"tE_test\"] = tE_test\n\tevent[\"microlensing_assessment_MOA_test\"] = microlensing_assessment_MOA_test\n\tevent[\"K2_microlensing_superstamp_region_test\"] = K2_microlensing_superstamp_region_test\n\tif DEBUGGING_MODE:\n\t\tevent[\"K2_microlensing_superstamp_region_alternate_test\"] = K2_microlensing_superstamp_region_alternate_test\n\tevent[\"mag_test\"] = mag_test\n\n\t# Turn on trigger flag if the tE test was successful - \n\t# we can change the criteria for activating the trigger flag if we'd like\n\tif tE_test == \"passed\":\n\t\ttrigger_this_event = True\n\n\t# Trigger if trigger flag is on\n\tif trigger_this_event:\n\t\ttrigger_event(event)", "def processEventNeedleValidation(self,observee,event=None):\n #productive #frequent #event-handler\n if frequent: profprint();\n if self.sliceWidgetsPerStyle.has_key(observee) and event == \"LeftButtonPressEvent\":\n if slicer.app.repositoryRevision<= 21022:\n sliceWidget = self.sliceWidgetsPerStyle[observee]\n style = sliceWidget.sliceView().interactorStyle() \n xy = style.GetInteractor().GetEventPosition()\n xyz = 
sliceWidget.convertDeviceToXYZ(xy)\n ras = sliceWidget.convertXYZToRAS(xyz)\n else:\n sliceWidget = self.sliceWidgetsPerStyle[observee]\n sliceLogic = sliceWidget.sliceLogic()\n sliceNode = sliceWidget.mrmlSliceNode()\n interactor = observee.GetInteractor()\n xy = interactor.GetEventPosition()\n xyz = sliceWidget.sliceView().convertDeviceToXYZ(xy);\n ras = sliceWidget.sliceView().convertXYZToRAS(xyz)\n \n colorVar = random.randrange(50,100,1)/(100)\n volumeNode = slicer.app.layoutManager().sliceWidget(\"Red\").sliceLogic().GetBackgroundLayer().GetVolumeNode()\n imageData = volumeNode.GetImageData()\n spacing = volumeNode.GetSpacing()\n ijk = self.logic.ras2ijk(ras)\n \n self.logic.t0 = time.clock()\n slicer.modules.NeedleFinderWidget.stepNeedle += 1\n self.logic.placeNeedleShaftEvalMarker(ijk, imageData, colorVar,spacing)\n\n # if self.sliceWidgetsPerStyle.has_key(observee) and event == \"LeaveEvent\":\n # self.stop()", "def event_dicts(self):\n events = []\n # We're assuming that the table has alternating rows that\n # containg (date, event title) possibly followed by (<empty>,\n # event details).\n selector = '#ae-billing-logs-table > tbody > tr'\n for (date_elt, event_elt) in self.doc.cssselect(selector):\n if date_elt.text is not None:\n events.append({\n # <td>EVENT DATE</td>\n 'date': date_elt.text.strip(),\n # <td><span id=\"...\">EVENT TITLE</span></td>\n 'title': event_elt.findtext('span').strip()\n })\n else:\n # An empty first column indicates details for the\n # preceeding event.\n assert len(events) > 0, len(events)\n last_event = events[-1]\n if last_event['title'].startswith('Usage Report '):\n last_event['details'] = self._usage_report_dict(event_elt)\n return events", "def format_machine_info(vlab_api, info):\n rows = []\n kind = info['meta']['component']\n version = info['meta']['version']\n rows.append(['Type', ':', kind])\n rows.append(['Version', ':', version])\n rows.append(['State', ':', info['state']])\n rows.append(['IPs', ':', ' '.join(info['ips'])])\n rows.append(['Networks', ':', ','.join(info['networks'])])\n return tabulate(rows, tablefmt='plain')", "def prepare_hr_for_events(events_info) -> str:\n hr_list = []\n for record in events_info:\n hr_record = {\n 'Event ID': record.get('eventId', None),\n TIME_UTC: record.get('occurred', ''),\n VICTIM_IP: record.get('srcIp', ''),\n 'Attacker IP': record.get('dstIp', ''),\n 'CVE ID': record.get('cveId', ''),\n 'Severity': record.get('severity', None),\n 'Rule': record.get('ruleName', ''),\n 'Protocol': record.get('protocol', None),\n }\n hr_list.append(hr_record)\n\n return tableToMarkdown(\n 'IPS Events',\n hr_list,\n [\n 'Event ID',\n TIME_UTC,\n VICTIM_IP,\n 'Attacker IP',\n 'CVE ID',\n 'Severity',\n 'Rule',\n 'Protocol',\n ],\n removeNull=True,\n )", "def setValidationInfo(self, displayname, recvstring):\n response = recvstring\n companyTestBed = \"\"\n modelTestBed = \"\"\n firmwareTestBed = \"\"\n Sniffer_WTS_VER = \"\"\n Sniffer_VendorName = \"\"\n Sniffer_DeviceModel = \"\"\n Sniffer_DeviceFirmware = \"\"\n ret_dict = {}\n\n if displayname.lower() == 'sniffer':\n #wfa_sniffer!sniffer_get_info!ID,$Sniffer_WTS_VER,$Sniffer_VendorName,$Sniffer_DeviceModel,$Sniffer_DeviceFirmware\n #status,COMPLETE,WfaSnifferVersion,$WfaSnifferVersion,SnifferSTA,$SnifferSTA,SwInfo,$DeviceSwInfo\\_$kernel_Ver,WiresharkVersion,$WiresharkInfo\\r\\n\n ret_items = response.split(',')\n \n \n if len(ret_items) > 9:\n Sniffer_WTS_VER = ret_items[3]\n Sniffer_VendorName = ret_items[5]\n Sniffer_DeviceModel = ret_items[7]\n 
Sniffer_DeviceFirmware = ret_items[9]\n else:\n if re.search(r\"status,COMPLETE\", response):\n if re.search(r\"WfaSnifferVersion\", response):\n posVendor = response.index('WfaSnifferVersion,') + len('WfaSnifferVersion,')\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n Sniffer_WTS_VER = data[:posSym]\n except Exception:\n Sniffer_WTS_VER = data.rstrip('\\n')\n\n if re.search(r\"SnifferSTA\", response):\n posVendor = response.index('SnifferSTA,') + len('SnifferSTA,')\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n Sniffer_VendorName = data[:posSym]\n except Exception:\n Sniffer_VendorName = data.rstrip('\\n')\n\n if re.search(r\"SwInfo\", response):\n posVendor = response.index('SwInfo,') + len('SwInfo,')\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n Sniffer_DeviceModel = data[:posSym]\n except Exception:\n Sniffer_DeviceModel = data.rstrip('\\n')\n\n if re.search(r\"WiresharkVersion\", response):\n posVendor = response.index('WiresharkVersion,') + len('WiresharkVersion,')\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n Sniffer_DeviceFirmware = data[:posSym]\n except Exception:\n Sniffer_DeviceFirmware = data.rstrip('\\n')\n\n setRetVal('$ca_version', Sniffer_WTS_VER)\n setRetVal('$tbd_info1', Sniffer_VendorName)\n setRetVal('$sw_version', Sniffer_DeviceModel)\n setRetVal('$tbd_info2', Sniffer_DeviceFirmware) \n\n ret_dict['ca_version'] = Sniffer_WTS_VER\n ret_dict['tbd_info1'] = Sniffer_VendorName\n ret_dict['sw_version'] = Sniffer_DeviceModel\n ret_dict['tbd_info2'] = Sniffer_DeviceFirmware\n\n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n\n if tbd.dev_type == \"SNIFFER\":\n tbd.vendor = Sniffer_VendorName\n tbd.model = Sniffer_DeviceModel\n tbd.firmver = Sniffer_DeviceFirmware\n tbd.wtsver = Sniffer_WTS_VER\n tbd.validation_dict = ret_dict\n\n break\n\n\n else:\n if re.search(r\"status,COMPLETE\", response):\n if re.search(r\"vendor\", response):\n posVendor = response.index('vendor,') + 7\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n companyTestBed = data[:posSym]\n except Exception:\n companyTestBed = data.rstrip('\\n')\n\n if re.search(r\"model\", response):\n posVendor = response.index('model,') + 6\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n modelTestBed = data[:posSym]\n except Exception:\n modelTestBed = data.rstrip('\\n')\n\n if re.search(r\"version\", response):\n posVendor = response.index('version,') + 8\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n firmwareTestBed = data[:posSym]\n except Exception:\n firmwareTestBed = data.rstrip('\\n')\n \n if re.search(r\"firmware\", response):\n posVendor = response.index('firmware,') + 9\n data = response[posVendor:]\n data = data.lstrip()\n try:\n posSym = data.index(',')\n firmwareTestBed = data[:posSym]\n except Exception:\n firmwareTestBed = data.rstrip('\\n')\n \n \n \n for tbd in self.test_mngr_initr.test_prog_mngr.test_prog.testbed_dev_list:\n \n \n if tbd.ctrlipaddr == self.ExecutionTask.get_ipport().split(':')[0]:\n #\n if companyTestBed != \"\":\n tbd.vendor = companyTestBed\n if modelTestBed != \"\":\n tbd.model = modelTestBed\n if firmwareTestBed != \"\":\n if self.ExecutionTask.get_cmd() == \"ca_get_version\":\n tbd.ca_version = firmwareTestBed\n else:\n tbd.sw_version = firmwareTestBed\n \n if 
self.ExecutionTask.get_cmd() == \"ca_get_version\":\n \n tbd.validation_dict['ca_version'] = firmwareTestBed\n else:\n tbd.validation_dict['sw_version'] = firmwareTestBed \n tbd.validation_dict['vendor'] = companyTestBed\n tbd.validation_dict['model'] = modelTestBed \n \n break", "def OnAbout(self, event):\n messtr = u\"单细胞簇识别系统\\n\"\n messtr =messtr +u\"这是个识别单细胞簇的层次聚类系统,使用方法如下:\\n\" \n messtr =messtr +u\"1.将你的数据处理成三个文件expr.h5,features.txt,labels.txt,分别是用h5py.create_dataset创建的N细胞*M基因的细胞表达式矩阵和用np.savetxt函数保存的基因文件和标签文件(注意一行一个不能有空格)放在一个文件夹即可,可以参考prosess_sys.py\\n\"\n messtr =messtr +u\"2.点击选择文件按钮选择文件夹,此时右边会提示成功与否\\n\"\n messtr =messtr +u\"3.thresh表示过滤掉某个基因表达的细胞数少于百分比细胞数的,范围为0-1,为零时不过滤低表达基因\\n\" \n messtr =messtr +u\"z_cutoff是离散值,bins是分为几份,是整数,将基因按照在所有细胞表达均值分成bins份,然后去掉每一份zscore小于z_cutoff的基因\\n\"\n messtr =messtr +u\"4.可以选择不同的降维算法进行降维\\n\"\n messtr =messtr +u\"5.split_score和merge_score是聚类的两个超参数,一般后者是前者的一半,基于韦尔奇t检查的两个集群之间的距离度量如果大于这个split_score就分裂,小于merge_score就合并(采用的聚类方法是先分裂再合并的)\\n\"\n messtr =messtr +u\"6.ys是层次聚类分裂的结果,ym是分裂再凝聚后的结果,ySC3是SC3算法的结果,ySafe是SAFE算法的结果,yclf是一致性聚类的结果,yKmean是kmeans算法的结果\"\n wx.MessageBox(messtr,\n \"About System\",\n wx.OK|wx.ICON_INFORMATION)", "def test_event_message_title(self):\n\n events = [\n {\n 'type': 'CURRENT HOST STATE',\n 'parts': ['domU-12-31-38-00-78-98', 'UP', 'HARD', '1', 'PING OK - Packet loss = 0%, RTA = 1.03 ms'],\n 'expected_msg_title': 'CURRENT HOST STATE'\n },\n {\n 'type': 'CURRENT SERVICE STATE',\n 'parts': ['domU-12-31-38-00-78-98', 'Current Load', 'OK', 'HARD', '1', 'OK - load average: 0.04, 0.03'],\n 'expected_msg_title': 'Current Load'\n },\n {\n 'type': 'SERVICE ALERT',\n 'parts': ['domU-12-31-39-02-ED-B2', 'cassandra JVM Heap', 'WARNING', 'SOFT', '1', ''],\n 'expected_msg_title': 'cassandra JVM Heap'\n },\n {\n 'type': 'HOST ALERT',\n 'parts': ['domU-12-31-39-02-ED-B2', 'DOWN', 'SOFT', '1', 'PING CRITICAL - Packet loss = 100%'],\n 'expected_msg_title': 'HOST ALERT'\n },\n {\n 'type': 'SERVICE NOTIFICATION',\n 'parts': ['pagerduty', 'ip-10-114-245-230', 'RAID EBS', 'OK', 'notify-service-by-email', ''],\n 'expected_msg_title': 'RAID EBS'\n },\n {\n 'type': 'SERVICE FLAPPING ALERT',\n 'parts': ['domU-12-31-39-16-52-37', 'cassandra JVM Heap', 'STARTED', 'Service started flapping'],\n 'expected_msg_title': 'cassandra JVM Heap'\n },\n {\n 'type': 'ACKNOWLEDGE_SVC_PROBLEM',\n 'parts': ['domU-12-31-39-16-52-37', 'NTP', '2', '1', '0', 'nagiosadmin', 'alq'],\n 'expected_msg_title': 'NTP'\n },\n {\n 'type': 'HOST DOWNTIME ALERT',\n 'parts': ['ip-10-114-89-59', 'STARTED', 'Host has entered a period of scheduled downtime'],\n 'expected_msg_title': 'HOST DOWNTIME ALERT'\n },\n {\n 'type': 'SERVICE DOWNTIME ALERT',\n 'parts': ['ip-10-114-237-165', 'intake', 'STARTED',\n 'Service has entered a period of scheduled downtime'],\n 'expected_msg_title': 'intake'\n },\n {\n 'type': 'ACKNOWLEDGE_HOST_PROBLEM',\n 'parts': ['domU-12-31-39-16-52-37', '2', '1', '0', 'nagiosadmin', 'alq'],\n 'expected_msg_title': 'ACKNOWLEDGE_HOST_PROBLEM'\n },\n {\n 'type': 'PASSIVE SERVICE CHECK',\n 'parts': ['ip-10-114-237-165', 'some_service', 'OK', 'Service works!'],\n 'expected_msg_title': 'some_service'\n }\n\n ]\n\n for event in events:\n self._assert_event_msg_title(\n event_type=event['type'], parts=event['parts'], expected_msg_title=event['expected_msg_title']\n )", "def convert_eventstorylines_v2(self, version=\"1.5\"):\n\n splits = {'full_corpus/v{}/event_mentions_extended'.format(version): 0,\n 'test_corpus/v{}/event_mentions_extended'.format(version): 2}\n\n annotations 
= pd.DataFrame(columns=['file', 'source', 'target', 'label', 'split'])\n\n # creating a dictionary of all documents\n data = pd.DataFrame(columns=self.scheme_columns)\n\n # ----------------------------------\n # reading all the annotations\n for key, value in splits.items():\n docs_path = self.dir_path + \"EventStoryLine/evaluation_format/{}\".format(key)\n\n for folder in os.listdir(docs_path):\n if not any(sub in folder for sub in [\".txt\", \".pdf\", \".DS_Store\"]):\n for doc in os.listdir('{}/{}'.format(docs_path, folder)):\n if \".tab\" in doc:\n with open('{}/{}/{}'.format(docs_path, folder, doc), 'r') as file:\n lines = file.readlines()\n for line in lines:\n line = line.split('\\t')\n annotations = annotations.append(\n {'file': '{}.{}'.format(doc.split('.')[0], 'xml'), 'source': line[0],\n 'target': line[1],\n 'label': line[2].replace('\\n', ''), 'split': value}, ignore_index=True)\n\n # ----------------------------------\n mismatch = 0\n docs_path = self.dir_path + \"EventStoryLine/ECB+_LREC2014/ECB+\"\n\n # creating a dictionary of all documents\n data = pd.DataFrame(columns=self.scheme_columns)\n\n for index, row in annotations.iterrows():\n # parse the doc to retrieve info of sentences\n folder = str(row['file']).split('_')[0]\n tree = ET.parse(docs_path + \"/\" + folder + \"/\" + row['file'])\n root = tree.getroot()\n\n tokens = []\n\n # saving tokens info\n for token in root.findall('token'):\n tokens.append([int(token.attrib['t_id']), token.text, int(token.attrib['sentence'])])\n\n label = -1\n direction = -1\n if str(row['label']) == \"PRECONDITION\":\n label = 1\n direction = 0\n elif str(row['label']) == \"FALLING_ACTION\":\n label = 1\n direction = 1\n\n source_t_ids = []\n target_t_ids = []\n for item in row['source'].split('_'):\n source_t_ids.append(int(item))\n for item in row['target'].split('_'):\n target_t_ids.append(int(item))\n\n context = \"\"\n span1 = \"\"\n span2 = \"\"\n token_idx = 0\n\n # finding start and end sentences indexes\n for i in range(len(tokens)):\n if tokens[i][0] == source_t_ids[0]:\n s_sen_id = int(tokens[i][2])\n if tokens[i][0] == target_t_ids[-1]:\n t_sen_id = int(tokens[i][2])\n\n # building the context and finding spans\n i = 0\n\n if t_sen_id < s_sen_id:\n s_sen_id, t_sen_id = t_sen_id, s_sen_id\n\n while i < len(tokens):\n t_id = tokens[i][0]\n token_text = tokens[i][1]\n token_sen_id = int(tokens[i][2])\n if s_sen_id <= int(token_sen_id) <= t_sen_id:\n # span1\n if t_id == source_t_ids[0]:\n for l in range(len(source_t_ids)):\n span1 += tokens[i + l][1] + \" \"\n # setting span1 start and end indexes\n span1_start = copy.deepcopy(token_idx)\n span1_end = span1_start + len(span1) - 1\n context += span1\n token_idx += len(span1)\n i += l\n # span2\n elif t_id == target_t_ids[0]:\n for l in range(len(target_t_ids)):\n span2 += tokens[i + l][1] + \" \"\n # setting span2 start and end indexes\n span2_start = copy.deepcopy(token_idx)\n span2_end = span2_start + len(span2) - 1\n context += span2\n token_idx += len(span2)\n i += l\n else:\n context += token_text + \" \"\n token_idx += len(token_text) + 1\n i += 1\n\n # storing causal and non-causal info\n try:\n idx_val = {\"span1\": [[span1_start, span1_end]], \"span2\": [[span2_start, span2_end]],\n \"signal\": []}\n\n new_row = {\n \"original_id\": '{}'.format(doc),\n \"span1\": [span1.strip()],\n \"span2\": [span2.strip()],\n \"signal\": [],\n \"context\": context.strip('\\n'), \"idx\": idx_val, \"label\": label,\n \"direction\": direction,\n \"source\": 
self.namexid[\"eventstorylines\"],\n \"ann_file\": doc, \"split\": int(row['split'])}\n\n if self.check_span_indexes(new_row) and label in [0, 1]:\n data = data.append(new_row, ignore_index=True)\n else:\n mismatch += 1\n\n except Exception as e:\n print(\"[crest-log] EventStoryLine. Detail: {}\".format(e))\n\n logging.info(\"[crest] eventstorylines is converted.\")\n\n # adding global id to the data frame\n global_ids = [i for i in range(1, len(data) + 1)]\n data.insert(0, 'global_id', global_ids)\n data.reset_index()\n\n return data, mismatch", "def processEventNeedleValidation(self, observee, event=None):\r\n # productive #frequent #event-handler\r\n if frequent: profprint();\r\n if self.sliceWidgetsPerStyle.has_key(observee) and event == \"LeftButtonPressEvent\":\r\n\r\n sliceWidget = self.sliceWidgetsPerStyle[observee]\r\n interactor = observee.GetInteractor()\r\n xy = interactor.GetEventPosition()\r\n xyz = sliceWidget.sliceView().convertDeviceToXYZ(xy);\r\n ras = sliceWidget.sliceView().convertXYZToRAS(xyz)\r\n\r\n ijk = self.logic.ras2ijk(ras)\r\n\r\n self.logic.t0 = time.clock()\r\n widget = slicer.modules.NeedleFinderWidget\r\n widget.stepNeedle += 1\r\n self.logic.placeNeedleShaftEvalMarker(ijk, widget.editNeedleTxtBox.value, self.logic.findNextStepNumber(widget.editNeedleTxtBox.value))\r\n self.logic.drawValidationNeedles()", "def cal_name(self):\n return self.event.event_name + ' ' + (self.service.shortname if self.service else self.category.name) + ' Setup'", "def get_info(self):\n # REMARK: it would be possible to use AtypicalEvent.__dict__,\n # but we'll stick to this solution if more info need to be added later\n\n dict_event_info = {}\n dict_event_info['name'] = self.name\n dict_event_info['date_start'] = self.date_start\n dict_event_info['date_end'] = self.date_end\n dict_event_info['duration'] = self.duration\n dict_event_info['type_event'] = self.type_event\n dict_event_info['is_atypical'] = self.is_atypical\n\n return dict_event_info", "def map_from_app_build_infos(self, app):\n return self.build_infos.form.map_from_app(app)", "def format_event(event):\n del event['period']\n try:\n name= \"\"\n for course in event['courses']:\n name = \"%s \"%(course['name'])\n del event['courses']\n event['courses'] = name\n except:\n pass\n\n try:\n name= event['course']['name']\n del event['course']\n event['course'] = name\n except:\n pass\n\n return event", "def format_file_event(event):\n event_dict = {\n \"premis:eventIdentifier\": event.event_id,\n \"event_name\": event.event_type,\n \"prov:softwareAgent\": event.event_detail,\n \"premis:outcome\": event.event_outcome,\n \"event_outcome_detail\": event.event_outcome_detail,\n }\n return event_dict", "def __print_events_info(self, occurrence_event):\n print(\" Name: \", occurrence_event)\n print(\" Type: Event\")\n print(\" Description:\",\n self.e_reader.get_event_description(occurrence_event))\n return 0" ]
[ "0.6742715", "0.61438614", "0.5372289", "0.5349182", "0.5309553", "0.52051926", "0.5093518", "0.5081945", "0.49995312", "0.49678308", "0.4956184", "0.49480337", "0.48884967", "0.48879263", "0.48558927", "0.48491517", "0.4832293", "0.48005238", "0.47813728", "0.47797647", "0.47671202", "0.47582534", "0.4750741", "0.47374898", "0.47368756", "0.46948537", "0.4692611", "0.46909142", "0.4690012", "0.46862856" ]
0.75312114
0
maps info regarding the file format and therefore uses the file_format_event to get the id, tool and tool_version of the used tool, and takes the outcome_detail of the validation event to get the name of the file format, if the file has those events linked.
def map_file_format_info(file_format_event, file_validation_event):
    event_info = {}
    if not file_format_event:
        return
    try:
        event_info.update(
            {
                "dct:FileFormat": file_format_event.event_outcome_detail,
                "prov:softwareAgent": file_format_event.event_detail.split(";")[0],
                "premis:version": file_format_event.event_detail.split(";")[1],
            }
        )
    except IndexError:
        logger.info(
            "name and version of the file format tool %s could not be"
            "determined. Check if it is well formed",
            file_format_event.event_detail,
        )
    if file_validation_event:
        event_info.update(
            {
                "dct:FileFormat": file_validation_event.event_outcome_detail,
            }
        )
    return event_info
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def map_file_validation_info(file_validation_event):\n event_info = {}\n if not file_validation_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_validation_event.event_outcome_detail,\n \"prov:softwareAgent\": file_validation_event.event_detail.split(\";\")[0],\n \"premis:version\": file_validation_event.event_detail.split(\";\")[1],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file validation tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_validation_event.event_detail,\n )\n return event_info", "def map_file_normalization_info(file_normalization_event):\n event_info = {}\n if not file_normalization_event:\n return\n try:\n event_info.update(\n {\n \"premis:outcome\": file_normalization_event.event_outcome_detail,\n }\n )\n if file_normalization_event.event_detail:\n event_info.update(\n {\n \"prov:softwareAgent\": file_normalization_event.event_detail.split(\n \";\"\n )[0],\n \"premis:version\": file_normalization_event.event_detail.split(\";\")[\n 1\n ],\n }\n )\n except IndexError:\n logger.info(\n \"name and version of the file normalization tool %s could not be\"\n \"determined. Check if it is well formed\",\n file_normalization_event.event_detail,\n )\n return event_info", "def get_file_format_event(file_events):\n file_format_event = file_events.filter(event_type=\"format identification\").first()\n if file_format_event:\n return file_format_event", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def get_existing_file_format(data, format):\n if format in XLS_EXTENSIONS:\n existing_file_format = data.name.split(\".\")[-1]\n return existing_file_format\n return format", "def identify_filename_metadata(filename, file_format='CMIP6'):\n if file_format == 'CMIP5':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'date_string']\n elif file_format == 'CMIP6':\n components = ['cmor_name', 'table', 'climate_model', 'experiment',\n 'rip_code', 'grid', 'date_string']\n else:\n raise NotImplementedError('file_format must be CMIP5 or CMIP6')\n\n basename = os.path.basename(filename)\n directory = os.path.dirname(filename)\n metadata = {'basename': basename, 'directory': directory}\n\n # split the filename into sections\n if basename.endswith('-clim.nc'):\n filename_sects = basename.rpartition('-clim.nc')[0].split('_')\n else:\n filename_sects = basename.rpartition('.nc')[0].split('_')\n\n # but if experiment present_day was in the filename, join these sections\n # back together. 
This should only occur in pre-PRIMAVERA data.\n if filename_sects[3] == 'present' and filename_sects[4] == 'day':\n filename_sects[3] += '_' + filename_sects.pop(4)\n\n # deduce as much as possible from the filename\n try:\n for cmpt_name, cmpt in zip(components, filename_sects):\n if cmpt_name == 'date_string':\n frequency = _get_frequency(metadata['table'])\n start_date, end_date = cmpt.split('-')\n try:\n metadata['start_date'] = _make_partial_date_time(\n start_date, frequency)\n metadata['end_date'] = _make_partial_date_time(\n end_date, frequency)\n except ValueError:\n msg = 'Unknown date format in filename: {}'.format(\n filename)\n raise FileValidationError(msg)\n else:\n metadata[cmpt_name] = cmpt\n except ValueError:\n msg = 'Unknown filename format: {}'.format(filename)\n raise FileValidationError(msg)\n\n # fixed variables won't have a time range and so create blank values\n potential_missing_values = ['start_date', 'end_date']\n for missing_value in potential_missing_values:\n if missing_value not in metadata:\n metadata[missing_value] = None\n\n metadata['filesize'] = os.path.getsize(filename)\n\n for freq in FREQUENCY_VALUES:\n if freq in metadata['table'].lower():\n metadata['frequency'] = freq\n break\n if 'frequency' not in metadata:\n # set a blank frequency if one hasn't been found\n metadata['frequency'] = ''\n\n return metadata", "def file_format(x):\n return FILE_EXT_FORMAT_MAP.get(genomic_file_ext(x))", "def get_original_file_name(cleanup_event):\n original_name = None\n if not cleanup_event:\n return\n try:\n original_name = cleanup_event.event_outcome_detail.split(\";\")[0]\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n cleanup_event.event_outcome_detail,\n )\n return original_name", "def get_file_formats(self, api_spec: dict, user: Dict[str, Any] = None) -> dict:\n def get_dict(file_fmts: dict) -> dict:\n final_fmt = {}\n for fmt in file_fmts:\n final_fmt[fmt[\"name\"]] = {\n \"title\": fmt.get(\"title\", None),\n \"gis_data_types\": fmt[\"gis_data_types\"],\n \"parameters\": fmt.get(\"parameters\", {})\n }\n return final_fmt\n\n try:\n file_formats = api_spec[\"info\"][\"file_formats\"]\n\n return {\n \"status\": \"success\",\n \"code\": 200,\n \"data\": {\n \"output\": get_dict(file_formats[\"output\"]),\n \"input\": get_dict(file_formats[\"input\"]),\n },\n }\n except Exception as exp:\n return ServiceException(CapabilitiesService.name, 500, self._get_user_id(user), str(exp)).to_dict()", "def get_file_format(self):\n # if self.save_image_or_figure == IF_MOVIE:\n # return self.movie_format.value\n return self.file_format.value", "def get_file_format(file):\n flag = None\n with open(file) as f:\n for line in f.readlines():\n MAT, MF, MT = read_control(line)[:3]\n if MF == 1 and MT == 451:\n i = 0\n C, i = read_cont([line], i)\n flag = C.N1\n break\n if flag is None:\n ftype = None\n elif flag == -11 or flag == -12:\n ftype = \"errorr\"\n elif flag == -1:\n ftype = \"gendf\"\n else:\n if C.L1 == 2:\n ftype = \"pendf\"\n else:\n ftype = \"endf6\"\n return ftype", "def test_mediatype_io_format_references(self):\n ns_json, type_json = get_cwl_file_format(CONTENT_TYPE_APP_JSON)\n namespaces = dict(list(ns_json.items()))\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"inputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"formats\": [\n {\n \"mimeType\": 
CONTENT_TYPE_APP_JSON,\n \"default\": True,\n }\n ]\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"formats\": [\n {\n \"mediaType\": CONTENT_TYPE_APP_JSON,\n \"default\": True,\n }\n ]\n },\n ],\n \"outputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"formats\": [{\"mediaType\": CONTENT_TYPE_APP_JSON}],\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"formats\": [{\"mediaType\": CONTENT_TYPE_APP_JSON}],\n },\n ],\n },\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\n \"unit\": {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n ],\n \"outputs\": [\n {\n \"id\": \"wps_format_mimeType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n {\n \"id\": \"wps_format_mediaType\",\n \"type\": \"File\",\n \"format\": type_json,\n },\n ],\n \"$namespaces\": namespaces\n }\n }]\n }\n desc, _ = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n assert proc[\"inputs\"][0][\"id\"] == \"wps_format_mimeType\"\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"inputs\"][1][\"id\"] == \"wps_format_mediaType\"\n assert proc[\"inputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"outputs\"][0][\"id\"] == \"wps_format_mimeType\"\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert proc[\"outputs\"][1][\"id\"] == \"wps_format_mediaType\"\n assert proc[\"outputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n\n desc = self.describe_process(self._testMethodName, describe_schema=\"OGC\")\n assert desc[\"inputs\"][\"wps_format_mimeType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"inputs\"][\"wps_format_mediaType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"outputs\"][\"wps_format_mimeType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON\n assert desc[\"outputs\"][\"wps_format_mediaType\"][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_APP_JSON", "def get_filename(target, mode, file_type):\n data = CyBootloaderMapParser.get_json(CY_BOOTLOADER_MAP)\n for json_target in data:\n if json_target.lower().strip() in target.lower().strip():\n for json_mode in data[json_target]:\n if mode == json_mode:\n return data[json_target][json_mode][file_type]\n return None", "def run_format_on_file(input: str, file_type: str, from_version: str, **kwargs) -> \\\n Tuple[List[str], List[str], List[str]]:\n schema_path = os.path.normpath(\n os.path.join(__file__, \"..\", \"..\", \"common\", SCHEMAS_PATH, '{}.yml'.format(file_type)))\n if file_type not in ('integration', 'script') and 'update_docker' in kwargs:\n # non code formatters don't support update_docker param. 
remove it\n del kwargs['update_docker']\n UpdateObject = FILE_TYPE_AND_LINKED_CLASS[file_type](input=input, path=schema_path,\n from_version=from_version,\n **kwargs)\n format_res, validate_res = UpdateObject.format_file() # type: ignore\n return logger(input, format_res, validate_res)", "def validate_file_desc(self):\n if 'name' not in self.file_desc.keys() or 'format' not in self.file_desc.keys():\n raise AirflowException('file_desc does not have required keys: name, format')\n elif self.file_desc['format'].lower() not in ['csv', 'parquet']:\n raise AirflowException('file_desc have incorrect format type: csv, parquet')\n else:\n return {\"name\": self.file_desc['name'], \"format\": self.file_desc['format']}", "def format_file_event(event):\n event_dict = {\n \"premis:eventIdentifier\": event.event_id,\n \"event_name\": event.event_type,\n \"prov:softwareAgent\": event.event_detail,\n \"premis:outcome\": event.event_outcome,\n \"event_outcome_detail\": event.event_outcome_detail,\n }\n return event_dict", "def get_validation_file_name(self):\n name = self.test_name + \" (T\" + str(self.test_index) + \"_P\" + str(self.parameters_common_index) + \".\" + \\\n str(self.parameters_fs_index) + \".\" + \\\n str(self.parameters_helper_index) + \".\" + \\\n str(self.parameters_incremental_index)\n\n if self.replay_source is not None:\n name = name + \"_\"+ self.replay_source\n\n if self.helper_decoders_one_class:\n name = name + \"_1\"\n\n name = name + \")\"\n\n return name", "def autodetect_format(file_data):\n\n # The first header line.\n for line in file_data:\n if line != []:\n break\n\n # Sparky format.\n if line[0] == 'Assignment':\n return 'sparky'\n\n # NMRView format.\n if line == ['label', 'dataset', 'sw', 'sf']:\n return 'nmrview'\n\n # NMRPipe SeriesTab.\n if line[0] == 'REMARK' and line[1] == 'SeriesTab':\n return 'seriestab'\n\n # XEasy format.\n if line == ['No.', 'Color', 'w1', 'w2', 'ass.', 'in', 'w1', 'ass.', 'in', 'w2', 'Volume', 'Vol.', 'Err.', 'Method', 'Comment']:\n return 'xeasy'\n\n # Assume a generic format.\n return 'generic'", "def _dump_spec_filename_additional_info(\n libspec_manager, spec_filename, is_builtin, obtain_mutex=True\n):\n try:\n if not libspec_manager.is_copy:\n libspec_manager.schedule_conversion_to_markdown(spec_filename)\n except:\n log.exception(\"Error converting %s to markdown.\", spec_filename)\n import json\n\n source_to_mtime = _create_additional_info(\n libspec_manager, spec_filename, is_builtin, obtain_mutex=obtain_mutex\n )\n additional_info_filename = _get_additional_info_filename(spec_filename)\n with open(additional_info_filename, \"w\") as stream:\n json.dump(source_to_mtime, stream, indent=2, sort_keys=True)", "def identify_file(self, file):", "def get_file_validation_event(file_events):\n file_validation_event = file_events.filter(event_type=\"validation\").first()\n if file_validation_event:\n return file_validation_event", "def map_file_data(file_obj, file_events):\n file_as_dict = {\n \"premis:originalName\": file_obj.currentlocation,\n \"original_name\": escape(file_obj.originallocation),\n # needs investigation\n \"sanitized_file_name\": get_sanitized_file_name(\n get_file_name_cleanup(file_events)\n ),\n \"prov:generatedAtTime\": file_obj.modificationtime.strftime(\n \"%Y-%m-%dT%H:%M:%SZ\"\n ),\n \"premis:fixity\": {\n \"checksum_type\": convert_to_premis_hash_function(file_obj.checksumtype),\n \"Checksum\": file_obj.checksum,\n },\n \"premis:identifier\": file_obj.uuid,\n \"premis:size\": file_obj.size,\n \"file_name\": 
file_obj.label,\n # not sure if this is the file name or if we should stick with\n \"dct:FileFormat\": map_file_format_info(\n get_file_format_event(file_events), get_file_validation_event(file_events)\n ),\n \"file_validation\": map_file_validation_info(\n get_file_validation_event(file_events)\n ),\n \"file_normalization\": map_file_normalization_info(\n get_file_normalization_event(file_events)\n ),\n \"events\": list_file_events(file_events),\n }\n return file_as_dict", "def parse_filename(filename, filename_format=\"ALL\"):\n\n # parse filename\n basename = os.path.basename(filename)\n\n # disable parsing if filename_format is None\n if filename_format is None:\n return {\"filename\": filename}\n\n # try all filename formats for special value ALL\n if filename_format == \"ALL\":\n for parser in filename_format_parser.values():\n try:\n info = parser(basename)\n except ValueError:\n info = {}\n continue\n else:\n break\n elif filename_format in filename_format_parser:\n parser = filename_format_parser[filename_format]\n info = parser(basename)\n else:\n raise KeyError(\"unknown filename_format={}\".format(filename_format))\n\n\n # define nuclide tuple\n info[\"filename\"] = filename\n if (\"Z\" in info) and (\"N\" in info):\n info[\"nuclide\"] = (info[\"Z\"],info[\"N\"])\n\n return info", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def register_filename_format(format_name,parser):\n if format_name == \"ALL\":\n raise ValueError(\"filename format code ALL is reserved\")\n\n filename_format_parser[format_name] = parser", "def get_validation_file_path(self):\n validation_file_name = self.get_validation_file_name()\n if self.helper_decoders_one_class:\n validation_file_name = validation_file_name + \"_1\"\n\n return self.base_folder_path + \"/outputs/\" + validation_file_name + \".txt\"", "def _get_datafile_name(self, field_name, saveformat, timestep):\n # These formats produce a new file each time\n counted_formats = ('xml', 'xml.gz')\n\n metadata = {}\n\n # Make filename, with or without save count in name\n if saveformat in counted_formats:\n filename = \"%s%d.%s\" % (field_name, timestep, saveformat)\n # If we have a new filename each time, store the name in metadata\n #metadata = [('filename', filename)]\n metadata['filename'] = filename\n elif saveformat == \"shelve\":\n filename = \"%s.%s\" % (field_name, \"db\")\n else:\n filename = \"%s.%s\" % (field_name, saveformat)\n if saveformat == 'hdf5':\n metadata['dataset'] = field_name+str(timestep)\n\n savedir = self.get_savedir(field_name)\n fullname = os.path.join(savedir, filename)\n return fullname, metadata", "def validate_single_file(self, **kwargs):\n if self.file_type not in self._format_registry:\n valid_result_cls = example_filetype_format.ValidationResults(\n errors=\"Your filename is incorrect! 
Please change your filename before you run the validator or specify --filetype if you are running the validator locally\",\n warnings=\"\",\n )\n else:\n mykwargs = {}\n for required_parameter in self._validate_kwargs:\n assert required_parameter in kwargs.keys(), (\n \"%s not in parameter list\" % required_parameter\n )\n mykwargs[required_parameter] = kwargs[required_parameter]\n mykwargs[\"project_id\"] = self._project.id\n\n validator_cls = self._format_registry[self.file_type]\n validator = validator_cls(\n syn=self._synapse_client,\n center=self.center,\n genie_config=self.genie_config,\n ancillary_files=self.ancillary_files,\n )\n filepathlist = [entity.path for entity in self.entitylist]\n valid_result_cls = validator.validate(filePathList=filepathlist, **mykwargs)\n\n # Complete error message\n message = valid_result_cls.collect_errors_and_warnings()\n return (valid_result_cls, message)", "def get_for(self, file=None, format=''):\n if isinstance(file, DirectoryStream):\n # directory 'stream'\n return (self._names['dir'],)\n if format:\n try:\n converter = (self._names[format],)\n except KeyError:\n raise ValueError(\n f'Format specifier `{format}` not recognised'\n )\n else:\n converter = self.identify(file)\n if not converter:\n if not file or file.mode == 'w' or maybe_text(file):\n format = self._default_text\n else:\n format = self._default_binary\n if file and format:\n if Path(file.name).suffix:\n level = logging.WARNING\n else:\n level = logging.DEBUG\n logging.log(\n level,\n f'Could not infer format from filename `{file.name}`. '\n f'Falling back to default `{format}` format'\n )\n try:\n converter = (self._names[format],)\n except KeyError:\n pass\n return converter" ]
[ "0.7225926", "0.66014904", "0.62202483", "0.6051946", "0.58100253", "0.56802005", "0.56800455", "0.5560679", "0.54779977", "0.5443689", "0.5421021", "0.53728986", "0.53687453", "0.53450334", "0.5339682", "0.5317839", "0.5283682", "0.52812934", "0.5249191", "0.52461636", "0.5227635", "0.52252704", "0.51956195", "0.51919794", "0.51919794", "0.51816136", "0.517165", "0.5168324", "0.51580065", "0.51527846" ]
0.824023
0
uses the name cleanup_event to get the first part of the event_outcome as original file name
def get_original_file_name(cleanup_event):
    original_name = None
    if not cleanup_event:
        return
    try:
        original_name = cleanup_event.event_outcome_detail.split(";")[0]
    except IndexError:
        logger.info(
            "name and version of the file format tool %s could not be"
            "determined. Check if it is well formed",
            cleanup_event.event_outcome_detail,
        )
    return original_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_name_cleanup(file_events):\n cleanup_event = file_events.filter(event_type=\"name cleanup\").first()\n if cleanup_event:\n return cleanup_event", "def get_sanitized_file_name(cleanup_event):\n sanitized_name = None\n if not cleanup_event:\n return\n try:\n sanitized_name = cleanup_event.event_outcome_detail.split(\";\")[1]\n except IndexError:\n logger.info(\n \"name and version of the virus check tool %s could not be\"\n \"determined. Check if it is well formed\",\n cleanup_event.event_outcome_detail,\n )\n return sanitized_name", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def _get_newfilename(oldname):\n # example filename: 20160929_M1_05057144.015_I_CrabNebula-W0.40+215_mergecols.csv\n # removes subrun (bc files are merged to runlevel) and prepends eventfiltered\n dirname = path.dirname(oldname)\n newdirname = path.join(dirname, FILTERDIR)\n oldbasename = path.basename(oldname)\n if \"GA_\" in oldbasename:\n if (\"_Y_\" in oldbasename) or (\"_I_\" in oldbasename):\n newbasename = FILTERPREFIX + oldbasename\n elif (\"_Y_\" in oldbasename) or (\"_I_\" in oldbasename):\n splitname = oldbasename.split(\"_\")\n splitname[2] = splitname[2].split(\".\")[0]\n newbasename = FILTERPREFIX + \"_\".join(splitname)\n newfilename = path.join(newdirname, newbasename)\n return newfilename", "def get_original_basename(self, xaf):\n tag_name = self.__get_original_basename_tag_name()\n return xaf.tags.get(tag_name, b\"unknown\").decode(\"utf8\")", "def preprocess_file(self, filename):\n rawfilename = ''\n for command in [self.mplayer_command, \n self.ffmpeg_command]:\n while True:\n rawfilename = self.random_string()\n if not os.path.exists(rawfilename):\n break\n \n if 0 != subprocess.call(\n command.format(self.SRATE, filename, rawfilename), \n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT,\n shell=True):\n os.remove(rawfilename)\n rawfilename = None\n continue\n \n break # file is successfully converted\n return rawfilename", "def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name", "def adjust_event_name(event_name):\n pos=find_first_digit(event_name)\n return event_name[pos:]", "def extract_file_name_from_source_full_path(source_full_path):\n destination_file_name = os.path.basename(source_full_path)\n return destination_file_name", "def extract_file_name(self, input_file):\n self.file_name_with_ext, self.file_name = extract_file_name(input_file)", "def test_create_final_name(self):\n \n date = \"111111\"\n fcid = \"A11A22BCXX\"\n sample_name = \"P101_150B_index5\"\n \n test_names = 
[(\"1_{}_{}_1_nophix_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"1_{}_{}_1_nophix_1_fastq.txt\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq\".format(date,fcid,sample_name)),\n (\"1_{}_{}_1_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_NoIndex_L001_R2_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_2.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq..gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),\n (\"{}_CGATGT_L001_R1_001.fastq\".format(sample_name),\n \"1_{}_{}_{}_1.fastq\".format(date,fcid,sample_name))]\n \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))\n \n # Try without the _index part of file name\n sample_name_noindex = \"P101_150\"\n test_names = [(\"1_{}_{}_1_nophix_1_fastq.txt.gz\".format(date,fcid),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name_noindex)),\n (\"{}_CGATGT_L001_R1_001.fastq.gz\".format(sample_name_noindex),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name_noindex)),\n (\"{}_NoIndex_L001_R2_001.fastq.gz\".format(sample_name_noindex),\n \"1_{}_{}_{}_2.fastq.gz\".format(date,fcid,sample_name_noindex))]\n \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name_noindex)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))\n \n # Try some illegal file names and assert that they raise exceptions\n test_names = [\"1_{}_{}_1_nophix_1_fastq.gz\".format(date,fcid),\n \"a_{}_{}_1_nophix_1_fastq.txt\".format(date,fcid),\n \"{}_CGATRGT_L1_R1_001.fastq.gz\".format(sample_name)]\n for test_name in test_names:\n with self.assertRaises(ValueError):\n create_final_name(test_name,date,fcid,sample_name)\n \n # Try a file with undetermined reads\n sample_name = \"lane1\"\n test_names = [(\"{}_Undetermined_L001_R1_001.fastq.gz\".format(sample_name),\n \"1_{}_{}_{}_1.fastq.gz\".format(date,fcid,sample_name)),] \n for test_fname, exp_result in test_names:\n obs_result = create_final_name(test_fname,date,fcid,sample_name)\n self.assertEqual(obs_result,\n exp_result,\n \"Did not get expected final name ({:s}) for file name {:s}\".format(exp_result,test_fname))", "def get_file_normalization_event(file_events):\n file_normalization_event = file_events.filter(event_type=\"normalization\").first()\n if file_normalization_event:\n return file_normalization_event", "def get_file_inter_name(self):\n\t\tf = tempfile.NamedTemporaryFile(encoding='utf-8',mode='r',delete=False)\n\t\tf.close()\n\t\treturn f.name", "def get_output_raw_name(journal_file_name, output_type='txt'):\n dot_pos = journal_file_name.rfind('.')\n if dot_pos != -1:\n output_file_name = journal_file_name[0: dot_pos]\n else:\n output_file_name = journal_file_name\n num_of_output = 1\n if output_type == 'txt':\n while True:\n output_file = '%s_%d.txt'%(output_file_name,num_of_output)\n if not os.path.exists(output_file):\n break\n else:\n num_of_output += 1\n else:\n output_file = '%s.%s'%(output_file_name,output_type)\n return output_file", "def _getfilename(self):\n pass", "def 
fix_filename(self):\n if not self.remove_path:\n return\n self.filename = re.sub(\".+\\/\", \".../\", self.filename)", "def process(self, event):\n # the file will be processed there\n print event.src_path, event.event_type\n\n if os.path.isfile(\"/Users/filename.zip\") == True:\n os.remove(\"/Users/filename.zip\")\n print (\"existing file is removed \")\n shutil.make_archive(\"directory\", \"zip\", \"/Users/directory/\")\n print (\"delete existing zip file and created a new zip file\")\n else:\n print (\"There is no zip file at the moment\")\n shutil.make_archive(\"directory\",\"zip\", \"/Users/directory\")\n print (\" A new zip file is created now \")", "def cleanup(self):\n self.__log('Resetting value for output_filename, making way for another go.')\n self.output_filename = None", "def _safe_file_name(self):\n FMT_STR = \"%s - %s - %s (%d) - %s%s\"\n return cleanse_filename(FMT_STR % (self.track,\n self.artist.replace(\"/\", \"\\\\\"),\n self.album.replace(\"/\", \"\\\\\"),\n self.year,\n self.title.replace(\"/\", \"\\\\\"),\n os.path.splitext(self.file_name)[1]))", "def test_get_original_file_name_without_duplication_marker(self):\n test_file_name = \"uploaded_file_name\"\n expected_file_name = \"uploaded_file_name\"\n cfs = CustomFileStorage()\n self.assertEqual(cfs.get_original_file_name(test_file_name), expected_file_name)", "def output_filename(self, prefix, suffix):\n filename = \"%s%s%s\" % (prefix, _ExecutionWrapper._file_index, suffix)\n _ExecutionWrapper._file_index += 1\n return filename", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def _rename_events_func(cfg, raw, subject, session) -> None:\n if not cfg.rename_events:\n return\n\n # Check if the user requested to rename events that don't exist.\n # We don't want this to go unnoticed.\n event_names_set = set(raw.annotations.description)\n rename_events_set = set(cfg.rename_events.keys())\n events_not_in_raw = rename_events_set - event_names_set\n if events_not_in_raw:\n msg = (f'You requested to rename the following events, but '\n f'they are not present in the BIDS input data:\\n'\n f'{\", \".join(sorted(list(events_not_in_raw)))}')\n if on_rename_missing_events == 'warn':\n logger.warning(msg)\n else:\n raise ValueError(msg)\n\n # Do the actual event renaming.\n msg = 'Renaming events …'\n logger.info(**gen_log_kwargs(message=msg, subject=subject,\n session=session))\n descriptions = list(raw.annotations.description)\n for old_event_name, new_event_name in cfg.rename_events.items():\n msg = f'… {old_event_name} -> {new_event_name}'\n logger.info(**gen_log_kwargs(message=msg,\n subject=subject, session=session))\n for idx, description in enumerate(descriptions.copy()):\n if description == old_event_name:\n descriptions[idx] = new_event_name\n\n descriptions = np.asarray(descriptions, dtype=str)\n raw.annotations.description = descriptions", "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)", "def processTempLog(file_name):", "def _event_sort_key(cls, event):\n if \"test_name\" in event:\n return event[\"test_name\"]\n else:\n return event.get(\"test_filename\", None)", "def 
correct_filename(self, img_name, categ):\n path = self._path\n\n # Change wrong characters in filename\n wrong_char = [char for char in img_name if char in [\" \", \"(\", \")\", \"é\", \"©\"]]\n if len(wrong_char) > 0:\n\n new_img_name = img_name\n for char in [\" \", \"(\", \")\", \"©\"]:\n new_img_name = new_img_name.replace(char, \"\")\n new_img_name = new_img_name.replace(\"é\", \"e\")\n\n os.rename(join(path, categ, img_name), join(path, categ, new_img_name))\n img_name = new_img_name\n\n return img_name", "def get_oldname(fname):\n assert isinstance(fname, str), f\"fname is not a string, aborting. fname: {fname}\"\n dirname = path.dirname(fname)\n oldbasename = path.basename(fname)\n if \"GA_\" in oldbasename:\n return fname\n elif (\"_Y_\" in oldbasename) or (\"_I_\" in oldbasename):\n split = oldbasename.split(\"_\")\n split[2] = split[2].split(\".\")[0]\n newbasename = \"_\".join(split)\n return path.join(dirname, newbasename)\n elif \"_S_\" in fname:\n return fname\n else:\n raise NotImplementedError(f\"filetype not supported. fname: {fname}\")", "def _clean_filename(name):\n return re.sub(\"[^\\\\w .]\", \"\", name)", "def fix_filename(s):\n t = str(s).translate(TRANS_FILE)\n if t.count('.') > 1:\n for i in range(t.count('.') - 1):\n idot = t.find('.')\n t = \"%s_%s\" % (t[:idot], t[idot+1:])\n return t" ]
[ "0.7763713", "0.7252744", "0.61244434", "0.60504305", "0.5674285", "0.5595301", "0.5591282", "0.55478466", "0.5542216", "0.5515983", "0.5455788", "0.5410433", "0.5405495", "0.53931946", "0.5390997", "0.5367019", "0.53666896", "0.53636366", "0.5339203", "0.53355193", "0.5313564", "0.5309737", "0.53094625", "0.5292848", "0.5289421", "0.52887964", "0.527776", "0.52660805", "0.5264568", "0.5263429" ]
0.7813742
0
uses the name cleanup_event to get the second part of the event_outcome_detail as the sanitized file name
def get_sanitized_file_name(cleanup_event): sanitized_name = None if not cleanup_event: return try: sanitized_name = cleanup_event.event_outcome_detail.split(";")[1] except IndexError: logger.info( "name and version of the virus check tool %s could not be" "determined. Check if it is well formed", cleanup_event.event_outcome_detail, ) return sanitized_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_file_name_cleanup(file_events):\n cleanup_event = file_events.filter(event_type=\"name cleanup\").first()\n if cleanup_event:\n return cleanup_event", "def get_original_file_name(cleanup_event):\n original_name = None\n if not cleanup_event:\n return\n try:\n original_name = cleanup_event.event_outcome_detail.split(\";\")[0]\n except IndexError:\n logger.info(\n \"name and version of the file format tool %s could not be\"\n \"determined. Check if it is well formed\",\n cleanup_event.event_outcome_detail,\n )\n return original_name", "def _clean_filename(name):\n return re.sub(\"[^\\\\w .]\", \"\", name)", "def _safe_file_name(self):\n FMT_STR = \"%s - %s - %s (%d) - %s%s\"\n return cleanse_filename(FMT_STR % (self.track,\n self.artist.replace(\"/\", \"\\\\\"),\n self.album.replace(\"/\", \"\\\\\"),\n self.year,\n self.title.replace(\"/\", \"\\\\\"),\n os.path.splitext(self.file_name)[1]))", "def handle_file_name(self):\r\n self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]\r\n result_name = self.tmp_name + '_result_'\r\n log_name = self.tmp_name + '_log.csv'\r\n \r\n self.result_file_name = os.path.join(self.save_path , result_name) \r\n self.log_file_name = os.path.join(self.log_path , log_name)", "def _get_newfilename(oldname):\n # example filename: 20160929_M1_05057144.015_I_CrabNebula-W0.40+215_mergecols.csv\n # removes subrun (bc files are merged to runlevel) and prepends eventfiltered\n dirname = path.dirname(oldname)\n newdirname = path.join(dirname, FILTERDIR)\n oldbasename = path.basename(oldname)\n if \"GA_\" in oldbasename:\n if (\"_Y_\" in oldbasename) or (\"_I_\" in oldbasename):\n newbasename = FILTERPREFIX + oldbasename\n elif (\"_Y_\" in oldbasename) or (\"_I_\" in oldbasename):\n splitname = oldbasename.split(\"_\")\n splitname[2] = splitname[2].split(\".\")[0]\n newbasename = FILTERPREFIX + \"_\".join(splitname)\n newfilename = path.join(newdirname, newbasename)\n return newfilename", "def clean_episode_title(filename):\n new_str = filename.replace('_', ' ').replace('-', ' ')\n return re.sub(r'\\s+', ' ', new_str).strip()", "def __clean_filename(filename):\n return \"{}.pdf\".format(filename.split(\"_compress_\", 1)[0])", "def preprocess_file(self, filename):\n rawfilename = ''\n for command in [self.mplayer_command, \n self.ffmpeg_command]:\n while True:\n rawfilename = self.random_string()\n if not os.path.exists(rawfilename):\n break\n \n if 0 != subprocess.call(\n command.format(self.SRATE, filename, rawfilename), \n stdout=open(os.devnull, 'w'),\n stderr=subprocess.STDOUT,\n shell=True):\n os.remove(rawfilename)\n rawfilename = None\n continue\n \n break # file is successfully converted\n return rawfilename", "def test_sanitized_filename(self):\n value = \"/absolute/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"\n\n value = \"../relative/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"", "def _remove_accents(unicode_filename):\n # noinspection PyBroadException\n try:\n unicode_filename = unicode_filename.replace(\" \", \"_\")\n cleaned_filename = unicodedata.normalize('NFKD', unicode_filename).encode('ASCII', 'ignore').decode('ASCII')\n\n cleaned_filename = re.sub(r'[^\\w\\s-]', '', cleaned_filename.strip().lower())\n cleaned_filename = re.sub(r'[-\\s]+', '-', cleaned_filename)\n\n return cleaned_filename\n except:\n traceback.print_exc()\n return unicode_filename", "def adjust_event_name(event_name):\n pos=find_first_digit(event_name)\n return 
event_name[pos:]", "def clean_filename(s):\n # strip paren portions which contain trailing time length (...)\n s = re.sub(\"\\([^\\(]*$\", \"\", s)\n s = s.strip().replace(':', '-').replace(' ', '_')\n valid_chars = \"-_.()%s%s\" % (string.ascii_letters, string.digits)\n return ''.join(c for c in s if c in valid_chars)", "def getSafeFilename(untrustedFilename: unicode) -> unicode:\n ...", "def fix_filename(self):\n if not self.remove_path:\n return\n self.filename = re.sub(\".+\\/\", \".../\", self.filename)", "def cleanFilename(fname):\n return re.sub(\"_$\", \"\", re.sub(\"[ _\\n\\t/()*,&:;@.]+\", \"_\", fname))", "def correct_filename(self, img_name, categ):\n path = self._path\n\n # Change wrong characters in filename\n wrong_char = [char for char in img_name if char in [\" \", \"(\", \")\", \"é\", \"©\"]]\n if len(wrong_char) > 0:\n\n new_img_name = img_name\n for char in [\" \", \"(\", \")\", \"©\"]:\n new_img_name = new_img_name.replace(char, \"\")\n new_img_name = new_img_name.replace(\"é\", \"e\")\n\n os.rename(join(path, categ, img_name), join(path, categ, new_img_name))\n img_name = new_img_name\n\n return img_name", "def audit_filename(self):\n\n for commit in self.repository.commits.values():\n for filename in commit.files_changed:\n if commit.files_changed[ filename ][\"change\"] not in [\"A\",\"R\",\"C\"]:\n continue\n for restriction in self.filename_limits:\n if re.search(restriction, filename):\n self.__log_failure(commit.sha1, \"Invalid filename: \" + filename)", "def fix_filename(s):\n t = s.translate(BAD_FILETABLE)\n if t.count('.') > 1:\n for i in range(t.count('.') - 1):\n idot = t.find('.')\n t = \"%s_%s\" % (t[:idot], t[idot+1:])\n return t", "def get_file_normalization_event(file_events):\n file_normalization_event = file_events.filter(event_type=\"normalization\").first()\n if file_normalization_event:\n return file_normalization_event", "def filter_filename(self, fname):\r\n return os.path.basename(fname)", "def getInputFileBasenameNoSuffix():\n\n inputFileBasename = getInputFileBasename()\n basenameRemovedSuffix = removeSuffix(inputFileBasename)\n return basenameRemovedSuffix", "def _clean_name(self, name):\n # Useful for windows' paths\n return os.path.normpath(name).replace(\"\\\\\", \"/\")", "def fix_filename(s):\n t = str(s).translate(TRANS_FILE)\n if t.count('.') > 1:\n for i in range(t.count('.') - 1):\n idot = t.find('.')\n t = \"%s_%s\" % (t[:idot], t[idot+1:])\n return t", "def get_original_basename(self, xaf):\n tag_name = self.__get_original_basename_tag_name()\n return xaf.tags.get(tag_name, b\"unknown\").decode(\"utf8\")", "def removeFilenameValidate(call, args=(), kwargs={}, nodeClass='Write'):", "def create_final_name(fname, date, fc_id, sample_name):\n \n # Split the file name according to CASAVA convention\n m = re.match(r'(\\S+?)_(?:[ACGTN\\-]+|NoIndex|Undetermined)_L0*(\\d+)_R(\\d)_\\d+\\.fastq(.*)', fname)\n if m is not None:\n lane = m.group(2)\n read = m.group(3)\n ext = m.group(4)\n else:\n # Split the file name according to bcbb convention\n m = re.match(r'(\\d+)_(\\d+)_([^_]+)_(\\d+)_(?:nophix_)?(\\d+)_fastq.txt(.*)', fname)\n if m is None:\n raise ValueError(\"Could not parse file name {:s} correctly!\".format(fname))\n lane = m.group(1)\n read = m.group(5)\n ext = m.group(6)\n \n dest_file_name = \"{:s}.fastq{:s}\".format(\"_\".join([lane,\n date,\n fc_id,\n sample_name,\n read]),\n ext.replace('..','.'))\n return dest_file_name", "def cleanup(name):\n cleaned_name = name.rstrip(\".\")\n return cleaned_name", "def 
clean_filename(file):\r\n\r\n return file.split('.')[0]", "def extract_file_name_from_source_full_path(source_full_path):\n destination_file_name = os.path.basename(source_full_path)\n return destination_file_name" ]
[ "0.779782", "0.738654", "0.58494496", "0.57917094", "0.57624686", "0.5737705", "0.5598119", "0.55943274", "0.55774605", "0.5506891", "0.548149", "0.5477749", "0.54703015", "0.54598963", "0.54485", "0.54481256", "0.5405872", "0.5405509", "0.5393709", "0.53931856", "0.5384876", "0.5383908", "0.53815377", "0.5376499", "0.53730345", "0.5371719", "0.5358957", "0.5353638", "0.5317261", "0.53127825" ]
0.7791005
1
get all virus_check_events which failed and return the event information and the file_id of the files which failed the virus_check
def get_failed_virus_checks(file_events): virus_check_events = get_virusscan_events(file_events) if not virus_check_events: return for event in virus_check_events: if event.event_outcome != "Pass": try: failed_file = { "premis:identifier": event.file_uuid.uuid, "premis:outcome": event.event_outcome, "prov:softwareAgent": event.event_detail.split(";")[0], "premis:version": event.event_detail.split(";")[1], } except IndexError: logger.info( "name and version of the virus check tool %s could not be" "determined. Check if it is well formed", event.event_outcome_detail, ) continue if failed_file: return failed_file
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_passed_virus_checks(file_events):\n virus_check_events = get_virusscan_events(file_events)\n if not virus_check_events:\n return\n for event in virus_check_events:\n if event.event_outcome == \"Pass\":\n try:\n passed_event = {\n \"premis:outcome\": event.event_outcome,\n \"prov:softwareAgent\": event.event_detail.split(\";\")[0],\n \"premis:version\": event.event_detail.split(\";\")[1],\n }\n except IndexError:\n logger.info(\n \"name and version of the virus check tool %s could not be\"\n \"determined. Check if it is well formed\",\n event.event_outcome_detail,\n )\n continue\n if passed_event:\n return passed_event", "def get_virusscan_events(file_events):\n virusscan_events = file_events.filter(event_type=\"virus check\")\n if virusscan_events:\n return virusscan_events", "def report_results(results: dict):\n # Loop thru our results, compare to our upload and return the verdict\n for result in results:\n for item in Analyzer.files:\n if result[\"sha256\"] == item[2]:\n if \"no specific threat\" in result[\"verdict\"]:\n # File is clean\n logger.info(\"Verdict for %s: %s\", item[1], result[\"verdict\"])\n else:\n # Mitigation would trigger from here\n logger.warning(\"Verdict for %s: %s\", item[1], result[\"verdict\"])", "def _get_fsevent_image_files(self):\r\n # Print the header columns to the output file\r\n Output.print_columns(self.l_all_fsevents)\r\n \r\n scan_path_spec = None\r\n scanner = source_scanner.SourceScanner()\r\n scan_context = source_scanner.SourceScannerContext()\r\n scan_context.OpenSourcePath(self.meta['source'])\r\n\r\n scanner.Scan(\r\n scan_context,\r\n scan_path_spec=scan_path_spec\r\n )\r\n\r\n for file_system_path_spec, file_system_scan_node in scan_context._file_system_scan_nodes.items():\r\n t_files = 0\r\n self.all_files_count = 0\r\n self.error_file_count = 0\r\n self.all_records_count = 0\r\n self.parsed_file_count = 0\r\n \r\n try:\r\n location = file_system_path_spec.parent.location\r\n except:\r\n location = file_system_path_spec.location\r\n \r\n print(\" Processing Volume {}.\\n\".format(location))\r\n\r\n fs_event_path_spec = path_spec_factory.Factory.NewPathSpec(\r\n file_system_path_spec.type_indicator,\r\n parent=file_system_path_spec.parent,\r\n location=\"/.fseventsd\"\r\n )\r\n\r\n file_entry = resolver.Resolver.OpenFileEntry(\r\n fs_event_path_spec\r\n )\r\n \r\n if file_entry != None:\r\n\r\n t_files = file_entry.number_of_sub_file_entries\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n t_files -= 1\r\n\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n counter = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for sub_file_entry in file_entry.sub_file_entries:\r\n if sub_file_entry.name == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n counter += 1\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(counter, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Name of source fsevent file\r\n self.src_filename = sub_file_entry.name\r\n self.src_fullpath = self.meta['source'] + \": \" + location + sub_file_entry.path_spec.location\r\n\r\n stat_object = sub_file_entry.GetStat()\r\n\r\n # UTC mod date of source fsevent file\r\n self.m_time = datetime.datetime.fromtimestamp(\r\n 
stat_object.mtime).strftime(\r\n '%Y-%m-%d %H:%M:%S') + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(self.src_filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n file_object = sub_file_entry.GetFileObject()\r\n\r\n compressedFile = io.StringIO.BytesIO()\r\n compressedFile.write(file_object.read())\r\n compressedFile.seek(0)\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(fileobj=compressedFile, mode='rb')\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_filename)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)\r\n \r\n else:\r\n print('Unable to process volume or no fsevent files found')\r\n continue\r\n\r\n print('\\n\\n All Files Attempted: {}\\n All Parsed Files: {}\\n Files '\r\n 'with Errors: {}\\n All Records Parsed: {}'.format(\r\n self.all_files_count,\r\n self.parsed_file_count,\r\n self.error_file_count,\r\n self.all_records_count))", "def extract_failed_tests_info():\n global g_failed_testnames\n global g_failed_test_paths\n\n if os.path.isfile(g_temp_filename):\n console_file = open(g_temp_filename,'r') # open temp file that stored jenkins job console output\n try:\n for each_line in console_file: # go through each line of console output to extract build ID, data/time ...\n each_line.strip()\n print(each_line)\n if (\"Test Result\" in each_line) and (\"failure\" in each_line): # the next few lines will contain failed tests\n temp = each_line.split(\"testReport\")\n if (\"Test Result\" in temp[1]) and (\"failure\" in temp[1]): # grab number of failed tests\n try:\n tempCount = int(temp[1].split(\"</a>\")[1].split(\" \")[0].split(\"(\")[1])\n\n if isinstance(tempCount, int) and tempCount > 0: # temp[1], temp[2],... should contain failed tests\n for findex in range(2,len(temp)):\n tempMess = temp[findex].split(\">\")\n g_failed_test_paths.append(tempMess[0].strip('\"'))\n ftestname = tempMess[1].strip(\"</a\")\n nameLen = len(ftestname)\n true_testname = ftestname[8:nameLen] if 'r_suite.' 
in ftestname else ftestname\n g_failed_testnames.append(true_testname)\n break # done. Only one spot contains failed test info.\n except:\n break # file probably does not have failures captured.\n finally:\n console_file.close()", "def identify_failed_genes(self, genes):\r\n failed_genes = []\r\n for gene in genes:\r\n if genes[gene]['failed'] == 'Y':\r\n failed_genes.append({'GeneSymbol;Accession': gene, 'percentage30': mean(genes[gene]['percentage30'])})\r\n return failed_genes", "def get_test_failures(self, test_dir):\n\n cwd = os.getcwd()\n\n outdir = self.testTopDir + self.suiteName + \"-tests/\"\n\n os.chdir(outdir + test_dir)\n\n failed = []\n\n for test in os.listdir(\".\"):\n if not os.path.isdir(test): continue\n\n # the status files are in the web dir\n status_file = f\"{self.webTopDir}/{test_dir}/{test}.status\"\n with open(status_file) as sf:\n for line in sf:\n if line.find(\"FAILED\") >= 0 or line.find(\"CRASHED\") >= 0:\n failed.append(test)\n\n os.chdir(cwd)\n return failed", "def verify_files():\n toverify = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status='unverified'\")\n\n numverified = 0\n for file in toverify:\n\n actualsize = pipeline_utils.get_file_size(file['filename'])\n\n expectedsize = file['size']\n\n last_attempt_id = jobtracker.query(\"SELECT id \" \\\n \"FROM download_attempts \" \\\n \"WHERE file_id=%s \" \\\n \"ORDER BY id DESC \" % file['id'], \\\n fetchone=True)\n \n queries = []\n if actualsize == expectedsize:\n dlm_cout.outs(\"Download of %s is complete and verified.\" % \\\n os.path.split(file['filename'])[-1])\n # Everything checks out!\n queries.append(\"UPDATE files \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='downloaded', \" \\\n \"details='Download is complete and verified', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n\n\t # Mark the beam as downloaded in the main database\n\t #mark_beam_downloaded(os.path.split(file['filename'])[-1]))\n\n numverified += 1\n else:\n dlm_cout.outs(\"Verification of %s failed. \\n\" \\\n \"\\tActual size (%d bytes) != Expected size (%d bytes)\" % \\\n (os.path.split(file['filename'])[-1], actualsize, expectedsize))\n \n # Boo... 
verification failed.\n queries.append(\"UPDATE files \" \\\n \"SET status='failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), file['id']))\n queries.append(\"UPDATE download_attempts \" \\\n \"SET status='verification_failed', \" \\\n \"details='Downloaded file failed verification', \" \\\n \"updated_at='%s'\" \\\n \"WHERE id=%d\" % \\\n (jobtracker.nowstr(), last_attempt_id))\n jobtracker.query(queries)\n return numverified", "def checkMissingFiles(inDir, jsonUrl):\n\n file_list = []\n remote = False\n try:\n file_list = os.listdir(inDir)\n except OSError:\n remote = True\n file_list = eos_ls(inDir)\n\n if file_list == []:\n print \"Directory does not exist or is empty!\"\n return []\n\n total_expected = 0\n missing_files = []\n suspicious_files = []\n recovered_files = []\n\n print 'Found %d files in input directory' % len(file_list)\n print 20*'-'\n\n jsonFile = open(jsonUrl,'r')\n procList = json.load(jsonFile,encoding = 'utf-8').items()\n\n for proc in procList:\n for desc in proc[1]:\n data = desc['data']\n isData = desc.get('isdata',False)\n mctruthmode = desc.get('mctruthmode')\n for d in data:\n dtag = d.get('dtag','')\n split = d.get('split',1)\n\n for segment in range(0,split):\n eventsFile = dtag\n if split > 1:\n eventsFile = dtag + '_' + str(segment)\n if mctruthmode:\n eventsFile += '_filt%d' % mctruthmode\n filename = eventsFile+'.root'\n\n sys.stdout.write('... checking %s' % filename)\n sys.stdout.flush()\n\n total_expected += 1\n\n if not filename in file_list:\n missing_files.append(filename)\n sys.stdout.write('\\033[91m MISSING \\033[0m \\n')\n # sys.stdout.flush()\n continue\n\n rootFileUrl = os.path.join(inDir, filename)\n if remote:\n rootFileUrl = ('root://eoscms//eos/cms/store' +\n rootFileUrl.split('store',1)[1])\n\n recovered, suspicious = False, False\n tfile = TFile.Open(rootFileUrl)\n try:\n if tfile.TestBit(TFile.kRecovered):\n recovered = True\n if tfile.IsZombie():\n suspicious = True\n tfile.Close()\n except AttributeError, ReferenceError:\n suspicious = True\n\n if recovered:\n sys.stdout.write('\\033[93m Recovered \\033[0m \\n')\n recovered_files.append(filename)\n if suspicious:\n sys.stdout.write('\\033[93m Failed to open \\033[0m \\n')\n suspicious_files.append(filename)\n\n sys.stdout.write('\\033[92m OK \\033[0m \\n')\n sys.stdout.flush()\n\n print 20*'-'\n if len(missing_files):\n print \"Missing the following files:\"\n print \"(%d out of %d expected)\"% (len(missing_files), total_expected)\n for filename in missing_files:\n print filename\n else:\n print \"NO MISSING FILES!\"\n print 20*'-'\n if len(suspicious_files):\n print \"Failed to open the following files:\"\n print \"(%d out of %d expected)\"% (len(suspicious_files), total_expected)\n for filename in suspicious_files:\n print filename\n print 20*'-'\n if len(recovered_files):\n print \"The following files are recovered:\"\n print \"(%d out of %d expected)\"% (len(recovered_files), total_expected)\n for filename in recovered_files:\n print filename\n print 20*'-'\n\n return missing_files+suspicious_files+recovered_files", "def test_get_counturingErr(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, COUNTURING_ERR_IDX, COUNTURING_ERR_SUB)\n param_obj = self.__dict__[servo_type]._get_counturingErr()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in counturingErr...\"\n continue\n 
self.data_match(acs_par, par)\n except NackEx:\n continue", "def find_legacy_log_files(xcresult_path):\n\n result = []\n\n for root, dirs, files in os.walk(xcresult_path, topdown=True):\n for file in files:\n if file.endswith('.txt'):\n file = os.path.join(root, file)\n result.append(file)\n\n # Sort the files by creation time.\n result.sort(key=lambda f: os.stat(f).st_ctime)\n return result", "def find_legacy_log_files(xcresult_path):\n\n result = []\n\n for root, dirs, files in os.walk(xcresult_path, topdown=True):\n for file in files:\n if file.endswith('.txt'):\n file = os.path.join(root, file)\n result.append(file)\n\n # Sort the files by creation time.\n result.sort(key=lambda f: os.stat(f).st_ctime)\n return result", "def _get_error_info(self, result, log):\n _ = '/opt/l2deploy/logs/OverallStatusReport'\n f = self._remote_cmd(\"grep '{}' {}\".format(_, log))\n f = f.get('output').split('[')[-1][:-1]\n\n for n in [result] if self.nodes == 1 else result['nodes']:\n if 'failed' == n.get('status').lower():\n # 10th line in the detail report contains the required info\n c = \"grep -A 10 {} {}\".format(n.get('server'), f)\n c += \" | grep OS_Install_Status_Detail\"\n e = self._remote_cmd(c).get('output').split(':', 1)[1]\n LOG.info(\"{} failed due to {}\".format(n['server'], e))", "def fileCheck(path):\n print('[+] Checking For File patching ')\n for url in check_files:\n try:\n #File Rereive\n data = query(url)\n file_name = url.split(\"/\")[-1]\n _,tmp_file = tempfile.mkstemp(prefix=\"exitmap_%s_\" % file_name)\n with open(tmp_file, \"wb\") as fd:\n fd.write(data)\n for i in check_files_patch_results:\n if str(i.url) == str(url):\n if str(i.filehash) != str(sha512_file(tmp_file)):\n print('[+] ALERT File Patch FOUND !')\n print(' | exitnode : %s' % str(i.exitnode) )\n print(' |_________> url: %s' % str(i.url) )\n print(' |_________> filePath: %s' % str(i.filepath) )\n print(' |_________> fileHash: %s' % str(i.filehash) )\n #check_files_patch_results.append( File_Check_Results(url, file_name, tmp_file, path, sha512_file(tmp_file)) )\n else :\n print('[+] File (%s) seems to be ok' % i.url)\n break\n\n except Exception as err:\n print('[-] Error ! %s' % err)\n traceback.print_exc()\n pass\n return time.time()", "def testFailedFiles(self):\n assert self.dummySubscription.failedFiles() == \\\n self.dummySubscription.failed.getFiles(type='set'), \\\n 'Method failedFiles does not return failed files Set'", "def events_verify(self, strict=False):\n # As the event files are written by another process there is the\n # small chance that it will not have comeplted when we run this\n # check. 
To accomodate this we will have two passes at processing\n # the events, after which we will consider it a failure.\n remaining = self._expected_events[:]\n unneeded = []\n for _ in range(1, 30):\n # Find all of the JSON event files in the event directory and\n # process them one by one looking for matches\n jsonfiles = [p for p in [os.path.join(self._event_dir, f)\n for f in os.listdir(self._event_dir)]\n if os.path.isfile(p)]\n for jsonfile in jsonfiles:\n try:\n with open(jsonfile) as fh:\n data = json.load(fh)\n cursize = len(remaining)\n remaining[:] = [e for e in remaining\n if not partial_match(e, data)]\n if len(remaining) == cursize:\n unneeded.append(data)\n os.unlink(jsonfile)\n except ValueError as e:\n continue\n except IOError as e:\n continue\n # Check if we have matched all of the remaining events, if so then\n # we have nothiing more to do...\n if not remaining:\n break\n # There are events remaining so we will insert a small delay and\n # after which we will try again.\n sleep(0.1)\n\n # If there are any events remaining then we have failed to match\n if remaining:\n self.fail('Unmatched events:\\n{}\\n\\nUnused events:\\n{}'.format(\n '\\n'.join(str(e) for e in remaining),\n '\\n'.join(str(e) for e in unneeded)))\n\n # Some tests call verify multiple times. To avoid any false\n # results we reset again here just to make sure\n self.events_reset()", "def send_infected_file_list_to_admin():\n admins = User.objects.get_superusers()\n admin_emails = []\n for admin in admins:\n admin_emails.append(admin.email)\n c = {\n 'infected_files': list_of_infected_files,\n }\n send_html_email('Virus Detected',\n 'api3/sysadmin/virus_detected_files.html', c, None, admin_emails)", "def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")", "def check(self):\n curtime = time.time()\n failed_watchdogs = []\n for watchdog, filename, st_info in self._list_gen(self.watchdog_path):\n if curtime < st_info.st_mtime:\n # If the watchdog is set in the future, then service is still\n # alive\n pass\n\n else:\n # Otherwise, this is a watchdog failure\n _LOGGER.warning('Watchdog failed: %r.', watchdog)\n failed_watchdogs.append((filename, watchdog, st_info.st_mtime))\n\n # Retreive the payload of failed watchdogs\n if failed_watchdogs:\n failures = []\n for filename, name, failed_at in failed_watchdogs:\n try:\n with open(filename, 'r') as f:\n data = f.read()\n except OSError:\n _LOGGER.exception('Reading watchdog data')\n data = ''\n failures.append((name, failed_at, data))\n\n return failures\n\n else:\n return []", "def get_errors(self, path: str,\n is_ancillary: bool = False,\n is_system: bool = False,\n is_removed: bool = False) -> List[str]:\n u_file = self.__api.files.get(path, is_ancillary=is_ancillary,\n is_system=is_system,\n is_removed=is_removed)\n return [e.message for e in u_file.errors]", "def _most_recent_event_files(self):\n regex = re.compile(r\"\\w*events.log\")\n return [\n os.path.join(self._output_dir, x)\n for x in os.listdir(self._output_dir)\n if regex.search(x)\n ]", "def _filter_return_errors_list(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n if url in 
entry[\"request\"][\"url\"] and temp not in matches and entry[\"response\"][\"status\"] >= 400:\r\n print \"\\nRequest failed w/ \" + str(entry[\"response\"][\"status\"]) + \" error:\\n\" + entry[\"request\"][\"url\"]\r\n if entry[\"response\"][\"content\"].get(\"text\"):\r\n print \"RESPONSE: \" + str(entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore'))\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append([temp,entry[\"response\"][\"content\"].get(\"text\",\"\")])\r\n return matches", "def checkFiles( self, files, filetype ):\n\t\t\n\t\tstati = {}\n\t\tif files:\n\t\t\n\t\t\tcommand = self._default_command_file_details( filetype )\n\t\t\tcommand += \" \".join( files )\n\t\t\t\n\t\t\tif self.debug:\n\t\t\t\tprint command\n\t\t\t\n\t\t\t# <?xml version=\"1.0\" encoding=\"UTF-8\"?><jhove xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns=\"http://hul.harvard.edu/ois/xml/ns/jhove\" xsi:schemaLocation=\"http://hul.harvard.edu/ois/xml/ns/jhove http://hul.harvard.edu/ois/xml/xsd/jhove/1.6/jhove.xsd\" name=\"Jhove\" release=\"1.11\" date=\"2013-09-29\"><date>2013-12-12T11:14:48+00:00</date><repInfo uri=\"/opt/digiverso/goobi/metadata/101/images/carralic22_jpg/axc0015-0.jpg\"><reportingModule release=\"1.7\" date=\"2012-08-12\">TIFF-hul</reportingModule><lastModified>2013-07-22T15:40:59+01:00</lastModified><size>1391446</size><format>TIFF</format><status>Not well-formed</status><messages><message offset=\"0\" severity=\"error\">No TIFF header: ￿￘</message></messages> <mimeType>image/tiff</mimeType></repInfo>\n\t\t\tjhove_returned = commands.getstatusoutput( command )\n\t\t\t\n\t\t\t# There are invalid characters (even for UTF-8) which need to be removed (can you believe they output the TIFF FILEIDs into the XML if it isn't found!)\n\t\t\tdef unicode_filter(char):\n\t\t\t\ttry:\n\t\t\t\t\tunicode( char, encoding='utf-8', errors='strict' )\n\t\t\t\t\treturn char\n\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\treturn ''\n\n\t\t\t\t#test\n\t\t\t\t#content = 'abc\\xFF'\n\t\t\t\t#content = ''.join(map(unicode_filter, content))\n\t\t\t\n\t\t\t\n\t\t\troot = etree.fromstring( ''.join(map(unicode_filter, jhove_returned[1])) )\n\n\t\t\tjhove_infos = root.findall( \".//{http://hul.harvard.edu/ois/xml/ns/jhove}repInfo\" )\n\t\t\t\n\t\t\tfor jhove_info in jhove_infos:\n\t\t\t\n\t\t\t\tfile = jhove_info.get( \"uri\" )\n\t\t\t\t\n\t\t\t\t# SHould always be a status\n\t\t\t\tstatus = jhove_info.find( \"{http://hul.harvard.edu/ois/xml/ns/jhove}status\" ).text\n\t\t\t\t# There may be errors (as messages)\n\t\t\t\tmessage_nodes = jhove_info.findall( \"{http://hul.harvard.edu/ois/xml/ns/jhove}messages/{http://hul.harvard.edu/ois/xml/ns/jhove}message\" )\n\t\t\t\t\n\t\t\t\tmessages = []\n\t\t\t\tfor message in message_nodes:\n\t\t\t\t\tmessages.append( message.text )\n\t\t\t\t\n\t\t\t\tstati[file] = { \"status\" : status, \"ok\" : (status == Jhove.file_valid) }\n\t\t\t\tif messages:\n\t\t\t\t\tstati[file][\"messages\"] = messages\n\t\t\n\t\treturn stati", "def _get_failed_stack_events(stack_name: str, region: str, profile: str = None) -> list:\n logger.debug(f\"getting stack {stack_name} failure events in region {region}\")\n cfn_client = _get_cfn_client(region=region, profile=profile)\n try:\n events = cfn_client.describe_stack_events(StackName=stack_name)\n except Exception as e:\n logger.error(f\"unable to get stack events\")\n logger.error(e)\n raise e\n result = list()\n for event in events['StackEvents']:\n if \"FAILED\" in event['ResourceStatus']:\n 
result.append(event)\n if len(result) == 0:\n # There were no FAILED events. Look for ROLLBACK_IN_PROGRESS\n for event in events['StackEvents']:\n if \"ROLLBACK_IN_PROGRESS\" in event['ResourceStatus']:\n result.append(event)\n logger.debug(f\"failure events {result}\")\n return result", "def pop_scan_result_events(self, event_name):\n results = []\n try:\n events = self.dut.ed.pop_all(event_name)\n for event in events:\n results.append(event[\"data\"][\"Results\"])\n except queue.Empty as error:\n self.log.debug(\"Number of Full scan results %s\", len(results))\n return results", "def fileCheckOriginal():\n\n print('[+] Populating File Hasing for later check')\n for url in check_files:\n try:\n data = query(url)\n file_name = url.split(\"/\")[-1]\n _,tmp_file = tempfile.mkstemp(prefix=\"exitmap_%s_\" % file_name)\n\n with open(tmp_file, \"wb\") as fd:\n fd.write(data)\n print('[+] Saving File \\\"%s\\\".' % tmp_file)\n check_files_patch_results.append( File_Check_Results(url, file_name, tmp_file, \"NO\", sha512_file(tmp_file)) )\n print('[+] First Time we see the file..')\n print(' |_________> exitnode : None' )\n print(' |_________> :url: %s' % str(url) )\n print(' |_________> :filePath: %s' % str(tmp_file))\n print(' |_________> :file Hash: %s' % str(sha512_file(tmp_file)))\n except Exception as err:\n print('[-] Error ! %s' % err)\n traceback.print_exc()\n pass\n return time.time()", "def _get_fsevent_files(self):\r\n # Print the header columns to the output files\r\n Output.print_columns(self.l_all_fsevents)\r\n\r\n # Total number of files in events dir #\r\n t_files = len(os.listdir(self.path))\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n t_files -= 1\r\n self.time_range_src_mod = []\r\n prev_mod_date = \"Unknown\"\r\n prev_last_wd = 0\r\n c_last_wd = 0\r\n\r\n # Uses file mod dates to generate time ranges by default unless\r\n # files are carved or mod dates lost due to exporting\r\n self.use_file_mod_dates = True\r\n\r\n # Run simple test to see if file mod dates\r\n # should be used to generate time ranges\r\n # In some instances fsevent files may not have\r\n # their original mod times preserved on export\r\n # This code will flag true when the same date and hour\r\n # exists for the first file and the last file\r\n # in the provided source fsevents folder\r\n first = os.path.join(self.path, os.listdir(self.path)[0])\r\n last = os.path.join(self.path, os.listdir(self.path)[len(os.listdir(self.path)) - 1])\r\n first = os.path.getmtime(first)\r\n last = os.path.getmtime(last)\r\n first = str(datetime.datetime.utcfromtimestamp(first))[:14]\r\n last = str(datetime.datetime.utcfromtimestamp(last))[:14]\r\n\r\n if first == last:\r\n self.use_file_mod_dates = False\r\n\r\n # Iterate through each file in supplied fsevents dir\r\n for filename in os.listdir(self.path):\r\n if filename == 'fseventsd-uuid':\r\n continue\r\n # Variables\r\n self.all_files_count += 1\r\n\r\n # Call the progress bar which shows parsing stats\r\n progress(self.all_files_count, t_files)\r\n\r\n buf = \"\"\r\n\r\n # Full path to source fsevent file\r\n self.src_fullpath = os.path.join(self.path, filename)\r\n # Name of source fsevent file\r\n self.src_filename = filename\r\n # UTC mod date of source fsevent file\r\n self.m_time = os.path.getmtime(self.src_fullpath)\r\n self.m_time = str(datetime.datetime.utcfromtimestamp((self.m_time))) + \" [UTC]\"\r\n\r\n # Regex to match against source fsevent log filename\r\n regexp = re.compile(r'^.*[\\][0-9a-fA-F]{16}$')\r\n\r\n # Test 
to see if fsevent file name matches naming standard\r\n # if not, assume this is a carved gzip\r\n if len(self.src_filename) == 16 and regexp.search(filename) is not None:\r\n c_last_wd = int(self.src_filename, 16)\r\n self.time_range_src_mod = prev_last_wd, c_last_wd, prev_mod_date, self.m_time\r\n self.is_carved_gzip = False\r\n else:\r\n self.is_carved_gzip = True\r\n\r\n # Attempt to decompress the fsevent archive\r\n try:\r\n with self.skip_gzip_check():\r\n self.files = gzip.GzipFile(self.src_fullpath, \"rb\")\r\n buf = self.files.read()\r\n\r\n except Exception as exp:\r\n # When permission denied is encountered\r\n if \"Permission denied\" in str(exp) and not os.path.isdir(self.src_fullpath):\r\n print('\\nEnsure that you have permissions to read '\r\n 'from {}\\n{}\\n'.format(self.path, str(exp)))\r\n sys.exit(0)\r\n # Otherwise write error to log file\r\n else:\r\n self.logfile.write(\r\n \"%s\\tError: Error while decompressing FSEvents file.%s\\n\" % (\r\n self.src_filename,\r\n str(exp)\r\n )\r\n )\r\n self.error_file_count += 1\r\n continue\r\n\r\n # If decompress is success, check for DLS headers in the current file\r\n dls_chk = FSEventHandler.dls_header_search(self, buf, self.src_fullpath)\r\n\r\n # If check for DLS returns false, write information to logfile\r\n if dls_chk is False:\r\n self.logfile.write('%s\\tInfo: DLS Header Check Failed. Unable to find a '\r\n 'DLS header. Unable to parse File.\\n' % (self.src_filename))\r\n # Continue to the next file in the fsevents directory\r\n self.error_file_count += 1\r\n continue\r\n\r\n self.parsed_file_count += 1\r\n\r\n # Accounts for fsevent files that get flushed to disk\r\n # at the same time. Usually the result of a shutdown\r\n # or unmount\r\n if not self.is_carved_gzip and self.use_file_mod_dates:\r\n prev_mod_date = self.m_time\r\n prev_last_wd = int(self.src_filename, 16)\r\n\r\n # If DLSs were found, pass the decompressed file to be parsed\r\n FSEventHandler.parse(self, buf)", "def Errcheck(self) -> list:\n\n myError = []\n\n ErrorList = self.myFieldFox.query(\"SYST:ERR?\").split(',')\n\n Error = ErrorList[0]\n\n if int(Error) == 0:\n\n print (\"+0, No Error!\")\n\n else:\n\n while int(Error)!=0:\n\n print (\"Error #: \" + ErrorList[0])\n\n print (\"Error Description: \" + ErrorList[1])\n\n myError.append(ErrorList[0])\n\n myError.append(ErrorList[1])\n\n ErrorList = self.myFieldFox.query(\"SYST:ERR?\").split(',')\n\n Error = ErrorList[0]\n\n myError = list(myError)\n\n return myError", "def _findChangedFiles(self):\n changedFiles = []\n # calculate and update checksums always for ALL files\n for observedFile in self.observedFiles:\n if os.path.isfile(observedFile.filePath):\n currentChecksum = checksumFile(observedFile.filePath)\n else:\n currentChecksum = None\n # different values with None value checking\n if ((observedFile.lastChecksum is None\n and currentChecksum is not None)\n or observedFile.lastChecksum != currentChecksum):\n changedFiles.append(observedFile) # notify change\n observedFile.lastChecksum = currentChecksum # update checksum\n\n return changedFiles", "def _look_for_new_crash_logs(self, run_results, start_time):\n crashed_processes = []\n test_to_crash_failure = {}\n\n # reset static variables for Failure type classes\n test_failures.AbstractTestResultType.port = self._port\n test_failures.AbstractTestResultType.result_directory = self._results_directory\n test_failures.AbstractTestResultType.filesystem = self._filesystem\n\n for test, result in run_results.unexpected_results_by_name.items():\n 
if result.type != ResultType.Crash:\n continue\n for failure in result.failures:\n if (not isinstance(failure, test_failures.FailureCrash)\n or failure.has_log):\n continue\n crashed_processes.append(\n [test, failure.process_name, failure.pid])\n test_to_crash_failure[test] = failure\n\n sample_files = self._port.look_for_new_samples(crashed_processes,\n start_time) or {}\n for test, sample_file in sample_files.items():\n test_failures.AbstractTestResultType.test_name = test\n test_result = run_results.unexpected_results_by_name[test]\n artifact_relative_path = self._port.output_filename(\n test, test_failures.FILENAME_SUFFIX_SAMPLE, '.txt')\n artifacts_sub_dir = test_result.artifacts.ArtifactsSubDirectory()\n artifact_abspath = self._filesystem.join(self._results_directory,\n artifacts_sub_dir,\n artifact_relative_path)\n self._filesystem.maybe_make_directory(\n self._filesystem.dirname(artifact_abspath))\n self._filesystem.copyfile(sample_file, artifact_abspath)\n test_result.artifacts.AddArtifact(\n 'sample_file',\n self._filesystem.join(artifacts_sub_dir,\n artifact_relative_path))\n\n new_crash_logs = self._port.look_for_new_crash_logs(\n crashed_processes, start_time) or {}\n for test, (crash_log, crash_site) in new_crash_logs.items():\n test_failures.AbstractTestResultType.test_name = test\n failure.crash_log = crash_log\n failure.has_log = self._port.output_contains_sanitizer_messages(\n failure.crash_log)\n test_result = run_results.unexpected_results_by_name[test]\n test_result.crash_site = crash_site\n test_to_crash_failure[test].create_artifacts(\n test_result.artifacts, force_overwrite=True)" ]
[ "0.7037156", "0.6773092", "0.58044827", "0.56824124", "0.55182916", "0.5506397", "0.54874086", "0.5348977", "0.53265375", "0.5307501", "0.5303932", "0.5303932", "0.5273958", "0.5255364", "0.52527535", "0.5216588", "0.5209719", "0.5194858", "0.5168988", "0.51503533", "0.51414", "0.5128655", "0.5112975", "0.5104497", "0.5094184", "0.5090747", "0.5055675", "0.50522786", "0.5052105", "0.5047579" ]
0.79492867
0
get and return all virus_check_events which passed
def get_passed_virus_checks(file_events): virus_check_events = get_virusscan_events(file_events) if not virus_check_events: return for event in virus_check_events: if event.event_outcome == "Pass": try: passed_event = { "premis:outcome": event.event_outcome, "prov:softwareAgent": event.event_detail.split(";")[0], "premis:version": event.event_detail.split(";")[1], } except IndexError: logger.info( "name and version of the virus check tool %s could not be" "determined. Check if it is well formed", event.event_outcome_detail, ) continue if passed_event: return passed_event
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_virusscan_events(file_events):\n virusscan_events = file_events.filter(event_type=\"virus check\")\n if virusscan_events:\n return virusscan_events", "def get_events(self):\n ret = []\n while True:\n event = self.event.get_event(wait=1, full=True)\n if event is None:\n return ret\n ret.append(event)", "def get_events(self):\n self._events = []\n self.ircobj.process_once(timeout=0.1)\n return self._events", "def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]", "def available_events(self):\n return self.target.read_value(self.available_events_file).splitlines()", "def extract_all_events(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n result += [evt]\n return result", "def events(self) -> Sequence[Tuple[str, Sequence[Union[np.ndarray, bytes]]]]:\n return self._env.events()", "def get_events(self):\n events = []\n for device in self:\n events.extend(self[device].get_events())\n return events", "async def events(self) -> Iterable[Event]:", "def eventList(filterStr=\"\"):\n\tfilterStr = filterStr.upper()\n\tevents = [i for i in dir(cv2) if 'EVENT' in i and filterStr in i]\n\treturn events", "def get_event_list(self):\n pass", "def get_all(self):\r\n return list(pecan.request.storage_conn.get_event_types())", "def get_events(self):\n return self.events", "def get_events(self):\n\n events = []\n\n for watched_file in self._watched_files:\n for line in watched_file:\n self._do_rule_processing(line, events)\n\n return events", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n #Returne the capture events\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def get_events(self):\n raise NotImplementedError", "def list_events():\n return [\n snow,\n mosquito,\n sun_heat,\n orage,\n overflowing,\n gathering,\n trampling,\n pollution,\n southern_wind,\n northern_wind,\n fog,\n sun\n ]", "def events(self):\n return self.search(comp_class=Event)", "def get_events(self):\n disallowed = [ident(self.add_event.__func__), ident(ident)]\n self.frames = None\n\n return [item for item in self.events if item[2] not in disallowed]", "def get_failed_virus_checks(file_events):\n virus_check_events = get_virusscan_events(file_events)\n if not virus_check_events:\n return\n for event in virus_check_events:\n if event.event_outcome != \"Pass\":\n try:\n failed_file = {\n \"premis:identifier\": event.file_uuid.uuid,\n \"premis:outcome\": event.event_outcome,\n \"prov:softwareAgent\": event.event_detail.split(\";\")[0],\n \"premis:version\": event.event_detail.split(\";\")[1],\n }\n except IndexError:\n logger.info(\n \"name and version of the virus check tool %s could not be\"\n \"determined. 
Check if it is well formed\",\n event.event_outcome_detail,\n )\n continue\n if failed_file:\n return failed_file", "def event_list(self):\n return self._event_list", "def get_sample_events(self): \n return self.sample_events[:]", "def events(self):\r\n return ev.Events(self)", "def events(self):\r\n return ev.Events(self)", "def build_events(self) -> list:\n raise NotImplementedError()", "def events(self):\n return self._events", "def get_result(self):\n check_result_list = []\n for check in self.monitoring_checks:\n try:\n result = check.execute()\n except ForbiddenCheckError as err:\n logger.error(err)\n else:\n check_result_list.append(result)\n if check_result_list:\n return check_result_list\n else:\n logger.error(\"Empty check result list\")", "def all_events(cls) -> \"IFilterPattern\":\n return jsii.sinvoke(cls, \"allEvents\", [])" ]
[ "0.7644736", "0.66425484", "0.6552635", "0.6500239", "0.6301848", "0.62276447", "0.6164383", "0.6035749", "0.60337937", "0.5993986", "0.596327", "0.5959023", "0.59463876", "0.59462994", "0.5944266", "0.5944266", "0.5924022", "0.5924022", "0.5885887", "0.5857111", "0.58524805", "0.58503664", "0.5840661", "0.5835173", "0.5825334", "0.5825334", "0.57856035", "0.57807744", "0.5769048", "0.57569337" ]
0.6770501
1
maps the file_object data and returns the file_obj as a dict
def map_file_data(file_obj, file_events): file_as_dict = { "premis:originalName": file_obj.currentlocation, "original_name": escape(file_obj.originallocation), # needs investigation "sanitized_file_name": get_sanitized_file_name( get_file_name_cleanup(file_events) ), "prov:generatedAtTime": file_obj.modificationtime.strftime( "%Y-%m-%dT%H:%M:%SZ" ), "premis:fixity": { "checksum_type": convert_to_premis_hash_function(file_obj.checksumtype), "Checksum": file_obj.checksum, }, "premis:identifier": file_obj.uuid, "premis:size": file_obj.size, "file_name": file_obj.label, # not sure if this is the file name or if we should stick with "dct:FileFormat": map_file_format_info( get_file_format_event(file_events), get_file_validation_event(file_events) ), "file_validation": map_file_validation_info( get_file_validation_event(file_events) ), "file_normalization": map_file_normalization_info( get_file_normalization_event(file_events) ), "events": list_file_events(file_events), } return file_as_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None", "def _file_dict(self, fn_):\n if not os.path.isfile(fn_):\n err = \"The referenced file, {} is not available.\".format(fn_)\n sys.stderr.write(err + \"\\n\")\n sys.exit(42)\n with salt.utils.files.fopen(fn_, \"r\") as fp_:\n data = fp_.read()\n return {fn_: data}", "def file_to_dictionary():\n\n return;", "def file_parser(file):\n\n # Copy of the file instance to save it\n new_file = file\n dict_file = {}\n # We find the right function depending on the extension of the file\n meta_func = find_meta_function(find_extension(file))\n if callable(meta_func):\n dict_file = meta_func(new_file)\n return dict_file", "def to_dict(self):\n return {\n \"file_name\": self.file_name,\n \"full_file_path\": self.full_file_path,\n \"created_timestamp_millis\": self.created_timestamp_millis,\n \"size_in_bytes\": self.size_in_bytes\n }", "def pre_lookup(self, file):\n return {}", "def make_file_dict():\r\n fileDict = {'pageUrls': [],\r\n 'pageFileNames': [],\r\n 'pageIds': [],\r\n 'fileUrls': [],\r\n 'fileIds': [],\r\n 'fileNames': [],\r\n 'cssUrls': [],\r\n 'cssFileNames': [],\r\n 'imgUrls': [],\r\n 'imgFileNames': []}\r\n return fileDict", "def to_dict(self):\n return {'file_name': self.file_name,\n 'raw_file_name': self.raw_file_name,\n 'metadata': self.metadata,\n 'pre_file_name': self.pre_file_name,\n }", "def populate_file_dict(epObject, uc, fileDict):\r\n fileDict = get_pages(epObject, fileDict)\r\n for url in fileDict['pageUrls']:\r\n soup = make_soup(url)\r\n fileDict = get_embedded_object(soup, fileDict, uc)\r\n fileDict = get_css(soup, fileDict)\r\n fileDict = get_img(soup, fileDict, uc)\r\n return fileDict", "def to_dict(self, include_related=False):\n return self.file.to_dict(include_related)", "def metadata_obj(filelike):\n\tif test_hachoir_extension(filelike.name):\n\t\tmetadata = metadata_for_filelike(filelike)\n\t\tif metadata:\n\t\t\tdata = dict([\n\t\t\t\t(data.key, data.values[0].value)\n\t\t\t\tfor data in metadata\n\t\t\t\tif data.values\n\t\t\t\t])\n\t\telse:\n\t\t\tdata=None\n\telif test_3D_extension(filelike.name):# 3D not in the extention \n\t\tdata = 
{'mime_type':'model'}\n\telse:\n\t\tdata = None\n\t\t\n\treturn data", "def _ReadFileEntries(self, file_object):\n self._file_entries = {}\n\n file_offset = 0\n while file_offset < self._file_size or self._file_size == 0:\n file_entry = self._ReadFileEntry(file_object, file_offset)\n file_offset += file_entry.size\n if file_entry.path == 'TRAILER!!!':\n break\n\n if file_entry.path in self._file_entries:\n # TODO: alert on file entries with duplicate paths?\n continue\n\n self._file_entries[file_entry.path] = file_entry\n\n self.size = file_offset", "def parse(self):\n results = {}\n\n # get the signature info via the codesign utility\n args = [\"codesign\",\"-dvvvv\", self.file_name]\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, error_output = proc.communicate()\n if proc.returncode: #error, probably file not signed\n results[\"signature\"] = error_output\n else:\n results[\"signature\"] = output\n\n #get the file object\n file_object = open(self.file_name, 'rb')\n\n\n #Use the macho library to parse out some structures\n pFile = MachO(self.file_name)\n\n #if this is a fat file, it will have multiple Mach-O objects inside it\n results[\"FAT_header\"] = self.parseFATHeader(file_object, pFile)\n\n #parse all the Mach-O headers\n i = 1\n for h in pFile.headers:\n results[\"MachO_header\" + str(i)] = self.parseMachOHeader(h, file_object)\n i +=1\n\n #close the file\n file_object.close()\n\n #return the dict of results\n return results", "def parse_mapping_file_to_dict(*args, **kwargs):\r\n mapping_data, header, comments = parse_mapping_file(*args, **kwargs)\r\n return mapping_file_to_dict(mapping_data, header), comments", "def read_file_object(self, file_obj, file_format='FASTA'):\n if file_format.upper() == 'FASTA':\n read_func = read_fasta\n# elif (file_format.upper() == 'NEXUS'):\n# read_func = read_nexus\n# elif (file_format.upper() == 'PHYLIP'):\n# read_func = read_phylip\n else:\n raise NotImplementedError(\n \"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq.upper()\n return self", "def filemap(self) -> GQAFilemap:\n return self._filemap", "async def load(self, file: IO) -> dict:", "def get_file_dict(qcowf):\n qcow_dict = {} #create dictionary of file info\n\n nb_ss = int(get_info(qcowf, 60, 4, '>I')) #number of snapshots\n ss_offset = get_info(qcowf, 64, 8, '>Q') #snapshots offset\n filename = str(os.path.abspath(qcowf.name))\n size = str(os.stat(qcowf.name).st_size)\n virtual_size = get_info(qcowf, 24, 8, '>Q')\n backing_file = get_bf_name(\n qcowf, get_info(qcowf, 8, 8, '>Q'), get_info(qcowf, 16, 4, '>I'))\n\n if OS == 'win32':\n filename = filename.replace('\\\\', '/') #correct view of path to files\n\n qcow_dict['filename'] = filename\n qcow_dict['size'] = size\n qcow_dict['virtual_size'] = virtual_size\n\n if backing_file != -1:\n qcow_dict['backing_file'] = backing_file\n\n if nb_ss != 0: #if there are any snapshots in file\n qcow_dict['snapshots'] = []\n keyorder_ss = [\"id\", \"name\", \"virtual_size\"]\n for _ in range(1, nb_ss+1): #go through all snapshots\n ss_dict, ss_offset = get_shapshot_info(qcowf, ss_offset)\n qcow_dict['snapshots'].append(ss_dict)\n\n return qcow_dict", "def mapping_file_to_dict(mapping_data, header):\r\n map_dict = {}\r\n for i in range(len(mapping_data)):\r\n sam = mapping_data[i]\r\n map_dict[sam[0]] = {}\r\n for j in range(len(header)):\r\n if j == 0:\r\n continue # sampleID field\r\n map_dict[sam[0]][header[j]] = sam[j]\r\n return 
map_dict", "def file_upload_to_obj():\n\n temp = []\n file_content = pd.read_excel(INPUT_FILE_NAME).fillna(0).to_dict('records')\n sorted_content = sorted(file_content, key=itemgetter(\n 'filedbentity.file_extension'))\n for item in file_content:\n\n raw_date = item.get('filedbentity.file_date')\n if raw_date:\n temp_date = raw_date.strftime('%Y-%m-%d')\n raw_date = datetime.strptime(temp_date, \"%Y-%m-%d\").date()\n else:\n raw_date = datetime.now().date()\n\n raw_status = item.get('dbentity.status')\n if raw_status == 'Archive':\n raw_status = 'Archived'\n \n obj = {\n 'path': item.get('EBS path'),\n 'display_name': item.get('dbentity.display_name'),\n 'status': raw_status,\n 'source': item.get('dbentity.source'),\n 'topic_edam_id': item.get('topic edam_id').upper().replace('TOPIC', 'EDAM').strip(),\n 'data_edam_id': item.get('data edam_id').upper().replace('DATA', 'EDAM').strip(),\n 'format_edam_id': item.get('format edam_id').upper().replace('FORMAT', 'EDAM').strip(),\n 'file_extension': item.get('filedbentity.file_extension'),\n 'file_date': raw_date,\n 'is_public': (item.get('filedbentity.is_public') == '1'),\n 'is_in_spell': item.get('filedbentity.is_in_spell'),\n 'is_in_browser': (item.get('filedbentity.is_in_browser') == '1'),\n 'readme_name': item.get('readme name'),\n 'description': item.get('filedbentity.description'),\n 'pmids': item.get('pmids (|)'),\n 'keywords': item.get('keywords (|)')\n }\n temp.append(obj)\n\n if len(temp) > 0:\n return temp\n return None", "def Open(self, file_object):", "def create_file_hash_dict(cls, file, file_path):\n\n file_info = {}\n file_info['path'] = file_path\n file_info['hash'] = cls.get_256_hash_from_file(file_path)\n file_info['type'] = 'file'\n file_info['name'] = file\n file_info['perm'] = stat.S_IMODE(os.lstat(file_path).st_mode)\n\n return file_info", "def to_dict(self):\n return dict({\n \"name\": self.name,\n \"file\": self.filename,\n \"inputs\": {name: input_[\"file\"][0] for name, input_ in _iteritems(self.inputs)},\n \"params\": self.params,\n \"expected\": self.expected_outputs\n })", "def map(pointer, objfile=\"\"):\n ei_class, ehdr = get_ehdr(pointer)\n return map_inner(ei_class, ehdr, objfile)", "def read_file_object(self, file_obj, file_format='FASTA'):\n if ( file_format.upper() == 'FASTA' ):\n read_func = read_fasta\n #elif ( file_format.upper() == 'NEXUS' ):\n # read_func = read_nexus\n #elif ( file_format.upper() == 'PHYLIP' ):\n # read_func = read_phylip\n #elif ( file_format.upper() == 'COMPACT3' ):\n # read_func = read_compact3\n else:\n raise NotImplementedError(\"Unknown file format (%s) is not supported\" % file_format)\n for name, seq in read_func(file_obj):\n self[name] = seq", "def get_embedded_object(soup, fileDict, uc):\r\n for a in soup.find_all('a'):\r\n href = str(a['href'])\r\n if href.find('d2lfile') > 0:\r\n epoId = get_epo_id(href)\r\n if epoId not in fileDict['fileIds']:\r\n fileDict['fileIds'].append(epoId)\r\n fileDict['fileUrls'].append(DOMAIN + href)\r\n fileName = eportfolio.get_ep_object_properties(uc, epoId).\\\r\n FileName.strip()\r\n fileDict['fileNames'].append(fileName)\r\n return fileDict", "def run(self):\n return_dict = {}\n for x in self.file_list:\n return_dict[x] = self.parseFile(x)\n\n return return_dict", "def read(self, fileobj):\n raise NotImplementedError", "def _todict(self, matobj, filetype):\n self.filetype = filetype\n self.dict = {}\n self.index = 0\n for strg in matobj._fieldnames:\n self.elem = matobj.__dict__[strg]\n if isinstance(self.elem, 
scipy.io.matlab.mio5_params.mat_struct):\n self.dict[strg] = self._todict(self.elem, self.filetype)\n elif isinstance(self.elem, numpy.ndarray):\n self.parameter_list = []\n self.dict[strg] = self._tolist(self.elem, self.parameter_list)\n if self.filetype == \".mat\":\n if len(self.elem)>0:\n if isinstance(self.elem[0], numpy.ndarray):\n if strg == 'immagini':\n self.dict['arrayx'] = len(self.elem[0])\n self.dict['arrayy'] = len(self.elem)\n if self.parameter_list:\n if strg == 'immagini':\n self.dict['numshots'] = len(self.parameter_list) / (len(self.elem[0]) * len(self.elem))\n elif self.filetype == \".dat\":\n self.parameter_list = []\n self.dict[strg] = self._tolist(self.elem, self.parameter_list)\n if strg == 'roi_points':\n self.dict['arrayx'] = self.elem[2] - self.elem[0]\n self.dict['arrayy'] = self.elem[3] - self.elem[1]\n # if isinstance(self.elem[0], numpy.ndarray):\n # if self.parameter_list and self.dict['arrayx'] and self.dict['arrayy']:\n # self.dict['numshots'] = len(self.parameter_list) / (self.dict['arrayx'] * self.dict['arrayy'])\n\n else:\n print \"you done fucked up\"\n else:\n self.dict[strg] = self.elem\n return self.dict", "def process(self,fileobj_out,fileobj_in):\n pass" ]
[ "0.7471708", "0.6875288", "0.685313", "0.6438057", "0.625583", "0.62365437", "0.62274283", "0.6191611", "0.6160291", "0.6127357", "0.6083918", "0.60666394", "0.60020196", "0.59979707", "0.5992356", "0.59894216", "0.59645075", "0.5962417", "0.5958333", "0.58898216", "0.5880217", "0.5864671", "0.58639145", "0.5858033", "0.58347476", "0.58181334", "0.57979685", "0.5769673", "0.5763582", "0.5721833" ]
0.79688126
0
Find the command using a Pip VCS backend.
def get_vcs_command(vcs_type):
        try:
            return pip_vcs.get_backend(vcs_type)().cmd
        except BadCommand:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_command(vcs_name):\n\n try:\n return [vcs['cmd'] for vcs in get_vcs_settings() if vcs.get('name') == vcs_name][0]\n except IndexError:\n return None", "def get_command(pid):", "def get_command(self, ctx, cmd_name):\n path = \"%s.%s\" % (__name__, cmd_name)\n path = path.replace(\"-\", \"_\")\n try:\n module = importlib.import_module(path)\n return getattr(module, 'cli')\n except ModuleNotFoundError as ex:\n print(ex.name)\n return None", "def find_command(cmd):\n if cmd:\n root = '.'.join([COMMANDS_PACKAGE_NAME] + cmd)\n else:\n root = COMMANDS_PACKAGE_NAME\n try:\n return _get_commands(root)['__module__'].COMMAND\n except KeyError:\n LOGGER.debug('%r not recognized as a TAU command', cmd)\n resolved = _resolve(cmd, cmd, _COMMANDS[SCRIPT_COMMAND])\n LOGGER.debug('Resolved ambiguous command %r to %r', cmd, resolved)\n return find_command(resolved)\n except AttributeError as err:\n raise InternalError(\"'COMMAND' undefined in %r\" % cmd) from err", "def RunDetectCommand(vcs_type, command):\r\n try:\r\n out, returncode = RunShellWithReturnCode(command)\r\n if returncode == 0:\r\n return (vcs_type, out.strip())\r\n except OSError, (errcode, message):\r\n if errcode != errno.ENOENT: # command not found code\r\n raise", "def do_poortego_find(self, arg):\n poortego_find(self.my_interface, arg)", "def get_command(self, ctx, name):\n try:\n if sys.version_info[0] == 2:\n name = name.encode('ascii', 'replace')\n mod = __import__('cli.commands.cmd_' + name, None, None, ['cli'])\n except ImportError:\n exit(1)\n\n return mod.cli", "def vcs_completer(commands, event):\n\n\n cmd_param = event.line.split()\n if event.line.endswith(' '):\n cmd_param.append('')\n\n if cmd_param[0] == 'sudo':\n cmd_param = cmd_param[1:]\n\n if len(cmd_param) == 2 or 'help' in cmd_param:\n return commands.split()\n\n return ip.Completer.file_matches(event.symbol)", "def GuessVCSName(options):\r\n for attribute, value in options.__dict__.iteritems():\r\n if attribute.startswith(\"p4\") and value != None:\r\n return (VCS_PERFORCE, None)\r\n\r\n def RunDetectCommand(vcs_type, command):\r\n \"\"\"Helper to detect VCS by executing command.\r\n\r\n Returns:\r\n A pair (vcs, output) or None. Throws exception on error.\r\n \"\"\"\r\n try:\r\n out, returncode = RunShellWithReturnCode(command)\r\n if returncode == 0:\r\n return (vcs_type, out.strip())\r\n except OSError, (errcode, message):\r\n if errcode != errno.ENOENT: # command not found code\r\n raise\r\n\r\n # Mercurial has a command to get the base directory of a repository\r\n # Try running it, but don't die if we don't have hg installed.\r\n # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.\r\n res = RunDetectCommand(VCS_MERCURIAL, [\"hg\", \"root\"])\r\n if res != None:\r\n return res\r\n\r\n # Subversion from 1.7 has a single centralized .svn folder\r\n # ( see http://subversion.apache.org/docs/release-notes/1.7.html#wc-ng )\r\n # That's why we use 'svn info' instead of checking for .svn dir\r\n res = RunDetectCommand(VCS_SUBVERSION, [\"svn\", \"info\"])\r\n if res != None:\r\n return res\r\n\r\n # Git has a command to test if you're in a git tree.\r\n # Try running it, but don't die if we don't have git installed.\r\n res = RunDetectCommand(VCS_GIT, [\"git\", \"rev-parse\",\r\n \"--is-inside-work-tree\"])\r\n if res != None:\r\n return res\r\n\r\n # detect CVS repos use `cvs status && $? 
== 0` rules\r\n res = RunDetectCommand(VCS_CVS, [\"cvs\", \"status\"])\r\n if res != None:\r\n return res\r\n\r\n return (VCS_UNKNOWN, None)", "def command():\n return _config.command", "def LocalCommand(TestinfraBackend):\n return testinfra.get_backend(\"local://\").get_module(\"Command\")", "def find_command(self, command: str):\n layers = list(self.command_layers)\n while layers and not layers[0].active:\n del layers[0]\n\n if not layers:\n return\n\n last = None\n for layer in layers:\n last = layer\n if match := layer.find_command(command):\n return match\n\n if last:\n return last.cannot_find(command)", "def _find_subcommand(args):\n subcmd = args[1]\n if subcmd in [\n \"cfg\"\n # , 'init',\n ]:\n return subcmd\n else:\n return None", "def verun(cmd):\n run('pew in {0} {1}'.format(package_name(), cmd))", "def test_popen(self):\n self.executor.command(['grep', 'foo']).popen()", "def main(ctx: typer.Context):\n LOG.debug(F\"COVIDAP: executing command: {ctx.invoked_subcommand}\")", "def query_cmdline():", "def get_cppcheck_command(self, version):\n return os.path.expanduser('~/bin/cppcheck-{0}'.format(version))", "def fetch_command(self, global_options, subcommand):\r\n commands = self.get_commands(global_options)\r\n try:\r\n klass = commands[subcommand]\r\n except KeyError:\r\n sys.stderr.write(\"Unknown command: %r\\nType '%s help' for usage.\\nMany commands will only run at project directory, maybe the directory is not right.\\n\" % \\\r\n (subcommand, self.prog_name))\r\n sys.exit(1)\r\n return klass", "def cli() -> None:", "def cli() -> None:", "def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))", "def which(cls, cmd):\n abs_path_cmd = None\n if sys.version_info >= (3, 3):\n abs_path_cmd = shutil.which(cmd)\n else:\n abs_path_cmd = find_executable(cmd)\n return abs_path_cmd", "def get_command(bare, path):\n\n if bare:\n cmd = [\"git\", \"fetch\"]\n return cmd\n\n directories = list_directories(os.path.join(path, git_signature))\n\n if \"svn\" in directories:\n cmd = [\"git\", \"svn\", \"rebase\"]\n else:\n cmd = [\"git\", \"pull\"]\n\n return cmd", "def search(name):\n try:print(f'Searching for {name}...');os.system(f'python -m pip search {name}')\n except Exception as e:print(\"something went wrong\\n{e}\")", "def cli(ctx, version):\n\n if ctx.invoked_subcommand is None:\n if version:\n print(f\"clpipe v{VERSION}\")\n sys.exit(0)\n else:\n ctx = click.get_current_context()\n click.echo(ctx.get_help())\n ctx.exit()", "def cli():", "def cli():", "def cli():", "def cli():" ]
[ "0.60336435", "0.5905804", "0.5887258", "0.57998616", "0.57792455", "0.56713223", "0.563673", "0.55427724", "0.551555", "0.5410901", "0.5404135", "0.54030824", "0.5381162", "0.53678364", "0.5361021", "0.5346396", "0.5342507", "0.5309934", "0.530921", "0.52939373", "0.52939373", "0.52875", "0.527203", "0.5263128", "0.5257051", "0.5253409", "0.5247007", "0.5247007", "0.5247007", "0.5247007" ]
0.67878574
0
When an area in the main canvas is clicked, mouse() returns the click's location in terms of the data dimensions if the click was within the plot region. This location is passed to model by updateTrackClick(), which finds the nearest track/vertex to the click and updates the track data accordingly. Data from the updated track is then passed to the view, which updates the appropriate track in the view and redraws everything. At the end, the selected track is stored in locked_track, which drag() uses to lock to a particular track for a given clickdrag movement.
def click(self, event):
        try:
            x_loc, y_loc = self.appWindow.spec_cv.mouse(event)
            trackNo, updated_track = self.model.updateTrackClick(x_loc, y_loc,\
                self.x_high)
            self.appWindow.spec_cv.updateTrack(trackNo, updated_track)
            self.appWindow.spec_cv.redrawTracks()
            self.locked_track = trackNo
        except TypeError:
            pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drag(self, event):\n if event.button:\n try:\n x_loc, y_loc = self.appWindow.spec_cv.mouse(event)\n print(x_loc, y_loc)\n trackNo, updated_track =\\\n self.model.updateTrackDrag(x_loc, y_loc,\\\n self.locked_track, self.x_high)\n self.appWindow.spec_cv.updateTrack(trackNo, updated_track)\n self.appWindow.spec_cv.redrawTracks()\n except TypeError:\n pass", "def on_mouse_click(self, e):\n if 'Control' in e.modifiers:\n # Get mouse position in NDC.\n box_id, _ = self.canvas.stacked.box_map(e.pos)\n channel_id = np.nonzero(self.channel_y_ranks == box_id)[0]\n # Find the spike and cluster closest to the mouse.\n db = self.data_bounds\n # Get the information about the displayed spikes.\n wt = [(t, s, c, ch) for t, s, c, ch in self._waveform_times if channel_id in ch]\n if not wt:\n return\n # Get the time coordinate of the mouse position.\n mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos)\n mouse_time = Range(NDC, db).apply(mouse_pos)[0][0]\n # Get the closest spike id.\n times, spike_ids, spike_clusters, channel_ids = zip(*wt)\n i = np.argmin(np.abs(np.array(times) - mouse_time))\n # Raise the select_spike event.\n spike_id = spike_ids[i]\n cluster_id = spike_clusters[i]\n emit('select_spike', self, channel_id=channel_id,\n spike_id=spike_id, cluster_id=cluster_id)\n\n if 'Shift' in e.modifiers:\n # Get mouse position in NDC.\n box_id, _ = self.canvas.stacked.box_map(e.pos)\n channel_id = int(np.nonzero(self.channel_y_ranks == box_id)[0][0])\n emit('select_channel', self, channel_id=channel_id, button=e.button)", "def handle_mouse(self, x, y):\n # we are in aperture mode\n if self.aperture_id:\n if self.aperture_id not in self.aperture_model.aperture_models.keys():\n pass\n model = self.aperture_model.aperture_models[self.aperture_id]\n location = model.source.data['location'][0]\n\n if self.mode == 'width':\n width = abs(location - x)\n model.update_values(start=location - width,\n end=location + width)\n elif self.mode == 'left':\n if x < location:\n model.update_values(start=x)\n elif self.mode == 'right':\n if x > location:\n model.update_values(end=x)\n elif self.mode == 'location':\n diff = x - location\n model.update_values(location=x,\n start=model.source.data['start'][0] + diff,\n end=model.source.data['end'][0] + diff)\n\n self.last_x = x\n self.last_y = y\n return False", "def mouseMoveEvent(self, event):\n if self.view_state.tracking == TrackingMode.FREE and event.buttons() == QtCore.Qt.LeftButton:\n # Calculate the change in mouse position.\n new_mouse_pos = np.array([event.x(), event.y()])\n mouse_delta = new_mouse_pos - self.view_state.mouse\n\n # Add this to the view centre.\n self.view_state.centre = self.view_state.centre - mouse_delta * (1 / self.view_state.scale)\n self.view_state.mouse = new_mouse_pos", "def figure_mouse_release(self, event):\n\n if event.button != 1: return None\n try:\n signal_time, signal_cid = self._exclude_selected_region_signal\n except AttributeError:\n return None\n \n xy = self._exclude_selected_region.get_xy()\n \n if event.xdata is None:\n # Out of axis; exclude based on the last known worthwhile position.\n xdata = xy[2, 0]\n else:\n xdata = event.xdata\n\n # If the two mouse events were within some time interval,\n # then we should not add a mask because those signals were probably\n # part of a double-click event.\n if time() - signal_time > DOUBLE_CLICK_INTERVAL \\\n and np.abs(xy[0,0] - xdata) > 0:\n \n # Update the cache with the new mask.\n _ = self._cache[\"input\"].get(\"exclude\", np.array([]))\n _.shape = (-1, 2)\n 
self._cache[\"input\"][\"exclude\"] = np.vstack((\n np.array([xy[0,0], xy[2, 0]]).reshape(-1, 2), _))\n\n # Fit and re-draw the continuum, and its mask.\n self.fit_continuum(clobber=True)\n self.update_continuum_mask(refresh=False)\n self.draw_continuum(refresh=False)\n\n xy[:, 0] = np.nan\n\n self._exclude_selected_region.set_xy(xy)\n self.norm_plot.mpl_disconnect(signal_cid)\n self.norm_plot.draw()\n del self._exclude_selected_region_signal\n return None", "def handle_mouse_press(self, event):", "def handle_mouse(self, x, y):\n pass", "def mouse_click(self,event):\n global drag_sq\n# print \"click at {0} {1}\".format(event.x,event.y)\n# sq = (event.y // sq_size) * 8 + event.x // sq_size\n sq = self.coord_to_sq((event.x, event.y))\n if sq in self.piece_objs:\n drag_sq = sq\n self.canvas.tag_raise(self.piece_objs[sq])\n return", "def mouse_click(self,x,y,button,double_click):\n raise NotImplementedError(\"ERROR: Unimplemented function.\")", "def callback_handle_left_mouse_click(self, event):\n\n if self.variables.active_tool == TOOLS.PAN_TOOL:\n self.variables.pan_anchor_point_xy = event.x, event.y\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:\n self.variables.tmp_anchor_point = event.x, event.y\n elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:\n closest_coord_index = self.find_closest_shape_coord(self.variables.current_shape_id, event.x, event.y)\n self.variables.tmp_closest_coord_index = closest_coord_index\n elif self.variables.active_tool == TOOLS.SELECT_CLOSEST_SHAPE_TOOL:\n closest_shape_id = self.find_closest_shape(event.x, event.y)\n self.variables.current_shape_id = closest_shape_id\n self.highlight_existing_shape(self.variables.current_shape_id)\n else:\n start_x = self.canvasx(event.x)\n start_y = self.canvasy(event.y)\n\n self.variables.current_shape_canvas_anchor_point_xy = (start_x, start_y)\n if self.variables.current_shape_id not in self.variables.shape_ids:\n coords = (start_x, start_y, start_x + 1, start_y + 1)\n if self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:\n self.create_new_line(coords)\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.create_new_line(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:\n self.create_new_arrow(coords)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.create_new_arrow(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:\n self.create_new_rect(coords)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n self.create_new_rect(coords)\n self.variables.actively_drawing_shape = True\n elif self.variables.active_tool == TOOLS.DRAW_ELLIPSE_BY_DRAGGING:\n self.create_new_ellipse(coords)\n elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:\n self.create_new_point((start_x, start_y))\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.create_new_polygon(coords)\n self.variables.actively_drawing_shape = True\n else:\n print(\"no tool selected\")\n else:\n if self.variables.current_shape_id in self.variables.shape_ids:\n vector_object = self.get_vector_object(self.variables.current_shape_id)\n if vector_object.type == SHAPE_TYPES.POINT:\n self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,\n (start_x, start_y))\n elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n 
self.event_click_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.event_click_line(event)\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.event_click_polygon(event)\n elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:\n if self.variables.actively_drawing_shape:\n self.variables.actively_drawing_shape = False\n else:\n self.variables.actively_drawing_shape = True", "def _press(self, event):\n # Check for selection of a tool handle.\n if ((self._selection_completed or 'move_vertex' in self._state)\n and len(self._xys) > 0):\n h_idx, h_dist = self._polygon_handles.closest(event.x, event.y)\n if h_dist < self.grab_range:\n self._active_handle_idx = h_idx\n # Save the vertex positions at the time of the press event (needed to\n # support the 'move_all' state modifier).\n self._xys_at_press = self._xys.copy()", "def onclick(event):\n\t#~ cords = [] #This is an empty list which will store the x and y coordinates of each click on the graph\n\t#It's fine to keep this as a list because we won't be operating on it\n\tglobal ix, iy\n\tix,iy = event.xdata, event.ydata\n\tprint 'x = %.5f, y = %.2e' %(ix,iy) #This will print out the x and y values so you can check that no shifting occured\n\n\tglobal cords\n\tcords.append((ix,iy)) #Stores the x and y click in the array\n\n\treturn", "def mousePressEvent(self, event):\n if event.buttons() == QtCore.Qt.LeftButton:\n self.view_state.mouse = np.array([event.x(), event.y()])", "def callback_handle_right_mouse_click(self, event):\n\n if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:\n self.variables.actively_drawing_shape = False\n elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:\n self.variables.actively_drawing_shape = False", "def onclick(self, click):\n from ..backend.viz_raw import _plot_single_psd\n\n if self.plotType == 'Matrix':\n channel_picked = click.ydata - 1\n elif self.plotType == 'Topomap':\n x, y = click.xdata, click.ydata\n channel_picked = self.psd.channel_index_from_coord(x, y)\n else:\n channel_picked = None\n\n if (channel_picked is not None and click.dblclick):\n channel_picked = floor(channel_picked) + 1\n _plot_single_psd(self, channel_picked)", "def _handleClick(self, event):\n\n\t\t(x_min, x_max, y_min, y_max) = [i for i in self.extent]\n\t\tif event.xdata != None and event.ydata != None:\n\t\t\t(click_x, click_y) = (event.xdata, event.ydata)\n\t\t\tnewWidth = (x_max-x_min)/self.zoom\n\t\t\tnewHeight = (y_max-y_min)/self.zoom\n\n\t\t\t# update self.extent to the new zoomed in extent\n\t\t\tself.extent = [click_x-newWidth/2, click_x+newWidth/2, click_y-newHeight/2, click_y+newHeight/2]\n\t\t\tself.plot()", "def data_mouse():\n\timport matplotlib.pyplot as plt\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, xlim=(-1,2), ylim=(-1,2))\n\tX = np.zeros( (0,2) )\n\tY = np.zeros( (0,) )\n\tcol = ['bs','gx','ro']\n\t\n\tdef on_click(event):\n\t\tX.resize( (X.shape[0]+1,X.shape[1]) )\n\t\tX[-1,:] = [event.xdata,event.ydata]\n\t\tY.resize( (Y.shape[0]+1,) )\n\t\tY[-1] = event.button\n\t\tax.plot( event.xdata, event.ydata, col[event.button-1])\n\t\tfig.canvas.draw()\n\n\tfig.canvas.mpl_connect('button_press_event',on_click)\n inter=plt.isinteractive(); hld=plt.ishold();\n plt.ioff(); plt.hold(True); plt.show();\n if inter: plt.ion();\n if not hld: plt.hold(False);\n\treturn X,Y", "def click_action(event, ax):\n global 
newcoords, oldcoords, count\n\n if count % 2 == 0:\n newcoords.append((event.xdata, event.ydata))\n print('NEW', event.xdata, event.ydata)\n else:\n oldcoords.append((event.xdata, event.ydata))\n print('OLD', event.xdata, event.ydata)\n # update count\n count += 1", "def calibrateMousePress(self, mouse_event):\n\n \"\"\" Get mouse posiiton \"\"\"\n pt = mouse_event.pos()\n\n if mouse_event.button() == Qt.LeftButton:\n self.kinect.last_click[0] = pt.x()\n self.kinect.last_click[1] = pt.y()\n self.kinect.new_click = True\n elif mouse_event.button() == Qt.RightButton:\n self.kinect.last_rclick[0] = pt.x()\n self.kinect.last_rclick[1] = pt.y()\n self.kinect.new_rclick = True", "def get_coords_to_write(fig):\n cid = fig.canvas.mpl_connect(\"button_press_event\", onclick)\n raw_input(\"Start clicking, press any key when finished: \\n\")\n fig.canvas.mpl_disconnect(cid)", "def handle_mouse(self, x, y):\n self.last_x = x\n self.last_y = y\n if self.min_x is not None:\n self.last_x = max(self.last_x, self.min_x)\n if self.max_x is not None:\n self.last_x = min(self.last_x, self.max_x)\n # we are in region mode\n if self.region_id is not None:\n start = self.last_x\n end = self.region_edge\n self.region_model.adjust_region(self.region_id, start, end)\n return False", "def mouseReleaseEvent(self, ev):\n\n # handle the built mouse events first\n\n # panning...\n if self.panning and (ev.button() == Qt.LeftButton):\n # we're done panning\n self.leftBtnClicked = False\n self.setCursor(Qt.OpenHandCursor)\n self.lastPanPoint = QPoint()\n\n # \"auto\" rubber banding...\n elif self.rubberBandKey and self.rubberBanding:\n\n # end the rubber band selection\n rubberBandRect = self.endRubberBand().toRect()\n\n # check if the user selected anything\n if (rubberBandRect):\n items = self.items(rubberBandRect)\n\n # filter the selected items\n items = self.filterSelectedItems(items)\n\n # If we're handling selections deal with the selection states of our marks\n if self.doSelections:\n\n for item in self.selectedItems:\n item.setSelected(False)\n for item in items:\n item.setSelected(True)\n self.selectedItems = items\n\n # call the emit method - we don't directly emit here in case a child class\n # wants to transform the data before emitting it.\n self.emitRubberbandSelection(rubberBandRect, items)\n\n else:\n # This event isn't handled by automatically - emit a release event\n clickLocation = self.mapToScene(ev.pos())\n\n # do a \"sloppy selection\" and return all items that intersect our\n # selection rectangle. 
The selection rectangle is set by calling\n # the setSelectionRadius method.\n\n # move our selection rectangle into place - depending on the size of\n # the selection area, this may not be centered on the click location\n areaLoc = ev.pos() - self.selectionRadius\n self.selectionArea.moveTo(areaLoc)\n\n # check if the user clicked on anything - this will return a list of\n # items that intersect the selection rectangle.\n items = self.items(self.selectionArea)\n\n # filter the selection so we only return marks or text not associated\n # with a mark.\n items = self.filterSelectedItems(items)\n\n # call the emit method - we don't directly emit here in case a child class\n # wants to transform the data before emitting it.\n self.emitReleaseEvent(clickLocation, ev.button(), self.currentKbKey, items)", "def trackMouse(self, mouse_event):\n if self.kinect.DepthFrameRaw.any() != 0:\n u = mouse_event.pos().x()\n v = mouse_event.pos().y()\n d = self.kinect.DepthFrameRaw[v,u]\n self.ui.rdoutMousePixels.setText(\"(\"+str(u)+\",\"+str(v)+\",\"+str(d)+\")\")\n worldCoords = self.kinect.pix2Glob(np.array([u,v,1])) * 1000 #1000 for mm\n self.ui.rdoutMouseWorld.setText(f\"({np.round(worldCoords[0])},{np.round(worldCoords[1])},{np.round(worldCoords[2])})\")", "def on_mouse_click(self, event):\n if not self.is_game_over:\n try:\n # i, j coordinates of the click event\n i = int(round(event.ydata))\n j = int(round(event.xdata))\n\n # Left button\n if event.button == 1 or event.button == 2:\n self.reveal(i, j)\n\n # Right button\n elif event.button == 3:\n self.flag(i, j)\n\n except (TypeError, IndexError):\n pass", "def figure_mouse_press(self, event):\n \n # Add/remove an additional point?\n if event.dblclick:\n\n if event.button == 1:\n # Add a point.\n points = np.vstack([\n self.ax_order.collections[0].get_offsets(),\n [event.xdata, event.ydata]\n ])\n # TODO: set size by their weight?\n self.ax_order.collections[0].set_offsets(points)\n\n else:\n # Are we within <tolerance of a point?\n points = self.ax_order.collections[0].get_offsets()\n\n # Need to scale x-distance to convert to pixels.\n idx = self.current_order.dispersion.searchsorted(event.xdata)\n xscale = np.nanmean(\n np.diff(self.current_order.dispersion[idx-5:idx+5]))\n\n \"\"\"\n bbox = self.ax_order.get_window_extent().transformed(\n self.norm_plot.dpi_scale_trans.inverted())\n width = bbox.width * self.norm_plot.dpi\n height = bbox.height * self.norm_plot.dpi\n print(width, height)\n \"\"\"\n # TODO: Fix this distance thing.\n\n distance = np.sqrt(\n ((points[:, 0] - event.xdata)/xscale)**2 \\\n + (points[:, 1] - event.ydata)**2)\n \n if distance.size > 0:\n\n index = np.argmin(distance)\n if distance[index] < PIXEL_PICKER_TOLERANCE:\n # Remove that point.\n keep = np.ones(points.shape[0], dtype=bool)\n keep[index] = False\n self.ax_order.collections[0].set_offsets(points[keep])\n\n else:\n print(\"Closest point {} px away\".format(distance[index]))\n\n # Update the cache.\n idx = self.current_order_index\n N = points.shape[0]\n # TODO: adhere to the knot weights\n self._cache[\"input\"][\"additional_points\"] \\\n = np.hstack((points, 100 * np.ones(N).reshape((N, 1))))\n self.fit_continuum(clobber=True)\n self.draw_continuum(refresh=True)\n\n return None\n \n if event.button != 1: return None\n # Single click.\n # Set up/update the excluded region.\n xmin, xmax, ymin, ymax = (event.xdata, np.nan, -1e8, +1e8)\n try:\n self._exclude_selected_region\n except AttributeError:\n self._exclude_selected_region = self.ax_order.axvspan(**{\n 
\"xmin\": xmin,\n \"xmax\": xmax,\n \"ymin\": ymin,\n \"ymax\": ymax,\n \"facecolor\": \"r\",\n \"edgecolor\": \"none\",\n \"alpha\": 0.25,\n \"zorder\": -1\n })\n\n else:\n self._exclude_selected_region.set_xy([\n [xmin, ymin],\n [xmin, ymax],\n [xmax, ymax],\n [xmax, ymin],\n [xmin, ymin]\n ])\n\n # Set the signal and the time.\n self._exclude_selected_region_signal = (\n time(),\n self.norm_plot.mpl_connect(\n \"motion_notify_event\", self.update_exclude_selected_region)\n )\n return None", "def _on_click(self, event, axis):\n if event.inaxes is self._figs[axis].axes[0]:\n # Data coordinates are voxel coordinates\n pos = (event.xdata, event.ydata)\n logger.info(f'Clicked {\"XYZ\"[axis]} ({axis}) axis at pos {pos}')\n xyz = self._vox\n xyz[list(self._xy_idx[axis])] = pos\n logger.debug(f'Using voxel {list(xyz)}')\n ras = apply_trans(self._vox_ras_t, xyz)\n self._set_ras(ras)", "def _press(self, event):\n # make the drawn box/line visible get the click-coordinates,\n # button, ...\n if self._interactive and self._selection_artist.get_visible():\n self._set_active_handle(event)\n else:\n self._active_handle = None\n\n if ((self._active_handle is None or not self._interactive) and\n self._allow_creation):\n # Clear previous rectangle before drawing new rectangle.\n self.update()\n\n if (self._active_handle is None and not self.ignore_event_outside and\n self._allow_creation):\n x = event.xdata\n y = event.ydata\n self._visible = False\n self.extents = x, x, y, y\n self._visible = True\n else:\n self.set_visible(True)\n\n self._extents_on_press = self.extents\n self._rotation_on_press = self._rotation\n self._set_aspect_ratio_correction()\n\n return False", "def __on_click(self,event, x, y, p1, p2): \r\n \r\n # global variables of the class with mouse click position\r\n global mouse_click_pos, mouse_click_list \r\n \r\n mouse_click_list = []\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n \r\n mouse_click_pos = (x,y)\r\n print(mouse_click_pos)\r\n mouse_click_list.append((x, y))", "def handle_mouse(self, x, y):\n self.x = x\n self.y = y\n global _pending_handle_mouse\n if not _pending_handle_mouse:\n _pending_handle_mouse = True\n if self.fig.document is not None:\n self.fig.document.add_timeout_callback(self.handle_mouse_callback, 100)\n else:\n self.handle_mouse_callback()", "def handleClick(self, event):\n\n # filter for events inside image:\n pos = event.pos()\n mappedPos = self.img.mapFromScene(pos)\n xmp = int(mappedPos.x())\n ymp = int(mappedPos.y())\n\n if xmp < 0 or \\\n xmp > self.dat3d.shape[1] or \\\n ymp < 0 or \\\n ymp > self.dat3d.shape[0]:\n return # discard click events originating outside the image\n\n pw = pqg.plot(self.elist, self.dat3d[ymp, xmp, :], title=\"LEEM-I(V)\")\n pw.setLabel('bottom', 'Energy', units='eV')\n pw.setLabel('left', 'Intensity', units='a.u.')\n pw.show()" ]
[ "0.6058535", "0.5994052", "0.58561856", "0.57993245", "0.5710567", "0.5700471", "0.569974", "0.5697816", "0.56608105", "0.5551457", "0.55488133", "0.554185", "0.55417347", "0.55244434", "0.55154103", "0.55042386", "0.5499609", "0.54935616", "0.54893976", "0.546768", "0.54452795", "0.5437356", "0.5436176", "0.54007566", "0.53908575", "0.53703904", "0.533432", "0.5321721", "0.5321171", "0.5304405" ]
0.6938923
0
Uses self stress and family context to incur in probability of becoming violent
def trigger_violence(self):
        # First time offender get registered in the system and changes category into an Aggressor and a Victim
        if self.assaulted == 0:
            if self.stress > self.random.random():
                self.category = 'aggressor'
                self.assaulted += 1
                self.spouse.category = 'victim'
                self.spouse.got_attacked += 1
        # Second-time offender, checks to see if it is a recidivist.
        elif self.stress > self.random.random():
            self.assaulted += 1
            self.spouse.got_attacked += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_td_self_energy_active(self):\n self.distribute_workload()\n self.self_energy_T = self.sum_qpt_function('get_td_self_energy_active')", "def sing(self):\n if self._energy < self._sing_cost:\n return\n\n self._energy = self._energy - self._sing_cost\n self._env.simulate()", "def p(self) -> Probability:\n ...", "def testPsychStress(self):\n attr = self.session.create_visit_attr()\n\n self.util.intTypeTest(self, attr, \"stress\")\n\n self.util.intPropertyTest(self, attr, \"stress\")", "def fear_conditioning(amygdala, stim_fear, stim_ext, ctx_fear, ctx_ext): \n amygdala.switch_context(200, 40, ctx_fear)\n\n for i in range(5):\n amygdala.run(100*ms, report='stdout')\n amygdala.present_stimulus(stim_fear, stim_ext, report='stdout')\n\n amygdala.run(100*ms)\n\n amygdala.switch_context(ctx_ext)\n\n for i in range(5):\n amygdala.run(100*ms, report='stdout')\n amygdala.present_stimulus(stim_ext, stim_fear, report='stdout')\n\n amygdala.run(100*ms)", "def weight(self):", "def simulationTwoDrugsDelayedTreatment():\n\n # TODO", "def compute_td_self_energy(self):\n self.distribute_workload()\n self.self_energy_T = self.sum_qpt_function('get_td_self_energy')", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def __call__(self,sensation,reward=None):\n raise NYI", "def _infect(self, inf: Person, sus: Person):\n # compute the distance between two Person objects\n r = dist(inf, sus)\n # make variable that can potentially be changed if someone is a super spreader\n r0 = self.rstart\n # if the susceptible is too far away from the infectious person\n if r > r0:\n return 0\n # in range of the infected person\n if inf.ss:\n return self.w0\n # return using the normal probability 
function if not a super spreader\n return self.w0 * (1 - r / r0) ** self.alpha", "def dealer_probs():\n # Pdf of any current hand (value, hard) and final value; p(v_f | v_c) where v_f = final value, v_c = current value\n probabilities = {}\n\n # End nodes: (value, True) for value >= 17 and (value, False) for value > 17\n # Dependencies (in order of increasing requirements):\n # Hard values, value >= 11, possiblity of bust, no possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value\n # Soft values, 17 >= value >= 11 (value, False) depends on (value', False) for 17 >= value' > value, (value', True) for 17 > value' > 11\n # Hard values, 11 > value >= 2 , no possibility of bust, possibility of going soft with an ace (value, True) depends on (value', True) for 17 > value' > value and (value', False) for 17 >= value' > 13\n\n\n # End nodes\n for value in xrange(17, 22):\n probabilities[(value, True)] = {value: 1.0}\n if value == 17: continue # on soft 17, dealer will still hit\n probabilities[(value, False)] = {value: 1.0}\n\n # Hard values, 17 > value >= 11, possibility of bust, no possibility of going soft with an ace\n for value in xrange(16, 10, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(1, min(10, 21-value)+1):\n next_prob = probabilities[(value + next_card, True)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Soft values, 17 >= value >= 11\n for value in xrange(17, 10, -1):\n probabilities[(value, False)] = {}\n current_prob = probabilities[(value, False)]\n for next_card in xrange(1, 11):\n next_value = value + next_card\n hard = False\n if next_value > 21:\n next_value -= 10\n hard = True\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n # Hard values, 11 > value >= 2, no possibility of bust, possibility of going soft with an ace\n for value in xrange(10, 1, -1):\n probabilities[(value, True)] = {}\n current_prob = probabilities[(value, True)]\n for next_card in xrange(2, 12):\n next_value = value + next_card\n hard = (next_card != 11)\n next_prob = probabilities[(next_value, hard)]\n for end_val in next_prob:\n current_prob[end_val] = current_prob.get(end_val, 0) + next_prob[end_val] * PROBABILITIES[next_card]\n\n return probabilities", "def fight(self):\n for j in range(self.num):\n for i in range(self.num):\n self.genepool[0][j].fight(self.genepool[1][i],self.gameOne)\n self.genepool[1][j].fight(self.genepool[0][i],self.gameTwo)\n for i in range(self.num):\n self.genepool[0][i].fitness /= self.num\n self.genepool[1][i].fitness /= self.num", "def test_negative_silhouette_score(self):\n raise NotImplementedError(\"no negative silhouette example available\")", "def E_step_precompute(self, model_params, my_suff_stat, my_data):", "def fight(self):\n for j in range(self.num):\n for i in range(self.num):\n self.genepool[0][j].fight(self.genepool[1][i],self.gameOne,self.len)\n self.genepool[1][j].fight(self.genepool[0][i],self.gameTwo,self.len)\n for i in range(self.num):\n self.genepool[0][i].fitness /= self.num\n self.genepool[1][i].fitness /= self.num", "def penalty(self):\n return 0", "def get_shocks(self):\r\n \r\n \r\n '''\r\n \r\n if self.jacW == True:\r\n \r\n if self.t_sim == self.s:\r\n \r\n self.wage = .833333 + self.dx\r\n \r\n print(\"made it 
here\")\r\n \r\n else:\r\n \r\n self.wage = .833333\r\n \r\n \r\n PermShkDstn_U = Lognormal(np.log(self.mu_u) - (self.L*(self.PermShkStd[0])**2)/2 , self.L*self.PermShkStd[0] , 123).approx(self.PermShkCount) #Permanent Shock Distribution faced when unemployed\r\n PermShkDstn_E = MeanOneLogNormal( self.PermShkStd[0] , 123).approx(self.PermShkCount) #Permanent Shock Distribution faced when employed\r\n \r\n TranShkDstn_E = MeanOneLogNormal( self.TranShkStd[0],123).approx(self.TranShkCount)#Transitory Shock Distribution faced when employed\r\n TranShkDstn_E.X = (TranShkDstn_E.X *(1-self.tax_rate)*self.wage*self.N)/(1-self.UnempPrb)**2 #add wage, tax rate and labor supply\r\n \r\n lng = len(TranShkDstn_E.X )\r\n TranShkDstn_U = DiscreteDistribution(np.ones(lng)/lng, self.IncUnemp*np.ones(lng)) #Transitory Shock Distribution faced when unemployed\r\n \r\n IncShkDstn_E = combine_indep_dstns(PermShkDstn_E, TranShkDstn_E) # Income Distribution faced when Employed\r\n IncShkDstn_U = combine_indep_dstns(PermShkDstn_U,TranShkDstn_U) # Income Distribution faced when Unemployed\r\n \r\n #Combine Outcomes of both distributions\r\n X_0 = np.concatenate((IncShkDstn_E.X[0],IncShkDstn_U.X[0]))\r\n X_1=np.concatenate((IncShkDstn_E.X[1],IncShkDstn_U.X[1]))\r\n X_I = [X_0,X_1] #discrete distribution takes in a list of arrays\r\n \r\n #Combine pmf Arrays\r\n pmf_I = np.concatenate(((1-self.UnempPrb)*IncShkDstn_E.pmf, self.UnempPrb*IncShkDstn_U.pmf))\r\n \r\n IncShkDstn = [DiscreteDistribution(pmf_I, X_I)]\r\n \r\n self.IncShkDstn = IncShkDstn\r\n \r\n \r\n '''\r\n \r\n PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays\r\n TranShkNow = np.zeros(self.AgentCount)\r\n newborn = self.t_age == 0\r\n for t in range(self.T_cycle):\r\n these = t == self.t_cycle\r\n N = np.sum(these)\r\n if N > 0:\r\n IncShkDstnNow = self.IncShkDstn[\r\n t - 1\r\n ] # set current income distribution\r\n PermGroFacNow = self.PermGroFac[t - 1] # and permanent growth factor\r\n # Get random draws of income shocks from the discrete distribution\r\n IncShks = IncShkDstnNow.draw(N)\r\n\r\n PermShkNow[these] = (\r\n IncShks[0, :] * PermGroFacNow\r\n ) # permanent \"shock\" includes expected growth\r\n TranShkNow[these] = IncShks[1, :]\r\n \r\n # That procedure used the *last* period in the sequence for newborns, but that's not right\r\n # Redraw shocks for newborns, using the *first* period in the sequence. 
Approximation.\r\n N = np.sum(newborn)\r\n if N > 0:\r\n these = newborn\r\n IncShkDstnNow = self.IncShkDstn[0] # set current income distribution\r\n PermGroFacNow = self.PermGroFac[0] # and permanent growth factor\r\n\r\n # Get random draws of income shocks from the discrete distribution\r\n EventDraws = IncShkDstnNow.draw_events(N)\r\n PermShkNow[these] = (\r\n IncShkDstnNow.X[0][EventDraws] * PermGroFacNow\r\n ) # permanent \"shock\" includes expected growth\r\n TranShkNow[these] = IncShkDstnNow.X[1][EventDraws]\r\n # PermShkNow[newborn] = 1.0\r\n TranShkNow[newborn] = 1.0\r\n\r\n # Store the shocks in self\r\n self.EmpNow = np.ones(self.AgentCount, dtype=bool)\r\n self.EmpNow[TranShkNow == self.IncUnemp] = False\r\n self.shocks['PermShk'] = PermShkNow\r\n self.shocks['TranShk'] = TranShkNow", "def effective_stress(self):\n tau = self.deviatoric_stress_tensor()\n tu_xx = tau[0,0]\n tu_yy = tau[1,1]\n tu_zz = tau[2,2]\n tu_xy = tau[0,1]\n tu_xz = tau[0,2]\n tu_yz = tau[1,2]\n \n # Second invariant of the strain rate tensor squared\n taudot = 0.5 * (+ tu_xx**2 + tu_yy**2 + tu_zz**2) \\\n + tu_xy**2 + tu_xz**2 + tu_yz**2\n return taudot", "def fitness(self):\n pass", "def develop(self):\n config = self.person.cosmos.config\n # Update height\n if self.person.male:\n percentage_of_adult_height_attained = (\n config.male_percentage_of_eventual_height_at_age(age=self.person.age)\n )\n else: # Female\n percentage_of_adult_height_attained = (\n config.female_percentage_of_eventual_height_at_age(age=self.person.age)\n )\n self.height = percentage_of_adult_height_attained * self.adult_height\n # Calculate weight (by using BMI and new height)\n self.weight = (self.bmi/703.) * self.height**2\n # Evolve propensities according to their curves\n offset_from_typical_prime = config.typical_age_of_physical_peak - self.age_of_physical_peak\n age_to_fit_to_curve = self.person.age + offset_from_typical_prime\n self.coordination_propensity *= config.coordination_propensity_curve(age=age_to_fit_to_curve)\n self.reflexes_propensity = config.reflexes_propensity_curve(age=age_to_fit_to_curve)\n self.agility_propensity = config.agility_propensity_curve(age=age_to_fit_to_curve)\n self.jumping_propensity = config.jumping_propensity_curve(age=age_to_fit_to_curve)\n self.footspeed_propensity = config.footspeed_propensity_curve(age=age_to_fit_to_curve)\n # Determine coordination, which is correlated to BMI\n primitive_coordination = config.determine_primitive_coordination(bmi=self.bmi) if self.bmi > 24 else 1.0\n self.coordination = primitive_coordination * self.coordination_propensity\n # Determine reflexes, which is correlated to coordination\n primitive_reflexes = config.determine_primitive_reflexes(\n coordination=self.coordination, reflexes_propensity=self.reflexes_propensity\n )\n self.reflexes = config.determine_reflexes(primitive_reflexes=primitive_reflexes)\n # Determine agility, which is correlated to coordination and height (with 5'6 somewhat arbitrarily\n # being the ideal height for agility)\n primitive_agility = config.determine_primitive_agility(\n coordination=self.coordination, height=self.adult_height\n )\n self.agility = primitive_agility * self.agility_propensity\n # Determine jumping ability, which is correlated to coordination and height (with 6'6 somewhat\n # arbitrarily being the ideal height for jumping)\n primitive_jumping = config.determine_primitive_jumping(coordination=self.coordination, height=self.height)\n base_vertical = 
config.determine_base_vertical(primitive_jumping=primitive_jumping)\n self.vertical = base_vertical + self.jumping_propensity # Notice the plus sign\n self.vertical = config.clamp_vertical(vertical=self.vertical)\n # Determined vertical (max. height of jump) and vertical reach (how high they can reach while\n # standing flat on the ground)\n self.vertical_reach = config.determine_vertical_reach(height=self.height)\n # Determine footspeed, which is correlated to coordination and height (with 6'1 somewhat arbitrarily\n # being the ideal height for footspeed) -- we do this by generating a 60-yard dash time and then\n # dividing that by its 180 feet to get a full-speed second-per-foot time, which is used frequently\n # in the on-field simulation\n primitive_footspeed = config.determine_primitive_footspeed(coordination=self.coordination, height=self.height)\n self.full_speed_seconds_per_foot = config.determine_full_speed_seconds_per_foot(\n primitive_footspeed=primitive_footspeed, footspeed_propensity=self.footspeed_propensity\n )\n # Finally, full-speed feet per second isn't derived from self.full_speed_seconds_per_foot, because\n # it's a measure of full speed on the base paths, and so it assumes 20 feet of acceleration have\n # already occurred (JOR 03-28-16: I think)\n self.full_speed_feet_per_second = config.determine_full_speed_feet_per_second(\n primitive_footspeed=primitive_footspeed\n )", "def simulation_step(G, # NetworkX graph\n pos = None,\n kernel = 'weights',\n engagement_enforcement = 1.00,\n custom_kernel = None,\n WERE_multiplier = 10, \n oblivion = False, \n draw = False, \n show_attr = False): \n\n for n in G.nodes():\n \n \n #=================#\n # Oblivion option #\n #=================#\n \n # Oblivion and increasing engagement\n \n if oblivion == True:\n \n if G.nodes[n]['state'] == 'aware':\n\n # Calculate oblivion_probability for certain node (more aware neighbours - lower oblivion)\n # oblivion_prob - is random uniform, and\n # dependent on what percent of neighbour are aware\n \n \n aware = [d['state'] for i,d in G.nodes.data() if i in list(G.neighbors(n)) ].count('aware')\n # Unaware neighbours number\n unaware = len(list(G.neighbors(n)) ) - aware\n\n # Oblivion factor (percent of unaware actors)\n oblivion_factor = (unaware + 0.0001) / ( (aware + 0.0001) + (unaware + 0.0001) )\n\n # random factor\n random_factor = np.random.uniform(0, 1)\n\n # probability that actor will forget information, and will not be able to pass it down\n oblivion_prob = oblivion_factor * random_factor\n\n # Attempt to oblivion\n if np.random.uniform(0, 1) < oblivion_prob:\n G.nodes[n]['state'] = 'unaware'\n \n # increasing of engagement after oblivion\n G.nodes[n]['engagement'] = np.round(min(1, G.nodes[n]['engagement'] * engagement_enforcement), 6)\n\n \n #========#\n # Kernel #\n #========#\n # If node is still aware, it disseminate information\n\n if G.nodes[n]['state'] == 'aware':\n \n global neighbour\n for neighbour in G.neighbors(n):\n \n if G.nodes[neighbour]['state'] == 'unaware':\n \n #================#\n # Weights kernel #\n #================#\n \n if kernel == 'weights':\n prob_of_internalization = G[n][neighbour]['weight']\n \n #=============#\n # WERE kernel #\n #=============#\n # Weights-extraversion-receptiveness-engagement\n # kernel\n \n if kernel == 'WERE':\n \n # calculate prob_of_internalization\n prob_of_internalization = G[n][neighbour]['weight'] \\\n * G.nodes[neighbour]['receptiveness'] \\\n * G.nodes[neighbour]['engagement'] \\\n * G.nodes[n]['extraversion'] \\\n * 
WERE_multiplier\n \n \n #===============#\n # Custom kernel #\n #===============#\n \n if kernel == 'custom': \n prob_of_internalization = custom_kernel(n, neighbour)\n \n #============================#\n # Attempt to internalization #\n #============================#\n \n if np.random.uniform(0, 1) < prob_of_internalization:\n G.nodes[neighbour]['state'] = 'aware'\n \n #===================#\n # Engagement rising #\n #===================#\n # if node is aware, his engagement in information\n # topic may rise with given probability\n else:\n G.nodes[neighbour]['engagement'] = \\\n np.round(G.nodes[neighbour]['engagement'] * \\\n engagement_enforcement, 6)\n # reinforcing already informed actors\n\n \n #=======================#\n # Show nodes attributes #\n #=======================#\n \n # Show nodes attributes\n if show_attr == True:\n for (u, v) in G.nodes.data():\n print(u, v) \n \n #============#\n # Draw graph #\n #============#\n \n if draw == True:\n fig_01, ax_01 = plt.subplots() # enable to plot one by one\n # in separate windows\n dp.draw_graph(G, pos)\n\n\n return G", "def cv_reweighting(run):\n np.random.seed((run ** 5 + 1323002) % 123123) # np.random.seed() alternatively\n \n\n Xtr, Str, Xts, Yts = data_cache[dset]\n X_train, X_val, y_train, y_val = train_test_split(Xtr, Str, test_size=prop)\n\n # clf1 is the first classifier while clf2 is the second\n if dset == 2:\n clf1 = svm.SVC(C=2.5, gamma=0.000225, probability=True, max_iter=max_itera)\n else:\n clf1 = svm.SVC(gamma = 'scale',probability=True, max_iter=max_itera)\n if run == 1:\n print(\"learn initial probability dset:\", dset)\n clf1.fit(X_train, y_train)\n return clf1.score(Xts, Yts)\n if run == 1:\n print(\"calculating weighting dset:\", dset)\n\n probS = clf1.predict_proba(X_train)\n weights = estimateBeta(y_train, probS, 0.2, 0.4)\n\n for i in range(len(weights)):\n if weights[i] < 0:\n weights[i] = 0.0\n if run == 1:\n print(\"fit final model dset:\", dset)\n if dset == 2:\n clf2 = svm.SVC(gamma=0.000225, C=0.8, max_iter=max_itera)\n else:\n clf2 = svm.SVC(gamma=0.00865, C=.4, max_iter=max_itera)\n\n clf2.fit(X_train, y_train, sample_weight=weights)\n\n return clf2.score(Xts, Yts)", "def set_stress(self) -> None:\n\n c = self.cos()\n s = self.sin()\n transformation_matrix = np.array([-c, -s, c, s], dtype=np.float64)\n nodal_displacements = self.__get_arranged_nodal_displacements()\n self.__stress = (\n self.youngs_modulus # type: ignore\n / self.get_length()\n * (transformation_matrix @ nodal_displacements.T)\n )", "def preformance(self, perf):\n self.perf = perf", "def steel_stress(self,strain_dis, newFOS=None):\r\n\t\tstress = np.zeros(len(self.reinforcement))\r\n\t\tfor i,steel in enumerate(self.reinforcement):\r\n\t\t\tstrain = np.interp(steel[0], self.mesh_center,strain_dis)\r\n\t\t\tstress[i] = (self.steel(strain,newFOS)-self.concrete(strain,newFOS))\r\n\t\treturn stress", "def flowStress(f_hard,eps,d,q,a):\n\n pass", "def betterEvaluationFunction(currentGameState):\n \"*** YOUR CODE HERE ***\"\n \n newPos = currentGameState.getPacmanPosition()\n newFood = currentGameState.getFood()\n newGhostStates = currentGameState.getGhostStates()\n GhostLocs = currentGameState.getGhostPositions()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n capsuleLocations = currentGameState.getCapsules()\n Hueristic = 0.0\n \n if currentGameState.isWin():\n return 10000\n if currentGameState.isLose():\n return -10000\n\n FoodDistances = []\n foodLocations = newFood.asList()\n for food in 
foodLocations:\n FoodDistances.append(manhattanDistance(newPos,food))\n closestFood = min(FoodDistances)\n closestFoodLocation = foodLocations[FoodDistances.index(closestFood)]\n\n GhostsToMe = []\n GhostsToFood = []\n for ghost in GhostLocs:\n GhostsToMe.append(manhattanDistance(newPos,ghost))\n GhostsToFood.append(manhattanDistance(closestFoodLocation,ghost))\n closestGhostToMe = min(GhostsToMe)\n closestGhostToClosestFood = min(GhostsToFood)\n closestGhostLocation = GhostLocs[GhostsToMe.index(closestGhostToMe)]\n\n if newPos in currentGameState.getCapsules():\n capsule = 100\n else: \n capsule = 0\n \n if closestGhostToClosestFood < closestFood:\n if closestGhostToMe > 4:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n else:\n Hueristic = (-1/closestGhostToMe)*50\n else:\n Hueristic = (1.0/(closestFood+1.0))*20 - len(foodLocations)*50 - (1/closestGhostToMe)*5\n return Hueristic", "def example():\n n = 500\n cov_a = np.array([[3, 0], [0, 1]], dtype=np.dtype(float))\n cov_b = np.array([[1, 0], [0, 3]], dtype=np.dtype(float))\n mean_a = np.array([0.0, 0.0])\n mean_b = np.array([0.0, 0.0])\n\n target_model = {\n 'd': 2,\n 'parameters': [\n (mean_a, cov_a),\n (mean_b, cov_b),\n ],\n 'assignment': np.array([0, 0, 1, 1], dtype=np.dtype(int))\n }\n\n prior = {\n 'nu_0': 3,\n 'kappa_0': 1,\n 'mu_0': np.zeros(2),\n 'lambda_0': np.eye(2)\n }\n\n data_model = NormalInverseWishart(**prior)\n t = Teacher(target_model, data_model, 1.0, t_std=1, fast_niw=True)\n t.mh(n, burn=500, lag=20, plot_diagnostics=False)\n\n X_orig = np.vstack((np.random.multivariate_normal(mean_a, cov_a, n),\n np.random.multivariate_normal(mean_b, cov_b, n)))\n X_opt, _ = t.get_stacked_data()\n\n plt.figure(tight_layout=True, facecolor='white')\n plt.scatter(X_opt[:, 0], X_opt[:, 1], color='royalblue', alpha=.5,\n label='optimized')\n plt.scatter(X_orig[:, 0], X_orig[:, 1], color='crimson', alpha=.5,\n label='original')\n plt.legend(loc=0)\n plt.show()", "def optimize(self):\n\t\ts1,a1,r1,s2 = self.ram.agg_sample(self.batch_size)\n\n\t\ts1 = Variable(torch.from_numpy(s1))\n\t\ta1 = Variable(torch.from_numpy(a1))\n\t\tr1 = Variable(torch.from_numpy(r1))\n\t\ts2 = Variable(torch.from_numpy(s2))\n\n\t\tfor i in range(self.critic_step):\n\t\t\t# ---------------------- optimize critic ----------------------\n\t\t\t# Use target actor exploitation policy here for loss evaluation\n\t\t\t\n\t\t\t# a2 = self.target_actor.forward(s2).detach()\n\t\t\t# next_val = torch.squeeze(self.target_critic.forward(s2, a2).detach())\n\t\t\t\n\t\t\t# y_exp = r + gamma*Q'( s2, pi'(s2))\n\t\t\ty_expected = r1 #+ GAMMA*next_val\n\t\t\t# y_pred = Q( s1, a1)\n\t\t\ty_predicted = torch.squeeze(self.critic.forward(s1, a1))\n\t\t\t# compute critic loss, and update the critic\n\t\t\t#print(y_predicted,y_expected,\"hi\")\n\t\t\tloss_critic = F.smooth_l1_loss(y_predicted, y_expected.squeeze())\n\t\t\tself.critic_optimizer.zero_grad()\n\t\t\tloss_critic.backward()\n\t\t\tself.critic_optimizer.step()\n\n\t\t# ---------------------- optimize actor ----------------------\n\t\tpred_a1 = self.actor.forward(s1)\n\t\tloss_actor = -1*torch.sum(self.critic.forward(s1, pred_a1))\n\t\tself.actor_optimizer.zero_grad()\n\t\tloss_actor.backward()\n\t\tself.actor_optimizer.step()\n\n\t\tutils.soft_update(self.target_actor, self.actor, TAU)\n\t\tutils.soft_update(self.target_critic, self.critic, TAU)\n\n\t\t# if self.iter % 100 == 0:\n\t\tif self.batch_size > 1:\n\t\t\ty_1 = y_predicted.data.numpy()[0]\n\t\t\tr_1 = 
r1.data.numpy()[0]\n\t\telse:\n\t\t\ty_1 = y_predicted.data.numpy()\n\t\t\tr_1 = r1.data.numpy()\n\t\tprint ('Iteration :- ', self.iter, ' Loss_actor :- ', loss_actor.data.numpy(),\\\n\t\t\t' Loss_critic :- ', loss_critic.data.numpy(), ' Critic Pred Reward :- ', y_1, ' Actual Reward :- ', r_1)\n\t\tself.iter += 1" ]
[ "0.5673091", "0.5671478", "0.5643265", "0.5633148", "0.5629966", "0.5571182", "0.5550801", "0.5532556", "0.54866314", "0.5474761", "0.5474262", "0.5450669", "0.54457366", "0.5436073", "0.54114723", "0.5410417", "0.5398809", "0.5379679", "0.5364702", "0.53609514", "0.53539217", "0.5352994", "0.5349373", "0.53449684", "0.5339768", "0.53394544", "0.53335786", "0.53304017", "0.530874", "0.52972335" ]
0.60149026
0
Get a list of available GPU devices (formatted as strings). Returns: A list of available GPU devices.
def _get_available_gpus():
    global _LOCAL_DEVICES
    if _LOCAL_DEVICES is None:
        if _is_tf_1():
            devices = get_session().list_devices()
            _LOCAL_DEVICES = [x.name for x in devices]
        else:
            _LOCAL_DEVICES = tf.config.experimental_list_devices()
    return [x for x in _LOCAL_DEVICES if 'device:gpu' in x.lower()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def _get_available_gpus():\r\n #global _LOCAL_DEVICES\r\n if tfback._LOCAL_DEVICES is None:\r\n devices = tf.config.list_logical_devices()\r\n tfback._LOCAL_DEVICES = [x.name for x in devices]\r\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tf_back._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tf_back._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tf_back._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n # global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def _get_available_gpus():\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]", "def get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return [0]\n\n FNULL = open(os.devnull, 'w')\n\n available_devices = []\n for i in range(num_devices):\n try:\n if b\"NVIDIA\" in subprocess.check_output(\n [\"{}/test_device\".format(executable_path),\n str(i)], stderr=FNULL):\n available_devices.append(i)\n logging.info('Device {} is available for rendering'.format(i))\n except subprocess.CalledProcessError as e:\n logging.info(e)\n logging.info('Device {} is not available for rendering'.format(i))\n FNULL.close()\n\n return available_devices", "def get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == \"GPU\"]", "def gpu_devices(self):\n return self._gpu_devices", "def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()", "def try_all_gpus(): #@save\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]", "def getGpus():\n nvmlInit()\n gpu_list = []\n for i in range(0, nvmlDeviceGetCount()):\n handle = nvmlDeviceGetHandleByIndex(i)\n gpu_list.append(NvidiaGPU(handle))\n return gpu_list", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi 
--query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def get_available_devices(self):\n available_devices = []\n try:\n out = self.get_output(\"devices\")\n except Exception as e:\n logger.error(e)\n else:\n for line in out:\n device = self.parse_device_info(line)\n if device:\n available_devices.append(device)\n return available_devices", "def getDevices():\n devices = create_string_buffer(BUF_SIZE)\n daqmx(\n dll.DAQmxGetSysDevNames,\n (\n devices,\n BUF_SIZE\n )\n )\n return parseStringList(devices.value)", "def get_available_gpus() -> List[int]:\n orig_visible_devices = os.environ[f\"{CUDA_ENVVAR}\"]\n available_gpus = [int(g.strip()) for g in orig_visible_devices.split(\",\") if g and not g.isspace()]\n return available_gpus", "def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices", "def get_gpus():\n try:\n re = subprocess.check_output([\"nvidia-smi\", \"-L\"], universal_newlines=True)\n except OSError:\n return []\n return range(len([i for i in re.split('\\n') if 'GPU' in i]))", "def _get_gpu_pci_devices(self):\n pci_device_list = self._get_pci_devices()\n\n gpu_list = []\n items = pci_device_list['Items']\n for item in items:\n if item['ClassCode'] in CLASSCODE_FOR_GPU_DEVICES:\n if item['SubclassCode'] in SUBCLASSCODE_FOR_GPU_DEVICES:\n gpu_list.append(item)\n return gpu_list", "def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))", "def get_computation_devices(\n preferred_gpu_list: Optional[List[int]],\n multi_gpu_flag: bool,\n) -> List[Device]:\n\n # use CPU when GPUs are not preferred or not available\n if (preferred_gpu_list is None) \\\n or (len(preferred_gpu_list) == 0) \\\n or (not torch.cuda.is_available()):\n return [Device('cpu'), ]\n\n # else GPUs are preferred and available\n # get all available GPU indexes\n _available_gpu_list: List[int]\n if getAvailable:\n # by default, use GPU utility package with load and memory usage\n # specification so that the 'available' GPUs are actually ready\n # for deep learning runs (https://github.com/anderskm/gputil)\n _available_gpu_list = getAvailable(\n limit=_MAX_NUM_GPUS,\n maxLoad=_MAX_GPU_LOAD,\n maxMemory=_MAX_GPU_MEM_USED,\n )\n else:\n # assume all GPUs are good to use without GPUtil package\n _available_gpu_list = list(range(torch.cuda.device_count()))\n 
_warning_msg = \\\n f'GPUtil (https://github.com/anderskm/gputil) not installed.' \\\n f'Assuming all GPUs ({_available_gpu_list}) are available ' \\\n f'and ready for training ... '\n _LOGGER.warning(_warning_msg)\n\n # get the overlap between the preferred and the available GPUs\n _gpus = \\\n [_g for _g in _available_gpu_list if _g in preferred_gpu_list]\n\n # use CPU if there is no preferred GPUs that are available\n if len(_gpus) == 0:\n return [Device('cpu'), ]\n\n # otherwise return one or all GPUs depending on the multi-GPU flag\n return [Device(f'cuda:{_g}') for _g in _gpus] \\\n if multi_gpu_flag else [Device(f'cuda:{_gpus[0]}'), ]", "def get_devices():\n devices = []\n for path in hookenv.action_get('osd-devices').split(' '):\n path = path.strip()\n if not os.path.isabs(path):\n raise Error('{}: Not absolute path.'.format(path))\n devices.append(path)\n return devices", "def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})", "def get_available_devices(self):\n try:\n out = self.get_output(\"devices\")\n except BluetoothctlError, e:\n print(e)\n return None\n else:\n available_devices = []\n for line in out:\n device = self.parse_device_info(line)\n if device:\n available_devices.append(device)\n\n return available_devices", "def list_devices():\n return _lib.SeaTeaseAPI().list_devices()", "def get_test_devices():\n\n # Assumption: CPU is always available\n devices = ['cpu']\n\n if torch.cuda.is_available():\n devices.append('cuda')\n\n return devices", "def devices(self):\n\t\t\tdevices = []\n\t\t\tnum = cuda.Device.count()\n\t\t\tfor id in range(num):\n\t\t\t\tname = cuda.Device(id).name()\n\t\t\t\tmemory = cuda.Device(id).total_memory()\n\t\t\t\tdevices.append((memory, name, id))\n\t\t\treturn devices", "def devices() -> typing.List[str]:\n devices = sounddevice.query_devices()\n return [device['name'] for device in devices if device['max_output_channels'] > 0]", "def detect_gpus():\n def worker(q):\n # `device_lib` will not release the memory it took,\n # so we run it in a sub-process.\n try:\n from tensorflow.python.client import device_lib\n\n if is_tensorflow_version_higher_or_equal('1.8.0'):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n devices = list(device_lib.list_local_devices(config))\n else:\n devices = list(device_lib.list_local_devices())\n gpus = [\n (device.name, device)\n for device in devices\n if device.device_type == 'GPU'\n ]\n union_set = {i: i for i in range(len(gpus))}\n\n for i, (name, device) in enumerate(gpus):\n assert (device.name == '/device:GPU:{}'.format(i))\n for link in device.locality.links.link:\n if link.device_id != i:\n union_set[i] = union_set[link.device_id]\n\n for i in six.iterkeys(union_set):\n while union_set[i] != union_set[union_set[i]]:\n union_set[i] = union_set[union_set[i]]\n\n root_devices = sorted(set(union_set.values()))\n gpu_groups = [[] for _ in range(len(root_devices))]\n dev_to_group = {j: i for i, j in enumerate(root_devices)}\n for i, (name, device) in enumerate(gpus):\n gpu_groups[dev_to_group[union_set[i]]].append(name)\n\n q.put((1, gpu_groups))\n except Exception:\n q.put((0, traceback.format_exc()))\n\n q = mp.Queue()\n p = mp.Process(target=worker, args=(q,))\n\n try:\n p.start()\n result = q.get()\n if result[0] == 1:\n return result[1]\n else:\n raise RuntimeError(\n 'Failed to retrieve GPU information, the traceback of '\n 'sub-process is:\\n {}'.\n format('\\n '.join(result[1].split('\\n')))\n )\n finally:\n p.terminate()\n p.join()" ]
[ "0.7635497", "0.7591881", "0.75787866", "0.7576536", "0.7576536", "0.75703716", "0.7540326", "0.74637085", "0.74631166", "0.73232", "0.7317866", "0.7153969", "0.715106", "0.7149406", "0.7134455", "0.7124889", "0.700968", "0.69798607", "0.69466555", "0.6936653", "0.687559", "0.6859633", "0.6799362", "0.6792402", "0.6747831", "0.67350495", "0.67100066", "0.66021216", "0.66013795", "0.65942156" ]
0.76762456
0
Load faces into an array (N, M), where N is the number of face images and M is the dimensionality (height*width for greyscale).
def load_faces(path, ext=".pgm"):
    #
    # You code here
    #
    images = []
    img_shape = (0, 0)
    for root, dirs, files in os.walk(path):
        for file in files:
            if ext in file:  # check if file is of pgm-type
                img_path = os.path.join(root, file)
                img = plt.imread(img_path)  # Read the image
                img_shape = img.shape
                img = img.flatten()  # Transform 2D image into vector M = height x width
                images.append(img)
    img_array = np.asarray(images)
    return img_array, img_shape
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _open_images(training_filenames, path):\n imagePaths=[os.path.join(path,f) for f in training_filenames]\n faces=[]\n for i, imagePath in enumerate(imagePaths):\n faceImg=Image.open(imagePath).convert('L')\n faceNp=np.array(faceImg,'uint8')\n faces.append(faceNp)\n return faces", "def _get_data(path):\n archive = np.load(path)\n images = archive['faceData']\n return images", "def _load_known_faces(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [\n os.path.join(faces_dir, f) for f in os.listdir(faces_dir) \\\n if f.endswith('.jpeg') or f.endswith('.jpg') or f.endswith('.png')\n ]\n known_images = [face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n self.faces_names = [x.split('/')[-1].split('.')[0].replace('_', ' ').title() for x in faces]\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])", "def cfdReadFacesFile(self): \r\n\r\n with open(self.facesFile,\"r\") as fpid:\r\n print('Reading faces file ...')\r\n self.faceNodes=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n \r\n self.numberOfFaces = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\" \")\r\n tline=tline.replace(\")\",\"\")\r\n faceNodesi=[]\r\n for count, node in enumerate(tline.split()):\r\n if count == 0:\r\n continue\r\n #faceNodesi.append(int(node))\r\n else:\r\n faceNodesi.append(float(node))\r\n \r\n self.faceNodes.append(faceNodesi)\r\n \r\n ## (array) with the nodes for each face\r\n self.faceNodes=np.asarray(self.faceNodes)\r\n print(self.faceNodes)", "def detectFaces_allFiles(directory):\n files_list = glob.glob(directory)\n for file in files_list:\n print(file)\n img = cv2.imread(file)\n if img is not None:\n height, width, channel = img.shape\n faces = face_cascade.detectMultiScale(img, 1.3, 5)\n age = find_age(file)\n\n if age >= 0:\n if faces != ():\n for (x, y, w, h) in faces:\n # On decale y vers le haut pour mieux centrer le visage\n if y - int(0.1*h) >= 0:\n y -= int(0.1*h)\n h *= 1.2\n else:\n h += y + int(0.1*h)\n y = 0\n if h > width:\n h = width\n # A partir de l'origine du visage (point en haut a gauche), on definit\n # notre carre, de cote le nouveau h\n if x + 0.8*h > width:\n x_right = width\n x_left = width - int(h)\n elif x - 0.2*h < 0:\n x_left = 0\n x_right = int(h)\n else:\n x_right = min(int(x) + int(0.8*h), int(width))\n x_left = int(x_right) - int(h)\n y_top = int(y)\n y_bottom = int(y) + int(h)\n roi_color = img[y_top:y_bottom, x_left:x_right]\n cv2.imwrite(\"./FacePhoto/{}.jpg\".format(extract_filename(file)), resize_image(roi_color, 227))\n else:\n files_list.remove(file)\n else:\n files_list.remove(file)\n cv2.destroyAllWindows()\n return files_list", "def detect_faces(self, img):\n with tf.Graph().as_default():\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=.7)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n with sess.as_default():\n pnet, rnet, onet = detect_face.create_mtcnn(sess, None)\n\n minsize = 20 # minimum size of face\n threshold = [ 0.6, 0.7, 0.7 ] # 
three steps's threshold\n factor = 0.709 # scale factor\n\n bounding_boxes, _ = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n\n nrof_faces = bounding_boxes.shape[0]\n img_size = np.asarray(img.shape)[0:2]\n\n faces = []\n faces_rects = []\n\n for i in range(nrof_faces):\n det = bounding_boxes[i,0:4]\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0]-5/2, 0)\n bb[1] = np.maximum(det[1]-5/2, 0)\n bb[2] = np.minimum(det[2]+5/2, img_size[1])\n bb[3] = np.minimum(det[3]+5/2, img_size[0])\n faces.append(img[bb[1]:bb[3], bb[0]:bb[2], :])\n faces_rects.append({'name': 'none', 'x': bb[0], 'y': bb[1], 'w': bb[2]-bb[0], 'h': bb[3]-bb[1]})\n\n return [img, faces, faces_rects]", "def _load_known_face(self):\n faces_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'faces')\n faces = [os.path.join(faces_dir, f) for f in os.listdir(faces_dir) if f.endswith('.jpeg')]\n known_images = [face_recognition.load_image_file(i) for i in faces]\n self.known_faces = []\n for image in known_images:\n encoding = face_recognition.face_encodings(image)\n if len(encoding) > 0:\n logging.debug('Adding known face')\n self.known_faces.append(encoding[0])", "def load_faces(file_data, headers, indices):\n\n\n def swap_winding(indices):\n return (indices[0], indices[2], indices[1])\n \n\n def indices_from_face(face_data):\n base_vertex = face_data[3]\n base_index = face_data[5]\n index_count = face_data[6]\n\n faces_indices = [base_vertex + indices[base_index + current_index] \n for current_index in range(index_count)]\n\n #Split into lists of 3 - ie triangles\n faces = []\n for current_face_idx in range(0, len(faces_indices), 3):\n faces.append(faces_indices[current_face_idx:current_face_idx+3])\n\n return faces\n\n\n def face_from_pack(face_data):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n triangle_list = indices_from_face(face_data)\n return [(face_data[0], triangles,) for triangles in triangle_list]\n\n face_offset, face_length = headers[13]\n face_chunk = Struct(\"iiiiiiii2i2i3f3f3f3f2i\") \n face_size = face_chunk.size\n face_count = int(face_length / face_size)\n\n faces = []\n\n for current_face_idx in range(face_count):\n face_file_position = face_offset + current_face_idx * face_size\n current_face = face_chunk.unpack(file_data[face_file_position : face_file_position+face_size])\n\n #Check we are a valid face (Could use a filter later)\n if current_face[2] != 1: continue #Only support meshes at the moment\n\n new_faces = face_from_pack(current_face)\n faces.extend(new_faces)\n\n return faces", "def find_all_faces_in_multiple_img(img_dir_path, detector, img_size, dst_path):\r\n\r\n number_of_faces_already_found = count_files_in_one_directory(dst_path)\r\n print('number_of_faces_already_found', number_of_faces_already_found)\r\n for filename in os.listdir(img_dir_path):\r\n try:\r\n # When the user decides to add new data to existing one, no need to deal again (refind faces) with the old data\r\n if list(map(int, re.findall(r'\\d+', filename)))[0] < number_of_faces_already_found:\r\n continue\r\n img_path = os.path.join(img_dir_path, filename)\r\n print(filename + ': IN PROGRESS')\r\n detection_status = find_all_faces_in_one_img(\r\n img_path, detector, img_size, dst_path)\r\n print(filename + ': {}\\n'.format(detection_status.upper()))\r\n except:\r\n continue", "def get_faces_loaders(batch_size=128, test=True, data_path=\"./data/\"):\n\n dat = np.load(data_path + \"rotated_faces_data.npz\")\n train_images = 
torch.FloatTensor(dat['train_images'])\n train_targets = torch.FloatTensor(dat['train_angles'])\n\n traindata = torch.utils.data.TensorDataset(train_images, train_targets)\n trainloader = torch.utils.data.DataLoader(traindata, batch_size=batch_size,\n shuffle=True)\n\n if test:\n test_images = torch.FloatTensor(dat['test_images'])\n test_targets = torch.FloatTensor(dat['test_angles'])\n\n testdata = torch.utils.data.TensorDataset(test_images, test_targets)\n testloader = torch.utils.data.DataLoader(testdata, batch_size=batch_size)\n\n return trainloader, testloader\n\n return trainloader", "def face_detector(img):\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # faceCascade imports in the previously made classifier\n faceCascade = cv2.CascadeClassifier('src/face_detection/haarcascade_frontalface_default.xml')\n faces = faceCascade.detectMultiScale(\n gray, \n scaleFactor=1.2,\n minNeighbors=1, \n minSize=(100, 100)\n )\n\n return faces", "def graphics_get_faces(self, nfaces, ndim):\n faces = _pychidg.f90wrap_graphics_get_faces(self=self._handle, nfaces=nfaces, \\\n ndim=ndim)\n return faces", "def read_from_dir(dir, max_size=180):\n\tfilenames = os.listdir(dir)\n\timg_list, min_size = [], sys.maxsize\n\timg_list2 = []\n\n\tfor filename in filenames:\n\t\t# read all the images under directory\n\t\tif os.path.join(DIR,filename) == './pic/.DS_Store':\n\t\t\tcontinue\n\t\timg = cv2.imread(os.path.join(DIR,filename))\n\t\timg = face_detection(img)\n\t\timg_list.append(img)\n\t\tmin_size = min(img.shape[0], min_size)\n\n\t# set the max sub-image size to load into the big picture\n\tmin_size = min(max_size, min_size)\n\tfor img in img_list:\n\t\tshrink = cv2.resize(img, (min_size, min_size), interpolation=cv2.INTER_AREA)\n\t\timg_list2.append(shrink)\n\t\tprint(img.shape)\n\t\tprint(shrink.shape)\n\t\t# cv2.imshow('a', shrink)\n\t\t# cv2.waitKey(1000)\n\treturn img_list2", "def detect_face(self, img, img_file_path=None):\n #use dlib face detector\n #create dlib detector, this is hog with svm\n detector = dlib.get_frontal_face_detector()\n #win = dlib.image_window()\n if img_file_path:\n img = dlib.load_rgb_image(img_file_path)\n #detect number of faces in an image\n dets = detector(img)\n list_face_coord = [] # this will store left, top, right, bottom\n for i, d in enumerate(dets):\n list_face_coord.append((d.left(), d.top(), d.right(), d.bottom()))\n return list_face_coord", "def detect_face(gray):\r\n face_cascade = cv2.CascadeClassifier(classifier_file_name)\r\n faces = face_cascade.detectMultiScale(gray, scaleFactor=scale_factor,minNeighbors=min_neighbors,minSize=min_size,flags=flags)\r\n return faces", "def extract_faces(self, img, list_face_coord):\n #from the img array extract the facees\n list_faces = []\n #Go through each face coordinates and store the array\n #or clip face region in the list\n for i, coord in enumerate(list_face_coord):\n left, top, right, bottom = coord\n list_faces.append(img[top:bottom, left:right])\n return list_faces", "def find_all_faces_in_one_img(img_path, detector, img_size, dst_path):\r\n\r\n img_path = img_path.replace('\\\\', '/')\r\n img_name = img_path.split(sep='/')[-1].split(sep='.')[0]\r\n\r\n assert img_path.split(sep='/')[-1].split(sep='.')[1] in [\r\n 'png', 'jpg'], 'files should be images with a \".jpg\" or \".png\" extension !'\r\n\r\n img_extension = '.jpg'\r\n\r\n all_detected_faces, detection_status = faces_detector(\r\n img_path, detector, img_size, threshold_confidence=0.90)\r\n\r\n if detection_status == 'success':\r\n for faces in 
all_detected_faces:\r\n cv.imwrite(os.path.join(dst_path, img_name + img_extension), faces)\r\n elif detection_status == 'failure':\r\n os.remove(img_path)\r\n\r\n return detection_status", "def loading_faces(source_image_set_path, dest_image_set_path, source_image_set):\n dimensions_of_img = find_dimensions_not_attentive_imgs\n if 'attentive' in dest_image_set_path:\n dimensions_of_img = find_dimensions_attentive_imgs\n for image_name in source_image_set:\n\n # loading gray image\n gray_image = cv2.imread(source_image_set_path + \"/\" + image_name, 0)\n\n # find co-ordinates of faces in images\n y1, x2, y2, x1 = dimensions_of_img(*face_recognition.face_locations(gray_image)[0], np.shape(gray_image))\n\n # crop image and resize to particular dimension\n crop_img = gray_image[y1:y2, x1:x2]\n resize_crop_img = cv2.resize(crop_img, (int(dimension[0:3]), int(dimension[0:3])))\n\n # load images from source to destination directory\n cv2.imwrite(dest_image_set_path + \"/\" + image_name, resize_crop_img)", "def read_images(path, sz=None, cr=None):\n c = 0\n X,y = [], []\n for dirname, dirnames, filenames in os.walk(path):\n for subdirname in dirnames:\n subject_path = os.path.join(dirname, subdirname)\n for filename in os.listdir(subject_path):\n\n if filename.endswith('.jpg'):\n try:\n im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)\n #print os.path.join(subject_path, filename)\n # crop the image on the face\n if (cr is not None):\n rect, img = detect(im)\n if len(rect) == 0:\n return [None,None]\n im = img[rect[0][1]:rect[0][3], rect[0][0]:rect[0][2]]\n \n #im = Image.fromarray(img)\n # resize to given size (if given)\n if (sz is not None):\n #print im, sz\n im = cv2.resize(im, sz)\n cv2.imwrite('../data_pictures/prova'+str(c)+'.jpg',im)\n X.append(np.asarray(im, dtype=np.uint8))\n y.append(c)\n except IOError, (errno, strerror):\n print \"I/O error({0}): {1}\".format(errno, strerror)\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise\n\n\n c = c+1\n return [X,y]", "def extract_faces(image_path: str, pk: int):\n image = Image.open(image_path)\n image = np.array(image)\n\n if image.shape[0] <= 0 or image.shape[1] <= 0:\n return None\n\n import mtcnn\n\n # detect faces from image\n face_detector = mtcnn.MTCNN()\n detections = face_detector.detect_faces(image)\n\n if len(detections) < 1:\n return None\n\n from deepface.basemodels.Facenet import InceptionResNetV2\n\n # load InceptionResNet model provided by deepface\n facenet_model = InceptionResNetV2()\n facenet_model.load_weights(get_weights(\"facenet\"))\n\n # normalize faces and get embeddings\n faces = [normalize_face(image, face) for face in detections]\n embeddings = facenet_model.predict(np.vstack(faces), batch_size=len(faces))\n\n for i in range(len(faces)):\n person_id = recognize_person(embeddings[i])\n print(person_id, flush=True)\n face_obj = models.Face.objects.create(\n confidence=detections[i]['confidence'],\n left=detections[i]['box'][0],\n top=detections[i]['box'][1],\n width=detections[i]['box'][2],\n height=detections[i]['box'][3],\n photo_id=pk,\n person_id=person_id\n )\n\n save_embeddings(embeddings[i], face_obj.id, person_id)", "def imageFileProcessor(path):\n # Show all files in RawCapturedPicture\n # ..., and get the completed path files\n img_paths = []\n for ea in o_tl.showAllFiles(path):\n img_paths.append(os.path.join(path, ea))\n\n # Empty face list\n faces = []\n # Empty ID list\n IDs = []\n\n # Looping through all the image paths and loading the IDs and the faces\n for 
each_path in img_paths:\n # Loading the image and converting it to gray scale\n pil_img = Image.open(each_path).convert('L')\n # Converting the PIL image into numpy array\n image_numpy = np.array(pil_img, 'uint8')\n # Getting the Id from the image\n Id = int(os.path.split(each_path)[-1].split(\"_\")[1])\n # Extract the face from the training image sample\n faces.append(image_numpy)\n IDs.append(Id)\n return faces, IDs", "def read_known_faces():\n known_face_encodings = []\n known_face_names = []\n\n for file_name in glob.glob(DATASET_FOLDER + \"/*.jpg\"):\n face_encoding = read_face_encoding(file_name)\n\n known_face_encodings.append(face_encoding)\n\n name = file_name.split('.jpg')[0].split('/')[-1]\n if len(name.split('_')) != 2:\n raise Exception(\"\\n\\nERROR: file \\'\" + file_name + \"\\' has incorrect name\\n\\n\")\n\n known_face_names.append(name)\n\n return known_face_encodings, known_face_names", "def load_svhn_images(folder_path):\n images = []\n for file in os.listdir(folder_path):\n if file.endswith(\".png\"):\n image = Image.open(file)\n image.load()\n # Load image data as 1 dimensional array\n # We're using float32 to save on memory space\n feature = np.array(image, dtype=np.float32)\n images.append(feature)\n\n return images", "def _get_images_and_labels(self, path: str, user_id: int):\n\n image_paths = [os.path.join(path, f) for f in os.listdir(path)]\n face_samples = []\n ids = []\n\n for imagePath in image_paths:\n\n pil_image = Image.open(imagePath).convert('L') # convert it to grayscale\n img_numpy = np.array(pil_image, 'uint8')\n\n faces = self.detector.detectMultiScale(img_numpy)\n\n for (x, y, w, h) in faces:\n face_samples.append(img_numpy[y:y + h, x:x + w])\n ids.append(user_id)\n\n return face_samples, ids", "def load_actors_embeddings(dataset_path):\n embeddings = []\n actors = []\n for celebrity in os.listdir(dataset_path):\n cel_path = os.path.join(dataset_path, celebrity)\n for filename in os.listdir(cel_path):\n if filename[-3:] == \"npy\":\n embedding = np.load(os.path.join(cel_path, filename))\n actors.append(celebrity)\n embeddings.append(embedding)\n embeddings = np.array(embeddings)\n return actors, embeddings", "def extract_faces(input_path, output_path=None, save=False, show=False):\r\n \r\n img = cv2.imread(input_path)\r\n gray = pp.grayscale(img);\r\n \r\n face_cascade = cv2.CascadeClassifier('/Haar_Filters/haarcascade_frontalface_default.xml')\r\n faces = face_cascade.detectMultiScale(gray,1.05,4)\r\n \r\n if save:\r\n for i,(x,y,w,h) in enumerate(faces):\r\n face = img[y:y+h, x:x+w]\r\n if len(face_cascade.detectMultiScale(face,1.05,6)) == 1:\r\n cv2.imwrite(output_path+'/'+str(i)+'.jpeg', face)\r\n \r\n if show:\r\n for i,(x,y,w,h) in enumerate(faces):\r\n face = img[y:y+h, x:x+w]\r\n if len(face_cascade.detectMultiScale(face,1.05,6)) == 1:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),3)\r\n else:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)\r\n pp.show_image(pp.resize_image(img, 1000))", "def load_dataset_into_numpy_array(img_path, mode=\"int32\"):\n files = os.listdir(img_path)\n result = np.asarray([])\n for file in files:\n result = np.concatenate(result, load_image_into_numpy_array(img_path + \"/\" + file, mode).reshape((-1, 1)))\n return result", "def ssd_face_detection(image):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image_np_expanded = np.expand_dims(image, axis=0)\n\n (boxes, scores, classes) = FaceDetectorModel().sess.run(\n [FaceDetectorModel().boxes, FaceDetectorModel().scores, 
FaceDetectorModel().classes],feed_dict={FaceDetectorModel().image_tensor: image_np_expanded})\n\n face_list = vis_util.visualize_boxes_and_labels_on_image_array(\n image,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4)\n\n return face_list", "def detectFaces():\n faceEngine = VLFaceEngine()\n detector = faceEngine.createFaceDetector(DetectorType.FACE_DET_V3)\n\n imageWithOneFace = VLImage.load(filename=EXAMPLE_O)\n pprint.pprint(detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False).asDict())\n detection = detector.detectOne(imageWithOneFace, detect5Landmarks=False, detect68Landmarks=False)\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection))\n pprint.pprint(detector.redetectOne(image=imageWithOneFace, bBox=detection.boundingBox.rect))\n\n imageWithSeveralFaces = VLImage.load(filename=EXAMPLE_SEVERAL_FACES)\n severalFaces = detector.detect([imageWithSeveralFaces], detect5Landmarks=False, detect68Landmarks=False)\n\n pprint.pprint(\n detector.redetect(\n images=[\n ImageForRedetection(imageWithSeveralFaces, [face.boundingBox.rect for face in severalFaces[0]]),\n ImageForRedetection(imageWithOneFace, [detection.boundingBox.rect]),\n ImageForRedetection(imageWithOneFace, [Rect(0, 0, 1, 1)]),\n ]\n )\n )", "def face(gray=False):\n import bz2\n import os\n with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:\n rawdata = f.read()\n data = bz2.decompress(rawdata)\n face = fromstring(data, dtype='uint8')\n face.shape = (768, 1024, 3)\n if gray is True:\n face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')\n return face" ]
[ "0.65888363", "0.6099295", "0.6054895", "0.6036122", "0.5994611", "0.5958421", "0.58676267", "0.58492595", "0.5841656", "0.58148503", "0.57781494", "0.57710123", "0.57406574", "0.5714294", "0.5692881", "0.5690731", "0.5687985", "0.56691706", "0.5647786", "0.5619585", "0.5616173", "0.5615363", "0.55508685", "0.55309993", "0.5492589", "0.54806113", "0.5413839", "0.538718", "0.53772676", "0.5357219" ]
0.6499972
1